problem_id (string, lengths 18-22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, lengths 13-58) | prompt (string, lengths 1.71k-9.01k) | golden_diff (string, lengths 151-4.94k) | verification_info (string, lengths 465-11.3k) | num_tokens_prompt (int64, 557-2.05k) | num_tokens_diff (int64, 48-1.02k) |
---|---|---|---|---|---|---|---|---|
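The records below follow the column layout above. As a quick orientation, the following sketch loads the records with the Hugging Face `datasets` library and pulls one row apart; the repo id is inferred from the `source` column and the `train` split name is an assumption, so adjust both if the data lives somewhere else.

```python
# Minimal loading sketch. The repo id and the "train" split are assumptions
# inferred from the `source` column above, not verified facts about hosting.
import json

from datasets import load_dataset  # pip install datasets

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"])         # e.g. "gh_patches_debug_2210"
print(row["in_source_id"])       # upstream repo/issue id, e.g. "ARM-DOE__ACT-673"
print(row["prompt"][:200])       # issue + partial code base + patch-format instructions
print(row["golden_diff"][:200])  # reference patch that resolves the issue

# verification_info repeats the golden diff, the issue text and the pre-patch
# file contents ("before_files"); it may be stored as a JSON string.
info = row["verification_info"]
if isinstance(info, str):
    info = json.loads(info)
print(sorted(info.keys()))
```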
gh_patches_debug_2210 | rasdani/github-patches | git_diff | ARM-DOE__ACT-673 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feedstock failing due to pandas datetime
### Description
CI is failing due to datetime units not being set for csv reader
### What I Did
See the PR here that was failing
https://github.com/conda-forge/act-atmos-feedstock/pull/63
</issue>
<code>
[start of act/io/csvfiles.py]
1 """
2 This module contains I/O operations for loading csv files.
3
4 """
5
6 import pathlib
7
8 import pandas as pd
9
10 from .armfiles import check_arm_standards
11
12
13 def read_csv(filename, sep=',', engine='python', column_names=None, skipfooter=0, ignore_index=True, **kwargs):
14
15 """
16 Returns an `xarray.Dataset` with stored data and metadata from user-defined
17 query of CSV files.
18
19 Parameters
20 ----------
21 filenames : str or list
22 Name of file(s) to read.
23 sep : str
24 The separator between columns in the csv file.
25 column_names : list or None
26 The list of column names in the csv file.
27 verbose : bool
28 If true, will print if a file is not found.
29 ignore_index : bool
30 Keyword for pandas concat function. If True, do not use the index
31 values along the concatenation axis. The resulting axis will be labeled
32 0, …, n - 1. This is useful if you are concatenating datasets where the
33 concatenation axis does not have meaningful indexing information. Note
34 the index values on the other axes are still respected in the join.
35
36 Additional keyword arguments will be passed into pandas.read_csv.
37
38 Returns
39 -------
40 ds : xarray.Dataset
41 ACT Xarray dataset. Will be None if the file is not found.
42
43 Examples
44 --------
45 This example will load the example sounding data used for unit testing:
46
47 .. code-block:: python
48
49 import act
50
51 ds = act.io.csvfiles.read(act.tests.sample_files.EXAMPLE_CSV_WILDCARD)
52
53 """
54
55 # Convert to string if filename is a pathlib or not a list
56 if isinstance(filename, (pathlib.PurePath, str)):
57 filename = [str(filename)]
58
59 if isinstance(filename, list) and isinstance(filename[0], pathlib.PurePath):
60 filename = [str(ii) for ii in filename]
61
62 # Read data using pandas read_csv one file at a time and append to
63 # list. Then concatinate the list into one pandas dataframe.
64 li = []
65 for fl in filename:
66 df = pd.read_csv(
67 fl, sep=sep, names=column_names, skipfooter=skipfooter, engine=engine, **kwargs
68 )
69 li.append(df)
70
71 if len(li) == 1:
72 df = li[0]
73 else:
74 df = pd.concat(li, axis=0, ignore_index=ignore_index)
75
76 # Set Coordinates if there's a variable date_time
77 if 'date_time' in df:
78 df.date_time = df.date_time.astype('datetime64')
79 df.time = df.date_time
80 df = df.set_index('time')
81
82 # Convert to xarray DataSet
83 ds = df.to_xarray()
84
85 # Set additional variables
86 # Since we cannot assume a standard naming convention setting
87 # file_date and file_time to the first time in the file
88 x_coord = ds.coords.to_index().values[0]
89 if isinstance(x_coord, str):
90 x_coord_dt = pd.to_datetime(x_coord)
91 ds.attrs['_file_dates'] = x_coord_dt.strftime('%Y%m%d')
92 ds.attrs['_file_times'] = x_coord_dt.strftime('%H%M%S')
93
94 # Check for standard ARM datastream name, if none, assume the file is ARM
95 # standard format.
96 is_arm_file_flag = check_arm_standards(ds)
97 if is_arm_file_flag == 0:
98
99 ds.attrs['_datastream'] = '.'.join(filename[0].split('/')[-1].split('.')[0:2])
100
101 # Add additional attributes, site, standards flag, etc...
102 ds.attrs['_site'] = str(ds.attrs['_datastream'])[0:3]
103 ds.attrs['_arm_standards_flag'] = is_arm_file_flag
104
105 return ds
106
[end of act/io/csvfiles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/act/io/csvfiles.py b/act/io/csvfiles.py
--- a/act/io/csvfiles.py
+++ b/act/io/csvfiles.py
@@ -75,7 +75,7 @@
# Set Coordinates if there's a variable date_time
if 'date_time' in df:
- df.date_time = df.date_time.astype('datetime64')
+ df.date_time = df.date_time.astype('datetime64[ns]')
df.time = df.date_time
df = df.set_index('time')
| {"golden_diff": "diff --git a/act/io/csvfiles.py b/act/io/csvfiles.py\n--- a/act/io/csvfiles.py\n+++ b/act/io/csvfiles.py\n@@ -75,7 +75,7 @@\n \n # Set Coordinates if there's a variable date_time\n if 'date_time' in df:\n- df.date_time = df.date_time.astype('datetime64')\n+ df.date_time = df.date_time.astype('datetime64[ns]')\n df.time = df.date_time\n df = df.set_index('time')\n", "issue": "Feedstock failing due to pandas datetime\n### Description\r\nCI is failing due to datetime units not being set for csv reader\r\n\r\n### What I Did\r\n\r\nSee the PR here that was failing\r\nhttps://github.com/conda-forge/act-atmos-feedstock/pull/63\r\n\n", "before_files": [{"content": "\"\"\"\nThis module contains I/O operations for loading csv files.\n\n\"\"\"\n\nimport pathlib\n\nimport pandas as pd\n\nfrom .armfiles import check_arm_standards\n\n\ndef read_csv(filename, sep=',', engine='python', column_names=None, skipfooter=0, ignore_index=True, **kwargs):\n\n \"\"\"\n Returns an `xarray.Dataset` with stored data and metadata from user-defined\n query of CSV files.\n\n Parameters\n ----------\n filenames : str or list\n Name of file(s) to read.\n sep : str\n The separator between columns in the csv file.\n column_names : list or None\n The list of column names in the csv file.\n verbose : bool\n If true, will print if a file is not found.\n ignore_index : bool\n Keyword for pandas concat function. If True, do not use the index\n values along the concatenation axis. The resulting axis will be labeled\n 0, \u2026, n - 1. This is useful if you are concatenating datasets where the\n concatenation axis does not have meaningful indexing information. Note\n the index values on the other axes are still respected in the join.\n\n Additional keyword arguments will be passed into pandas.read_csv.\n\n Returns\n -------\n ds : xarray.Dataset\n ACT Xarray dataset. Will be None if the file is not found.\n\n Examples\n --------\n This example will load the example sounding data used for unit testing:\n\n .. code-block:: python\n\n import act\n\n ds = act.io.csvfiles.read(act.tests.sample_files.EXAMPLE_CSV_WILDCARD)\n\n \"\"\"\n\n # Convert to string if filename is a pathlib or not a list\n if isinstance(filename, (pathlib.PurePath, str)):\n filename = [str(filename)]\n\n if isinstance(filename, list) and isinstance(filename[0], pathlib.PurePath):\n filename = [str(ii) for ii in filename]\n\n # Read data using pandas read_csv one file at a time and append to\n # list. 
Then concatinate the list into one pandas dataframe.\n li = []\n for fl in filename:\n df = pd.read_csv(\n fl, sep=sep, names=column_names, skipfooter=skipfooter, engine=engine, **kwargs\n )\n li.append(df)\n\n if len(li) == 1:\n df = li[0]\n else:\n df = pd.concat(li, axis=0, ignore_index=ignore_index)\n\n # Set Coordinates if there's a variable date_time\n if 'date_time' in df:\n df.date_time = df.date_time.astype('datetime64')\n df.time = df.date_time\n df = df.set_index('time')\n\n # Convert to xarray DataSet\n ds = df.to_xarray()\n\n # Set additional variables\n # Since we cannot assume a standard naming convention setting\n # file_date and file_time to the first time in the file\n x_coord = ds.coords.to_index().values[0]\n if isinstance(x_coord, str):\n x_coord_dt = pd.to_datetime(x_coord)\n ds.attrs['_file_dates'] = x_coord_dt.strftime('%Y%m%d')\n ds.attrs['_file_times'] = x_coord_dt.strftime('%H%M%S')\n\n # Check for standard ARM datastream name, if none, assume the file is ARM\n # standard format.\n is_arm_file_flag = check_arm_standards(ds)\n if is_arm_file_flag == 0:\n\n ds.attrs['_datastream'] = '.'.join(filename[0].split('/')[-1].split('.')[0:2])\n\n # Add additional attributes, site, standards flag, etc...\n ds.attrs['_site'] = str(ds.attrs['_datastream'])[0:3]\n ds.attrs['_arm_standards_flag'] = is_arm_file_flag\n\n return ds\n", "path": "act/io/csvfiles.py"}]} | 1,639 | 120 |
gh_patches_debug_14245 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-1067 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SciPy in requirements in README but not in install_requires
Hey!
I'm wondering why SciPy is listed as a requirement in README but not in setup.py install_require argument.
Cheers,
Mike
</issue>
<code>
[start of examples/MultiPlotWidget.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 ## Add path to library (just for examples; you do not need this)
4 import initExample
5
6
7 from scipy import random
8 from numpy import linspace
9 from pyqtgraph.Qt import QtGui, QtCore
10 import pyqtgraph as pg
11 from pyqtgraph import MultiPlotWidget
12 try:
13 from pyqtgraph.metaarray import *
14 except:
15 print("MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)")
16 exit()
17
18 app = QtGui.QApplication([])
19 mw = QtGui.QMainWindow()
20 mw.resize(800,800)
21 pw = MultiPlotWidget()
22 mw.setCentralWidget(pw)
23 mw.show()
24
25 data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
26 ma = MetaArray(data, info=[
27 {'name': 'Signal', 'cols': [
28 {'name': 'Col1', 'units': 'V'},
29 {'name': 'Col2', 'units': 'A'},
30 {'name': 'Col3'},
31 ]},
32 {'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}
33 ])
34 pw.plot(ma)
35
36 ## Start Qt event loop unless running in interactive mode.
37 if __name__ == '__main__':
38 import sys
39 if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
40 QtGui.QApplication.instance().exec_()
41
42
[end of examples/MultiPlotWidget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/examples/MultiPlotWidget.py b/examples/MultiPlotWidget.py
--- a/examples/MultiPlotWidget.py
+++ b/examples/MultiPlotWidget.py
@@ -3,8 +3,7 @@
## Add path to library (just for examples; you do not need this)
import initExample
-
-from scipy import random
+import numpy as np
from numpy import linspace
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
@@ -22,7 +21,7 @@
mw.setCentralWidget(pw)
mw.show()
-data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
+data = np.random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
ma = MetaArray(data, info=[
{'name': 'Signal', 'cols': [
{'name': 'Col1', 'units': 'V'},
| {"golden_diff": "diff --git a/examples/MultiPlotWidget.py b/examples/MultiPlotWidget.py\n--- a/examples/MultiPlotWidget.py\n+++ b/examples/MultiPlotWidget.py\n@@ -3,8 +3,7 @@\n ## Add path to library (just for examples; you do not need this)\n import initExample\n \n-\n-from scipy import random\n+import numpy as np\n from numpy import linspace\n from pyqtgraph.Qt import QtGui, QtCore\n import pyqtgraph as pg\n@@ -22,7 +21,7 @@\n mw.setCentralWidget(pw)\n mw.show()\n \n-data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])\n+data = np.random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])\n ma = MetaArray(data, info=[\n {'name': 'Signal', 'cols': [\n {'name': 'Col1', 'units': 'V'},\n", "issue": "SciPy in requirements in README but not in install_requires\nHey!\r\nI'm wondering why SciPy is listed as a requirement in README but not in setup.py install_require argument.\r\n\r\nCheers,\r\nMike\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n## Add path to library (just for examples; you do not need this)\nimport initExample\n\n\nfrom scipy import random\nfrom numpy import linspace\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport pyqtgraph as pg\nfrom pyqtgraph import MultiPlotWidget\ntry:\n from pyqtgraph.metaarray import *\nexcept:\n print(\"MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)\")\n exit()\n\napp = QtGui.QApplication([])\nmw = QtGui.QMainWindow()\nmw.resize(800,800)\npw = MultiPlotWidget()\nmw.setCentralWidget(pw)\nmw.show()\n\ndata = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])\nma = MetaArray(data, info=[\n {'name': 'Signal', 'cols': [\n {'name': 'Col1', 'units': 'V'}, \n {'name': 'Col2', 'units': 'A'}, \n {'name': 'Col3'},\n ]}, \n {'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}\n ])\npw.plot(ma)\n\n## Start Qt event loop unless running in interactive mode.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n\n", "path": "examples/MultiPlotWidget.py"}]} | 988 | 221 |
gh_patches_debug_29127 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2384 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The API does not always return the same info for a member
> One more thing: when updating a member you can specify two fields that are not returned by the regular GET: `hover_or_click` and `show_sign`. Is that normal?
Source:[Kje](http://zestedesavoir.com/forums/sujet/1365/zep-17-elaboration-de-lapi-des-membres/?page=18#p45095)
</issue>
<code>
[start of zds/member/api/serializers.py]
1 # -*- coding: utf-8 -*-
2
3 from rest_framework import serializers
4
5 from zds.member.commons import ProfileUsernameValidator, ProfileEmailValidator, \
6 ProfileCreate
7 from zds.member.models import Profile
8
9
10 class ProfileListSerializer(serializers.ModelSerializer):
11 """
12 Serializers of a user object.
13 """
14
15 username = serializers.CharField(source='user.username')
16 is_active = serializers.BooleanField(source='user.is_active')
17 date_joined = serializers.DateTimeField(source='user.date_joined')
18
19 class Meta:
20 model = Profile
21 fields = ('pk', 'username', 'is_active', 'date_joined')
22
23
24 class ProfileCreateSerializer(serializers.ModelSerializer, ProfileCreate, ProfileUsernameValidator,
25 ProfileEmailValidator):
26 """
27 Serializers of a user object to create one.
28 """
29
30 username = serializers.CharField(source='user.username')
31 email = serializers.EmailField(source='user.email')
32 password = serializers.CharField(source='user.password')
33
34 class Meta:
35 model = Profile
36 fields = ('pk', 'username', 'email', 'password')
37 write_only_fields = ('password')
38
39 def create(self, validated_data):
40 profile = self.create_profile(validated_data.get('user'))
41 self.save_profile(profile)
42 return profile
43
44 def throw_error(self, key=None, message=None):
45 raise serializers.ValidationError(message)
46
47
48 class ProfileDetailSerializer(serializers.ModelSerializer):
49 """
50 Serializers of a profile object.
51 """
52
53 username = serializers.CharField(source='user.username')
54 email = serializers.EmailField(source='user.email')
55 is_active = serializers.BooleanField(source='user.is_active')
56 date_joined = serializers.DateTimeField(source='user.date_joined')
57
58 class Meta:
59 model = Profile
60 fields = ('pk', 'username', 'show_email', 'email', 'is_active',
61 'site', 'avatar_url', 'biography', 'sign', 'email_for_answer',
62 'last_visit', 'date_joined')
63
64 def __init__(self, *args, **kwargs):
65 """
66 Create the serializer with or without email field, depending on the show_email argument.
67 """
68 show_email = kwargs.pop('show_email', False)
69 is_authenticated = kwargs.pop('is_authenticated', False)
70
71 super(ProfileDetailSerializer, self).__init__(*args, **kwargs)
72
73 if not show_email or not is_authenticated:
74 # Drop email field.
75 self.fields.pop('email')
76
77
78 class ProfileValidatorSerializer(serializers.ModelSerializer, ProfileUsernameValidator, ProfileEmailValidator):
79 """
80 Serializers of a profile object used to update a member.
81 """
82
83 username = serializers.CharField(source='user.username', required=False, allow_blank=True)
84 email = serializers.EmailField(source='user.email', required=False, allow_blank=True)
85
86 class Meta:
87 model = Profile
88 fields = ('pk', 'username', 'email', 'site', 'avatar_url', 'biography',
89 'sign', 'show_email', 'show_sign', 'hover_or_click',
90 'email_for_answer')
91
92 def update(self, instance, validated_data):
93 """
94 Update and return an existing `Profile` instance, given the validated data.
95 """
96 instance.user.username = validated_data.get('user').get('username',
97 instance.user.username) or instance.user.username
98 instance.user.email = validated_data.get('user').get('email', instance.user.email) or instance.user.email
99 instance.site = validated_data.get('site', instance.site) or instance.site
100 instance.avatar_url = validated_data.get('avatar_url', instance.avatar_url) or instance.avatar_url
101 instance.biography = validated_data.get('biography', instance.biography) or instance.biography
102 instance.sign = validated_data.get('sign', instance.sign) or instance.sign
103 instance.show_email = validated_data.get('show_email', instance.show_email) or instance.show_email
104 instance.show_sign = validated_data.get('show_sign', instance.show_sign) or instance.show_sign
105 instance.hover_or_click = validated_data.get('hover_or_click',
106 instance.hover_or_click) or instance.hover_or_click
107 instance.email_for_answer = validated_data.get('email_for_answer',
108 instance.email_for_answer) or instance.email_for_answer
109 instance.user.save()
110 instance.save()
111 return instance
112
113 def throw_error(self, key=None, message=None):
114 raise serializers.ValidationError(message)
115
116
117 class ProfileSanctionSerializer(serializers.ModelSerializer):
118 """
119 Serializers of a profile object to set the user in reading only access.
120 """
121
122 username = serializers.ReadOnlyField(source='user.username')
123 email = serializers.ReadOnlyField(source='user.email')
124
125 class Meta:
126 model = Profile
127 fields = ('pk', 'username', 'email', 'can_write', 'end_ban_write', 'can_read', 'end_ban_read')
128 read_only_fields = ('can_write', 'end_ban_write', 'can_read', 'end_ban_read')
129
[end of zds/member/api/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/zds/member/api/serializers.py b/zds/member/api/serializers.py
--- a/zds/member/api/serializers.py
+++ b/zds/member/api/serializers.py
@@ -57,9 +57,9 @@
class Meta:
model = Profile
- fields = ('pk', 'username', 'show_email', 'email', 'is_active',
- 'site', 'avatar_url', 'biography', 'sign', 'email_for_answer',
- 'last_visit', 'date_joined')
+ fields = ('pk', 'username', 'email', 'is_active', 'date_joined',
+ 'site', 'avatar_url', 'biography', 'sign', 'show_email',
+ 'show_sign', 'hover_or_click', 'email_for_answer', 'last_visit')
def __init__(self, *args, **kwargs):
"""
@@ -82,12 +82,15 @@
username = serializers.CharField(source='user.username', required=False, allow_blank=True)
email = serializers.EmailField(source='user.email', required=False, allow_blank=True)
+ is_active = serializers.BooleanField(source='user.is_active', required=False)
+ date_joined = serializers.DateTimeField(source='user.date_joined', required=False)
class Meta:
model = Profile
- fields = ('pk', 'username', 'email', 'site', 'avatar_url', 'biography',
- 'sign', 'show_email', 'show_sign', 'hover_or_click',
- 'email_for_answer')
+ fields = ('pk', 'username', 'email', 'is_active', 'date_joined',
+ 'site', 'avatar_url', 'biography', 'sign', 'show_email',
+ 'show_sign', 'hover_or_click', 'email_for_answer', 'last_visit')
+ read_only_fields = ('is_active', 'date_joined', 'last_visit',)
def update(self, instance, validated_data):
"""
| {"golden_diff": "diff --git a/zds/member/api/serializers.py b/zds/member/api/serializers.py\n--- a/zds/member/api/serializers.py\n+++ b/zds/member/api/serializers.py\n@@ -57,9 +57,9 @@\n \n class Meta:\n model = Profile\n- fields = ('pk', 'username', 'show_email', 'email', 'is_active',\n- 'site', 'avatar_url', 'biography', 'sign', 'email_for_answer',\n- 'last_visit', 'date_joined')\n+ fields = ('pk', 'username', 'email', 'is_active', 'date_joined',\n+ 'site', 'avatar_url', 'biography', 'sign', 'show_email',\n+ 'show_sign', 'hover_or_click', 'email_for_answer', 'last_visit')\n \n def __init__(self, *args, **kwargs):\n \"\"\"\n@@ -82,12 +82,15 @@\n \n username = serializers.CharField(source='user.username', required=False, allow_blank=True)\n email = serializers.EmailField(source='user.email', required=False, allow_blank=True)\n+ is_active = serializers.BooleanField(source='user.is_active', required=False)\n+ date_joined = serializers.DateTimeField(source='user.date_joined', required=False)\n \n class Meta:\n model = Profile\n- fields = ('pk', 'username', 'email', 'site', 'avatar_url', 'biography',\n- 'sign', 'show_email', 'show_sign', 'hover_or_click',\n- 'email_for_answer')\n+ fields = ('pk', 'username', 'email', 'is_active', 'date_joined',\n+ 'site', 'avatar_url', 'biography', 'sign', 'show_email',\n+ 'show_sign', 'hover_or_click', 'email_for_answer', 'last_visit')\n+ read_only_fields = ('is_active', 'date_joined', 'last_visit',)\n \n def update(self, instance, validated_data):\n \"\"\"\n", "issue": "L'API ne retourne pas toujours les m\u00eames infos pour un membre\n> Un autre truc, quand on met un jour un membre on peut sp\u00e9cifier deux champs qui ne sont pas fournit par le get classique : `hover_or_click` et `show_sign`. Est ce normal ?\n\nSource:[Kje](http://zestedesavoir.com/forums/sujet/1365/zep-17-elaboration-de-lapi-des-membres/?page=18#p45095)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom rest_framework import serializers\n\nfrom zds.member.commons import ProfileUsernameValidator, ProfileEmailValidator, \\\n ProfileCreate\nfrom zds.member.models import Profile\n\n\nclass ProfileListSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializers of a user object.\n \"\"\"\n\n username = serializers.CharField(source='user.username')\n is_active = serializers.BooleanField(source='user.is_active')\n date_joined = serializers.DateTimeField(source='user.date_joined')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'is_active', 'date_joined')\n\n\nclass ProfileCreateSerializer(serializers.ModelSerializer, ProfileCreate, ProfileUsernameValidator,\n ProfileEmailValidator):\n \"\"\"\n Serializers of a user object to create one.\n \"\"\"\n\n username = serializers.CharField(source='user.username')\n email = serializers.EmailField(source='user.email')\n password = serializers.CharField(source='user.password')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'email', 'password')\n write_only_fields = ('password')\n\n def create(self, validated_data):\n profile = self.create_profile(validated_data.get('user'))\n self.save_profile(profile)\n return profile\n\n def throw_error(self, key=None, message=None):\n raise serializers.ValidationError(message)\n\n\nclass ProfileDetailSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializers of a profile object.\n \"\"\"\n\n username = serializers.CharField(source='user.username')\n email = serializers.EmailField(source='user.email')\n is_active = 
serializers.BooleanField(source='user.is_active')\n date_joined = serializers.DateTimeField(source='user.date_joined')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'show_email', 'email', 'is_active',\n 'site', 'avatar_url', 'biography', 'sign', 'email_for_answer',\n 'last_visit', 'date_joined')\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create the serializer with or without email field, depending on the show_email argument.\n \"\"\"\n show_email = kwargs.pop('show_email', False)\n is_authenticated = kwargs.pop('is_authenticated', False)\n\n super(ProfileDetailSerializer, self).__init__(*args, **kwargs)\n\n if not show_email or not is_authenticated:\n # Drop email field.\n self.fields.pop('email')\n\n\nclass ProfileValidatorSerializer(serializers.ModelSerializer, ProfileUsernameValidator, ProfileEmailValidator):\n \"\"\"\n Serializers of a profile object used to update a member.\n \"\"\"\n\n username = serializers.CharField(source='user.username', required=False, allow_blank=True)\n email = serializers.EmailField(source='user.email', required=False, allow_blank=True)\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'email', 'site', 'avatar_url', 'biography',\n 'sign', 'show_email', 'show_sign', 'hover_or_click',\n 'email_for_answer')\n\n def update(self, instance, validated_data):\n \"\"\"\n Update and return an existing `Profile` instance, given the validated data.\n \"\"\"\n instance.user.username = validated_data.get('user').get('username',\n instance.user.username) or instance.user.username\n instance.user.email = validated_data.get('user').get('email', instance.user.email) or instance.user.email\n instance.site = validated_data.get('site', instance.site) or instance.site\n instance.avatar_url = validated_data.get('avatar_url', instance.avatar_url) or instance.avatar_url\n instance.biography = validated_data.get('biography', instance.biography) or instance.biography\n instance.sign = validated_data.get('sign', instance.sign) or instance.sign\n instance.show_email = validated_data.get('show_email', instance.show_email) or instance.show_email\n instance.show_sign = validated_data.get('show_sign', instance.show_sign) or instance.show_sign\n instance.hover_or_click = validated_data.get('hover_or_click',\n instance.hover_or_click) or instance.hover_or_click\n instance.email_for_answer = validated_data.get('email_for_answer',\n instance.email_for_answer) or instance.email_for_answer\n instance.user.save()\n instance.save()\n return instance\n\n def throw_error(self, key=None, message=None):\n raise serializers.ValidationError(message)\n\n\nclass ProfileSanctionSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializers of a profile object to set the user in reading only access.\n \"\"\"\n\n username = serializers.ReadOnlyField(source='user.username')\n email = serializers.ReadOnlyField(source='user.email')\n\n class Meta:\n model = Profile\n fields = ('pk', 'username', 'email', 'can_write', 'end_ban_write', 'can_read', 'end_ban_read')\n read_only_fields = ('can_write', 'end_ban_write', 'can_read', 'end_ban_read')\n", "path": "zds/member/api/serializers.py"}]} | 1,971 | 435 |
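To make the intent of the diff above easier to see outside the project: the update serializer now declares `is_active` and `date_joined` and lists them as read-only, so a GET and a subsequent PUT round-trip through the same field set while the server-managed values stay untouched. The sketch below shows that pattern with a plain DRF serializer; the field names mirror the patch, the model and project specifics are left out, and a minimal inline Django configuration is assumed so it can run stand-alone.

```python
# Stand-alone sketch of the read-only field pattern used by the patch above.
# Assumes djangorestframework is installed; settings are configured inline so
# the example does not need a real project.
import django
from django.conf import settings

if not settings.configured:
    settings.configure()
    django.setup()

from rest_framework import serializers


class ProfileSerializer(serializers.Serializer):
    username = serializers.CharField()
    is_active = serializers.BooleanField(read_only=True)     # server-managed
    date_joined = serializers.DateTimeField(read_only=True)  # server-managed


ser = ProfileSerializer(data={"username": "zeste", "is_active": False})
ser.is_valid(raise_exception=True)
print(ser.validated_data)  # only the username survives; read-only input is ignored
```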
gh_patches_debug_27067 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2287 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow 0 as value for funding amount in partnerships
It should be possible to fill in 0 as a funding amount in the project editor, and then publish a project. This is based on Plan Finland feedback:
"Are you able to give us an estimate on when the suggestions we made to Geert could be published (the changes to the results section and possibility for 0€ budget project)."
</issue>
<code>
[start of akvo/rsr/models/publishing_status.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.conf import settings
8 from django.core.exceptions import ValidationError
9 from django.core.mail import send_mail
10 from django.db import models
11 from django.db.models.signals import post_save
12 from django.dispatch import receiver
13 from django.utils.translation import ugettext_lazy as _
14 from .partnership import Partnership
15
16 from ..fields import ValidXMLCharField
17
18
19 class PublishingStatus(models.Model):
20 """Keep track of publishing status."""
21 STATUS_PUBLISHED = 'published'
22 STATUS_UNPUBLISHED = 'unpublished'
23 PUBLISHING_STATUS = (
24 (STATUS_UNPUBLISHED, _(u'Unpublished')),
25 (STATUS_PUBLISHED, _(u'Published')),
26 )
27
28 project = models.OneToOneField('Project',)
29 status = ValidXMLCharField(max_length=30,
30 choices=PUBLISHING_STATUS,
31 db_index=True, default=STATUS_UNPUBLISHED)
32
33 def clean(self):
34 """Projects can only be published, when several checks have been performed."""
35 if self.status == 'published':
36 validation_errors = []
37
38 if not self.project.title:
39 validation_errors.append(
40 ValidationError(_('Project needs to have a title.'),
41 code='title')
42 )
43
44 if not self.project.subtitle:
45 validation_errors.append(
46 ValidationError(_('Project needs to have a subtitle.'),
47 code='subtitle')
48 )
49
50 if self.project.iati_status == '6':
51 validation_errors.append(
52 ValidationError(_('Project needs to have non-suspended status.'),
53 code='status')
54 )
55
56 if not (self.project.date_start_planned or self.project.date_start_actual):
57 validation_errors.append(
58 ValidationError(
59 _('Project needs to have the planned or actual start date field filled '
60 'in.'), code='start_date')
61 )
62
63 if not self.project.current_image:
64 validation_errors.append(
65 ValidationError(_('Project needs to have a photo.'),
66 code='current_image')
67 )
68
69 if not self.project.partnerships.filter(
70 organisation__can_create_projects__exact=True).exists():
71 validation_errors.append(
72 ValidationError(
73 _('Project has no partner that is allowed to publish it.'),
74 code='partners'
75 )
76 )
77
78 if not self.project.partnerships.filter(
79 iati_organisation_role__in=[Partnership.IATI_FUNDING_PARTNER,
80 Partnership.IATI_IMPLEMENTING_PARTNER,
81 Partnership.IATI_ACCOUNTABLE_PARTNER]
82 ).exists():
83 validation_errors.append(
84 ValidationError(
85 _('Project needs to have at least one funding, implementing or accountable '
86 'partner.'),
87 code='partners'
88 )
89 )
90 else:
91 for funding_partner in self.project.partnerships.filter(
92 iati_organisation_role=Partnership.IATI_FUNDING_PARTNER):
93 if not funding_partner.funding_amount:
94 validation_errors.append(
95 ValidationError(_('All funding partners should have a funding amount.'),
96 code='partners'
97 )
98 )
99 break
100
101 if not self.project.project_plan_summary:
102 validation_errors.append(
103 ValidationError(_('Project needs to have the project plan summary filled in.'),
104 code='summary')
105 )
106
107 if not self.project.goals_overview:
108 validation_errors.append(
109 ValidationError(_('Project needs to have the goals overview field filled in.'),
110 code='goals_overview')
111 )
112
113 if not self.project.locations.all():
114 validation_errors.append(
115 ValidationError(_('Project needs to have at least one location.'),
116 code='location')
117 )
118 else:
119 for location in self.project.locations.all():
120 if not (location.latitude and location.longitude):
121 validation_errors.append(
122 ValidationError(
123 _('All locations need to have a latitude and longitude specified.'),
124 code='location')
125 )
126 break
127
128 if not self.project.budget_items.all():
129 validation_errors.append(
130 ValidationError(_('Project needs to have at least one budget item.'),
131 code='budget_item')
132 )
133 elif not self.project.budget_items.filter(amount__gt=0).exists():
134 validation_errors.append(
135 ValidationError(
136 _('Project needs to have at least one budget item with an amount.'),
137 code='budget_item'
138 )
139 )
140
141 if validation_errors:
142 raise ValidationError(validation_errors)
143
144 class Meta:
145 app_label = 'rsr'
146 verbose_name = _(u'publishing status')
147 verbose_name_plural = _(u'publishing statuses')
148 ordering = ('-status', 'project')
149
[end of akvo/rsr/models/publishing_status.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/akvo/rsr/models/publishing_status.py b/akvo/rsr/models/publishing_status.py
--- a/akvo/rsr/models/publishing_status.py
+++ b/akvo/rsr/models/publishing_status.py
@@ -90,7 +90,7 @@
else:
for funding_partner in self.project.partnerships.filter(
iati_organisation_role=Partnership.IATI_FUNDING_PARTNER):
- if not funding_partner.funding_amount:
+ if not funding_partner.funding_amount and not funding_partner.funding_amount == 0:
validation_errors.append(
ValidationError(_('All funding partners should have a funding amount.'),
code='partners'
@@ -130,7 +130,7 @@
ValidationError(_('Project needs to have at least one budget item.'),
code='budget_item')
)
- elif not self.project.budget_items.filter(amount__gt=0).exists():
+ elif not self.project.budget_items.filter(amount__gte=0).exists():
validation_errors.append(
ValidationError(
_('Project needs to have at least one budget item with an amount.'),
| {"golden_diff": "diff --git a/akvo/rsr/models/publishing_status.py b/akvo/rsr/models/publishing_status.py\n--- a/akvo/rsr/models/publishing_status.py\n+++ b/akvo/rsr/models/publishing_status.py\n@@ -90,7 +90,7 @@\n else:\n for funding_partner in self.project.partnerships.filter(\n iati_organisation_role=Partnership.IATI_FUNDING_PARTNER):\n- if not funding_partner.funding_amount:\n+ if not funding_partner.funding_amount and not funding_partner.funding_amount == 0:\n validation_errors.append(\n ValidationError(_('All funding partners should have a funding amount.'),\n code='partners'\n@@ -130,7 +130,7 @@\n ValidationError(_('Project needs to have at least one budget item.'),\n code='budget_item')\n )\n- elif not self.project.budget_items.filter(amount__gt=0).exists():\n+ elif not self.project.budget_items.filter(amount__gte=0).exists():\n validation_errors.append(\n ValidationError(\n _('Project needs to have at least one budget item with an amount.'),\n", "issue": "Allow 0 as value for funding amount in partnerships\nIt should be possible to fill in 0 as a funding amount in the project editor, and then publish a project. This is based on Plan Finland feedback:\n\n\"Are you able to give us an estimate on when the suggestions we made to Geert could be published (the changes to the results section and possibility for 0\u20ac budget project).\"\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import send_mail\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom .partnership import Partnership\n\nfrom ..fields import ValidXMLCharField\n\n\nclass PublishingStatus(models.Model):\n \"\"\"Keep track of publishing status.\"\"\"\n STATUS_PUBLISHED = 'published'\n STATUS_UNPUBLISHED = 'unpublished'\n PUBLISHING_STATUS = (\n (STATUS_UNPUBLISHED, _(u'Unpublished')),\n (STATUS_PUBLISHED, _(u'Published')),\n )\n\n project = models.OneToOneField('Project',)\n status = ValidXMLCharField(max_length=30,\n choices=PUBLISHING_STATUS,\n db_index=True, default=STATUS_UNPUBLISHED)\n\n def clean(self):\n \"\"\"Projects can only be published, when several checks have been performed.\"\"\"\n if self.status == 'published':\n validation_errors = []\n\n if not self.project.title:\n validation_errors.append(\n ValidationError(_('Project needs to have a title.'),\n code='title')\n )\n\n if not self.project.subtitle:\n validation_errors.append(\n ValidationError(_('Project needs to have a subtitle.'),\n code='subtitle')\n )\n\n if self.project.iati_status == '6':\n validation_errors.append(\n ValidationError(_('Project needs to have non-suspended status.'),\n code='status')\n )\n\n if not (self.project.date_start_planned or self.project.date_start_actual):\n validation_errors.append(\n ValidationError(\n _('Project needs to have the planned or actual start date field filled '\n 'in.'), code='start_date')\n )\n\n if not self.project.current_image:\n validation_errors.append(\n ValidationError(_('Project needs to have a photo.'),\n code='current_image')\n )\n\n if not self.project.partnerships.filter(\n 
organisation__can_create_projects__exact=True).exists():\n validation_errors.append(\n ValidationError(\n _('Project has no partner that is allowed to publish it.'),\n code='partners'\n )\n )\n\n if not self.project.partnerships.filter(\n iati_organisation_role__in=[Partnership.IATI_FUNDING_PARTNER,\n Partnership.IATI_IMPLEMENTING_PARTNER,\n Partnership.IATI_ACCOUNTABLE_PARTNER]\n ).exists():\n validation_errors.append(\n ValidationError(\n _('Project needs to have at least one funding, implementing or accountable '\n 'partner.'),\n code='partners'\n )\n )\n else:\n for funding_partner in self.project.partnerships.filter(\n iati_organisation_role=Partnership.IATI_FUNDING_PARTNER):\n if not funding_partner.funding_amount:\n validation_errors.append(\n ValidationError(_('All funding partners should have a funding amount.'),\n code='partners'\n )\n )\n break\n\n if not self.project.project_plan_summary:\n validation_errors.append(\n ValidationError(_('Project needs to have the project plan summary filled in.'),\n code='summary')\n )\n\n if not self.project.goals_overview:\n validation_errors.append(\n ValidationError(_('Project needs to have the goals overview field filled in.'),\n code='goals_overview')\n )\n\n if not self.project.locations.all():\n validation_errors.append(\n ValidationError(_('Project needs to have at least one location.'),\n code='location')\n )\n else:\n for location in self.project.locations.all():\n if not (location.latitude and location.longitude):\n validation_errors.append(\n ValidationError(\n _('All locations need to have a latitude and longitude specified.'),\n code='location')\n )\n break\n\n if not self.project.budget_items.all():\n validation_errors.append(\n ValidationError(_('Project needs to have at least one budget item.'),\n code='budget_item')\n )\n elif not self.project.budget_items.filter(amount__gt=0).exists():\n validation_errors.append(\n ValidationError(\n _('Project needs to have at least one budget item with an amount.'),\n code='budget_item'\n )\n )\n\n if validation_errors:\n raise ValidationError(validation_errors)\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'publishing status')\n verbose_name_plural = _(u'publishing statuses')\n ordering = ('-status', 'project')\n", "path": "akvo/rsr/models/publishing_status.py"}]} | 1,956 | 249 |
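Both hunks in the diff above are about letting an explicit 0 through: `if not funding_partner.funding_amount` is also true when the amount is 0, because 0 is falsy in Python, and the budget-item filter moves from `amount__gt=0` to `amount__gte=0` for the same reason. A plain-Python illustration of the check (not project code) is below.

```python
# Why the original check also rejected 0: `not amount` is True for None and 0.
def missing_funding_amount(amount):
    # Mirrors the patched condition; `amount is None` would be the tidier form.
    return not amount and amount != 0

for amount in (None, 0, 150):
    status = "error" if missing_funding_amount(amount) else "ok"
    print(amount, "->", status)
# None -> error, 0 -> ok, 150 -> ok
```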
gh_patches_debug_6638 | rasdani/github-patches | git_diff | zulip__zulip-28016 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Onboarding hotspots are misplaced
I think our grid rewrites of the sidebars have resulted in the onboarding hotspots being somewhat misplaced:

(The `offset_x` and `offset_y` values may need updating).
I'm not entirely sure where the best place for these are. The main one that seems very wrong is the compose box one.
That said, we should aim to spend pretty minimal time on this system because we plan to rip it out in favor of a totally different onboarding system.
See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html for notes on how to test using the `ALWAYS_SEND_ALL_HOTSPOTS` setting as shown in this screenshot. (Usually, they're shown only one at a time in sequence).
@sayamsamal can you pick this one up?
</issue>
<code>
[start of zerver/lib/hotspots.py]
1 # See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html
2 # for documentation on this subsystem.
3 from dataclasses import dataclass
4 from typing import Dict, List, Optional, Union
5
6 from django.conf import settings
7 from django.utils.translation import gettext_lazy
8 from django_stubs_ext import StrPromise
9
10 from zerver.models import UserHotspot, UserProfile
11
12
13 @dataclass
14 class Hotspot:
15 name: str
16 title: Optional[StrPromise]
17 description: Optional[StrPromise]
18 has_trigger: bool = False
19
20 def to_dict(self, delay: float = 0) -> Dict[str, Union[str, float, bool]]:
21 return {
22 "name": self.name,
23 "title": str(self.title),
24 "description": str(self.description),
25 "delay": delay,
26 "has_trigger": self.has_trigger,
27 }
28
29
30 INTRO_HOTSPOTS: List[Hotspot] = [
31 Hotspot(
32 name="intro_streams",
33 title=gettext_lazy("Catch up on a stream"),
34 description=gettext_lazy(
35 "Messages sent to a stream are seen by everyone subscribed "
36 "to that stream. Try clicking on one of the stream links below."
37 ),
38 ),
39 Hotspot(
40 name="intro_topics",
41 title=gettext_lazy("Topics"),
42 description=gettext_lazy(
43 "Every message has a topic. Topics keep conversations "
44 "easy to follow, and make it easy to reply to conversations that start "
45 "while you are offline."
46 ),
47 ),
48 Hotspot(
49 name="intro_gear",
50 title=gettext_lazy("Settings"),
51 description=gettext_lazy("Go to Settings to configure your notifications and preferences."),
52 ),
53 Hotspot(
54 name="intro_compose",
55 title=gettext_lazy("Compose"),
56 description=gettext_lazy(
57 "Click here to start a new conversation. Pick a topic "
58 "(2-3 words is best), and give it a go!"
59 ),
60 ),
61 ]
62
63
64 NON_INTRO_HOTSPOTS: List[Hotspot] = []
65
66 # We would most likely implement new hotspots in the future that aren't
67 # a part of the initial tutorial. To that end, classifying them into
68 # categories which are aggregated in ALL_HOTSPOTS, seems like a good start.
69 ALL_HOTSPOTS = [*INTRO_HOTSPOTS, *NON_INTRO_HOTSPOTS]
70
71
72 def get_next_hotspots(user: UserProfile) -> List[Dict[str, Union[str, float, bool]]]:
73 # For manual testing, it can be convenient to set
74 # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to
75 # make it easy to click on all of the hotspots.
76 #
77 # Since this is just for development purposes, it's convenient for us to send
78 # all the hotspots rather than any specific category.
79 if settings.ALWAYS_SEND_ALL_HOTSPOTS:
80 return [hotspot.to_dict() for hotspot in ALL_HOTSPOTS]
81
82 # If a Zulip server has disabled the tutorial, never send hotspots.
83 if not settings.TUTORIAL_ENABLED:
84 return []
85
86 seen_hotspots = frozenset(
87 UserHotspot.objects.filter(user=user).values_list("hotspot", flat=True)
88 )
89
90 hotspots = [hotspot.to_dict() for hotspot in NON_INTRO_HOTSPOTS]
91
92 if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:
93 return hotspots
94
95 for hotspot in INTRO_HOTSPOTS:
96 if hotspot.name in seen_hotspots:
97 continue
98
99 hotspots.append(hotspot.to_dict(delay=0.5))
100 return hotspots
101
102 user.tutorial_status = UserProfile.TUTORIAL_FINISHED
103 user.save(update_fields=["tutorial_status"])
104 return hotspots
105
106
107 def copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:
108 for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):
109 UserHotspot.objects.create(
110 user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp
111 )
112
113 target_profile.tutorial_status = source_profile.tutorial_status
114 target_profile.onboarding_steps = source_profile.onboarding_steps
115 target_profile.save(update_fields=["tutorial_status", "onboarding_steps"])
116
[end of zerver/lib/hotspots.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
| diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py
--- a/zerver/lib/hotspots.py
+++ b/zerver/lib/hotspots.py
@@ -46,6 +46,9 @@
),
),
Hotspot(
+ # In theory, this should be renamed to intro_personal, since
+ # it's no longer attached to the gear menu, but renaming these
+ # requires a migration that is not worth doing at this time.
name="intro_gear",
title=gettext_lazy("Settings"),
description=gettext_lazy("Go to Settings to configure your notifications and preferences."),
| {"golden_diff": "diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py\n--- a/zerver/lib/hotspots.py\n+++ b/zerver/lib/hotspots.py\n@@ -46,6 +46,9 @@\n ),\n ),\n Hotspot(\n+ # In theory, this should be renamed to intro_personal, since\n+ # it's no longer attached to the gear menu, but renaming these\n+ # requires a migration that is not worth doing at this time.\n name=\"intro_gear\",\n title=gettext_lazy(\"Settings\"),\n description=gettext_lazy(\"Go to Settings to configure your notifications and preferences.\"),\n", "issue": "Onboarding hotspots are misplaced\nI think our grid rewrites of the sidebars have resulted in the onboarding hotspots being somewhat misplaced:\r\n\r\n\r\n\r\n(The `offset_x` and `offset_y` values may need updating).\r\n\r\nI'm not entirely sure where the best place for these are. The main one that seems very wrong is the compose box one.\r\n\r\nThat said, we should aim to spend pretty minimal time on this system because we plan to rip it out in favor of a totally different onboarding system.\r\n\r\nSee https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html for notes on how to test using the `ALWAYS_SEND_ALL_HOTSPOTS` setting as shown in this screenshot. (Usually, they're shown only one at a time in sequence).\r\n\r\n@sayamsamal can you pick this one up?\r\n\n", "before_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Union\n\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy\nfrom django_stubs_ext import StrPromise\n\nfrom zerver.models import UserHotspot, UserProfile\n\n\n@dataclass\nclass Hotspot:\n name: str\n title: Optional[StrPromise]\n description: Optional[StrPromise]\n has_trigger: bool = False\n\n def to_dict(self, delay: float = 0) -> Dict[str, Union[str, float, bool]]:\n return {\n \"name\": self.name,\n \"title\": str(self.title),\n \"description\": str(self.description),\n \"delay\": delay,\n \"has_trigger\": self.has_trigger,\n }\n\n\nINTRO_HOTSPOTS: List[Hotspot] = [\n Hotspot(\n name=\"intro_streams\",\n title=gettext_lazy(\"Catch up on a stream\"),\n description=gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n ),\n Hotspot(\n name=\"intro_topics\",\n title=gettext_lazy(\"Topics\"),\n description=gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n ),\n Hotspot(\n name=\"intro_gear\",\n title=gettext_lazy(\"Settings\"),\n description=gettext_lazy(\"Go to Settings to configure your notifications and preferences.\"),\n ),\n Hotspot(\n name=\"intro_compose\",\n title=gettext_lazy(\"Compose\"),\n description=gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n ),\n]\n\n\nNON_INTRO_HOTSPOTS: List[Hotspot] = []\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. 
To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS = [*INTRO_HOTSPOTS, *NON_INTRO_HOTSPOTS]\n\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, Union[str, float, bool]]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [hotspot.to_dict() for hotspot in ALL_HOTSPOTS]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n seen_hotspots = frozenset(\n UserHotspot.objects.filter(user=user).values_list(\"hotspot\", flat=True)\n )\n\n hotspots = [hotspot.to_dict() for hotspot in NON_INTRO_HOTSPOTS]\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return hotspots\n\n for hotspot in INTRO_HOTSPOTS:\n if hotspot.name in seen_hotspots:\n continue\n\n hotspots.append(hotspot.to_dict(delay=0.5))\n return hotspots\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return hotspots\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(\n user=target_profile, hotspot=userhotspot.hotspot, timestamp=userhotspot.timestamp\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py"}]} | 1,932 | 140 |
gh_patches_debug_1624 | rasdani/github-patches | git_diff | pypa__cibuildwheel-977 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
on windows, setup_py_python_requires attempts to open utf-8 setup.py as Windows-1252 and fails
### Description
This [setup.py file](https://github.com/fgregg/fastcluster/blob/master/setup.py) is valid utf-8, and has a few non-ascii characters. In a windows build, `setup_py_python_requires` appears to be opening this file as if it was encoded like Windows-1252 and thus fails on some non-ascii characters.
### Build log
https://github.com/fgregg/fastcluster/runs/4660766954?check_suite_focus=true#step:5:40
### CI config
https://github.com/fgregg/fastcluster/blob/master/.github/workflows/pythonpackage.yml#L41-L47
</issue>
<code>
[start of cibuildwheel/projectfiles.py]
1 import ast
2 import sys
3 from configparser import ConfigParser
4 from pathlib import Path
5 from typing import Any, Optional
6
7 import tomli
8
9 if sys.version_info < (3, 8):
10 Constant = ast.Str
11
12 def get_constant(x: ast.Str) -> str:
13 return x.s
14
15 else:
16 Constant = ast.Constant
17
18 def get_constant(x: ast.Constant) -> Any:
19 return x.value
20
21
22 class Analyzer(ast.NodeVisitor):
23 def __init__(self) -> None:
24 self.requires_python: Optional[str] = None
25
26 def visit(self, content: ast.AST) -> None:
27 for node in ast.walk(content):
28 for child in ast.iter_child_nodes(node):
29 child.parent = node # type: ignore[attr-defined]
30 super().visit(content)
31
32 def visit_keyword(self, node: ast.keyword) -> None:
33 self.generic_visit(node)
34 if node.arg == "python_requires":
35 # Must not be nested in an if or other structure
36 # This will be Module -> Expr -> Call -> keyword
37 if not hasattr(node.parent.parent.parent, "parent") and isinstance( # type: ignore[attr-defined]
38 node.value, Constant
39 ):
40 self.requires_python = get_constant(node.value)
41
42
43 def setup_py_python_requires(content: str) -> Optional[str]:
44 try:
45 tree = ast.parse(content)
46 analyzer = Analyzer()
47 analyzer.visit(tree)
48 return analyzer.requires_python or None
49 except Exception:
50 return None
51
52
53 def get_requires_python_str(package_dir: Path) -> Optional[str]:
54 """Return the python requires string from the most canonical source available, or None"""
55
56 # Read in from pyproject.toml:project.requires-python
57 try:
58 with (package_dir / "pyproject.toml").open("rb") as f1:
59 info = tomli.load(f1)
60 return str(info["project"]["requires-python"])
61 except (FileNotFoundError, KeyError, IndexError, TypeError):
62 pass
63
64 # Read in from setup.cfg:options.python_requires
65 try:
66 config = ConfigParser()
67 config.read(package_dir / "setup.cfg")
68 return str(config["options"]["python_requires"])
69 except (FileNotFoundError, KeyError, IndexError, TypeError):
70 pass
71
72 try:
73 with (package_dir / "setup.py").open() as f2:
74 return setup_py_python_requires(f2.read())
75 except FileNotFoundError:
76 pass
77
78 return None
79
[end of cibuildwheel/projectfiles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py
--- a/cibuildwheel/projectfiles.py
+++ b/cibuildwheel/projectfiles.py
@@ -70,7 +70,7 @@
pass
try:
- with (package_dir / "setup.py").open() as f2:
+ with (package_dir / "setup.py").open(encoding="utf8") as f2:
return setup_py_python_requires(f2.read())
except FileNotFoundError:
pass
| {"golden_diff": "diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py\n--- a/cibuildwheel/projectfiles.py\n+++ b/cibuildwheel/projectfiles.py\n@@ -70,7 +70,7 @@\n pass\n \n try:\n- with (package_dir / \"setup.py\").open() as f2:\n+ with (package_dir / \"setup.py\").open(encoding=\"utf8\") as f2:\n return setup_py_python_requires(f2.read())\n except FileNotFoundError:\n pass\n", "issue": "on windows, setup_py_python_requires attempts to open utf-8 setup.py as Windows-1252 and fails\n### Description\r\n\r\nThis [setup.py file](https://github.com/fgregg/fastcluster/blob/master/setup.py) is valid utf-8, and has a few non-ascii characters. In a windows build, `setup_py_python_requires` appears to be opening this file as if it was encoded like Windows-1252 and thus fails on some non-ascii characters.\r\n\r\n### Build log\r\n\r\nhttps://github.com/fgregg/fastcluster/runs/4660766954?check_suite_focus=true#step:5:40\r\n\r\n### CI config\r\n\r\nhttps://github.com/fgregg/fastcluster/blob/master/.github/workflows/pythonpackage.yml#L41-L47\n", "before_files": [{"content": "import ast\nimport sys\nfrom configparser import ConfigParser\nfrom pathlib import Path\nfrom typing import Any, Optional\n\nimport tomli\n\nif sys.version_info < (3, 8):\n Constant = ast.Str\n\n def get_constant(x: ast.Str) -> str:\n return x.s\n\nelse:\n Constant = ast.Constant\n\n def get_constant(x: ast.Constant) -> Any:\n return x.value\n\n\nclass Analyzer(ast.NodeVisitor):\n def __init__(self) -> None:\n self.requires_python: Optional[str] = None\n\n def visit(self, content: ast.AST) -> None:\n for node in ast.walk(content):\n for child in ast.iter_child_nodes(node):\n child.parent = node # type: ignore[attr-defined]\n super().visit(content)\n\n def visit_keyword(self, node: ast.keyword) -> None:\n self.generic_visit(node)\n if node.arg == \"python_requires\":\n # Must not be nested in an if or other structure\n # This will be Module -> Expr -> Call -> keyword\n if not hasattr(node.parent.parent.parent, \"parent\") and isinstance( # type: ignore[attr-defined]\n node.value, Constant\n ):\n self.requires_python = get_constant(node.value)\n\n\ndef setup_py_python_requires(content: str) -> Optional[str]:\n try:\n tree = ast.parse(content)\n analyzer = Analyzer()\n analyzer.visit(tree)\n return analyzer.requires_python or None\n except Exception:\n return None\n\n\ndef get_requires_python_str(package_dir: Path) -> Optional[str]:\n \"\"\"Return the python requires string from the most canonical source available, or None\"\"\"\n\n # Read in from pyproject.toml:project.requires-python\n try:\n with (package_dir / \"pyproject.toml\").open(\"rb\") as f1:\n info = tomli.load(f1)\n return str(info[\"project\"][\"requires-python\"])\n except (FileNotFoundError, KeyError, IndexError, TypeError):\n pass\n\n # Read in from setup.cfg:options.python_requires\n try:\n config = ConfigParser()\n config.read(package_dir / \"setup.cfg\")\n return str(config[\"options\"][\"python_requires\"])\n except (FileNotFoundError, KeyError, IndexError, TypeError):\n pass\n\n try:\n with (package_dir / \"setup.py\").open() as f2:\n return setup_py_python_requires(f2.read())\n except FileNotFoundError:\n pass\n\n return None\n", "path": "cibuildwheel/projectfiles.py"}]} | 1,388 | 114 |
gh_patches_debug_1169 | rasdani/github-patches | git_diff | sosreport__sos-3483 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Obtain CNI files for containerd
Containerd uses the CNI configuration present in the defined folders by the configuration
```
[plugins."io.containerd.grpc.v1.cri".cni]
       conf_dir = "/etc/cni/net.d"
```
It will be very useful to obtain the CNI configurations present in that folder when debugging networking-related problems
https://github.com/sosreport/sos/blob/b94ced8370824bd62f3c7573ae33fcb96c5da531/sos/report/plugins/containerd.py#L12-L28
</issue>
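As a rough sketch, the plugin shown in the code below could collect the CNI configuration directory alongside the paths it already copies. The `/etc/cni/net.d/` value comes from the issue text; the exact change the maintainers land may differ.

```python
from sos.report.plugins import Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin


class Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):

    short_desc = 'Containerd containers'
    plugin_name = 'containerd'
    profiles = ('container',)
    packages = ('containerd', 'containerd.io',)

    def setup(self):
        self.add_copy_spec([
            "/etc/containerd/",
            "/etc/cni/net.d/",  # CNI conf_dir referenced by the CRI plugin settings
        ])
        self.add_cmd_output('containerd config dump')
        # collect the containerd logs.
        self.add_journal(units='containerd')
```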
<code>
[start of sos/report/plugins/containerd.py]
1 # This file is part of the sos project: https://github.com/sosreport/sos
2 #
3 # This copyrighted material is made available to anyone wishing to use,
4 # modify, copy, or redistribute it subject to the terms and conditions of
5 # version 2 of the GNU General Public License.
6 #
7 # See the LICENSE file in the source distribution for further information.
8
9 from sos.report.plugins import (Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin)
10
11
12 class Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):
13
14 short_desc = 'Containerd containers'
15 plugin_name = 'containerd'
16 profiles = ('container',)
17 packages = ('containerd', 'containerd.io',)
18
19 def setup(self):
20 self.add_copy_spec([
21 "/etc/containerd/",
22 ])
23
24 self.add_cmd_output('containerd config dump')
25
26 # collect the containerd logs.
27 self.add_journal(units='containerd')
28
29 # vim: set et ts=4 sw=4 :
30
[end of sos/report/plugins/containerd.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sos/report/plugins/containerd.py b/sos/report/plugins/containerd.py
--- a/sos/report/plugins/containerd.py
+++ b/sos/report/plugins/containerd.py
@@ -19,6 +19,7 @@
def setup(self):
self.add_copy_spec([
"/etc/containerd/",
+ "/etc/cni/net.d/",
])
self.add_cmd_output('containerd config dump')
| {"golden_diff": "diff --git a/sos/report/plugins/containerd.py b/sos/report/plugins/containerd.py\n--- a/sos/report/plugins/containerd.py\n+++ b/sos/report/plugins/containerd.py\n@@ -19,6 +19,7 @@\n def setup(self):\n self.add_copy_spec([\n \"/etc/containerd/\",\n+ \"/etc/cni/net.d/\",\n ])\n \n self.add_cmd_output('containerd config dump')\n", "issue": "Obtain CNI files for containerd\nContainerd uses the CNI configuration present in the defined folders by the configuration\r\n\r\n```\r\n [plugins.\"io.containerd.grpc.v1.cri\".cni]\r\n conf_dir = \"/etc/cni/net.d\r\n```\r\n\r\nIt will be very useful to obtain the cni configurations present on the folder for debugging networking related problems \r\n\r\n\r\nhttps://github.com/sosreport/sos/blob/b94ced8370824bd62f3c7573ae33fcb96c5da531/sos/report/plugins/containerd.py#L12-L28\n", "before_files": [{"content": "# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import (Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin)\n\n\nclass Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):\n\n short_desc = 'Containerd containers'\n plugin_name = 'containerd'\n profiles = ('container',)\n packages = ('containerd', 'containerd.io',)\n\n def setup(self):\n self.add_copy_spec([\n \"/etc/containerd/\",\n ])\n\n self.add_cmd_output('containerd config dump')\n\n # collect the containerd logs.\n self.add_journal(units='containerd')\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/containerd.py"}]} | 937 | 93 |
gh_patches_debug_35606 | rasdani/github-patches | git_diff | Kinto__kinto-972 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a configuration of collections that the history plugin needs to keep track on
Today the history plugin applies to all the collection but most of them don't need it.
For instance with the kinto-signer plugin we don't want to track history of changes in the preview and signed collection.
The same goes with the kinto-changes plugin when we don't want to track monitor changes modifications.
The same way we can configure the kinto-signer resources we want to track, we should be able to configure the list of collections we want the history plugin to track.
</issue>
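One possible shape for such a setting, sketched with made-up names: a `history.exclude_resources` entry parsed with Pyramid's `aslist` helper and checked against the bucket/collection/object URIs before a history entry is written. This only illustrates the idea and is not Kinto's actual implementation.

```python
from pyramid.settings import aslist


def excluded_resources(settings):
    """Parse a space/newline separated setting into a set of URIs."""
    return set(aslist(settings.get("history.exclude_resources", "")))


def should_track(object_uri, bucket_uri, collection_uri, excluded):
    """Track the change only when no ancestor resource is excluded."""
    return not ({object_uri, bucket_uri, collection_uri} & excluded)


settings = {"history.exclude_resources": "/buckets/preview /buckets/monitor"}
excluded = excluded_resources(settings)
print(should_track("/buckets/preview/collections/certs/records/1",
                   "/buckets/preview",
                   "/buckets/preview/collections/certs",
                   excluded))  # False: the preview bucket is excluded
```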
<code>
[start of kinto/plugins/history/listener.py]
1 from kinto.core.utils import instance_uri
2 from datetime import datetime
3
4
5 def on_resource_changed(event):
6 """
7 Everytime an object is created/changed/deleted, we create an entry in the
8 ``history`` resource. The entries are served as read-only in the
9 :mod:`kinto.plugins.history.views` module.
10 """
11 payload = event.payload
12 resource_name = payload['resource_name']
13 event_uri = payload['uri']
14
15 bucket_id = None
16 bucket_uri = None
17 collection_uri = None
18
19 storage = event.request.registry.storage
20 permission = event.request.registry.permission
21
22 targets = []
23 for impacted in event.impacted_records:
24 target = impacted['new']
25 obj_id = target['id']
26
27 try:
28 bucket_id = payload['bucket_id']
29 except KeyError:
30 # e.g. DELETE /buckets
31 bucket_id = obj_id
32 bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)
33
34 if 'collection_id' in payload:
35 collection_id = payload['collection_id']
36 collection_uri = instance_uri(event.request,
37 'collection',
38 bucket_id=bucket_id,
39 id=collection_id)
40
41 # On POST .../records, the URI does not contain the newly created
42 # record id.
43 parts = event_uri.split('/')
44 if resource_name in parts[-1]:
45 parts.append(obj_id)
46 else:
47 # Make sure the id is correct on grouped events.
48 parts[-1] = obj_id
49 uri = '/'.join(parts)
50 targets.append((uri, target))
51
52 # Prepare a list of object ids to be fetched from permission backend,
53 # and fetch them all at once. Use a mapping for later convenience.
54 all_perms_objects_ids = [oid for (oid, _) in targets]
55 all_perms_objects_ids.append(bucket_uri)
56 if collection_uri is not None:
57 all_perms_objects_ids.append(collection_uri)
58 all_perms_objects_ids = list(set(all_perms_objects_ids))
59 all_permissions = permission.get_objects_permissions(all_perms_objects_ids)
60 perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))
61
62 bucket_perms = perms_by_object_id[bucket_uri]
63 collection_perms = {}
64 if collection_uri is not None:
65 collection_perms = perms_by_object_id[collection_uri]
66
67 # The principals allowed to read the bucket and collection.
68 # (Note: ``write`` means ``read``)
69 read_principals = set(bucket_perms.get('read', []))
70 read_principals.update(bucket_perms.get('write', []))
71 read_principals.update(collection_perms.get('read', []))
72 read_principals.update(collection_perms.get('write', []))
73
74 # Create a history entry for each impacted record.
75 for (uri, target) in targets:
76 obj_id = target['id']
77 # Prepare the history entry attributes.
78 perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}
79 eventattrs = dict(**payload)
80 eventattrs.pop('timestamp', None) # Already in target `last_modified`.
81 eventattrs.pop('bucket_id', None)
82 eventattrs['%s_id' % resource_name] = obj_id
83 eventattrs['uri'] = uri
84 attrs = dict(date=datetime.now().isoformat(),
85 target={'data': target, 'permissions': perms},
86 **eventattrs)
87
88 # Create a record for the 'history' resource, whose parent_id is
89 # the bucket URI (c.f. views.py).
90 # Note: this will be rolledback if the transaction is rolledback.
91 entry = storage.create(parent_id=bucket_uri,
92 collection_id='history',
93 record=attrs)
94
95 # The read permission on the newly created history entry is the union
96 # of the record permissions with the one from bucket and collection.
97 entry_principals = set(read_principals)
98 entry_principals.update(perms.get('read', []))
99 entry_principals.update(perms.get('write', []))
100 entry_perms = {'read': list(entry_principals)}
101 # /buckets/{id}/history is the URI for the list of history entries.
102 entry_perm_id = '/buckets/%s/history/%s' % (bucket_id, entry['id'])
103 permission.replace_object_permissions(entry_perm_id, entry_perms)
104
[end of kinto/plugins/history/listener.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/plugins/history/listener.py b/kinto/plugins/history/listener.py
--- a/kinto/plugins/history/listener.py
+++ b/kinto/plugins/history/listener.py
@@ -1,3 +1,5 @@
+from pyramid.settings import aslist
+
from kinto.core.utils import instance_uri
from datetime import datetime
@@ -18,6 +20,9 @@
storage = event.request.registry.storage
permission = event.request.registry.permission
+ settings = event.request.registry.settings
+
+ excluded_resources = aslist(settings.get('history.exclude_resources', ''))
targets = []
for impacted in event.impacted_records:
@@ -31,12 +36,17 @@
bucket_id = obj_id
bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)
+ if bucket_uri in excluded_resources:
+ continue
+
if 'collection_id' in payload:
collection_id = payload['collection_id']
collection_uri = instance_uri(event.request,
'collection',
bucket_id=bucket_id,
id=collection_id)
+ if collection_uri in excluded_resources:
+ continue
# On POST .../records, the URI does not contain the newly created
# record id.
@@ -47,8 +57,15 @@
# Make sure the id is correct on grouped events.
parts[-1] = obj_id
uri = '/'.join(parts)
+
+ if uri in excluded_resources:
+ continue
+
targets.append((uri, target))
+ if not targets:
+ return # Nothing to do.
+
# Prepare a list of object ids to be fetched from permission backend,
# and fetch them all at once. Use a mapping for later convenience.
all_perms_objects_ids = [oid for (oid, _) in targets]
| {"golden_diff": "diff --git a/kinto/plugins/history/listener.py b/kinto/plugins/history/listener.py\n--- a/kinto/plugins/history/listener.py\n+++ b/kinto/plugins/history/listener.py\n@@ -1,3 +1,5 @@\n+from pyramid.settings import aslist\n+\n from kinto.core.utils import instance_uri\n from datetime import datetime\n \n@@ -18,6 +20,9 @@\n \n storage = event.request.registry.storage\n permission = event.request.registry.permission\n+ settings = event.request.registry.settings\n+\n+ excluded_resources = aslist(settings.get('history.exclude_resources', ''))\n \n targets = []\n for impacted in event.impacted_records:\n@@ -31,12 +36,17 @@\n bucket_id = obj_id\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n \n+ if bucket_uri in excluded_resources:\n+ continue\n+\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n+ if collection_uri in excluded_resources:\n+ continue\n \n # On POST .../records, the URI does not contain the newly created\n # record id.\n@@ -47,8 +57,15 @@\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n+\n+ if uri in excluded_resources:\n+ continue\n+\n targets.append((uri, target))\n \n+ if not targets:\n+ return # Nothing to do.\n+\n # Prepare a list of object ids to be fetched from permission backend,\n # and fetch them all at once. Use a mapping for later convenience.\n all_perms_objects_ids = [oid for (oid, _) in targets]\n", "issue": "Add a configuration of collections that the history plugin needs to keep track on\nToday the history plugin applies to all the collection but most of them don't need it.\r\nFor instance with the kinto-signer plugin we don't want to track history of changes in the preview and signed collection.\r\nThe same goes with the kinto-changes plugin when we don't want to track monitor changes modifications.\r\n\r\nThe same way we can configure the kinto-signer resources we want to track, we should be able to configure the list of collections we want the history plugin to track.\nAdd a configuration of collections that the history plugin needs to keep track on\nToday the history plugin applies to all the collection but most of them don't need it.\r\nFor instance with the kinto-signer plugin we don't want to track history of changes in the preview and signed collection.\r\nThe same goes with the kinto-changes plugin when we don't want to track monitor changes modifications.\r\n\r\nThe same way we can configure the kinto-signer resources we want to track, we should be able to configure the list of collections we want the history plugin to track.\n", "before_files": [{"content": "from kinto.core.utils import instance_uri\nfrom datetime import datetime\n\n\ndef on_resource_changed(event):\n \"\"\"\n Everytime an object is created/changed/deleted, we create an entry in the\n ``history`` resource. The entries are served as read-only in the\n :mod:`kinto.plugins.history.views` module.\n \"\"\"\n payload = event.payload\n resource_name = payload['resource_name']\n event_uri = payload['uri']\n\n bucket_id = None\n bucket_uri = None\n collection_uri = None\n\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n\n targets = []\n for impacted in event.impacted_records:\n target = impacted['new']\n obj_id = target['id']\n\n try:\n bucket_id = payload['bucket_id']\n except KeyError:\n # e.g. 
DELETE /buckets\n bucket_id = obj_id\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n # On POST .../records, the URI does not contain the newly created\n # record id.\n parts = event_uri.split('/')\n if resource_name in parts[-1]:\n parts.append(obj_id)\n else:\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n targets.append((uri, target))\n\n # Prepare a list of object ids to be fetched from permission backend,\n # and fetch them all at once. Use a mapping for later convenience.\n all_perms_objects_ids = [oid for (oid, _) in targets]\n all_perms_objects_ids.append(bucket_uri)\n if collection_uri is not None:\n all_perms_objects_ids.append(collection_uri)\n all_perms_objects_ids = list(set(all_perms_objects_ids))\n all_permissions = permission.get_objects_permissions(all_perms_objects_ids)\n perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))\n\n bucket_perms = perms_by_object_id[bucket_uri]\n collection_perms = {}\n if collection_uri is not None:\n collection_perms = perms_by_object_id[collection_uri]\n\n # The principals allowed to read the bucket and collection.\n # (Note: ``write`` means ``read``)\n read_principals = set(bucket_perms.get('read', []))\n read_principals.update(bucket_perms.get('write', []))\n read_principals.update(collection_perms.get('read', []))\n read_principals.update(collection_perms.get('write', []))\n\n # Create a history entry for each impacted record.\n for (uri, target) in targets:\n obj_id = target['id']\n # Prepare the history entry attributes.\n perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}\n eventattrs = dict(**payload)\n eventattrs.pop('timestamp', None) # Already in target `last_modified`.\n eventattrs.pop('bucket_id', None)\n eventattrs['%s_id' % resource_name] = obj_id\n eventattrs['uri'] = uri\n attrs = dict(date=datetime.now().isoformat(),\n target={'data': target, 'permissions': perms},\n **eventattrs)\n\n # Create a record for the 'history' resource, whose parent_id is\n # the bucket URI (c.f. views.py).\n # Note: this will be rolledback if the transaction is rolledback.\n entry = storage.create(parent_id=bucket_uri,\n collection_id='history',\n record=attrs)\n\n # The read permission on the newly created history entry is the union\n # of the record permissions with the one from bucket and collection.\n entry_principals = set(read_principals)\n entry_principals.update(perms.get('read', []))\n entry_principals.update(perms.get('write', []))\n entry_perms = {'read': list(entry_principals)}\n # /buckets/{id}/history is the URI for the list of history entries.\n entry_perm_id = '/buckets/%s/history/%s' % (bucket_id, entry['id'])\n permission.replace_object_permissions(entry_perm_id, entry_perms)\n", "path": "kinto/plugins/history/listener.py"}]} | 1,903 | 403 |
gh_patches_debug_25576 | rasdani/github-patches | git_diff | sublimelsp__LSP-1772 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
On Windows, the drive letter in file URIs returned by the server is lowercase.
**Describe the bug**
I tried both intelephense and pyright, they both returned lowercased drive letter thus I suspect it's a standard. (or maybe VSCode's LSP lib does it)
https://user-images.githubusercontent.com/6594915/123961095-96286c80-d9e2-11eb-8ada-0da9af754a55.mp4
In "Goto Definition...", this causes ST to open a file whose drive letter is in lowercase. And that may cause various mysterious problem sometimes... Or maybe, this should be fixed in ST core.
**To Reproduce**
Steps to reproduce the behavior:
1. Install LSP-intelephense with a Windows build ST
2. Open a PHP project
3. Make sure the definition file is not opened in a tab already
4. Do "Goto Definition"
5. The newly opened tab should have a lower drive letter
**Expected behavior**
The drive letter should be uppercase.
**Environment (please complete the following information):**
- OS: Win10 21H1 x64
- Sublime Text version: 4109
- LSP version: 4070-1.6.1
- Language servers used: intelephense, pyright
**Additional context**
This is a Windows-only issue as it's case-insensitive.
</issue>
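A small standalone sketch of the normalization being requested, applied to the path string after `url2pathname()` has produced it; the helper name is made up.

```python
import re


def normalize_drive_letter(path: str) -> str:
    """Uppercase a leading drive letter, e.g. 'c:\\Users\\x' -> 'C:\\Users\\x'."""
    return re.sub(r"^([a-z]):", lambda m: m.group(1).upper() + ":", path)


print(normalize_drive_letter(r"c:\Users\dev\project\main.py"))  # C:\Users\dev\project\main.py
```

Matching only a leading `[a-z]:` keeps UNC paths and non-Windows paths untouched.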
<code>
[start of plugin/core/url.py]
1 from .typing import Any, Tuple
2 from urllib.parse import quote
3 from urllib.parse import urljoin
4 from urllib.parse import urlparse
5 from urllib.request import pathname2url
6 from urllib.request import url2pathname
7 import os
8 import re
9
10 import sublime
11
12
13 def filename_to_uri(file_name: str) -> str:
14 """
15 Convert a file name obtained from view.file_name() into an URI
16 """
17 prefix = sublime.installed_packages_path()
18 if file_name.startswith(prefix):
19 return _to_resource_uri(file_name, prefix)
20 prefix = sublime.packages_path()
21 if file_name.startswith(prefix) and not os.path.exists(file_name):
22 return _to_resource_uri(file_name, prefix)
23 path = pathname2url(file_name)
24 re.sub(r"^([A-Z]):/", _lowercase_driveletter, path)
25 return urljoin("file:", path)
26
27
28 def view_to_uri(view: sublime.View) -> str:
29 file_name = view.file_name()
30 if not file_name:
31 return "buffer://sublime/{}".format(view.buffer_id())
32 return filename_to_uri(file_name)
33
34
35 def uri_to_filename(uri: str) -> str:
36 """
37 DEPRECATED: An URI associated to a view does not necessarily have a "file:" scheme.
38 Use urllib.parse.urlparse to determine the scheme and go from there.
39 Use urllib.parse.unquote to unquote the path.
40 """
41 parsed = urlparse(uri)
42 assert parsed.scheme == "file"
43 if os.name == 'nt':
44 # url2pathname does not understand %3A (VS Code's encoding forced on all servers :/)
45 return url2pathname(parsed.path).strip('\\')
46 else:
47 return url2pathname(parsed.path)
48
49
50 def parse_uri(uri: str) -> Tuple[str, str]:
51 """
52 Parses an URI into a tuple where the first element is the URI scheme. The
53 second element is the local filesystem path if the URI is a file URI,
54 otherwise the second element is the original URI.
55 """
56 parsed = urlparse(uri)
57 if parsed.scheme == "file":
58 if os.name == 'nt':
59 # TODO: this is wrong for UNC paths
60 return parsed.scheme, url2pathname(parsed.path).strip('\\')
61 return parsed.scheme, url2pathname(parsed.path)
62 return parsed.scheme, uri
63
64
65 def _to_resource_uri(path: str, prefix: str) -> str:
66 """
67 Terrible hacks from ST core leak into packages as well.
68
69 See: https://github.com/sublimehq/sublime_text/issues/3742
70 """
71 return "res://Packages{}".format(quote(path[len(prefix):]))
72
73
74 def _lowercase_driveletter(match: Any) -> str:
75 """
76 For compatibility with certain other language clients.
77 """
78 return "{}:/".format(match.group(1).lower())
79
[end of plugin/core/url.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/core/url.py b/plugin/core/url.py
--- a/plugin/core/url.py
+++ b/plugin/core/url.py
@@ -21,7 +21,6 @@
if file_name.startswith(prefix) and not os.path.exists(file_name):
return _to_resource_uri(file_name, prefix)
path = pathname2url(file_name)
- re.sub(r"^([A-Z]):/", _lowercase_driveletter, path)
return urljoin("file:", path)
@@ -42,7 +41,8 @@
assert parsed.scheme == "file"
if os.name == 'nt':
# url2pathname does not understand %3A (VS Code's encoding forced on all servers :/)
- return url2pathname(parsed.path).strip('\\')
+ path = url2pathname(parsed.path).strip('\\')
+ return re.sub(r"^([a-z]):", _uppercase_driveletter, path)
else:
return url2pathname(parsed.path)
@@ -71,8 +71,8 @@
return "res://Packages{}".format(quote(path[len(prefix):]))
-def _lowercase_driveletter(match: Any) -> str:
+def _uppercase_driveletter(match: Any) -> str:
"""
- For compatibility with certain other language clients.
+ For compatibility with Sublime's VCS status in the status bar.
"""
- return "{}:/".format(match.group(1).lower())
+ return "{}:".format(match.group(1).upper())
| {"golden_diff": "diff --git a/plugin/core/url.py b/plugin/core/url.py\n--- a/plugin/core/url.py\n+++ b/plugin/core/url.py\n@@ -21,7 +21,6 @@\n if file_name.startswith(prefix) and not os.path.exists(file_name):\n return _to_resource_uri(file_name, prefix)\n path = pathname2url(file_name)\n- re.sub(r\"^([A-Z]):/\", _lowercase_driveletter, path)\n return urljoin(\"file:\", path)\n \n \n@@ -42,7 +41,8 @@\n assert parsed.scheme == \"file\"\n if os.name == 'nt':\n # url2pathname does not understand %3A (VS Code's encoding forced on all servers :/)\n- return url2pathname(parsed.path).strip('\\\\')\n+ path = url2pathname(parsed.path).strip('\\\\')\n+ return re.sub(r\"^([a-z]):\", _uppercase_driveletter, path)\n else:\n return url2pathname(parsed.path)\n \n@@ -71,8 +71,8 @@\n return \"res://Packages{}\".format(quote(path[len(prefix):]))\n \n \n-def _lowercase_driveletter(match: Any) -> str:\n+def _uppercase_driveletter(match: Any) -> str:\n \"\"\"\n- For compatibility with certain other language clients.\n+ For compatibility with Sublime's VCS status in the status bar.\n \"\"\"\n- return \"{}:/\".format(match.group(1).lower())\n+ return \"{}:\".format(match.group(1).upper())\n", "issue": "On Windows, the drive letter in server responsed file URIs are lowercase.\n**Describe the bug**\r\n\r\nI tried both intelephense and pyright, they both returned lowercased drive letter thus I suspect it's a standard. (or maybe VSCode's LSP lib does it)\r\n\r\nhttps://user-images.githubusercontent.com/6594915/123961095-96286c80-d9e2-11eb-8ada-0da9af754a55.mp4\r\n\r\nIn \"Goto Definition...\", this causes ST to open a file whose drive letter is in lowercase. And that may cause various mysterious problem sometimes... Or maybe, this should be fixed in ST core.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Install LSP-intelephense with a Windows build ST\r\n2. Open a PHP project\r\n3. Make sure the definition file is not opened in a tab already\r\n4. Do \"Goto Definition\"\r\n5. 
The newly opened tab should have a lower drive letter\r\n\r\n**Expected behavior**\r\n\r\nThe drive letter should be uppercase.\r\n\r\n**Environment (please complete the following information):**\r\n- OS: Win10 21H1 x64\r\n- Sublime Text version: 4109\r\n- LSP version: 4070-1.6.1\r\n- Language servers used: intelephense, pyright\r\n\r\n**Additional context**\r\n\r\nThis is a Windows-only issue as it's case-insensitive.\r\n\n", "before_files": [{"content": "from .typing import Any, Tuple\nfrom urllib.parse import quote\nfrom urllib.parse import urljoin\nfrom urllib.parse import urlparse\nfrom urllib.request import pathname2url\nfrom urllib.request import url2pathname\nimport os\nimport re\n\nimport sublime\n\n\ndef filename_to_uri(file_name: str) -> str:\n \"\"\"\n Convert a file name obtained from view.file_name() into an URI\n \"\"\"\n prefix = sublime.installed_packages_path()\n if file_name.startswith(prefix):\n return _to_resource_uri(file_name, prefix)\n prefix = sublime.packages_path()\n if file_name.startswith(prefix) and not os.path.exists(file_name):\n return _to_resource_uri(file_name, prefix)\n path = pathname2url(file_name)\n re.sub(r\"^([A-Z]):/\", _lowercase_driveletter, path)\n return urljoin(\"file:\", path)\n\n\ndef view_to_uri(view: sublime.View) -> str:\n file_name = view.file_name()\n if not file_name:\n return \"buffer://sublime/{}\".format(view.buffer_id())\n return filename_to_uri(file_name)\n\n\ndef uri_to_filename(uri: str) -> str:\n \"\"\"\n DEPRECATED: An URI associated to a view does not necessarily have a \"file:\" scheme.\n Use urllib.parse.urlparse to determine the scheme and go from there.\n Use urllib.parse.unquote to unquote the path.\n \"\"\"\n parsed = urlparse(uri)\n assert parsed.scheme == \"file\"\n if os.name == 'nt':\n # url2pathname does not understand %3A (VS Code's encoding forced on all servers :/)\n return url2pathname(parsed.path).strip('\\\\')\n else:\n return url2pathname(parsed.path)\n\n\ndef parse_uri(uri: str) -> Tuple[str, str]:\n \"\"\"\n Parses an URI into a tuple where the first element is the URI scheme. The\n second element is the local filesystem path if the URI is a file URI,\n otherwise the second element is the original URI.\n \"\"\"\n parsed = urlparse(uri)\n if parsed.scheme == \"file\":\n if os.name == 'nt':\n # TODO: this is wrong for UNC paths\n return parsed.scheme, url2pathname(parsed.path).strip('\\\\')\n return parsed.scheme, url2pathname(parsed.path)\n return parsed.scheme, uri\n\n\ndef _to_resource_uri(path: str, prefix: str) -> str:\n \"\"\"\n Terrible hacks from ST core leak into packages as well.\n\n See: https://github.com/sublimehq/sublime_text/issues/3742\n \"\"\"\n return \"res://Packages{}\".format(quote(path[len(prefix):]))\n\n\ndef _lowercase_driveletter(match: Any) -> str:\n \"\"\"\n For compatibility with certain other language clients.\n \"\"\"\n return \"{}:/\".format(match.group(1).lower())\n", "path": "plugin/core/url.py"}]} | 1,612 | 322 |
gh_patches_debug_2968 | rasdani/github-patches | git_diff | ibis-project__ibis-2426 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fix bigquery version
https://dev.azure.com/ibis-project/ibis/_build/results?buildId=3396&view=logs&j=8f09edc2-e3b7-52de-126a-0225c4f3efa1&t=78a72aec-b398-558e-7c0d-2d33604b9e53
I think we need to limit the upper bound of bigquery library here.
</issue>
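A sketch of what such a pin could look like in `setup.py`; the bound shown is illustrative rather than the exact constraint the project will choose.

```python
# Keep the 1.x BigQuery client until the code is ported to the 2.x API.
bigquery_requires = [
    'google-cloud-bigquery>=1.12.0,<2.0.0',
    'pydata-google-auth',
]
```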
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """Ibis setup module."""
3 import pathlib
4 import sys
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10 LONG_DESCRIPTION = """
11 Ibis is a productivity-centric Python big data framework.
12
13 See http://ibis-project.org
14 """
15
16 VERSION = sys.version_info.major, sys.version_info.minor
17
18 impala_requires = ['hdfs>=2.0.16', 'sqlalchemy>=1.1,<1.3.7', 'requests']
19 impala_requires.append('impyla[kerberos]>=0.15.0')
20
21 sqlite_requires = ['sqlalchemy>=1.1,<1.3.7']
22 postgres_requires = sqlite_requires + ['psycopg2']
23 mysql_requires = sqlite_requires + ['pymysql']
24
25 omniscidb_requires = ['pymapd>=0.12.0']
26 kerberos_requires = ['requests-kerberos']
27 visualization_requires = ['graphviz']
28 clickhouse_requires = [
29 'clickhouse-driver>=0.1.3',
30 'clickhouse-cityhash',
31 ]
32 bigquery_requires = ['google-cloud-bigquery>=1.12.0', 'pydata-google-auth']
33 hdf5_requires = ['tables>=3.0.0']
34
35 parquet_requires = ['pyarrow>=0.12.0']
36 spark_requires = ['pyspark>=2.4.3']
37
38 geospatial_requires = ['geoalchemy2', 'geopandas', 'shapely']
39
40 all_requires = (
41 impala_requires
42 + postgres_requires
43 + omniscidb_requires
44 + mysql_requires
45 + kerberos_requires
46 + visualization_requires
47 + clickhouse_requires
48 + bigquery_requires
49 + hdf5_requires
50 + parquet_requires
51 + spark_requires
52 + geospatial_requires
53 )
54
55 develop_requires = all_requires + [
56 'black',
57 'click',
58 'pydocstyle==4.0.1',
59 'flake8',
60 'isort',
61 'mypy',
62 'pre-commit',
63 'pygit2',
64 'pytest>=4.5',
65 ]
66
67 install_requires = [
68 line.strip()
69 for line in pathlib.Path(__file__)
70 .parent.joinpath('requirements.txt')
71 .read_text()
72 .splitlines()
73 ]
74
75 setup(
76 name='ibis-framework',
77 url='https://github.com/ibis-project/ibis',
78 packages=find_packages(),
79 version=versioneer.get_version(),
80 cmdclass=versioneer.get_cmdclass(),
81 install_requires=install_requires,
82 python_requires='>=3.7',
83 extras_require={
84 'all': all_requires,
85 'develop': develop_requires,
86 'impala': impala_requires,
87 'kerberos': kerberos_requires,
88 'postgres': postgres_requires,
89 'omniscidb': omniscidb_requires,
90 'mysql': mysql_requires,
91 'sqlite': sqlite_requires,
92 'visualization': visualization_requires,
93 'clickhouse': clickhouse_requires,
94 'bigquery': bigquery_requires,
95 'hdf5': hdf5_requires,
96 'parquet': parquet_requires,
97 'spark': spark_requires,
98 'geospatial': geospatial_requires,
99 },
100 description="Productivity-centric Python Big Data Framework",
101 long_description=LONG_DESCRIPTION,
102 classifiers=[
103 'Development Status :: 4 - Beta',
104 'Operating System :: OS Independent',
105 'Intended Audience :: Science/Research',
106 'Programming Language :: Python',
107 'Programming Language :: Python :: 3',
108 'Topic :: Scientific/Engineering',
109 ],
110 license='Apache License, Version 2.0',
111 maintainer="Phillip Cloud",
112 maintainer_email="[email protected]",
113 )
114
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,10 @@
'clickhouse-driver>=0.1.3',
'clickhouse-cityhash',
]
-bigquery_requires = ['google-cloud-bigquery>=1.12.0', 'pydata-google-auth']
+bigquery_requires = [
+ 'google-cloud-bigquery[bqstorage,pandas]>=1.12.0,<2.0.0dev',
+ 'pydata-google-auth',
+]
hdf5_requires = ['tables>=3.0.0']
parquet_requires = ['pyarrow>=0.12.0']
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,10 @@\n 'clickhouse-driver>=0.1.3',\n 'clickhouse-cityhash',\n ]\n-bigquery_requires = ['google-cloud-bigquery>=1.12.0', 'pydata-google-auth']\n+bigquery_requires = [\n+ 'google-cloud-bigquery[bqstorage,pandas]>=1.12.0,<2.0.0dev',\n+ 'pydata-google-auth',\n+]\n hdf5_requires = ['tables>=3.0.0']\n \n parquet_requires = ['pyarrow>=0.12.0']\n", "issue": "fix bigquery version\nhttps://dev.azure.com/ibis-project/ibis/_build/results?buildId=3396&view=logs&j=8f09edc2-e3b7-52de-126a-0225c4f3efa1&t=78a72aec-b398-558e-7c0d-2d33604b9e53\r\n\r\nI think we need to limit the upper bound of bigquery library here.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Ibis setup module.\"\"\"\nimport pathlib\nimport sys\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"\nIbis is a productivity-centric Python big data framework.\n\nSee http://ibis-project.org\n\"\"\"\n\nVERSION = sys.version_info.major, sys.version_info.minor\n\nimpala_requires = ['hdfs>=2.0.16', 'sqlalchemy>=1.1,<1.3.7', 'requests']\nimpala_requires.append('impyla[kerberos]>=0.15.0')\n\nsqlite_requires = ['sqlalchemy>=1.1,<1.3.7']\npostgres_requires = sqlite_requires + ['psycopg2']\nmysql_requires = sqlite_requires + ['pymysql']\n\nomniscidb_requires = ['pymapd>=0.12.0']\nkerberos_requires = ['requests-kerberos']\nvisualization_requires = ['graphviz']\nclickhouse_requires = [\n 'clickhouse-driver>=0.1.3',\n 'clickhouse-cityhash',\n]\nbigquery_requires = ['google-cloud-bigquery>=1.12.0', 'pydata-google-auth']\nhdf5_requires = ['tables>=3.0.0']\n\nparquet_requires = ['pyarrow>=0.12.0']\nspark_requires = ['pyspark>=2.4.3']\n\ngeospatial_requires = ['geoalchemy2', 'geopandas', 'shapely']\n\nall_requires = (\n impala_requires\n + postgres_requires\n + omniscidb_requires\n + mysql_requires\n + kerberos_requires\n + visualization_requires\n + clickhouse_requires\n + bigquery_requires\n + hdf5_requires\n + parquet_requires\n + spark_requires\n + geospatial_requires\n)\n\ndevelop_requires = all_requires + [\n 'black',\n 'click',\n 'pydocstyle==4.0.1',\n 'flake8',\n 'isort',\n 'mypy',\n 'pre-commit',\n 'pygit2',\n 'pytest>=4.5',\n]\n\ninstall_requires = [\n line.strip()\n for line in pathlib.Path(__file__)\n .parent.joinpath('requirements.txt')\n .read_text()\n .splitlines()\n]\n\nsetup(\n name='ibis-framework',\n url='https://github.com/ibis-project/ibis',\n packages=find_packages(),\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n install_requires=install_requires,\n python_requires='>=3.7',\n extras_require={\n 'all': all_requires,\n 'develop': develop_requires,\n 'impala': impala_requires,\n 'kerberos': kerberos_requires,\n 'postgres': postgres_requires,\n 'omniscidb': omniscidb_requires,\n 'mysql': mysql_requires,\n 'sqlite': sqlite_requires,\n 'visualization': visualization_requires,\n 'clickhouse': clickhouse_requires,\n 'bigquery': bigquery_requires,\n 'hdf5': hdf5_requires,\n 'parquet': parquet_requires,\n 'spark': spark_requires,\n 'geospatial': geospatial_requires,\n },\n description=\"Productivity-centric Python Big Data Framework\",\n long_description=LONG_DESCRIPTION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n ],\n license='Apache License, Version 2.0',\n 
maintainer=\"Phillip Cloud\",\n maintainer_email=\"[email protected]\",\n)\n", "path": "setup.py"}]} | 1,682 | 149 |
gh_patches_debug_28757 | rasdani/github-patches | git_diff | WordPress__openverse-api-1083 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add database connectivity to healthcheck endpoint
## Problem
<!-- Describe a problem solved by this feature; or delete the section entirely. -->
The healthcheck endpoint should check that the database is accessible. If the db is inaccessible, the service is definitively not healthy.
## Description
<!-- Describe the feature and how it solves the problem. -->
Add another check (in addition to the ES check) for the database connectivity. Calling `django.db.connection.ensure_connection()` should be sufficient. It raises an error when the database connection is unavailable.
## Alternatives
<!-- Describe any alternative solutions or features you have considered. How is this feature better? -->
## Additional context
<!-- Add any other context about the feature here; or delete the section entirely. -->
<!-- If you would like to work on this, please comment below separately. -->
</issue>
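A minimal sketch of the suggested check, reusing the exception style already present in the view module; the names are illustrative and a configured Django settings module is assumed.

```python
from django.db import connection
from rest_framework import status
from rest_framework.exceptions import APIException


class DatabaseHealthcheckException(APIException):
    status_code = status.HTTP_503_SERVICE_UNAVAILABLE


def check_db() -> None:
    """Raise a 503-style error when the default database is unreachable."""
    try:
        connection.ensure_connection()
    except Exception as err:
        raise DatabaseHealthcheckException("db_unavailable") from err
```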
<code>
[start of api/catalog/api/views/health_views.py]
1 from django.conf import settings
2 from rest_framework import status
3 from rest_framework.exceptions import APIException
4 from rest_framework.request import Request
5 from rest_framework.response import Response
6 from rest_framework.views import APIView
7
8
9 class ElasticsearchHealthcheckException(APIException):
10 status_code = status.HTTP_503_SERVICE_UNAVAILABLE
11
12
13 class HealthCheck(APIView):
14 """
15 Return a "200 OK" response if the server is running normally, 503 otherwise.
16
17 This endpoint is used in production to ensure that the server should receive
18 traffic. If no response is provided, the server is deregistered from the
19 load balancer and destroyed.
20 """
21
22 swagger_schema = None
23
24 def _check_es(self) -> Response | None:
25 """Check ES cluster health and raise an exception if ES is not healthy."""
26
27 es_health = settings.ES.cluster.health(timeout="5s")
28
29 if es_health["timed_out"]:
30 raise ElasticsearchHealthcheckException("es_timed_out")
31
32 if (status := es_health["status"]) != "green":
33 raise ElasticsearchHealthcheckException(f"es_status_{status}")
34
35 def get(self, request: Request):
36 if "check_es" in request.query_params:
37 self._check_es()
38
39 return Response({"status": "200 OK"}, status=200)
40
[end of api/catalog/api/views/health_views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api/catalog/api/views/health_views.py b/api/catalog/api/views/health_views.py
--- a/api/catalog/api/views/health_views.py
+++ b/api/catalog/api/views/health_views.py
@@ -1,4 +1,5 @@
from django.conf import settings
+from django.db import connection
from rest_framework import status
from rest_framework.exceptions import APIException
from rest_framework.request import Request
@@ -21,19 +22,33 @@
swagger_schema = None
- def _check_es(self) -> Response | None:
- """Check ES cluster health and raise an exception if ES is not healthy."""
+ @staticmethod
+ def _check_db() -> None:
+ """
+ Check that the database is available.
+ Returns nothing if everything is OK, throws error otherwise.
+ """
+ connection.ensure_connection()
+
+ @staticmethod
+ def _check_es() -> None:
+ """
+ Check Elasticsearch cluster health.
+
+ Raises an exception if ES is not healthy.
+ """
es_health = settings.ES.cluster.health(timeout="5s")
if es_health["timed_out"]:
raise ElasticsearchHealthcheckException("es_timed_out")
- if (status := es_health["status"]) != "green":
- raise ElasticsearchHealthcheckException(f"es_status_{status}")
+ if (es_status := es_health["status"]) != "green":
+ raise ElasticsearchHealthcheckException(f"es_status_{es_status}")
def get(self, request: Request):
if "check_es" in request.query_params:
self._check_es()
+ self._check_db()
return Response({"status": "200 OK"}, status=200)
| {"golden_diff": "diff --git a/api/catalog/api/views/health_views.py b/api/catalog/api/views/health_views.py\n--- a/api/catalog/api/views/health_views.py\n+++ b/api/catalog/api/views/health_views.py\n@@ -1,4 +1,5 @@\n from django.conf import settings\n+from django.db import connection\n from rest_framework import status\n from rest_framework.exceptions import APIException\n from rest_framework.request import Request\n@@ -21,19 +22,33 @@\n \n swagger_schema = None\n \n- def _check_es(self) -> Response | None:\n- \"\"\"Check ES cluster health and raise an exception if ES is not healthy.\"\"\"\n+ @staticmethod\n+ def _check_db() -> None:\n+ \"\"\"\n+ Check that the database is available.\n \n+ Returns nothing if everything is OK, throws error otherwise.\n+ \"\"\"\n+ connection.ensure_connection()\n+\n+ @staticmethod\n+ def _check_es() -> None:\n+ \"\"\"\n+ Check Elasticsearch cluster health.\n+\n+ Raises an exception if ES is not healthy.\n+ \"\"\"\n es_health = settings.ES.cluster.health(timeout=\"5s\")\n \n if es_health[\"timed_out\"]:\n raise ElasticsearchHealthcheckException(\"es_timed_out\")\n \n- if (status := es_health[\"status\"]) != \"green\":\n- raise ElasticsearchHealthcheckException(f\"es_status_{status}\")\n+ if (es_status := es_health[\"status\"]) != \"green\":\n+ raise ElasticsearchHealthcheckException(f\"es_status_{es_status}\")\n \n def get(self, request: Request):\n if \"check_es\" in request.query_params:\n self._check_es()\n+ self._check_db()\n \n return Response({\"status\": \"200 OK\"}, status=200)\n", "issue": "Add database connectivity to healthcheck endpoint\n## Problem\r\n\r\n<!-- Describe a problem solved by this feature; or delete the section entirely. -->\r\nThe healtcheck endpoint should check that the database is accessible. If the db is inaccessible, the service is definitively not healthy.\r\n\r\n## Description\r\n\r\n<!-- Describe the feature and how it solves the problem. -->\r\nAdd another check (in addition to the ES check) for the database connectivity. Calling `django.db.connection.ensure_connection()` should be sufficient. It raises an error when the database connection is unavailable.\r\n\r\n## Alternatives\r\n\r\n<!-- Describe any alternative solutions or features you have considered. How is this feature better? -->\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the feature here; or delete the section entirely. -->\r\n\r\n<!-- If you would like to work on this, please comment below separately. -->\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom rest_framework import status\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n\nclass ElasticsearchHealthcheckException(APIException):\n status_code = status.HTTP_503_SERVICE_UNAVAILABLE\n\n\nclass HealthCheck(APIView):\n \"\"\"\n Return a \"200 OK\" response if the server is running normally, 503 otherwise.\n\n This endpoint is used in production to ensure that the server should receive\n traffic. 
If no response is provided, the server is deregistered from the\n load balancer and destroyed.\n \"\"\"\n\n swagger_schema = None\n\n def _check_es(self) -> Response | None:\n \"\"\"Check ES cluster health and raise an exception if ES is not healthy.\"\"\"\n\n es_health = settings.ES.cluster.health(timeout=\"5s\")\n\n if es_health[\"timed_out\"]:\n raise ElasticsearchHealthcheckException(\"es_timed_out\")\n\n if (status := es_health[\"status\"]) != \"green\":\n raise ElasticsearchHealthcheckException(f\"es_status_{status}\")\n\n def get(self, request: Request):\n if \"check_es\" in request.query_params:\n self._check_es()\n\n return Response({\"status\": \"200 OK\"}, status=200)\n", "path": "api/catalog/api/views/health_views.py"}]} | 1,071 | 381 |
gh_patches_debug_40029 | rasdani/github-patches | git_diff | watchdogpolska__small_eod-919 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incomplete list of API endpoints at /api
At `/api` (e.g. https://dev.small-eod.siecobywatelska.pl/api/ ) we do not have a complete list of API endpoints. A complete list is available through ReDoc, e.g. at https://dev.small-eod.siecobywatelska.pl/api/redoc/ .
We should fix this, because it creates a risk of giving a misleading impression of the API's scope.
</issue>
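For background, the API root page generated by DRF's `DefaultRouter` only lists viewsets registered on the router itself; URLconfs pulled in with `include()` stay reachable but never show up there, which is the gap described above. A minimal illustration, trimmed to one registered viewset and one included module (assuming the imports shown in the project's `urls.py`):

```python
from django.urls import include, path
from rest_framework import routers

from small_eod.tags.views import TagViewSet

router = routers.DefaultRouter()
router.register(r"tags", TagViewSet)  # listed on the /api/ root page

urlpatterns = [
    # Reachable, but invisible on the DefaultRouter root view:
    path("api/", include("small_eod.cases.urls")),
    path("api/", include(router.urls)),
]
```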
<code>
[start of backend-project/config/urls.py]
1 """small_eod URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/3.0/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: path('', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
12 Including another URLconf
13 1. Import the include() function: from django.urls import include, path
14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
15 """
16 from django.conf import settings
17 from django.conf.urls.static import static
18 from django.contrib import admin
19 from django.urls import include, path, re_path
20 from drf_yasg2.views import get_schema_view
21 from rest_framework import permissions, routers
22
23 from small_eod.channels.views import ChannelViewSet
24 from small_eod.events.views import EventViewSet
25 from small_eod.institutions.views import InstitutionViewSet
26 from small_eod.notes.views import NoteViewSet
27 from small_eod.tags.views import TagViewSet
28 from small_eod.users.views import UserViewSet
29
30 from .swagger import info
31
32 router = routers.DefaultRouter()
33 router.register(r"channels", ChannelViewSet)
34 router.register(r"events", EventViewSet)
35 router.register(r"institutions", InstitutionViewSet)
36 router.register(r"notes", NoteViewSet)
37 router.register(r"tags", TagViewSet)
38 router.register(r"users", UserViewSet)
39
40 schema_view = get_schema_view(
41 info,
42 # validators=['flex', 'ssv'],
43 public=True,
44 permission_classes=(permissions.AllowAny,),
45 )
46
47 urlpatterns = [
48 path("admin/", admin.site.urls),
49 path("api/", include("small_eod.collections.urls")),
50 path("api/", include("small_eod.cases.urls")),
51 path("api/", include("small_eod.letters.urls")),
52 path("api/", include("small_eod.features.urls")),
53 path("api/", include("small_eod.administrative_units.urls")),
54 path("api/", include("small_eod.autocomplete.urls")),
55 path("api/docs/", schema_view.with_ui("swagger"), name="api_docs"),
56 path("api/redoc/", schema_view.with_ui("redoc"), name="api_redocs"),
57 re_path(
58 "^api/swagger(?P<format>.json|.yaml)$",
59 schema_view.without_ui(),
60 name="schema_swagger",
61 ),
62 path("api/", include(router.urls)),
63 ]
64
65
66 if settings.DEBUG:
67 import debug_toolbar
68
69 urlpatterns += [
70 path("__debug__/", include(debug_toolbar.urls)),
71 ]
72
73 urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
74 urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_URL)
75
[end of backend-project/config/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend-project/config/urls.py b/backend-project/config/urls.py
--- a/backend-project/config/urls.py
+++ b/backend-project/config/urls.py
@@ -13,6 +13,9 @@
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
+
+import re
+
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
@@ -29,13 +32,56 @@
from .swagger import info
-router = routers.DefaultRouter()
+
+class BetterDefaultRouter(routers.DefaultRouter):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.include_urls = []
+ self.api_root_dict = {}
+
+ def get_urls(self):
+ urls = super().get_urls()
+ urls.extend(self.include_urls)
+ return urls
+
+ def include(self, module):
+ urlpatterns = getattr(include(module)[0], "urlpatterns")
+ viewnames = set()
+ for urlpattern in urlpatterns:
+ self.include_urls.append(urlpattern)
+ if hasattr(urlpattern, "url_patterns"):
+ viewnames.update([pattern.name for pattern in urlpattern.url_patterns])
+ elif hasattr(urlpattern, "name"):
+ viewnames.add(urlpattern.name)
+ self.api_root_dict.update(
+ {re.sub(r"-list$", "", viewname): viewname for viewname in viewnames}
+ )
+
+ def get_api_root_view(self, api_urls=None):
+ api_root_dict = {}
+ list_name = self.routes[0].name
+
+ for prefix, viewset, basename in self.registry:
+ api_root_dict[prefix] = list_name.format(basename=basename)
+ api_root_dict.update(self.api_root_dict)
+
+ return self.APIRootView.as_view(api_root_dict=api_root_dict)
+
+
+router = BetterDefaultRouter()
+
router.register(r"channels", ChannelViewSet)
router.register(r"events", EventViewSet)
router.register(r"institutions", InstitutionViewSet)
router.register(r"notes", NoteViewSet)
router.register(r"tags", TagViewSet)
router.register(r"users", UserViewSet)
+router.include("small_eod.cases.urls")
+router.include("small_eod.features.urls")
+router.include("small_eod.collections.urls")
+router.include("small_eod.letters.urls")
+router.include("small_eod.administrative_units.urls")
+router.include("small_eod.autocomplete.urls")
schema_view = get_schema_view(
info,
@@ -46,12 +92,6 @@
urlpatterns = [
path("admin/", admin.site.urls),
- path("api/", include("small_eod.collections.urls")),
- path("api/", include("small_eod.cases.urls")),
- path("api/", include("small_eod.letters.urls")),
- path("api/", include("small_eod.features.urls")),
- path("api/", include("small_eod.administrative_units.urls")),
- path("api/", include("small_eod.autocomplete.urls")),
path("api/docs/", schema_view.with_ui("swagger"), name="api_docs"),
path("api/redoc/", schema_view.with_ui("redoc"), name="api_redocs"),
re_path(
@@ -62,7 +102,6 @@
path("api/", include(router.urls)),
]
-
if settings.DEBUG:
import debug_toolbar
| {"golden_diff": "diff --git a/backend-project/config/urls.py b/backend-project/config/urls.py\n--- a/backend-project/config/urls.py\n+++ b/backend-project/config/urls.py\n@@ -13,6 +13,9 @@\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n \"\"\"\n+\n+import re\n+\n from django.conf import settings\n from django.conf.urls.static import static\n from django.contrib import admin\n@@ -29,13 +32,56 @@\n \n from .swagger import info\n \n-router = routers.DefaultRouter()\n+\n+class BetterDefaultRouter(routers.DefaultRouter):\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.include_urls = []\n+ self.api_root_dict = {}\n+\n+ def get_urls(self):\n+ urls = super().get_urls()\n+ urls.extend(self.include_urls)\n+ return urls\n+\n+ def include(self, module):\n+ urlpatterns = getattr(include(module)[0], \"urlpatterns\")\n+ viewnames = set()\n+ for urlpattern in urlpatterns:\n+ self.include_urls.append(urlpattern)\n+ if hasattr(urlpattern, \"url_patterns\"):\n+ viewnames.update([pattern.name for pattern in urlpattern.url_patterns])\n+ elif hasattr(urlpattern, \"name\"):\n+ viewnames.add(urlpattern.name)\n+ self.api_root_dict.update(\n+ {re.sub(r\"-list$\", \"\", viewname): viewname for viewname in viewnames}\n+ )\n+\n+ def get_api_root_view(self, api_urls=None):\n+ api_root_dict = {}\n+ list_name = self.routes[0].name\n+\n+ for prefix, viewset, basename in self.registry:\n+ api_root_dict[prefix] = list_name.format(basename=basename)\n+ api_root_dict.update(self.api_root_dict)\n+\n+ return self.APIRootView.as_view(api_root_dict=api_root_dict)\n+\n+\n+router = BetterDefaultRouter()\n+\n router.register(r\"channels\", ChannelViewSet)\n router.register(r\"events\", EventViewSet)\n router.register(r\"institutions\", InstitutionViewSet)\n router.register(r\"notes\", NoteViewSet)\n router.register(r\"tags\", TagViewSet)\n router.register(r\"users\", UserViewSet)\n+router.include(\"small_eod.cases.urls\")\n+router.include(\"small_eod.features.urls\")\n+router.include(\"small_eod.collections.urls\")\n+router.include(\"small_eod.letters.urls\")\n+router.include(\"small_eod.administrative_units.urls\")\n+router.include(\"small_eod.autocomplete.urls\")\n \n schema_view = get_schema_view(\n info,\n@@ -46,12 +92,6 @@\n \n urlpatterns = [\n path(\"admin/\", admin.site.urls),\n- path(\"api/\", include(\"small_eod.collections.urls\")),\n- path(\"api/\", include(\"small_eod.cases.urls\")),\n- path(\"api/\", include(\"small_eod.letters.urls\")),\n- path(\"api/\", include(\"small_eod.features.urls\")),\n- path(\"api/\", include(\"small_eod.administrative_units.urls\")),\n- path(\"api/\", include(\"small_eod.autocomplete.urls\")),\n path(\"api/docs/\", schema_view.with_ui(\"swagger\"), name=\"api_docs\"),\n path(\"api/redoc/\", schema_view.with_ui(\"redoc\"), name=\"api_redocs\"),\n re_path(\n@@ -62,7 +102,6 @@\n path(\"api/\", include(router.urls)),\n ]\n \n-\n if settings.DEBUG:\n import debug_toolbar\n", "issue": "Niekompletny wykaz endpoint\u00f3w API w /api\nNa `/api` (np. https://dev.small-eod.siecobywatelska.pl/api/ ) nie mamy kompletnego wykazu endpoint\u00f3w API. Kompletny jest dost\u0119pny przez ReDoc np. na https://dev.small-eod.siecobywatelska.pl/api/redoc/ .\r\n\r\nPowinni\u015bmy to naprawi\u0107, bo wprowadza ryzyko mylnego wra\u017cenia co do zakresu API.\n", "before_files": [{"content": "\"\"\"small_eod URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom drf_yasg2.views import get_schema_view\nfrom rest_framework import permissions, routers\n\nfrom small_eod.channels.views import ChannelViewSet\nfrom small_eod.events.views import EventViewSet\nfrom small_eod.institutions.views import InstitutionViewSet\nfrom small_eod.notes.views import NoteViewSet\nfrom small_eod.tags.views import TagViewSet\nfrom small_eod.users.views import UserViewSet\n\nfrom .swagger import info\n\nrouter = routers.DefaultRouter()\nrouter.register(r\"channels\", ChannelViewSet)\nrouter.register(r\"events\", EventViewSet)\nrouter.register(r\"institutions\", InstitutionViewSet)\nrouter.register(r\"notes\", NoteViewSet)\nrouter.register(r\"tags\", TagViewSet)\nrouter.register(r\"users\", UserViewSet)\n\nschema_view = get_schema_view(\n info,\n # validators=['flex', 'ssv'],\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/\", include(\"small_eod.collections.urls\")),\n path(\"api/\", include(\"small_eod.cases.urls\")),\n path(\"api/\", include(\"small_eod.letters.urls\")),\n path(\"api/\", include(\"small_eod.features.urls\")),\n path(\"api/\", include(\"small_eod.administrative_units.urls\")),\n path(\"api/\", include(\"small_eod.autocomplete.urls\")),\n path(\"api/docs/\", schema_view.with_ui(\"swagger\"), name=\"api_docs\"),\n path(\"api/redoc/\", schema_view.with_ui(\"redoc\"), name=\"api_redocs\"),\n re_path(\n \"^api/swagger(?P<format>.json|.yaml)$\",\n schema_view.without_ui(),\n name=\"schema_swagger\",\n ),\n path(\"api/\", include(router.urls)),\n]\n\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n path(\"__debug__/\", include(debug_toolbar.urls)),\n ]\n\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_URL)\n", "path": "backend-project/config/urls.py"}]} | 1,410 | 774 |
gh_patches_debug_12882 | rasdani/github-patches | git_diff | open-mmlab__mmpretrain-147 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KeyError: 'LinearHead is not in the head registry'
use config
```python
model = dict(
head=dict(
type='LinearHead',
num_classes=1000,
in_channels=2048,
loss=dict(
type='LabelSmoothLoss',
loss_weight=1.0,
label_smooth_val=0.1,
num_classes=1000),
))
```
got this traceback:
```python
Traceback (most recent call last):
File "/home/code/open_mmlab_codebase/huatian_bump_blur_cls/tools/train.py", line 177, in <module>
main()
File "/home/code/open_mmlab_codebase/huatian_bump_blur_cls/tools/train.py", line 151, in main
model = build_classifier(cfg.model)
File "/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py", line 38, in build_classifier
return build(cfg, CLASSIFIERS)
File "/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py", line 18, in build
return build_from_cfg(cfg, registry, default_args)
File "/opt/conda/lib/python3.7/site-packages/mmcv/utils/registry.py", line 171, in build_from_cfg
return obj_cls(**args)
File "/home/code/open_mmlab_codebase/mmclassification/mmcls/models/classifiers/image.py", line 18, in __init__
self.head = build_head(head)
File "/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py", line 26, in build_head
return build(cfg, HEADS)
File "/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py", line 18, in build
return build_from_cfg(cfg, registry, default_args)
File "/opt/conda/lib/python3.7/site-packages/mmcv/utils/registry.py", line 164, in build_from_cfg
f'{obj_type} is not in the {registry.name} registry')
KeyError: 'LinearHead is not in the head registry'
```
__checked /mmcls/models/heads/*.py: no `LinearHead` is registered there__
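For reference, the registry resolves the config's `type` string against class names that were explicitly registered on the `HEADS` registry; below is a minimal, purely illustrative sketch (the class itself is made up, only the registration pattern matters):

```python
import torch
from mmcls.models.builder import HEADS


@HEADS.register_module()
class MyLinearHead(torch.nn.Module):
    """Only after this registration can a config use type='MyLinearHead'."""

    def __init__(self, num_classes, in_channels):
        super().__init__()
        self.fc = torch.nn.Linear(in_channels, num_classes)

    def forward(self, x):
        return self.fc(x)
```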
</issue>
<code>
[start of configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py]
1 _base_ = ['./resnet50_batch2048_warmup.py']
2 model = dict(
3 head=dict(
4 type='LinearHead',
5 num_classes=1000,
6 in_channels=2048,
7 loss=dict(
8 type='LabelSmoothLoss',
9 loss_weight=1.0,
10 label_smooth_val=0.1,
11 num_classes=1000),
12 ))
13
[end of configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py]
[start of configs/resnet/resnet50_b32x8_label_smooth_imagenet.py]
1 _base_ = ['./resnet50_imagenet_bs256.py']
2 model = dict(
3 head=dict(
4 type='LinearHead',
5 num_classes=1000,
6 in_channels=2048,
7 loss=dict(
8 type='LabelSmoothLoss',
9 loss_weight=1.0,
10 label_smooth_val=0.1,
11 num_classes=1000),
12 ))
13
[end of configs/resnet/resnet50_b32x8_label_smooth_imagenet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py b/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
--- a/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
+++ b/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py
@@ -1,7 +1,7 @@
_base_ = ['./resnet50_imagenet_bs256.py']
model = dict(
head=dict(
- type='LinearHead',
+ type='LinearClsHead',
num_classes=1000,
in_channels=2048,
loss=dict(
diff --git a/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py b/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
--- a/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
+++ b/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py
@@ -1,7 +1,7 @@
_base_ = ['./resnet50_batch2048_warmup.py']
model = dict(
head=dict(
- type='LinearHead',
+ type='LinearClsHead',
num_classes=1000,
in_channels=2048,
loss=dict(
| {"golden_diff": "diff --git a/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py b/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py\n--- a/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py\n+++ b/configs/resnet/resnet50_b32x8_label_smooth_imagenet.py\n@@ -1,7 +1,7 @@\n _base_ = ['./resnet50_imagenet_bs256.py']\n model = dict(\n head=dict(\n- type='LinearHead',\n+ type='LinearClsHead',\n num_classes=1000,\n in_channels=2048,\n loss=dict(\ndiff --git a/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py b/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py\n--- a/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py\n+++ b/configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py\n@@ -1,7 +1,7 @@\n _base_ = ['./resnet50_batch2048_warmup.py']\n model = dict(\n head=dict(\n- type='LinearHead',\n+ type='LinearClsHead',\n num_classes=1000,\n in_channels=2048,\n loss=dict(\n", "issue": "KeyError: 'LinearHead is not in the head registry'\nuse config\r\n```python\r\nmodel = dict(\r\n head=dict(\r\n type='LinearHead',\r\n num_classes=1000,\r\n in_channels=2048,\r\n loss=dict(\r\n type='LabelSmoothLoss',\r\n loss_weight=1.0,\r\n label_smooth_val=0.1,\r\n num_classes=1000),\r\n ))\r\n```\r\n\r\ngot trackback\r\n```python\r\nTraceback (most recent call last):\r\n File \"/home/code/open_mmlab_codebase/huatian_bump_blur_cls/tools/train.py\", line 177, in <module>\r\n main()\r\n File \"/home/code/open_mmlab_codebase/huatian_bump_blur_cls/tools/train.py\", line 151, in main\r\n model = build_classifier(cfg.model)\r\n File \"/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py\", line 38, in build_classifier\r\n return build(cfg, CLASSIFIERS)\r\n File \"/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py\", line 18, in build\r\n return build_from_cfg(cfg, registry, default_args)\r\n File \"/opt/conda/lib/python3.7/site-packages/mmcv/utils/registry.py\", line 171, in build_from_cfg\r\n return obj_cls(**args)\r\n File \"/home/code/open_mmlab_codebase/mmclassification/mmcls/models/classifiers/image.py\", line 18, in __init__\r\n self.head = build_head(head)\r\n File \"/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py\", line 26, in build_head\r\n return build(cfg, HEADS)\r\n File \"/home/code/open_mmlab_codebase/mmclassification/mmcls/models/builder.py\", line 18, in build\r\n return build_from_cfg(cfg, registry, default_args)\r\n File \"/opt/conda/lib/python3.7/site-packages/mmcv/utils/registry.py\", line 164, in build_from_cfg\r\n f'{obj_type} is not in the {registry.name} registry')\r\nKeyError: 'LinearHead is not in the head registry'\r\n```\r\n\r\n__check /mmcls/models/heads/*.py, not exist `LinearHead` registered__\n", "before_files": [{"content": "_base_ = ['./resnet50_batch2048_warmup.py']\nmodel = dict(\n head=dict(\n type='LinearHead',\n num_classes=1000,\n in_channels=2048,\n loss=dict(\n type='LabelSmoothLoss',\n loss_weight=1.0,\n label_smooth_val=0.1,\n num_classes=1000),\n ))\n", "path": "configs/resnet/resnet50_b64x32_warmup_label_smooth_imagenet.py"}, {"content": "_base_ = ['./resnet50_imagenet_bs256.py']\nmodel = dict(\n head=dict(\n type='LinearHead',\n num_classes=1000,\n in_channels=2048,\n loss=dict(\n type='LabelSmoothLoss',\n loss_weight=1.0,\n label_smooth_val=0.1,\n num_classes=1000),\n ))\n", "path": "configs/resnet/resnet50_b32x8_label_smooth_imagenet.py"}]} | 1,324 | 340 |
gh_patches_debug_12800 | rasdani/github-patches | git_diff | mindsdb__mindsdb-712 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KeyError: 'mongodb' on start
Starting MindsDB (`python -m mindsdb`) version 2.8.1 throws:
```
Failed to start mongodb API with exception 'mongodb'
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/__main__.py", line 83, in <module>
p = ctx.Process(target=start_functions[api], args=(config_path, True,))
KeyError: 'mongodb'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/__main__.py", line 83, in <module>
p = ctx.Process(target=start_functions[api], args=(config_path, True,))
KeyError: 'mongodb'
```
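A stripped-down reproduction of the mismatch (the config contents are an assumption based on the error message, and the dict mirrors `mindsdb/__main__.py`): the config section is named `mongodb` while the dispatch table only has a `mongo` key, so the lookup fails.

```python
def start_http(*args): ...
def start_mysql(*args): ...
def start_mongo(*args): ...

config = {"api": {"http": {}, "mysql": {}, "mongodb": {}}}  # keys as in config.json

start_functions = {"http": start_http, "mysql": start_mysql, "mongo": start_mongo}

for api in config["api"]:
    start_functions[api]  # raises KeyError: 'mongodb' because the key above is 'mongo'
```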
</issue>
<code>
[start of mindsdb/__main__.py]
1 import atexit
2 import traceback
3 import sys
4 import os
5
6 import torch.multiprocessing as mp
7
8 from mindsdb_native.config import CONFIG
9
10 from mindsdb.utilities.config import Config
11 from mindsdb.interfaces.native.mindsdb import MindsdbNative
12 from mindsdb.interfaces.custom.custom_models import CustomModels
13 from mindsdb.api.http.start import start as start_http
14 from mindsdb.api.mysql.start import start as start_mysql
15 from mindsdb.api.mongo.start import start as start_mongo
16 from mindsdb.utilities.fs import get_or_create_dir_struct
17 from mindsdb.interfaces.database.database import DatabaseWrapper
18 from mindsdb.utilities.functions import args_parse
19
20
21 def close_api_gracefully(p_arr):
22 for p in p_arr:
23 sys.stdout.flush()
24 p.terminate()
25 p.join()
26 sys.stdout.flush()
27
28
29 if __name__ == '__main__':
30 mp.freeze_support()
31
32 args = args_parse()
33
34 config_path = args.config
35 if config_path is None:
36 config_dir, _ = get_or_create_dir_struct()
37 config_path = os.path.join(config_dir, 'config.json')
38
39 print(f'Using configuration file: {config_path}')
40 config = Config(config_path)
41
42 if args.api is None:
43 api_arr = [api for api in config['api']]
44 else:
45 api_arr = args.api.split(',')
46
47 start_functions = {
48 'http': start_http,
49 'mysql': start_mysql,
50 'mongo': start_mongo
51 }
52
53 mdb = MindsdbNative(config)
54 cst = CustomModels(config)
55 # @TODO Maybe just use `get_model_data` directly here ? Seems like a useless abstraction
56 model_data_arr = [
57 {
58 'name': x['name'],
59 'predict': x['predict'],
60 'data_analysis': mdb.get_model_data(x['name'])['data_analysis_v2']
61 } for x in mdb.get_models()
62 ]
63
64 for m in model_data_arr:
65 if 'columns_to_ignore' in m['data_analysis']:
66 del m['data_analysis']['columns_to_ignore']
67 if 'train_std_dev' in m['data_analysis']:
68 del m['data_analysis']['train_std_dev']
69
70 model_data_arr.extend(cst.get_models())
71
72 dbw = DatabaseWrapper(config)
73 dbw.register_predictors(model_data_arr)
74
75 for broken_name in [name for name, connected in dbw.check_connections().items() if connected is False]:
76 print(f'Error failed to integrate with database aliased: {broken_name}')
77
78 p_arr = []
79 ctx = mp.get_context('spawn')
80 for api in api_arr:
81 print(f'Starting Mindsdb {api} API !')
82 try:
83 p = ctx.Process(target=start_functions[api], args=(config_path, True,))
84 p.start()
85 p_arr.append(p)
86 print(f'Started Mindsdb {api} API !')
87 except Exception as e:
88 close_api_gracefully(p_arr)
89 print(f'Failed to start {api} API with exception {e}')
90 print(traceback.format_exc())
91 raise
92
93 atexit.register(close_api_gracefully, p_arr=p_arr)
94
95 for p in p_arr:
96 p.join()
97
[end of mindsdb/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py
--- a/mindsdb/__main__.py
+++ b/mindsdb/__main__.py
@@ -40,14 +40,20 @@
config = Config(config_path)
if args.api is None:
- api_arr = [api for api in config['api']]
+ api_arr = ['http', 'mysql']
else:
api_arr = args.api.split(',')
+ for api in api_arr:
+ if api not in config:
+ print(f"Trying run '{api}' API, but is no config for this api.")
+ print(f"Please, fill config['api']['{api}']")
+ sys.exit(0)
+
start_functions = {
'http': start_http,
'mysql': start_mysql,
- 'mongo': start_mongo
+ 'mongodb': start_mongo
}
mdb = MindsdbNative(config)
| {"golden_diff": "diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py\n--- a/mindsdb/__main__.py\n+++ b/mindsdb/__main__.py\n@@ -40,14 +40,20 @@\n config = Config(config_path)\n \n if args.api is None:\n- api_arr = [api for api in config['api']]\n+ api_arr = ['http', 'mysql']\n else:\n api_arr = args.api.split(',')\n \n+ for api in api_arr:\n+ if api not in config:\n+ print(f\"Trying run '{api}' API, but is no config for this api.\")\n+ print(f\"Please, fill config['api']['{api}']\")\n+ sys.exit(0)\n+\n start_functions = {\n 'http': start_http,\n 'mysql': start_mysql,\n- 'mongo': start_mongo\n+ 'mongodb': start_mongo\n }\n \n mdb = MindsdbNative(config)\n", "issue": "KeyError: 'mongodb' on start\nStarting Mindsdb(python -m mindsdb) version 2.8.1 throws:\r\n\r\n```\r\nFailed to start mongodb API with exception 'mongodb'\r\nTraceback (most recent call last):\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/__main__.py\", line 83, in <module>\r\n p = ctx.Process(target=start_functions[api], args=(config_path, True,))\r\nKeyError: 'mongodb'\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/__main__.py\", line 83, in <module>\r\n p = ctx.Process(target=start_functions[api], args=(config_path, True,))\r\nKeyError: 'mongodb'\r\n```\n", "before_files": [{"content": "import atexit\nimport traceback\nimport sys\nimport os\n\nimport torch.multiprocessing as mp\n\nfrom mindsdb_native.config import CONFIG\n\nfrom mindsdb.utilities.config import Config\nfrom mindsdb.interfaces.native.mindsdb import MindsdbNative\nfrom mindsdb.interfaces.custom.custom_models import CustomModels\nfrom mindsdb.api.http.start import start as start_http\nfrom mindsdb.api.mysql.start import start as start_mysql\nfrom mindsdb.api.mongo.start import start as start_mongo\nfrom mindsdb.utilities.fs import get_or_create_dir_struct\nfrom mindsdb.interfaces.database.database import DatabaseWrapper\nfrom mindsdb.utilities.functions import args_parse\n\n\ndef close_api_gracefully(p_arr):\n for p in p_arr:\n sys.stdout.flush()\n p.terminate()\n p.join()\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n mp.freeze_support()\n\n args = args_parse()\n\n config_path = args.config\n if config_path is None:\n config_dir, _ = get_or_create_dir_struct()\n config_path = os.path.join(config_dir, 'config.json')\n\n print(f'Using configuration file: {config_path}')\n config = Config(config_path)\n\n if args.api is None:\n api_arr = [api for api in config['api']]\n else:\n api_arr = args.api.split(',')\n\n start_functions = {\n 'http': start_http,\n 'mysql': start_mysql,\n 'mongo': start_mongo\n }\n\n mdb = MindsdbNative(config)\n cst = CustomModels(config)\n # @TODO Maybe just use `get_model_data` directly here ? 
Seems like a useless abstraction\n model_data_arr = [\n {\n 'name': x['name'],\n 'predict': x['predict'],\n 'data_analysis': mdb.get_model_data(x['name'])['data_analysis_v2']\n } for x in mdb.get_models()\n ]\n\n for m in model_data_arr:\n if 'columns_to_ignore' in m['data_analysis']:\n del m['data_analysis']['columns_to_ignore']\n if 'train_std_dev' in m['data_analysis']:\n del m['data_analysis']['train_std_dev']\n\n model_data_arr.extend(cst.get_models())\n\n dbw = DatabaseWrapper(config)\n dbw.register_predictors(model_data_arr)\n\n for broken_name in [name for name, connected in dbw.check_connections().items() if connected is False]:\n print(f'Error failed to integrate with database aliased: {broken_name}')\n\n p_arr = []\n ctx = mp.get_context('spawn')\n for api in api_arr:\n print(f'Starting Mindsdb {api} API !')\n try:\n p = ctx.Process(target=start_functions[api], args=(config_path, True,))\n p.start()\n p_arr.append(p)\n print(f'Started Mindsdb {api} API !')\n except Exception as e:\n close_api_gracefully(p_arr)\n print(f'Failed to start {api} API with exception {e}')\n print(traceback.format_exc())\n raise\n\n atexit.register(close_api_gracefully, p_arr=p_arr)\n\n for p in p_arr:\n p.join()\n", "path": "mindsdb/__main__.py"}]} | 1,662 | 215 |
gh_patches_debug_4504 | rasdani/github-patches | git_diff | saleor__saleor-2803 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GraphQL query for home page
### What I'm trying to achieve
I want to have a shop homepage which shows:
* new arrivals,
* products on sale,
* featured products,
* featured collection,
* categories links
### Describe a proposed solution
```graphql
query HomePage {
shop {
featuredCollection {
id
name
}
}
featured: products(first: 10, collectionSlug: "featured") {
edges {
node {
id
name
thumbnailUrl
category {
id
name
}
price {
amount
currency
}
}
}
}
newArrivals: products(first: 10, sortBy: "creation_date") {
edges {
node {
id
name
thumbnailUrl
category {
id
name
}
price {
amount
currency
}
}
}
}
sales: products(first: 10, collectionSlug: "sales") {
edges {
node {
id
name
thumbnailUrl
category {
id
name
}
price {
amount
currency
}
}
}
}
categories {
edges {
node {
id
name
}
}
}
}
```
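To make the proposed `collectionSlug` and `sortBy` arguments concrete, here is a rough graphene sketch; the type definition and the `collections__slug` lookup are assumptions for illustration, not the project's actual schema code:

```python
import graphene
from graphene_django.types import DjangoObjectType

from saleor.product.models import Product


class ProductType(DjangoObjectType):
    class Meta:
        model = Product


class Query(graphene.ObjectType):
    products = graphene.List(
        ProductType,
        collection_slug=graphene.String(),
        sort_by=graphene.String())

    def resolve_products(self, info, collection_slug=None, sort_by=None, **kwargs):
        qs = Product.objects.all()
        if collection_slug:
            qs = qs.filter(collections__slug=collection_slug)
        if sort_by:
            qs = qs.order_by(sort_by)  # e.g. "-updated_at" for new arrivals
        return qs
```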
### Other solutions I've tried and won't work
I introduced:
* a filter by collection slug for featured and sales. That is the simplest approach I have in mind.
* exposing homepage collection in the shop query,
* sorting products by creation date for new arrivals.
This is only a proposal. If you have a better approach in mind, please share it.
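As a sketch of the "new arrivals" part only (the field name and label are assumptions, not a final design), the existing sort whitelist could simply grow a date-based key so clients can request the newest products first:

```python
from collections import OrderedDict

from django.utils.translation import pgettext_lazy

SORT_BY_FIELDS = OrderedDict([
    ('name', pgettext_lazy('Product list sorting option', 'name')),
    ('price', pgettext_lazy('Product list sorting option', 'price')),
    # hypothetical extra option for "newest first" on the storefront
    ('updated_at', pgettext_lazy('Product list sorting option', 'last updated'))])
```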
</issue>
<code>
[start of saleor/product/filters.py]
1 from collections import OrderedDict
2
3 from django.db.models import Q
4 from django.forms import CheckboxSelectMultiple, ValidationError
5 from django.utils.translation import pgettext_lazy
6 from django_filters import MultipleChoiceFilter, OrderingFilter, RangeFilter
7
8 from ..core.filters import SortedFilterSet
9 from .models import Product, ProductAttribute
10
11 SORT_BY_FIELDS = OrderedDict([
12 ('name', pgettext_lazy('Product list sorting option', 'name')),
13 ('price', pgettext_lazy('Product list sorting option', 'price'))])
14
15
16 class ProductFilter(SortedFilterSet):
17 sort_by = OrderingFilter(
18 label=pgettext_lazy('Product list sorting form', 'Sort by'),
19 fields=SORT_BY_FIELDS.keys(),
20 field_labels=SORT_BY_FIELDS)
21 price = RangeFilter(
22 label=pgettext_lazy('Currency amount', 'Price'))
23
24 class Meta:
25 model = Product
26 fields = []
27
28 def __init__(self, *args, **kwargs):
29 super().__init__(*args, **kwargs)
30 self.product_attributes, self.variant_attributes = (
31 self._get_attributes())
32 self.filters.update(self._get_product_attributes_filters())
33 self.filters.update(self._get_product_variants_attributes_filters())
34 self.filters = OrderedDict(sorted(self.filters.items()))
35
36 def _get_attributes(self):
37 q_product_attributes = self._get_product_attributes_lookup()
38 q_variant_attributes = self._get_variant_attributes_lookup()
39 product_attributes = (
40 ProductAttribute.objects.all()
41 .prefetch_related('translations', 'values__translations')
42 .filter(q_product_attributes)
43 .distinct())
44 variant_attributes = (
45 ProductAttribute.objects.all()
46 .prefetch_related('translations', 'values__translations')
47 .filter(q_variant_attributes)
48 .distinct())
49 return product_attributes, variant_attributes
50
51 def _get_product_attributes_lookup(self):
52 raise NotImplementedError()
53
54 def _get_variant_attributes_lookup(self):
55 raise NotImplementedError()
56
57 def _get_product_attributes_filters(self):
58 filters = {}
59 for attribute in self.product_attributes:
60 filters[attribute.slug] = MultipleChoiceFilter(
61 name='attributes__%s' % attribute.pk,
62 label=attribute.translated.name,
63 widget=CheckboxSelectMultiple,
64 choices=self._get_attribute_choices(attribute))
65 return filters
66
67 def _get_product_variants_attributes_filters(self):
68 filters = {}
69 for attribute in self.variant_attributes:
70 filters[attribute.slug] = MultipleChoiceFilter(
71 name='variants__attributes__%s' % attribute.pk,
72 label=attribute.translated.name,
73 widget=CheckboxSelectMultiple,
74 choices=self._get_attribute_choices(attribute))
75 return filters
76
77 def _get_attribute_choices(self, attribute):
78 return [
79 (choice.pk, choice.translated.name)
80 for choice in attribute.values.all()]
81
82 def validate_sort_by(self, value):
83 if value.strip('-') not in SORT_BY_FIELDS:
84 raise ValidationError(
85 pgettext_lazy(
86 'Validation error for sort_by filter',
87 '%(value)s is not a valid sorting option'),
88 params={'value': value})
89
90
91 class ProductCategoryFilter(ProductFilter):
92 def __init__(self, *args, **kwargs):
93 self.category = kwargs.pop('category')
94 super().__init__(*args, **kwargs)
95
96 def _get_product_attributes_lookup(self):
97 return Q(product_types__products__category=self.category)
98
99 def _get_variant_attributes_lookup(self):
100 return Q(product_variant_types__products__category=self.category)
101
102
103 class ProductCollectionFilter(ProductFilter):
104 def __init__(self, *args, **kwargs):
105 self.collection = kwargs.pop('collection')
106 super().__init__(*args, **kwargs)
107
108 def _get_product_attributes_lookup(self):
109 return Q(product_types__products__collections=self.collection)
110
111 def _get_variant_attributes_lookup(self):
112 return Q(product_variant_types__products__collections=self.collection)
113
[end of saleor/product/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/product/filters.py b/saleor/product/filters.py
--- a/saleor/product/filters.py
+++ b/saleor/product/filters.py
@@ -10,7 +10,9 @@
SORT_BY_FIELDS = OrderedDict([
('name', pgettext_lazy('Product list sorting option', 'name')),
- ('price', pgettext_lazy('Product list sorting option', 'price'))])
+ ('price', pgettext_lazy('Product list sorting option', 'price')),
+ ('updated_at', pgettext_lazy(
+ 'Product list sorting option', 'last updated'))])
class ProductFilter(SortedFilterSet):
| {"golden_diff": "diff --git a/saleor/product/filters.py b/saleor/product/filters.py\n--- a/saleor/product/filters.py\n+++ b/saleor/product/filters.py\n@@ -10,7 +10,9 @@\n \n SORT_BY_FIELDS = OrderedDict([\n ('name', pgettext_lazy('Product list sorting option', 'name')),\n- ('price', pgettext_lazy('Product list sorting option', 'price'))])\n+ ('price', pgettext_lazy('Product list sorting option', 'price')),\n+ ('updated_at', pgettext_lazy(\n+ 'Product list sorting option', 'last updated'))])\n \n \n class ProductFilter(SortedFilterSet):\n", "issue": "Grapql query for home page\n### What I'm trying to achieve\r\nI want to have a shop homepage which shows:\r\n* new arrivals,\r\n* product in a sale,\r\n* featured products,\r\n* featured collection,\r\n* categories links\r\n\r\n### Describe a proposed solution\r\n```graphql\r\nquery HomePage {\r\n shop {\r\n featuredCollection {\r\n id\r\n name\r\n }\r\n }\r\n featured: products(first: 10, collectionSlug: \"featured\") {\r\n edges {\r\n node {\r\n id\r\n name\r\n thumbnailUrl\r\n category {\r\n id\r\n name\r\n }\r\n price {\r\n amount\r\n currency\r\n }\r\n }\r\n }\r\n }\r\n newArrivals: products(first: 10, sortBy: \"creation_date\") {\r\n edges {\r\n node {\r\n id\r\n name\r\n thumbnailUrl\r\n category {\r\n id\r\n name\r\n }\r\n price {\r\n amount\r\n currency\r\n }\r\n }\r\n }\r\n }\r\n sales: products(first: 10, collectionSlug: \"sales\") {\r\n edges {\r\n node {\r\n id\r\n name\r\n thumbnailUrl\r\n category {\r\n id\r\n name\r\n }\r\n price {\r\n amount\r\n currency\r\n }\r\n }\r\n }\r\n }\r\n categories {\r\n edges {\r\n node {\r\n id\r\n name\r\n }\r\n }\r\n }\r\n}\r\n\r\n```\r\n\r\n### Other solutions I've tried and won't work\r\nI introduced:\r\n* filter by collection slug for featured and sales. That is the simplest approach which I have in my mind.\r\n* exposing homepage collection in the shop query,\r\n* sorting products by creation data for new arrivals.\r\n\r\nThis is only a proposition. 
If you have a better approach in mind please share it.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django.db.models import Q\nfrom django.forms import CheckboxSelectMultiple, ValidationError\nfrom django.utils.translation import pgettext_lazy\nfrom django_filters import MultipleChoiceFilter, OrderingFilter, RangeFilter\n\nfrom ..core.filters import SortedFilterSet\nfrom .models import Product, ProductAttribute\n\nSORT_BY_FIELDS = OrderedDict([\n ('name', pgettext_lazy('Product list sorting option', 'name')),\n ('price', pgettext_lazy('Product list sorting option', 'price'))])\n\n\nclass ProductFilter(SortedFilterSet):\n sort_by = OrderingFilter(\n label=pgettext_lazy('Product list sorting form', 'Sort by'),\n fields=SORT_BY_FIELDS.keys(),\n field_labels=SORT_BY_FIELDS)\n price = RangeFilter(\n label=pgettext_lazy('Currency amount', 'Price'))\n\n class Meta:\n model = Product\n fields = []\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.product_attributes, self.variant_attributes = (\n self._get_attributes())\n self.filters.update(self._get_product_attributes_filters())\n self.filters.update(self._get_product_variants_attributes_filters())\n self.filters = OrderedDict(sorted(self.filters.items()))\n\n def _get_attributes(self):\n q_product_attributes = self._get_product_attributes_lookup()\n q_variant_attributes = self._get_variant_attributes_lookup()\n product_attributes = (\n ProductAttribute.objects.all()\n .prefetch_related('translations', 'values__translations')\n .filter(q_product_attributes)\n .distinct())\n variant_attributes = (\n ProductAttribute.objects.all()\n .prefetch_related('translations', 'values__translations')\n .filter(q_variant_attributes)\n .distinct())\n return product_attributes, variant_attributes\n\n def _get_product_attributes_lookup(self):\n raise NotImplementedError()\n\n def _get_variant_attributes_lookup(self):\n raise NotImplementedError()\n\n def _get_product_attributes_filters(self):\n filters = {}\n for attribute in self.product_attributes:\n filters[attribute.slug] = MultipleChoiceFilter(\n name='attributes__%s' % attribute.pk,\n label=attribute.translated.name,\n widget=CheckboxSelectMultiple,\n choices=self._get_attribute_choices(attribute))\n return filters\n\n def _get_product_variants_attributes_filters(self):\n filters = {}\n for attribute in self.variant_attributes:\n filters[attribute.slug] = MultipleChoiceFilter(\n name='variants__attributes__%s' % attribute.pk,\n label=attribute.translated.name,\n widget=CheckboxSelectMultiple,\n choices=self._get_attribute_choices(attribute))\n return filters\n\n def _get_attribute_choices(self, attribute):\n return [\n (choice.pk, choice.translated.name)\n for choice in attribute.values.all()]\n\n def validate_sort_by(self, value):\n if value.strip('-') not in SORT_BY_FIELDS:\n raise ValidationError(\n pgettext_lazy(\n 'Validation error for sort_by filter',\n '%(value)s is not a valid sorting option'),\n params={'value': value})\n\n\nclass ProductCategoryFilter(ProductFilter):\n def __init__(self, *args, **kwargs):\n self.category = kwargs.pop('category')\n super().__init__(*args, **kwargs)\n\n def _get_product_attributes_lookup(self):\n return Q(product_types__products__category=self.category)\n\n def _get_variant_attributes_lookup(self):\n return Q(product_variant_types__products__category=self.category)\n\n\nclass ProductCollectionFilter(ProductFilter):\n def __init__(self, *args, **kwargs):\n self.collection = kwargs.pop('collection')\n 
super().__init__(*args, **kwargs)\n\n def _get_product_attributes_lookup(self):\n return Q(product_types__products__collections=self.collection)\n\n def _get_variant_attributes_lookup(self):\n return Q(product_variant_types__products__collections=self.collection)\n", "path": "saleor/product/filters.py"}]} | 1,937 | 143 |
gh_patches_debug_29292 | rasdani/github-patches | git_diff | e-valuation__EvaP-721 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Only internal redirects
The platform should only redirect to internal pages after logging in.
(handled in `evaluation/views.py index`)
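One common Django-side approach (only a sketch, the helper view name is mine; older Django versions expose this check as `is_safe_url`) is to validate the `next` parameter before following it:

```python
from django.shortcuts import redirect
from django.utils.http import url_has_allowed_host_and_scheme


def redirect_back_or_home(request):
    redirect_to = request.GET.get("next", "")
    if url_has_allowed_host_and_scheme(redirect_to, allowed_hosts={request.get_host()}):
        return redirect(redirect_to)
    return redirect("student:index")
```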
</issue>
<code>
[start of evap/evaluation/views.py]
1 from django.contrib import messages
2 from django.contrib.auth import login as auth_login
3 from django.shortcuts import redirect, render
4 from django.utils.translation import ugettext as _
5
6 from evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm
7 from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester
8
9
10 def index(request):
11 """Main entry page into EvaP providing all the login options available. THe username/password
12 login is thought to be used for internal users, e.g. by connecting to a LDAP directory.
13 The login key mechanism is meant to be used to include external participants, e.g. visiting
14 students or visiting contributors.
15 """
16
17 # parse the form data into the respective form
18 submit_type = request.POST.get("submit_type", "no_submit")
19 new_key_form = NewKeyForm(request.POST if submit_type == "new_key" else None)
20 login_key_form = LoginKeyForm(request.POST if submit_type == "login_key" else None)
21 login_username_form = LoginUsernameForm(request, request.POST if submit_type == "login_username" else None)
22
23 # process form data
24 if request.method == 'POST':
25 if new_key_form.is_valid():
26 # user wants a new login key
27 profile = new_key_form.get_user()
28 profile.generate_login_key()
29 profile.save()
30
31 EmailTemplate.send_login_key_to_user(new_key_form.get_user())
32
33 messages.success(request, _("Successfully sent email with new login key."))
34 elif login_key_form.is_valid():
35 # user would like to login with a login key and passed key test
36 auth_login(request, login_key_form.get_user())
37 elif login_username_form.is_valid():
38 # user would like to login with username and password and passed password test
39 auth_login(request, login_username_form.get_user())
40
41 # clean up our test cookie
42 if request.session.test_cookie_worked():
43 request.session.delete_test_cookie()
44
45 # if not logged in by now, render form
46 if not request.user.is_authenticated():
47 # set test cookie to verify whether they work in the next step
48 request.session.set_test_cookie()
49
50 template_data = dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form)
51 return render(request, "index.html", template_data)
52 else:
53 user, created = UserProfile.objects.get_or_create(username=request.user.username)
54
55 # check for redirect variable
56 redirect_to = request.GET.get("next", None)
57 if redirect_to is not None:
58 if redirect_to.startswith("/staff/"):
59 if request.user.is_staff:
60 return redirect(redirect_to)
61 elif redirect_to.startswith("/grades/"):
62 if request.user.is_grade_publisher:
63 return redirect(redirect_to)
64 elif redirect_to.startswith("/contributor/"):
65 if user.is_contributor:
66 return redirect(redirect_to)
67 elif redirect_to.startswith("/student/"):
68 if user.is_participant:
69 return redirect(redirect_to)
70 else:
71 return redirect(redirect_to)
72
73 # redirect user to appropriate start page
74 if request.user.is_staff:
75 return redirect('staff:index')
76 elif request.user.is_grade_publisher:
77 return redirect('grades:semester_view', Semester.active_semester().id)
78 elif user.is_contributor_or_delegate:
79 return redirect('contributor:index')
80 elif user.is_participant:
81 return redirect('student:index')
82 else:
83 return redirect('results:index')
84
85
86 def faq(request):
87 return render(request, "faq.html", dict(sections=FaqSection.objects.all()))
88
89 def legal_notice(request):
90 return render(request, "legal_notice.html", dict())
91
[end of evap/evaluation/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py
--- a/evap/evaluation/views.py
+++ b/evap/evaluation/views.py
@@ -2,13 +2,14 @@
from django.contrib.auth import login as auth_login
from django.shortcuts import redirect, render
from django.utils.translation import ugettext as _
+from django.core.urlresolvers import resolve, Resolver404
from evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm
from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester
def index(request):
- """Main entry page into EvaP providing all the login options available. THe username/password
+ """Main entry page into EvaP providing all the login options available. The username/password
login is thought to be used for internal users, e.g. by connecting to a LDAP directory.
The login key mechanism is meant to be used to include external participants, e.g. visiting
students or visiting contributors.
@@ -68,7 +69,12 @@
if user.is_participant:
return redirect(redirect_to)
else:
- return redirect(redirect_to)
+ try:
+ resolve(redirect_to)
+ except Resolver404:
+ pass
+ else:
+ return redirect(redirect_to)
# redirect user to appropriate start page
if request.user.is_staff:
| {"golden_diff": "diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py\n--- a/evap/evaluation/views.py\n+++ b/evap/evaluation/views.py\n@@ -2,13 +2,14 @@\n from django.contrib.auth import login as auth_login\n from django.shortcuts import redirect, render\n from django.utils.translation import ugettext as _\n+from django.core.urlresolvers import resolve, Resolver404\n \n from evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\n from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester\n \n \n def index(request):\n- \"\"\"Main entry page into EvaP providing all the login options available. THe username/password\n+ \"\"\"Main entry page into EvaP providing all the login options available. The username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. visiting\n students or visiting contributors.\n@@ -68,7 +69,12 @@\n if user.is_participant:\n return redirect(redirect_to)\n else:\n- return redirect(redirect_to)\n+ try:\n+ resolve(redirect_to)\n+ except Resolver404:\n+ pass\n+ else:\n+ return redirect(redirect_to)\n \n # redirect user to appropriate start page\n if request.user.is_staff:\n", "issue": "Only internal redirects\nThe platform should only redirect to internal pages after logging in.\n\n(handled in `evaluation/views.py index`)\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester\n\n\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. THe username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. 
visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_key_form = LoginKeyForm(request.POST if submit_type == \"login_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_user()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.send_login_key_to_user(new_key_form.get_user())\n\n messages.success(request, _(\"Successfully sent email with new login key.\"))\n elif login_key_form.is_valid():\n # user would like to login with a login key and passed key test\n auth_login(request, login_key_form.get_user())\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_authenticated():\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n template_data = dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form)\n return render(request, \"index.html\", template_data)\n else:\n user, created = UserProfile.objects.get_or_create(username=request.user.username)\n\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/staff/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/grades/\"):\n if request.user.is_grade_publisher:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if user.is_contributor:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/student/\"):\n if user.is_participant:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('staff:index')\n elif request.user.is_grade_publisher:\n return redirect('grades:semester_view', Semester.active_semester().id)\n elif user.is_contributor_or_delegate:\n return redirect('contributor:index')\n elif user.is_participant:\n return redirect('student:index')\n else:\n return redirect('results:index')\n\n\ndef faq(request):\n return render(request, \"faq.html\", dict(sections=FaqSection.objects.all()))\n\ndef legal_notice(request):\n return render(request, \"legal_notice.html\", dict())\n", "path": "evap/evaluation/views.py"}]} | 1,518 | 315 |
gh_patches_debug_4673 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-2112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Problem with tesseract after Bugfix: Some tesseract languages aren't detected as installed. @stumpylog (#2057)
### Description
Hi,
after the fix for [2044](https://github.com/paperless-ngx/paperless-ngx/issues/2044) I have a problem with OCR in paperless-ngx.
Before this commit I used the following ENV:
>
- PAPERLESS_OCR_LANGUAGE=srp_latn+srp
- PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn
and everything worked.
After this commit, if I don't make any changes to the ENV, the error is:
?: The selected ocr language srp_latn is not installed. Paperless cannot OCR your documents without it. Please fix PAPERLESS_OCR_LANGUAGE.
If I make changes to the ENV, replacing _ with -:
>
- PAPERLESS_OCR_LANGUAGE=srp-latn+srp
- PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn
After this change the system installs the language and starts paperless, but if I upload any document, OCR doesn't work; the error is:
`[2022-12-04 13:05:46,369] [ERROR] [paperless.consumer] Error while consuming document apr 2022.pdf: MissingDependencyError: OCR engine does not have language data for the following requested languages:
srp-latn
**
Paperless-ngx 1.10.0 WORKS
Paperless-ngx 1.10.1 DOES NOT WORK
**
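For what it's worth, a quick diagnostic (not part of paperless, just a sketch that mirrors its startup check) shows which codes tesseract itself reports; note that they use an underscore, not a dash:

```python
import shutil
import subprocess

proc = subprocess.run(
    [shutil.which("tesseract"), "--list-langs"],
    capture_output=True,
)
langs = proc.stdout.decode("utf8", errors="ignore").strip().split("\n")[1:]
print(langs)  # e.g. ['eng', 'osd', 'srp', 'srp_latn']
```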
### Steps to reproduce
1. Add this ENV
- PAPERLESS_OCR_LANGUAGE=srp-latn+srp
- PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn
2. Upload any document
### Webserver logs
```bash
[2022-12-04 13:05:46,369] [ERROR] [paperless.consumer] Error while consuming document apr 2022.pdf: MissingDependencyError: OCR engine does not have language data for the following requested languages:
srp-latn
Note: most languages are identified by a 3-digit ISO 639-2 Code
Traceback (most recent call last):
File "/usr/src/paperless/src/paperless_tesseract/parsers.py", line 292, in parse
ocrmypdf.ocr(**args)
File "/usr/local/lib/python3.9/site-packages/ocrmypdf/api.py", line 331, in ocr
check_options(options, plugin_manager)
File "/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py", line 246, in check_options
_check_plugin_options(options, plugin_manager)
File "/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py", line 241, in _check_plugin_options
check_options_languages(options, ocr_engine_languages)
File "/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py", line 70, in check_options_languages
raise MissingDependencyError(msg)
ocrmypdf.exceptions.MissingDependencyError: OCR engine does not have language data for the following requested languages:
srp-latn
Note: most languages are identified by a 3-digit ISO 639-2 Code
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/src/paperless/src/documents/consumer.py", line 337, in try_consume_file
document_parser.parse(self.path, mime_type, self.filename)
File "/usr/src/paperless/src/paperless_tesseract/parsers.py", line 346, in parse
raise ParseError(f"{e.__class__.__name__}: {str(e)}") from e
documents.parsers.ParseError: MissingDependencyError: OCR engine does not have language data for the following requested languages:
srp-latn
Note: most languages are identified by a 3-digit ISO 639-2 Code
```
### Browser logs
_No response_
### Paperless-ngx version
1.10.1
### Host OS
Docker
### Installation method
Docker - official image
### Browser
_No response_
### Configuration changes
_No response_
### Other
_No response_
</issue>
<code>
[start of src/paperless_tesseract/checks.py]
1 import shutil
2 import subprocess
3
4 from django.conf import settings
5 from django.core.checks import Error
6 from django.core.checks import register
7 from django.core.checks import Warning
8
9
10 def get_tesseract_langs():
11 proc = subprocess.run(
12 [shutil.which("tesseract"), "--list-langs"],
13 capture_output=True,
14 )
15
16 # Decode bytes to string, split on newlines, trim out the header
17 proc_lines = proc.stdout.decode("utf8", errors="ignore").strip().split("\n")[1:]
18
19 # Replace _ with - to convert two part languages to the expected code
20 return [x.replace("_", "-") for x in proc_lines]
21
22
23 @register()
24 def check_default_language_available(app_configs, **kwargs):
25 installed_langs = get_tesseract_langs()
26
27 if not settings.OCR_LANGUAGE:
28 return [
29 Warning(
30 "No OCR language has been specified with PAPERLESS_OCR_LANGUAGE. "
31 "This means that tesseract will fallback to english.",
32 ),
33 ]
34
35 specified_langs = settings.OCR_LANGUAGE.split("+")
36
37 for lang in specified_langs:
38 if lang not in installed_langs:
39 return [
40 Error(
41 f"The selected ocr language {lang} is "
42 f"not installed. Paperless cannot OCR your documents "
43 f"without it. Please fix PAPERLESS_OCR_LANGUAGE.",
44 ),
45 ]
46
47 return []
48
[end of src/paperless_tesseract/checks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/paperless_tesseract/checks.py b/src/paperless_tesseract/checks.py
--- a/src/paperless_tesseract/checks.py
+++ b/src/paperless_tesseract/checks.py
@@ -16,8 +16,7 @@
# Decode bytes to string, split on newlines, trim out the header
proc_lines = proc.stdout.decode("utf8", errors="ignore").strip().split("\n")[1:]
- # Replace _ with - to convert two part languages to the expected code
- return [x.replace("_", "-") for x in proc_lines]
+ return [x.strip() for x in proc_lines]
@register()
| {"golden_diff": "diff --git a/src/paperless_tesseract/checks.py b/src/paperless_tesseract/checks.py\n--- a/src/paperless_tesseract/checks.py\n+++ b/src/paperless_tesseract/checks.py\n@@ -16,8 +16,7 @@\n # Decode bytes to string, split on newlines, trim out the header\n proc_lines = proc.stdout.decode(\"utf8\", errors=\"ignore\").strip().split(\"\\n\")[1:]\n \n- # Replace _ with - to convert two part languages to the expected code\n- return [x.replace(\"_\", \"-\") for x in proc_lines]\n+ return [x.strip() for x in proc_lines]\n \n \n @register()\n", "issue": "[BUG] Problem with tesseract after Bugfix: Some tesseract languages aren't detected as installed. @stumpylog (#2057)\n### Description\r\n\r\nHi,\r\nafter Fixes [2044 ](https://github.com/paperless-ngx/paperless-ngx/issues/2044)I have problem with OCR and paperless-ngx.\r\n\r\nBefore this commit I use next ENV :\r\n\r\n> \r\n\r\n - PAPERLESS_OCR_LANGUAGE=srp_latn+srp\r\n - PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn \r\n\r\nand everything work.\r\n\r\nAfter this commit if dont make any changes in ENV error is:\r\n?: The selected ocr language srp_latn is not installed. Paperless cannot OCR your documents without it. Please fix PAPERLESS_OCR_LANGUAGE.\r\n\r\nIf i make changes in ENV, replace _ with -:\r\n\r\n> \r\n\r\n - PAPERLESS_OCR_LANGUAGE=srp-latn+srp\r\n - PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn\r\nAfter this change system install lang and start paperless, but if I upload any document, OCR dont work, error is:\r\n\r\n`[2022-12-04 13:05:46,369] [ERROR] [paperless.consumer] Error while consuming document apr 2022.pdf: MissingDependencyError: OCR engine does not have language data for the following requested languages:\r\n\r\nsrp-latn\r\n\r\n**\r\nPaperless-ngx 1.10.0 WORK\r\nPaperless-ngx 1.10.1 DONT WORK\r\n**\r\n### Steps to reproduce\r\n\r\n1. Add this ENV\r\n - PAPERLESS_OCR_LANGUAGE=srp-latn+srp\r\n - PAPERLESS_OCR_LANGUAGES=srp-latn srp script-latn\r\n\r\n2. 
Upload any document\r\n\r\n### Webserver logs\r\n\r\n```bash\r\n[2022-12-04 13:05:46,369] [ERROR] [paperless.consumer] Error while consuming document apr 2022.pdf: MissingDependencyError: OCR engine does not have language data for the following requested languages:\r\n\r\nsrp-latn\r\n\r\nNote: most languages are identified by a 3-digit ISO 639-2 Code\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/usr/src/paperless/src/paperless_tesseract/parsers.py\", line 292, in parse\r\n\r\n ocrmypdf.ocr(**args)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/ocrmypdf/api.py\", line 331, in ocr\r\n\r\n check_options(options, plugin_manager)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py\", line 246, in check_options\r\n\r\n _check_plugin_options(options, plugin_manager)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py\", line 241, in _check_plugin_options\r\n\r\n check_options_languages(options, ocr_engine_languages)\r\n\r\n File \"/usr/local/lib/python3.9/site-packages/ocrmypdf/_validation.py\", line 70, in check_options_languages\r\n\r\n raise MissingDependencyError(msg)\r\n\r\nocrmypdf.exceptions.MissingDependencyError: OCR engine does not have language data for the following requested languages:\r\n\r\nsrp-latn\r\n\r\nNote: most languages are identified by a 3-digit ISO 639-2 Code\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/usr/src/paperless/src/documents/consumer.py\", line 337, in try_consume_file\r\n\r\n document_parser.parse(self.path, mime_type, self.filename)\r\n\r\n File \"/usr/src/paperless/src/paperless_tesseract/parsers.py\", line 346, in parse\r\n\r\n raise ParseError(f\"{e.__class__.__name__}: {str(e)}\") from e\r\n\r\ndocuments.parsers.ParseError: MissingDependencyError: OCR engine does not have language data for the following requested languages:\r\n\r\nsrp-latn\r\n\r\nNote: most languages are identified by a 3-digit ISO 639-2 Code\r\n```\r\n\r\n\r\n### Browser logs\r\n\r\n_No response_\r\n\r\n### Paperless-ngx version\r\n\r\n1.10.1\r\n\r\n### Host OS\r\n\r\nDocker\r\n\r\n### Installation method\r\n\r\nDocker - official image\r\n\r\n### Browser\r\n\r\n_No response_\r\n\r\n### Configuration changes\r\n\r\n_No response_\r\n\r\n### Other\r\n\r\n_No response_\n", "before_files": [{"content": "import shutil\nimport subprocess\n\nfrom django.conf import settings\nfrom django.core.checks import Error\nfrom django.core.checks import register\nfrom django.core.checks import Warning\n\n\ndef get_tesseract_langs():\n proc = subprocess.run(\n [shutil.which(\"tesseract\"), \"--list-langs\"],\n capture_output=True,\n )\n\n # Decode bytes to string, split on newlines, trim out the header\n proc_lines = proc.stdout.decode(\"utf8\", errors=\"ignore\").strip().split(\"\\n\")[1:]\n\n # Replace _ with - to convert two part languages to the expected code\n return [x.replace(\"_\", \"-\") for x in proc_lines]\n\n\n@register()\ndef check_default_language_available(app_configs, **kwargs):\n installed_langs = get_tesseract_langs()\n\n if not settings.OCR_LANGUAGE:\n return [\n Warning(\n \"No OCR language has been specified with PAPERLESS_OCR_LANGUAGE. \"\n \"This means that tesseract will fallback to english.\",\n ),\n ]\n\n specified_langs = settings.OCR_LANGUAGE.split(\"+\")\n\n for lang in specified_langs:\n if lang not in installed_langs:\n return [\n Error(\n f\"The selected ocr language {lang} is \"\n f\"not installed. 
Paperless cannot OCR your documents \"\n f\"without it. Please fix PAPERLESS_OCR_LANGUAGE.\",\n ),\n ]\n\n return []\n", "path": "src/paperless_tesseract/checks.py"}]} | 1,894 | 152 |
gh_patches_debug_60522 | rasdani/github-patches | git_diff | streamlit__streamlit-7257 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changing start_time of st.video() doesn't work for the same video
### Checklist
- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
Changing start_time of st.video() doesn't work for the same video.
### Reproducible Code Example
```Python
import streamlit as st
timestamp = st.text_input('timestamp', '6')
st.video('local video path', start_time=int(timestamp))
```
### Steps To Reproduce
1. Replace 'local video path' with your own video path in the provided code, and run the code
2. Type different timestamp in the text input box
3. The video timestamp doesn't change
### Expected Behavior
The timestamp should change as start_time changes.
### Current Behavior
The video timestamp doesn't change. It always shows the initial timestamp. However, if you change the video to a different one in the source code and rerun the app, the timestamp will change correctly.
### Is this a regression?
- [ ] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.25.0
- Python version: Python 3.10.11
- Operating System: Windows 11 Home 22H2
- Browser: Microsoft Edge Version 115.0.1901.188 (Official build) (64-bit)
### Additional Information
_No response_
</issue>
<code>
[start of e2e/scripts/st_video.py]
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import requests
16
17 import streamlit as st
18
19 url = "https://www.w3schools.com/html/mov_bbb.mp4"
20 file = requests.get(url).content
21 st.video(file)
22
[end of e2e/scripts/st_video.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/e2e/scripts/st_video.py b/e2e/scripts/st_video.py
--- a/e2e/scripts/st_video.py
+++ b/e2e/scripts/st_video.py
@@ -19,3 +19,7 @@
url = "https://www.w3schools.com/html/mov_bbb.mp4"
file = requests.get(url).content
st.video(file)
+
+# Test start time with widget
+timestamp = st.number_input("Start Time (in seconds)", min_value=0, value=6)
+st.video(url, start_time=int(timestamp))
| {"golden_diff": "diff --git a/e2e/scripts/st_video.py b/e2e/scripts/st_video.py\n--- a/e2e/scripts/st_video.py\n+++ b/e2e/scripts/st_video.py\n@@ -19,3 +19,7 @@\n url = \"https://www.w3schools.com/html/mov_bbb.mp4\"\n file = requests.get(url).content\n st.video(file)\n+\n+# Test start time with widget\n+timestamp = st.number_input(\"Start Time (in seconds)\", min_value=0, value=6)\n+st.video(url, start_time=int(timestamp))\n", "issue": "Changing start_time of st.video() doesn't work for the same video\n### Checklist\r\n\r\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\r\n- [X] I added a very descriptive title to this issue.\r\n- [X] I have provided sufficient information below to help reproduce this issue.\r\n\r\n### Summary\r\n\r\nChanging start_time of st.video() doesn't work for the same video.\r\n\r\n### Reproducible Code Example\r\n\r\n```Python\r\nimport streamlit as st\r\n\r\ntimestamp = st.text_input('timestamp', '6')\r\nst.video('local video path', start_time=int(timestamp))\r\n```\r\n\r\n\r\n### Steps To Reproduce\r\n\r\n1. Replace 'local video path' with your own video path in the provided code, and run the code\r\n2. Type different timestamp in the text input box\r\n3. The video timestamp doesn't change\r\n\r\n### Expected Behavior\r\n\r\nThe timestamp should change as start_time changes.\r\n\r\n### Current Behavior\r\n\r\nThe video timestamp doesn't change. It always shows the initial timestamp. However, if you change the video to a different one in the source code and rerun the app, the timestamp will change correctly.\r\n\r\n### Is this a regression?\r\n\r\n- [ ] Yes, this used to work in a previous version.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.25.0\r\n- Python version: Python 3.10.11\r\n- Operating System: Windows 11 Home 22H2\r\n- Browser: Microsoft Edge Version 115.0.1901.188 (Official build) (64-bit)\r\n\r\n\r\n### Additional Information\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport requests\n\nimport streamlit as st\n\nurl = \"https://www.w3schools.com/html/mov_bbb.mp4\"\nfile = requests.get(url).content\nst.video(file)\n", "path": "e2e/scripts/st_video.py"}]} | 1,112 | 123 |
gh_patches_debug_9299 | rasdani/github-patches | git_diff | certbot__certbot-4857 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flesh out oldest tests
We should find the oldest versions of all our Python dependencies used in OS packages and add them to the [oldest tests](https://github.com/certbot/certbot/blob/master/tox.ini#L36) in Travis. This will prevent bugs like #3098 and #4040 from slipping into a release.
The two distros I'd check here are CentOS 7 and Debian 8.
</issue>
<code>
[start of acme/setup.py]
1 import sys
2
3 from setuptools import setup
4 from setuptools import find_packages
5
6
7 version = '0.16.0.dev0'
8
9 # Please update tox.ini when modifying dependency version requirements
10 install_requires = [
11 # load_pem_private/public_key (>=0.6)
12 # rsa_recover_prime_factors (>=0.8)
13 'cryptography>=0.8',
14 # Connection.set_tlsext_host_name (>=0.13)
15 'mock',
16 'PyOpenSSL>=0.13',
17 'pyrfc3339',
18 'pytz',
19 # requests>=2.10 is required to fix
20 # https://github.com/shazow/urllib3/issues/556. This requirement can be
21 # relaxed to 'requests[security]>=2.4.1', however, less useful errors
22 # will be raised for some network/SSL errors.
23 'requests[security]>=2.10',
24 # For pkg_resources. >=1.0 so pip resolves it to a version cryptography
25 # will tolerate; see #2599:
26 'setuptools>=1.0',
27 'six',
28 ]
29
30 # env markers cause problems with older pip and setuptools
31 if sys.version_info < (2, 7):
32 install_requires.extend([
33 'argparse',
34 'ordereddict',
35 ])
36
37 dev_extras = [
38 'nose',
39 'tox',
40 ]
41
42 docs_extras = [
43 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
44 'sphinx_rtd_theme',
45 ]
46
47
48 setup(
49 name='acme',
50 version=version,
51 description='ACME protocol implementation in Python',
52 url='https://github.com/letsencrypt/letsencrypt',
53 author="Certbot Project",
54 author_email='[email protected]',
55 license='Apache License 2.0',
56 classifiers=[
57 'Development Status :: 3 - Alpha',
58 'Intended Audience :: Developers',
59 'License :: OSI Approved :: Apache Software License',
60 'Programming Language :: Python',
61 'Programming Language :: Python :: 2',
62 'Programming Language :: Python :: 2.6',
63 'Programming Language :: Python :: 2.7',
64 'Programming Language :: Python :: 3',
65 'Programming Language :: Python :: 3.3',
66 'Programming Language :: Python :: 3.4',
67 'Programming Language :: Python :: 3.5',
68 'Programming Language :: Python :: 3.6',
69 'Topic :: Internet :: WWW/HTTP',
70 'Topic :: Security',
71 ],
72
73 packages=find_packages(),
74 include_package_data=True,
75 install_requires=install_requires,
76 extras_require={
77 'dev': dev_extras,
78 'docs': docs_extras,
79 },
80 entry_points={
81 'console_scripts': [
82 'jws = acme.jose.jws:CLI.run',
83 ],
84 },
85 test_suite='acme',
86 )
87
[end of acme/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/acme/setup.py b/acme/setup.py
--- a/acme/setup.py
+++ b/acme/setup.py
@@ -16,11 +16,7 @@
'PyOpenSSL>=0.13',
'pyrfc3339',
'pytz',
- # requests>=2.10 is required to fix
- # https://github.com/shazow/urllib3/issues/556. This requirement can be
- # relaxed to 'requests[security]>=2.4.1', however, less useful errors
- # will be raised for some network/SSL errors.
- 'requests[security]>=2.10',
+ 'requests[security]>=2.4.1', # security extras added in 2.4.1
# For pkg_resources. >=1.0 so pip resolves it to a version cryptography
# will tolerate; see #2599:
'setuptools>=1.0',
| {"golden_diff": "diff --git a/acme/setup.py b/acme/setup.py\n--- a/acme/setup.py\n+++ b/acme/setup.py\n@@ -16,11 +16,7 @@\n 'PyOpenSSL>=0.13',\n 'pyrfc3339',\n 'pytz',\n- # requests>=2.10 is required to fix\n- # https://github.com/shazow/urllib3/issues/556. This requirement can be\n- # relaxed to 'requests[security]>=2.4.1', however, less useful errors\n- # will be raised for some network/SSL errors.\n- 'requests[security]>=2.10',\n+ 'requests[security]>=2.4.1', # security extras added in 2.4.1\n # For pkg_resources. >=1.0 so pip resolves it to a version cryptography\n # will tolerate; see #2599:\n 'setuptools>=1.0',\n", "issue": "Flesh out oldest tests\nWe should find the oldest versions of all our Python dependencies used in OS packages and add them to the [oldest tests](https://github.com/certbot/certbot/blob/master/tox.ini#L36) in Travis. This will prevent bugs like #3098 and #4040 from slipping into a release.\r\n\r\nThe two distros I'd check here are CentOS 7 and Debian 8.\n", "before_files": [{"content": "import sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n\nversion = '0.16.0.dev0'\n\n# Please update tox.ini when modifying dependency version requirements\ninstall_requires = [\n # load_pem_private/public_key (>=0.6)\n # rsa_recover_prime_factors (>=0.8)\n 'cryptography>=0.8',\n # Connection.set_tlsext_host_name (>=0.13)\n 'mock',\n 'PyOpenSSL>=0.13',\n 'pyrfc3339',\n 'pytz',\n # requests>=2.10 is required to fix\n # https://github.com/shazow/urllib3/issues/556. This requirement can be\n # relaxed to 'requests[security]>=2.4.1', however, less useful errors\n # will be raised for some network/SSL errors.\n 'requests[security]>=2.10',\n # For pkg_resources. >=1.0 so pip resolves it to a version cryptography\n # will tolerate; see #2599:\n 'setuptools>=1.0',\n 'six',\n]\n\n# env markers cause problems with older pip and setuptools\nif sys.version_info < (2, 7):\n install_requires.extend([\n 'argparse',\n 'ordereddict',\n ])\n\ndev_extras = [\n 'nose',\n 'tox',\n]\n\ndocs_extras = [\n 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags\n 'sphinx_rtd_theme',\n]\n\n\nsetup(\n name='acme',\n version=version,\n description='ACME protocol implementation in Python',\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n ],\n\n packages=find_packages(),\n include_package_data=True,\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'docs': docs_extras,\n },\n entry_points={\n 'console_scripts': [\n 'jws = acme.jose.jws:CLI.run',\n ],\n },\n test_suite='acme',\n)\n", "path": "acme/setup.py"}]} | 1,439 | 220 |
gh_patches_debug_27784 | rasdani/github-patches | git_diff | searx__searx-1186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bing Video search engine doesn't work
Hallo,
yesterday I've set up my own instance of searx. Thereby I discovered A problem with the Bing Video search engine. This is the shown error message:
```
Die folgenden Suchmaschinen können die Ergebnisse nicht empfangen:
bing videos (unexpected crash: list index out of range)
```
</issue>
<code>
[start of searx/engines/bing_videos.py]
1 """
2 Bing (Videos)
3
4 @website https://www.bing.com/videos
5 @provide-api yes (http://datamarket.azure.com/dataset/bing/search)
6
7 @using-api no
8 @results HTML
9 @stable no
10 @parse url, title, content, thumbnail
11 """
12
13 from json import loads
14 from lxml import html
15 from searx.engines.bing_images import _fetch_supported_languages, supported_languages_url, get_region_code
16 from searx.engines.xpath import extract_text
17 from searx.url_utils import urlencode
18
19
20 categories = ['videos']
21 paging = True
22 safesearch = True
23 time_range_support = True
24 number_of_results = 10
25 language_support = True
26
27 search_url = 'https://www.bing.com/videos/asyncv2?{query}&async=content&'\
28 'first={offset}&count={number_of_results}&CW=1366&CH=25&FORM=R5VR5'
29 time_range_string = '&qft=+filterui:videoage-lt{interval}'
30 time_range_dict = {'day': '1440',
31 'week': '10080',
32 'month': '43200',
33 'year': '525600'}
34
35 # safesearch definitions
36 safesearch_types = {2: 'STRICT',
37 1: 'DEMOTE',
38 0: 'OFF'}
39
40
41 # do search-request
42 def request(query, params):
43 offset = (params['pageno'] - 1) * 10 + 1
44
45 # safesearch cookie
46 params['cookies']['SRCHHPGUSR'] = \
47 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
48
49 # language cookie
50 region = get_region_code(params['language'], lang_list=supported_languages)
51 params['cookies']['_EDGE_S'] = 'mkt=' + region + '&F=1'
52
53 # query and paging
54 params['url'] = search_url.format(query=urlencode({'q': query}),
55 offset=offset,
56 number_of_results=number_of_results)
57
58 # time range
59 if params['time_range'] in time_range_dict:
60 params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])
61
62 return params
63
64
65 # get response from search-request
66 def response(resp):
67 results = []
68
69 dom = html.fromstring(resp.text)
70
71 for result in dom.xpath('//div[@class="dg_u"]'):
72
73 # try to extract the url
74 url_container = result.xpath('.//div[@class="sa_wrapper"]/@data-eventpayload')
75 if len(url_container) > 0:
76 url = loads(url_container[0])['purl']
77 else:
78 url = result.xpath('./a/@href')[0]
79
80 # discard results that do not return an external url
81 # very recent results sometimes don't return the video's url
82 if url.startswith('/videos/search?'):
83 continue
84
85 title = extract_text(result.xpath('./a//div[@class="tl"]'))
86 content = extract_text(result.xpath('.//div[@class="pubInfo"]'))
87 thumbnail = result.xpath('.//div[@class="vthumb"]/img/@src')[0]
88
89 results.append({'url': url,
90 'title': title,
91 'content': content,
92 'thumbnail': thumbnail,
93 'template': 'videos.html'})
94
95 # first page ignores requested number of results
96 if len(results) >= number_of_results:
97 break
98
99 return results
100
[end of searx/engines/bing_videos.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py
--- a/searx/engines/bing_videos.py
+++ b/searx/engines/bing_videos.py
@@ -69,22 +69,11 @@
dom = html.fromstring(resp.text)
for result in dom.xpath('//div[@class="dg_u"]'):
-
- # try to extract the url
- url_container = result.xpath('.//div[@class="sa_wrapper"]/@data-eventpayload')
- if len(url_container) > 0:
- url = loads(url_container[0])['purl']
- else:
- url = result.xpath('./a/@href')[0]
-
- # discard results that do not return an external url
- # very recent results sometimes don't return the video's url
- if url.startswith('/videos/search?'):
- continue
-
- title = extract_text(result.xpath('./a//div[@class="tl"]'))
- content = extract_text(result.xpath('.//div[@class="pubInfo"]'))
- thumbnail = result.xpath('.//div[@class="vthumb"]/img/@src')[0]
+ url = result.xpath('./div[@class="mc_vtvc"]/a/@href')[0]
+ url = 'https://bing.com' + url
+ title = extract_text(result.xpath('./div/a/div/div[@class="mc_vtvc_title"]/@title'))
+ content = extract_text(result.xpath('./div/a/div/div/div/div/text()'))
+ thumbnail = result.xpath('./div/a/div/div/img/@src')[0]
results.append({'url': url,
'title': title,
@@ -92,7 +81,6 @@
'thumbnail': thumbnail,
'template': 'videos.html'})
- # first page ignores requested number of results
if len(results) >= number_of_results:
break
| {"golden_diff": "diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py\n--- a/searx/engines/bing_videos.py\n+++ b/searx/engines/bing_videos.py\n@@ -69,22 +69,11 @@\n dom = html.fromstring(resp.text)\n \n for result in dom.xpath('//div[@class=\"dg_u\"]'):\n-\n- # try to extract the url\n- url_container = result.xpath('.//div[@class=\"sa_wrapper\"]/@data-eventpayload')\n- if len(url_container) > 0:\n- url = loads(url_container[0])['purl']\n- else:\n- url = result.xpath('./a/@href')[0]\n-\n- # discard results that do not return an external url\n- # very recent results sometimes don't return the video's url\n- if url.startswith('/videos/search?'):\n- continue\n-\n- title = extract_text(result.xpath('./a//div[@class=\"tl\"]'))\n- content = extract_text(result.xpath('.//div[@class=\"pubInfo\"]'))\n- thumbnail = result.xpath('.//div[@class=\"vthumb\"]/img/@src')[0]\n+ url = result.xpath('./div[@class=\"mc_vtvc\"]/a/@href')[0]\n+ url = 'https://bing.com' + url\n+ title = extract_text(result.xpath('./div/a/div/div[@class=\"mc_vtvc_title\"]/@title'))\n+ content = extract_text(result.xpath('./div/a/div/div/div/div/text()'))\n+ thumbnail = result.xpath('./div/a/div/div/img/@src')[0]\n \n results.append({'url': url,\n 'title': title,\n@@ -92,7 +81,6 @@\n 'thumbnail': thumbnail,\n 'template': 'videos.html'})\n \n- # first page ignores requested number of results\n if len(results) >= number_of_results:\n break\n", "issue": "Bing Video search engine doesn't work\nHallo,\r\n\r\nyesterday I've set up my own instance of searx. Thereby I discovered A problem with the Bing Video search engine. This is the shown error message:\r\n\r\n```\r\nDie folgenden Suchmaschinen k\u00f6nnen die Ergebnisse nicht empfangen:\r\nbing videos (unexpected crash: list index out of range)\r\n```\n", "before_files": [{"content": "\"\"\"\n Bing (Videos)\n\n @website https://www.bing.com/videos\n @provide-api yes (http://datamarket.azure.com/dataset/bing/search)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, content, thumbnail\n\"\"\"\n\nfrom json import loads\nfrom lxml import html\nfrom searx.engines.bing_images import _fetch_supported_languages, supported_languages_url, get_region_code\nfrom searx.engines.xpath import extract_text\nfrom searx.url_utils import urlencode\n\n\ncategories = ['videos']\npaging = True\nsafesearch = True\ntime_range_support = True\nnumber_of_results = 10\nlanguage_support = True\n\nsearch_url = 'https://www.bing.com/videos/asyncv2?{query}&async=content&'\\\n 'first={offset}&count={number_of_results}&CW=1366&CH=25&FORM=R5VR5'\ntime_range_string = '&qft=+filterui:videoage-lt{interval}'\ntime_range_dict = {'day': '1440',\n 'week': '10080',\n 'month': '43200',\n 'year': '525600'}\n\n# safesearch definitions\nsafesearch_types = {2: 'STRICT',\n 1: 'DEMOTE',\n 0: 'OFF'}\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * 10 + 1\n\n # safesearch cookie\n params['cookies']['SRCHHPGUSR'] = \\\n 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')\n\n # language cookie\n region = get_region_code(params['language'], lang_list=supported_languages)\n params['cookies']['_EDGE_S'] = 'mkt=' + region + '&F=1'\n\n # query and paging\n params['url'] = search_url.format(query=urlencode({'q': query}),\n offset=offset,\n number_of_results=number_of_results)\n\n # time range\n if params['time_range'] in time_range_dict:\n params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])\n\n return params\n\n\n# get 
response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n for result in dom.xpath('//div[@class=\"dg_u\"]'):\n\n # try to extract the url\n url_container = result.xpath('.//div[@class=\"sa_wrapper\"]/@data-eventpayload')\n if len(url_container) > 0:\n url = loads(url_container[0])['purl']\n else:\n url = result.xpath('./a/@href')[0]\n\n # discard results that do not return an external url\n # very recent results sometimes don't return the video's url\n if url.startswith('/videos/search?'):\n continue\n\n title = extract_text(result.xpath('./a//div[@class=\"tl\"]'))\n content = extract_text(result.xpath('.//div[@class=\"pubInfo\"]'))\n thumbnail = result.xpath('.//div[@class=\"vthumb\"]/img/@src')[0]\n\n results.append({'url': url,\n 'title': title,\n 'content': content,\n 'thumbnail': thumbnail,\n 'template': 'videos.html'})\n\n # first page ignores requested number of results\n if len(results) >= number_of_results:\n break\n\n return results\n", "path": "searx/engines/bing_videos.py"}]} | 1,602 | 427 |
gh_patches_debug_35308 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1334 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AZ Legislator with the following id has an invalid phone number AZL000372
State: AZ (be sure to include in ticket title)
This repository is for issues with state data, for feature requests, etc.
please visit the contributor guide (see above message) to file the issue in the correct place.
</issue>
<code>
[start of openstates/az/legislators.py]
1 from billy.scrape import NoDataForPeriod
2 from billy.scrape.legislators import LegislatorScraper, Legislator
3 from lxml import html
4
5 import re, datetime
6
7 class AZLegislatorScraper(LegislatorScraper):
8 jurisdiction = 'az'
9 parties = {
10 'R': 'Republican',
11 'D': 'Democratic',
12 'L': 'Libertarian',
13 'I': 'Independent',
14 'G': 'Green'
15 }
16
17 def get_party(self, abbr):
18 return self.parties[abbr]
19
20 def scrape(self, chamber, term):
21 # TODO: old AZ scraper allowed old sessions, they seem to be gone?
22 self.validate_term(term, latest_only=True)
23
24 body = {'lower': 'H', 'upper': 'S'}[chamber]
25 url = 'http://www.azleg.gov/MemberRoster/?body=' + body
26 page = self.get(url).text
27
28 # there is a bad comment closing tag on this page
29 page = page.replace('--!>', '-->')
30
31 root = html.fromstring(page)
32
33 path = '//table//tr'
34 roster = root.xpath(path)[1:]
35 for row in roster:
36 position = ''
37 name, district, party, email, room, phone, = row.xpath('td')
38
39 if email.attrib.get('class') == 'vacantmember':
40 continue # Skip any vacant members.
41
42 link = name.xpath('string(a/@href)')
43 if len(name) == 1:
44 name = name.text_content().strip()
45 else:
46 position = name.tail.strip()
47 name = name[0].text_content().strip()
48 if '--' in name:
49 name = name.split('--')[0].strip()
50
51 linkpage = self.get(link).text
52 linkpage = linkpage.replace('--!>', '-->')
53 linkroot = html.fromstring(linkpage)
54 linkroot.make_links_absolute(link)
55
56 photos = linkroot.xpath("//img[contains(@src, 'MemberPhoto')]")
57
58 if len(photos) != 1:
59 self.warning('no photo on ' + link)
60 photo_url = ''
61 else:
62 photo_url = photos[0].attrib['src']
63
64 district = district.text_content()
65 party = party.text_content().strip()
66 email = email.text_content().strip()
67
68 if email.startswith('Email: '):
69 email = email.replace('Email: ', '').lower() + '@azleg.gov'
70 else:
71 email = ''
72
73 party = self.get_party(party)
74 room = room.text_content().strip()
75 if chamber == 'lower':
76 address = "House of Representatives\n"
77 else:
78 address = "Senate\n"
79 address = address + "1700 West Washington\n Room " + room \
80 + "\nPhoenix, AZ 85007"
81
82 phone = phone.text_content().strip()
83 if not phone.startswith('602'):
84 phone = "602-" + phone
85
86 leg = Legislator(term, chamber, district, full_name=name,
87 party=party, url=link,
88 photo_url=photo_url)
89
90 leg.add_office('capitol', 'Capitol Office', address=address,
91 phone=phone, email=email)
92
93 if position:
94 leg.add_role( position, term, chamber=chamber,
95 district=district, party=party)
96
97 leg.add_source(url)
98
99 #Probably just get this from the committee scraper
100 #self.scrape_member_page(link, session, chamber, leg)
101 self.save_legislator(leg)
102
103 def scrape_member_page(self, url, session, chamber, leg):
104 html = self.get(url).text
105 root = html.fromstring(html)
106 #get the committee membership
107 c = root.xpath('//td/div/strong[contains(text(), "Committee")]')
108 for row in c.xpath('ancestor::table[1]')[1:]:
109 name = row[0].text_content().strip()
110 role = row[1].text_content().strip()
111 leg.add_role(role, session, chamber=chamber, committee=name)
112
113 leg.add_source(url)
114
[end of openstates/az/legislators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/az/legislators.py b/openstates/az/legislators.py
--- a/openstates/az/legislators.py
+++ b/openstates/az/legislators.py
@@ -1,8 +1,7 @@
-from billy.scrape import NoDataForPeriod
from billy.scrape.legislators import LegislatorScraper, Legislator
from lxml import html
+import re
-import re, datetime
class AZLegislatorScraper(LegislatorScraper):
jurisdiction = 'az'
@@ -80,30 +79,30 @@
+ "\nPhoenix, AZ 85007"
phone = phone.text_content().strip()
- if not phone.startswith('602'):
+ if '602' not in re.findall(r'(\d+)', phone):
phone = "602-" + phone
leg = Legislator(term, chamber, district, full_name=name,
- party=party, url=link,
- photo_url=photo_url)
+ party=party, url=link,
+ photo_url=photo_url)
leg.add_office('capitol', 'Capitol Office', address=address,
phone=phone, email=email)
if position:
- leg.add_role( position, term, chamber=chamber,
+ leg.add_role(position, term, chamber=chamber,
district=district, party=party)
leg.add_source(url)
- #Probably just get this from the committee scraper
- #self.scrape_member_page(link, session, chamber, leg)
+ # Probably just get this from the committee scraper
+ # self.scrape_member_page(link, session, chamber, leg)
self.save_legislator(leg)
def scrape_member_page(self, url, session, chamber, leg):
html = self.get(url).text
root = html.fromstring(html)
- #get the committee membership
+ # get the committee membership
c = root.xpath('//td/div/strong[contains(text(), "Committee")]')
for row in c.xpath('ancestor::table[1]')[1:]:
name = row[0].text_content().strip()
| {"golden_diff": "diff --git a/openstates/az/legislators.py b/openstates/az/legislators.py\n--- a/openstates/az/legislators.py\n+++ b/openstates/az/legislators.py\n@@ -1,8 +1,7 @@\n-from billy.scrape import NoDataForPeriod\n from billy.scrape.legislators import LegislatorScraper, Legislator\n from lxml import html\n+import re\n \n-import re, datetime\n \n class AZLegislatorScraper(LegislatorScraper):\n jurisdiction = 'az'\n@@ -80,30 +79,30 @@\n + \"\\nPhoenix, AZ 85007\"\n \n phone = phone.text_content().strip()\n- if not phone.startswith('602'):\n+ if '602' not in re.findall(r'(\\d+)', phone):\n phone = \"602-\" + phone\n \n leg = Legislator(term, chamber, district, full_name=name,\n- party=party, url=link,\n- photo_url=photo_url)\n+ party=party, url=link,\n+ photo_url=photo_url)\n \n leg.add_office('capitol', 'Capitol Office', address=address,\n phone=phone, email=email)\n \n if position:\n- leg.add_role( position, term, chamber=chamber,\n+ leg.add_role(position, term, chamber=chamber,\n district=district, party=party)\n \n leg.add_source(url)\n \n- #Probably just get this from the committee scraper\n- #self.scrape_member_page(link, session, chamber, leg)\n+ # Probably just get this from the committee scraper\n+ # self.scrape_member_page(link, session, chamber, leg)\n self.save_legislator(leg)\n \n def scrape_member_page(self, url, session, chamber, leg):\n html = self.get(url).text\n root = html.fromstring(html)\n- #get the committee membership\n+ # get the committee membership\n c = root.xpath('//td/div/strong[contains(text(), \"Committee\")]')\n for row in c.xpath('ancestor::table[1]')[1:]:\n name = row[0].text_content().strip()\n", "issue": "AZ Legislator with the following id has an invalid phone number AZL000372\nState: AZ (be sure to include in ticket title)\r\n\r\nThis repository is for issues with state data, for feature requests, etc.\r\nplease visit the contributor guide (see above message) to file the issue in the correct place.\r\n\n", "before_files": [{"content": "from billy.scrape import NoDataForPeriod\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom lxml import html\n\nimport re, datetime\n\nclass AZLegislatorScraper(LegislatorScraper):\n jurisdiction = 'az'\n parties = {\n 'R': 'Republican',\n 'D': 'Democratic',\n 'L': 'Libertarian',\n 'I': 'Independent',\n 'G': 'Green'\n }\n\n def get_party(self, abbr):\n return self.parties[abbr]\n\n def scrape(self, chamber, term):\n # TODO: old AZ scraper allowed old sessions, they seem to be gone?\n self.validate_term(term, latest_only=True)\n\n body = {'lower': 'H', 'upper': 'S'}[chamber]\n url = 'http://www.azleg.gov/MemberRoster/?body=' + body\n page = self.get(url).text\n\n # there is a bad comment closing tag on this page\n page = page.replace('--!>', '-->')\n\n root = html.fromstring(page)\n\n path = '//table//tr'\n roster = root.xpath(path)[1:]\n for row in roster:\n position = ''\n name, district, party, email, room, phone, = row.xpath('td')\n\n if email.attrib.get('class') == 'vacantmember':\n continue # Skip any vacant members.\n\n link = name.xpath('string(a/@href)')\n if len(name) == 1:\n name = name.text_content().strip()\n else:\n position = name.tail.strip()\n name = name[0].text_content().strip()\n if '--' in name:\n name = name.split('--')[0].strip()\n\n linkpage = self.get(link).text\n linkpage = linkpage.replace('--!>', '-->')\n linkroot = html.fromstring(linkpage)\n linkroot.make_links_absolute(link)\n\n photos = linkroot.xpath(\"//img[contains(@src, 'MemberPhoto')]\")\n\n if 
len(photos) != 1:\n self.warning('no photo on ' + link)\n photo_url = ''\n else:\n photo_url = photos[0].attrib['src']\n\n district = district.text_content()\n party = party.text_content().strip()\n email = email.text_content().strip()\n\n if email.startswith('Email: '):\n email = email.replace('Email: ', '').lower() + '@azleg.gov'\n else:\n email = ''\n\n party = self.get_party(party)\n room = room.text_content().strip()\n if chamber == 'lower':\n address = \"House of Representatives\\n\"\n else:\n address = \"Senate\\n\"\n address = address + \"1700 West Washington\\n Room \" + room \\\n + \"\\nPhoenix, AZ 85007\"\n\n phone = phone.text_content().strip()\n if not phone.startswith('602'):\n phone = \"602-\" + phone\n\n leg = Legislator(term, chamber, district, full_name=name,\n party=party, url=link,\n photo_url=photo_url)\n\n leg.add_office('capitol', 'Capitol Office', address=address,\n phone=phone, email=email)\n\n if position:\n leg.add_role( position, term, chamber=chamber,\n district=district, party=party)\n\n leg.add_source(url)\n\n #Probably just get this from the committee scraper\n #self.scrape_member_page(link, session, chamber, leg)\n self.save_legislator(leg)\n\n def scrape_member_page(self, url, session, chamber, leg):\n html = self.get(url).text\n root = html.fromstring(html)\n #get the committee membership\n c = root.xpath('//td/div/strong[contains(text(), \"Committee\")]')\n for row in c.xpath('ancestor::table[1]')[1:]:\n name = row[0].text_content().strip()\n role = row[1].text_content().strip()\n leg.add_role(role, session, chamber=chamber, committee=name)\n\n leg.add_source(url)\n", "path": "openstates/az/legislators.py"}]} | 1,755 | 492 |
gh_patches_debug_3289 | rasdani/github-patches | git_diff | mne-tools__mne-bids-272 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JOSS publication
At the MNE-Sprint in Paris, @teonbrooks @jasmainak and I discussed about writing a short report on MNE-BIDS and publishing it in [JOSS](https://joss.theoj.org/about).
JOSS articles generally provide a very high level description of the software and its relevance:
> Your submission should probably be somewhere between 250-1000 words.
It would allow us to properly point to MNE-BIDS in citations and get some scholarly recognition for our work.
I suggest that we take `pybids` as an example and create a [`/paper`](https://github.com/bids-standard/pybids/tree/master/paper) directory in our repository where we prepare the submission.
Publishing at JOSS would mean that mne-bids stays separate from mne-python instead of being integrated eventually. In a short discussion with @agramfort, we all approved of this idea, because it will allow us to stay with our lightweight and "independent" repository, while users can still benefit from mne-bids by using it as a simple "module" to MNE-Python.
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 """Setup MNE-BIDS."""
3 import os
4 from setuptools import setup, find_packages
5
6 # get the version
7 version = None
8 with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:
9 for line in (line.strip() for line in fid):
10 if line.startswith('__version__'):
11 version = line.split('=')[1].strip().strip('\'')
12 break
13 if version is None:
14 raise RuntimeError('Could not determine version')
15
16
17 descr = ('An MNE project for organizing and formatting MEG and EEG data '
18 'according to the BIDS specification.')
19
20 DISTNAME = 'mne-bids'
21 DESCRIPTION = descr
22 MAINTAINER = 'Mainak Jas'
23 MAINTAINER_EMAIL = '[email protected]'
24 URL = 'https://mne.tools/mne-bids/'
25 LICENSE = 'BSD (3-clause)'
26 DOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git'
27 VERSION = version
28
29 if __name__ == "__main__":
30 setup(name=DISTNAME,
31 maintainer=MAINTAINER,
32 maintainer_email=MAINTAINER_EMAIL,
33 description=DESCRIPTION,
34 license=LICENSE,
35 url=URL,
36 version=VERSION,
37 download_url=DOWNLOAD_URL,
38 long_description=open('README.rst').read(),
39 long_description_content_type='text/x-rst',
40 classifiers=[
41 'Intended Audience :: Science/Research',
42 'Intended Audience :: Developers',
43 'License :: OSI Approved',
44 'Programming Language :: Python',
45 'Topic :: Software Development',
46 'Topic :: Scientific/Engineering',
47 'Operating System :: Microsoft :: Windows',
48 'Operating System :: POSIX',
49 'Operating System :: Unix',
50 'Operating System :: MacOS',
51 ],
52 platforms='any',
53 packages=find_packages(),
54 scripts=['bin/mne_bids'],
55 project_urls={
56 'Documentation': 'https://mne.tools/mne-bids',
57 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues',
58 'Source': 'https://github.com/mne-tools/mne-bids',
59 },
60 )
61
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,8 +14,8 @@
raise RuntimeError('Could not determine version')
-descr = ('An MNE project for organizing and formatting MEG and EEG data '
- 'according to the BIDS specification.')
+descr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS '
+ 'specification and facilitating their analysis with MNE-Python')
DISTNAME = 'mne-bids'
DESCRIPTION = descr
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,8 +14,8 @@\n raise RuntimeError('Could not determine version')\n \n \n-descr = ('An MNE project for organizing and formatting MEG and EEG data '\n- 'according to the BIDS specification.')\n+descr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS '\n+ 'specification and facilitating their analysis with MNE-Python')\n \n DISTNAME = 'mne-bids'\n DESCRIPTION = descr\n", "issue": "JOSS publication\nAt the MNE-Sprint in Paris, @teonbrooks @jasmainak and I discussed about writing a short report on MNE-BIDS and publishing it in [JOSS](https://joss.theoj.org/about).\r\n\r\nJOSS articles generally provide a very high level description of the software and its relevance:\r\n\r\n> Your submission should probably be somewhere between 250-1000 words.\r\n\r\nIt would allow us to properly point to MNE-BIDS in citations and get some scholarly recognition for our work.\r\n\r\nI suggest that we take `pybids` as an example and create a [`/paper`](https://github.com/bids-standard/pybids/tree/master/paper) directory in our repository where we prepare the submission.\r\n\r\nPublishing at JOSS would mean that mne-bids stays separate from mne-python instead of being integrated eventually. In a short discussion with @agramfort, we all approved of this idea, because it will allow us to stay with our lightweight and \"independent\" repository, while users can still benefit from mne-bids by using it as a simple \"module\" to MNE-Python.\r\n\r\n\n", "before_files": [{"content": "#! /usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = ('An MNE project for organizing and formatting MEG and EEG data '\n 'according to the BIDS specification.')\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne.tools/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n scripts=['bin/mne_bids'],\n project_urls={\n 'Documentation': 'https://mne.tools/mne-bids',\n 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues',\n 'Source': 'https://github.com/mne-tools/mne-bids',\n },\n )\n", "path": "setup.py"}]} | 1,344 | 127 |
gh_patches_debug_24877 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-2101 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add data-base-url attribute to HTML body tag in Plone 4
Since plone 4.3.12, the `<base href` attribute in HTML generated by Plone no longer always points to the context URL as it used to prior to the change. This change broke Plone and some add-ons. More breakage may still surface. Fixes have varied because no alternative was provided when the change was made.
For a lengthy background, see the [discussion](https://community.plone.org/t/how-to-get-context-url-in-js-on-plone-4-3-12/4031).
Rather than rolling back the change which was done to support some other things and would require reverting them, I suggest providing a future-proof alternative (thanks @rodfersou for suggesting using a new attribute):
Plone 5 has removed `<base href` completely. Instead Plone 5 has added a `data-base-url` attribute to the HTML `body` tag. Which points to the context URL.
So, I suggest same be done for Plone 4. That way, anything in Plone core and/or add-ons needing context URL in Javascript have a future-proof way of getting it from here on.
@@sharing is broken on Page objects in Plone 4.3.12 and 4.3.14
## BUG/PROBLEM REPORT (OR OTHER COMMON ISSUE)
### What I did:
1. Create vanilla Plone 4.3.14 site.
2. Add private Page with title Test at the top of the site
3. Navigate to sharing tab for that page
4. Type some characters into the Search box
5. Kaboom: exception
### What I expect to happen:
List of potential users as search results
### What actually happened:
Large python back trace because the search form AJAX accessed:
http://localhost:8080/Plone2/test/@@sharing/@@updateSharingInfo
rather than the following used by Plone 4.3.11:
http://localhost:8080/Plone2/test/@@updateSharingInfo
The root cause appears to be:
https://pypi.python.org/pypi/plone.app.layout/2.3.17
2.3.15 (2016-06-28)
Fixes:
_Fix base tag differs from actual URL (fixes [86](https://github.com/plone/plone.app.layout/issues/86)). [rodfersou]_
which was actually made **after** plone.app.layout 2.3.15 was released, (December 2016): that comment is placed incorrectly in the README file. I'm happy to make a bug report there as well.
### What version of Plone/ Addons I am using:
Vanilla Plone 4.3.14, which uses plone.app.layout 2.3.17. So does Plone 4.3.12, and I see exactly the same problem there.
(NB: Pinning plone.app.layout 2.3.15 in Plone 4.3.14 resolves the problem).
Update: appears to be the same issue discussed in: https://github.com/plone/Products.CMFPlone/issues/2051
</issue>
<code>
[start of Products/CMFPlone/browser/jsvariables.py]
1 from zope.i18n import translate
2 from zope.publisher.browser import BrowserView
3
4 from Products.CMFCore.utils import getToolByName
5 from Products.CMFPlone import PloneMessageFactory as _
6
7
8 TEMPLATE = """\
9 var portal_url = '%(portal_url)s';
10 var form_modified_message = '%(form_modified)s';
11 var form_resubmit_message = '%(form_resubmit)s';
12 var external_links_open_new_window = '%(open_links)s';
13 var mark_special_links = '%(mark_links)s';
14 var ajax_noresponse_message = '%(ajax_noresponse)s';
15 """
16
17 FORM_MODIFIED = _(u'text_form_modified_message',
18 default=u'Your form has not been saved. All changes you '
19 u'have made will be lost.')
20
21 FORM_RESUBMIT = _(u'text_form_resubmit_message',
22 default=u'You already clicked the submit button. Do you '
23 u'really want to submit this form again?')
24
25 AJAX_NORESPONSE = _(u'text_ajax_noresponse_message',
26 default=u'No response from server. Please try again '
27 u'later.')
28
29
30 class JSVariables(BrowserView):
31
32 def __call__(self, *args, **kwargs):
33 context = self.context
34 response = self.request.response
35 response.setHeader('content-type', 'text/javascript;;charset=utf-8')
36
37 props = getToolByName(context, 'portal_properties').site_properties
38 portal_url = getToolByName(context, 'portal_url')()
39
40 # the following are flags for mark_special_links.js
41 # links get the target="_blank" attribute
42 open_links = props.getProperty('external_links_open_new_window',
43 'false')
44 mark_links = props.getProperty('mark_special_links', 'false')
45
46 form_modified = translate(FORM_MODIFIED, context=self.request)
47 form_resubmit = translate(FORM_RESUBMIT, context=self.request)
48 ajax_noresponse = translate(AJAX_NORESPONSE, context=self.request)
49
50 # escape_for_js
51 form_modified = form_modified.replace("'", "\\'")
52 form_resubmit = form_resubmit.replace("'", "\\'")
53 ajax_noresponse = ajax_noresponse.replace("'", "\\'")
54
55 return TEMPLATE % dict(
56 portal_url=portal_url,
57 open_links=open_links,
58 mark_links=mark_links,
59 form_modified=form_modified,
60 form_resubmit=form_resubmit,
61 ajax_noresponse=ajax_noresponse,
62 )
63
[end of Products/CMFPlone/browser/jsvariables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Products/CMFPlone/browser/jsvariables.py b/Products/CMFPlone/browser/jsvariables.py
--- a/Products/CMFPlone/browser/jsvariables.py
+++ b/Products/CMFPlone/browser/jsvariables.py
@@ -7,6 +7,7 @@
TEMPLATE = """\
var portal_url = '%(portal_url)s';
+var base_url = '%(base_url)s';
var form_modified_message = '%(form_modified)s';
var form_resubmit_message = '%(form_resubmit)s';
var external_links_open_new_window = '%(open_links)s';
@@ -36,6 +37,7 @@
props = getToolByName(context, 'portal_properties').site_properties
portal_url = getToolByName(context, 'portal_url')()
+ base_url = self.request['HTTP_REFERER']
# the following are flags for mark_special_links.js
# links get the target="_blank" attribute
@@ -54,6 +56,7 @@
return TEMPLATE % dict(
portal_url=portal_url,
+ base_url=base_url,
open_links=open_links,
mark_links=mark_links,
form_modified=form_modified,
| {"golden_diff": "diff --git a/Products/CMFPlone/browser/jsvariables.py b/Products/CMFPlone/browser/jsvariables.py\n--- a/Products/CMFPlone/browser/jsvariables.py\n+++ b/Products/CMFPlone/browser/jsvariables.py\n@@ -7,6 +7,7 @@\n \n TEMPLATE = \"\"\"\\\n var portal_url = '%(portal_url)s';\n+var base_url = '%(base_url)s';\n var form_modified_message = '%(form_modified)s';\n var form_resubmit_message = '%(form_resubmit)s';\n var external_links_open_new_window = '%(open_links)s';\n@@ -36,6 +37,7 @@\n \n props = getToolByName(context, 'portal_properties').site_properties\n portal_url = getToolByName(context, 'portal_url')()\n+ base_url = self.request['HTTP_REFERER']\n \n # the following are flags for mark_special_links.js\n # links get the target=\"_blank\" attribute\n@@ -54,6 +56,7 @@\n \n return TEMPLATE % dict(\n portal_url=portal_url,\n+ base_url=base_url,\n open_links=open_links,\n mark_links=mark_links,\n form_modified=form_modified,\n", "issue": "add data-base-url attribute to HTML body tag in Plone 4\nSince plone 4.3.12, the `<base href` attribute in HTML generated by Plone no longer always points to the context URL as it used to prior to the change. This change broke Plone and some add-ons. More breakage may still surface. Fixes have varied because no alternative was provided when the change was made.\r\n\r\nFor a lengthy background, see the [discussion](https://community.plone.org/t/how-to-get-context-url-in-js-on-plone-4-3-12/4031). \r\n\r\nRather than rolling back the change which was done to support some other things and would require reverting them, I suggest providing a future-proof alternative (thanks @rodfersou for suggesting using a new attribute):\r\n\r\nPlone 5 has removed `<base href` completely. Instead Plone 5 has added a `data-base-url` attribute to the HTML `body` tag. Which points to the context URL.\r\n\r\nSo, I suggest same be done for Plone 4. That way, anything in Plone core and/or add-ons needing context URL in Javascript have a future-proof way of getting it from here on.\r\n\r\n\n@@sharing is broken on Page objects in Plone 4.3.12 and 4.3.14\n## BUG/PROBLEM REPORT (OR OTHER COMMON ISSUE)\r\n\r\n### What I did:\r\n\r\n1. Create vanilla Plone 4.3.14 site.\r\n2. Add private Page with title Test at the top of the site\r\n3. Navigate to sharing tab for that page\r\n4. Type some characters into the Search box\r\n5. Kaboom: exception\r\n\r\n### What I expect to happen:\r\n\r\nList of potential users as search results\r\n\r\n### What actually happened:\r\n\r\nLarge python back trace because the search form AJAX accessed:\r\n\r\n http://localhost:8080/Plone2/test/@@sharing/@@updateSharingInfo\r\n\r\nrather than the following used by Plone 4.3.11:\r\n\r\n http://localhost:8080/Plone2/test/@@updateSharingInfo\r\n \r\nThe root cause appears to be:\r\n\r\nhttps://pypi.python.org/pypi/plone.app.layout/2.3.17\r\n\r\n 2.3.15 (2016-06-28)\r\n Fixes:\r\n _Fix base tag differs from actual URL (fixes [86](https://github.com/plone/plone.app.layout/issues/86)). [rodfersou]_\r\n\r\nwhich was actually made **after** plone.app.layout 2.3.15 was released, (December 2016): that comment is placed incorrectly in the README file. I'm happy to make a bug report there as well.\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\nVanilla Plone 4.3.14, which uses plone.app.layout 2.3.17. 
So does Plone 4.3.12, and I see exactly the same problem there.\r\n\r\n(NB: Pinning plone.app.layout 2.3.15 in Plone 4.3.14 resolves the problem).\r\n\r\nUpdate: appears to be the same issue discussed in: https://github.com/plone/Products.CMFPlone/issues/2051\r\n\n", "before_files": [{"content": "from zope.i18n import translate\nfrom zope.publisher.browser import BrowserView\n\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import PloneMessageFactory as _\n\n\nTEMPLATE = \"\"\"\\\nvar portal_url = '%(portal_url)s';\nvar form_modified_message = '%(form_modified)s';\nvar form_resubmit_message = '%(form_resubmit)s';\nvar external_links_open_new_window = '%(open_links)s';\nvar mark_special_links = '%(mark_links)s';\nvar ajax_noresponse_message = '%(ajax_noresponse)s';\n\"\"\"\n\nFORM_MODIFIED = _(u'text_form_modified_message',\n default=u'Your form has not been saved. All changes you '\n u'have made will be lost.')\n\nFORM_RESUBMIT = _(u'text_form_resubmit_message',\n default=u'You already clicked the submit button. Do you '\n u'really want to submit this form again?')\n\nAJAX_NORESPONSE = _(u'text_ajax_noresponse_message',\n default=u'No response from server. Please try again '\n u'later.')\n\n\nclass JSVariables(BrowserView):\n\n def __call__(self, *args, **kwargs):\n context = self.context\n response = self.request.response\n response.setHeader('content-type', 'text/javascript;;charset=utf-8')\n\n props = getToolByName(context, 'portal_properties').site_properties\n portal_url = getToolByName(context, 'portal_url')()\n\n # the following are flags for mark_special_links.js\n # links get the target=\"_blank\" attribute\n open_links = props.getProperty('external_links_open_new_window',\n 'false')\n mark_links = props.getProperty('mark_special_links', 'false')\n\n form_modified = translate(FORM_MODIFIED, context=self.request)\n form_resubmit = translate(FORM_RESUBMIT, context=self.request)\n ajax_noresponse = translate(AJAX_NORESPONSE, context=self.request)\n\n # escape_for_js\n form_modified = form_modified.replace(\"'\", \"\\\\'\")\n form_resubmit = form_resubmit.replace(\"'\", \"\\\\'\")\n ajax_noresponse = ajax_noresponse.replace(\"'\", \"\\\\'\")\n\n return TEMPLATE % dict(\n portal_url=portal_url,\n open_links=open_links,\n mark_links=mark_links,\n form_modified=form_modified,\n form_resubmit=form_resubmit,\n ajax_noresponse=ajax_noresponse,\n )\n", "path": "Products/CMFPlone/browser/jsvariables.py"}]} | 1,891 | 262 |
gh_patches_debug_19644 | rasdani/github-patches | git_diff | great-expectations__great_expectations-5207 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use a cleaner solution for non-truncating division in Python 2
Prefer `from __future__ import division` to `1.*x/y`
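A minimal sketch of the difference (values are placeholders):

```python
from __future__ import division  # Python 2: make / mean true division module-wide

x, y = 7, 2
print(x / y)       # 3.5 (true division)
print(x // y)      # 3 (explicit floor division where truncation is intended)
print(1. * x / y)  # 3.5, the older per-expression workaround this issue wants to drop
```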
</issue>
<code>
[start of great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py]
1 import pandas as pd
2
3 from great_expectations.execution_engine import (
4 PandasExecutionEngine,
5 SparkDFExecutionEngine,
6 SqlAlchemyExecutionEngine,
7 )
8 from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
9 ColumnAggregateMetricProvider,
10 column_aggregate_partial,
11 column_aggregate_value,
12 )
13 from great_expectations.expectations.metrics.import_manager import F, sa
14
15
16 class ColumnValuesLengthMin(ColumnAggregateMetricProvider):
17 metric_name = "column_values.length.min"
18
19 @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)
20 def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:
21 return column.map(len).min()
22
23 @column_aggregate_partial(
24 engine=SqlAlchemyExecutionEngine, filter_column_isnull=True
25 )
26 def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
27 return sa.func.min(sa.func.length(column))
28
29 @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
30 def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
31 return F.min(F.length(F.col(column)))
32
[end of great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py]
[start of great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py]
1 import pandas as pd
2
3 from great_expectations.execution_engine import (
4 PandasExecutionEngine,
5 SparkDFExecutionEngine,
6 SqlAlchemyExecutionEngine,
7 )
8 from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
9 ColumnAggregateMetricProvider,
10 column_aggregate_partial,
11 column_aggregate_value,
12 )
13 from great_expectations.expectations.metrics.import_manager import F, sa
14
15
16 class ColumnValuesLengthMax(ColumnAggregateMetricProvider):
17 metric_name = "column_values.length.max"
18
19 @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)
20 def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:
21 return column.map(len).max()
22
23 @column_aggregate_partial(
24 engine=SqlAlchemyExecutionEngine, filter_column_isnull=True
25 )
26 def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
27 return sa.func.max(sa.func.length(column))
28
29 @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
30 def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
31 return F.max(F.length(F.col(column)))
32
[end of great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py
--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py
+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py
@@ -27,5 +27,5 @@
return sa.func.max(sa.func.length(column))
@column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
- return F.max(F.length(F.col(column)))
+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
+ return F.max(F.length(column))
diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py
--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py
+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py
@@ -27,5 +27,5 @@
return sa.func.min(sa.func.length(column))
@column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)
- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]
- return F.min(F.length(F.col(column)))
+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]
+ return F.min(F.length(column))
| {"golden_diff": "diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py\n@@ -27,5 +27,5 @@\n return sa.func.max(sa.func.length(column))\n \n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n- return F.max(F.length(F.col(column)))\n+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n+ return F.max(F.length(column))\ndiff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py\n@@ -27,5 +27,5 @@\n return sa.func.min(sa.func.length(column))\n \n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n- def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n- return F.min(F.length(F.col(column)))\n+ def _spark(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n+ return F.min(F.length(column))\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import pandas as pd\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\n\n\nclass ColumnValuesLengthMin(ColumnAggregateMetricProvider):\n metric_name = \"column_values.length.min\"\n\n @column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)\n def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:\n return column.map(len).min()\n\n @column_aggregate_partial(\n engine=SqlAlchemyExecutionEngine, filter_column_isnull=True\n )\n def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n return sa.func.min(sa.func.length(column))\n\n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n return F.min(F.length(F.col(column)))\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_min.py"}, {"content": "import pandas as pd\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric_provider import (\n ColumnAggregateMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.import_manager import F, sa\n\n\nclass ColumnValuesLengthMax(ColumnAggregateMetricProvider):\n metric_name = \"column_values.length.max\"\n\n 
@column_aggregate_value(engine=PandasExecutionEngine, filter_column_isnull=True)\n def _pandas(cls, column: pd.Series, **kwargs: dict) -> int:\n return column.map(len).max()\n\n @column_aggregate_partial(\n engine=SqlAlchemyExecutionEngine, filter_column_isnull=True\n )\n def _sqlalchemy(cls, column, **kwargs: dict): # type: ignore[no-untyped-def]\n return sa.func.max(sa.func.length(column))\n\n @column_aggregate_partial(engine=SparkDFExecutionEngine, filter_column_isnull=True)\n def _spark(cls, column: str, **kwargs: dict): # type: ignore[no-untyped-def]\n return F.max(F.length(F.col(column)))\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_values_length_max.py"}]} | 1,270 | 397 |
gh_patches_debug_2335 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1841 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update `test` dependency from `nteract-scrapbook` to `scrapbook`
### Summary
Running the notebook tests generates the warning
```pytb
warnings.warn("'nteract-scrapbook' package has been renamed to `scrapbook`. No new releases are going out for this old package name.", FutureWarning)
```
as [`nteract-scrapbook`](https://pypi.org/project/nteract-scrapbook/) is now [`scrapbook`](https://pypi.org/project/scrapbook/). All that needs to be done is to change the name used in `setup.py` for the `test` extra:
https://github.com/scikit-hep/pyhf/blob/29bc6daed55b40711fabd9b22d3e76f9ee15657d/setup.py#L42
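Concretely, the swap in the `test` extra would look something like this (the new pin below is a guess, not taken from the repository):

```python
test_extra = [
    # ...
    'papermill~=2.0',
    'scrapbook~=0.5',  # previously: 'nteract-scrapbook~=0.2'
    'jupyter',
    # ...
]
```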
### Additional Information
_No response_
### Code of Conduct
- [X] I agree to follow the Code of Conduct
</issue>
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789
7 'tensorflow-probability>=0.11.0', # c.f. PR #1657
8 ],
9 'torch': ['torch>=1.10.0'], # c.f. PR #1657
10 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501
11 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567
12 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306
13 }
14 extras_require['backends'] = sorted(
15 set(
16 extras_require['tensorflow']
17 + extras_require['torch']
18 + extras_require['jax']
19 + extras_require['minuit']
20 )
21 )
22 extras_require['contrib'] = sorted({'matplotlib', 'requests'})
23 extras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + extras_require['shellcomplete']
31 + [
32 'scikit-hep-testdata>=0.4.11',
33 'pytest>=6.0',
34 'pytest-cov>=2.5.1',
35 'pytest-mock',
36 'requests-mock>=1.9.0',
37 'pytest-benchmark[histogram]',
38 'pytest-console-scripts',
39 'pytest-mpl',
40 'pydocstyle',
41 'papermill~=2.0',
42 'nteract-scrapbook~=0.2',
43 'jupyter',
44 'graphviz',
45 ]
46 )
47 )
48 extras_require['docs'] = sorted(
49 set(
50 extras_require['xmlio']
51 + extras_require['contrib']
52 + [
53 'sphinx>=4.0.0',
54 'sphinxcontrib-bibtex~=2.1',
55 'sphinx-click',
56 'sphinx_rtd_theme',
57 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620
58 'ipywidgets',
59 'sphinx-issues',
60 'sphinx-copybutton>=0.3.2',
61 'sphinx-togglebutton>=0.3.0',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['lint']
69 + extras_require['test']
70 + [
71 'nbdime',
72 'tbump>=6.7.0',
73 'ipython',
74 'pre-commit',
75 'check-manifest',
76 'codemetapy>=0.3.4',
77 'twine',
78 ]
79 )
80 )
81 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
82
83
84 setup(
85 extras_require=extras_require,
86 use_scm_version=lambda: {'local_scheme': lambda version: ''},
87 )
88
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -38,8 +38,8 @@
'pytest-console-scripts',
'pytest-mpl',
'pydocstyle',
- 'papermill~=2.0',
- 'nteract-scrapbook~=0.2',
+ 'papermill~=2.3.4',
+ 'scrapbook~=0.5.0',
'jupyter',
'graphviz',
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -38,8 +38,8 @@\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n- 'papermill~=2.0',\n- 'nteract-scrapbook~=0.2',\n+ 'papermill~=2.3.4',\n+ 'scrapbook~=0.5.0',\n 'jupyter',\n 'graphviz',\n ]\n", "issue": "Update `test` dependency from `nteract-scrapbook` to `scrapbook`\n### Summary\n\nRunning the notebook tests generates the warning\r\n\r\n```pytb\r\nwarnings.warn(\"'nteract-scrapbook' package has been renamed to `scrapbook`. No new releases are going out for this old package name.\", FutureWarning)\r\n```\r\n\r\nas [`nteract-scrapbook`](https://pypi.org/project/nteract-scrapbook/) is now [`scrapbook`](https://pypi.org/project/scrapbook/). All that needs to be done is to change the name used in `steup.py` for the `test` extra:\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/29bc6daed55b40711fabd9b22d3e76f9ee15657d/setup.py#L42\n\n### Additional Information\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow the Code of Conduct\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n 'sphinx-togglebutton>=0.3.0',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,640 | 112 |
gh_patches_debug_60747 | rasdani/github-patches | git_diff | hi-primus__optimus-872 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JSON file exploration/profiling
Unstructured data such as JSON cannot be explored like regular tabular data. I have been exploring the use of tree depth and node counts to highlight to the user which nodes could hold important data.
Some work in progress here: https://github.com/ironmussa/Optimus/blob/develop-3.0/optimus/engines/pandas/io/json.py
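A minimal sketch of the idea (not the Optimus implementation):

```python
def profile(node, path="$", depth=0, stats=None):
    # Record (path, depth, child count) for every dict or list node in a JSON tree.
    stats = [] if stats is None else stats
    if isinstance(node, (dict, list)):
        stats.append((path, depth, len(node)))
        items = node.items() if isinstance(node, dict) else enumerate(node)
        for key, value in items:
            profile(value, f"{path}.{key}", depth + 1, stats)
    return stats

data = {"users": [{"name": "a", "tags": ["x", "y"]}, {"name": "b"}]}
# Largest containers first, so the most promising nodes surface at the top.
for path, depth, count in sorted(profile(data), key=lambda r: -r[2]):
    print(path, depth, count)
```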
</issue>
<code>
[start of optimus/engines/pandas/io/json.py]
1 import glob
2
3 import pandas as pd
4 import ujson
5 from glom import glom
6
7 from optimus.infer import is_dict, is_list, is_str, is_int
8
9 META = "_meta"
10 PROPERTIES = "_properties"
11 ITEMS = "_items"
12
13 COL_DEPTH = "depth"
14
15
16 class JSON:
17 def __init__(self):
18 self.data = None
19
20 def load(self, path):
21 """
22 Load a file in JSON format
23 :param path:
24 :return:
25 """
26 all_json = glob.glob(path, recursive=True)
27 # pd.read_json("data/corona.json")
28 with open(all_json[0]) as f:
29 self.data = ujson.load(f)
30
31 def schema(self):
32 """
33 Return a JSON with the count, dtype and nested structure
34 :return:
35 """
36
37 def _schema(_data, _keys):
38 if isinstance(_data, dict):
39 for x, y in _data.items():
40 if is_dict(y):
41 _keys[x] = {META: {"count": len(y), "dtype": type(y)}}
42 if len(y) > 0:
43 _keys[x][PROPERTIES] = {}
44 _schema(y, _keys[x][PROPERTIES])
45 elif is_list(y):
46 _keys[x] = {META: {"count": len(y), "dtype": type(y)}}
47 if len(y) > 0:
48 _keys[x] = {ITEMS: {PROPERTIES: {}, META: {"count": len(y), "dtype": type(y)}}}
49 _schema(y, _keys[x][ITEMS][PROPERTIES])
50 elif is_str(y):
51 _keys[x] = {META: {"count": len(y), "dtype": type(y)}}
52 _schema(y, _keys[x])
53 elif is_int(y):
54 _keys[x] = {META: {"dtype": type(y)}}
55 _schema(y, _keys[x])
56
57 elif is_list(_data):
58 for x in _data:
59 _schema(x, _keys)
60
61 keys = {}
62 _schema(self.data, keys)
63 return keys
64
65 def freq(self, n=100):
66 """
67 Calculate the count on every dict or list in the json
68 :param n:
69 :return:
70 """
71
72 def _profile(keys, parent, result=None):
73 for key, values in keys.items():
74 if values.get(PROPERTIES):
75 _meta = values.get(META)
76 _properties = values.get(PROPERTIES)
77 elif values.get(ITEMS):
78 _meta = values.get(ITEMS).get(META)
79 _properties = values.get(ITEMS).get(PROPERTIES)
80
81 if values.get(PROPERTIES) or values.get(ITEMS):
82 result.append([key, _meta["count"], _meta["dtype"], parent, len(parent)])
83 _profile(_properties, parent + [key], result=result)
84
85 data = []
86 _profile(self.schema(), [], data)
87 df = pd.DataFrame(data, columns=['key', 'count', 'dtype', 'path', COL_DEPTH])
88 df = df.sort_values(by=["count", COL_DEPTH], ascending=[False, True]).head(n).to_dict(orient='row')
89 return df
90
91 def flatten(self, path):
92 """
93 Flatten a JSON from a json path
94 :param path:
95 :return:
96 """
97
98 def _flatten_json(_values):
99 out = {}
100
101 def flatten(x, name=''):
102 if type(x) is dict:
103 for a in x:
104 flatten(x[a], name + a + '_')
105 elif type(x) is list:
106 # i = 0
107 for a in x:
108 # flatten(a, name + str(i) + '_')
109 flatten(a, name + '_')
110 # i += 1
111 else:
112 out[name[:-1]] = x
113
114 flatten(_values)
115 return out
116
117 result = []
118 value = glom(self.data, path, skip_exc=KeyError)
119 if is_list(value):
120 for i in value:
121 result.append((_flatten_json(i)))
122 elif is_dict(value):
123 for i, j in value.items():
124 a = {"col": i}
125 a.update(_flatten_json(j))
126 result.append(a)
127 return result
128
129 def to_pandas(self, path):
130 result = self.flatten(path)
131 return pd.DataFrame(data=result)
132
[end of optimus/engines/pandas/io/json.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optimus/engines/pandas/io/json.py b/optimus/engines/pandas/io/json.py
--- a/optimus/engines/pandas/io/json.py
+++ b/optimus/engines/pandas/io/json.py
@@ -121,7 +121,7 @@
result.append((_flatten_json(i)))
elif is_dict(value):
for i, j in value.items():
- a = {"col": i}
+ a = {path: i}
a.update(_flatten_json(j))
result.append(a)
return result
| {"golden_diff": "diff --git a/optimus/engines/pandas/io/json.py b/optimus/engines/pandas/io/json.py\n--- a/optimus/engines/pandas/io/json.py\n+++ b/optimus/engines/pandas/io/json.py\n@@ -121,7 +121,7 @@\n result.append((_flatten_json(i)))\n elif is_dict(value):\n for i, j in value.items():\n- a = {\"col\": i}\n+ a = {path: i}\n a.update(_flatten_json(j))\n result.append(a)\n return result\n", "issue": "Json file exploration/profiling\nUnstructured data as JSON can not be explored as regular tabular data. I have been exploring using tree depth and count to highlight the user in which nodes could have important data.\r\n\r\nSome work in progress, here. https://github.com/ironmussa/Optimus/blob/develop-3.0/optimus/engines/pandas/io/json.py\n", "before_files": [{"content": "import glob\n\nimport pandas as pd\nimport ujson\nfrom glom import glom\n\nfrom optimus.infer import is_dict, is_list, is_str, is_int\n\nMETA = \"_meta\"\nPROPERTIES = \"_properties\"\nITEMS = \"_items\"\n\nCOL_DEPTH = \"depth\"\n\n\nclass JSON:\n def __init__(self):\n self.data = None\n\n def load(self, path):\n \"\"\"\n Load a file in JSON format\n :param path:\n :return:\n \"\"\"\n all_json = glob.glob(path, recursive=True)\n # pd.read_json(\"data/corona.json\")\n with open(all_json[0]) as f:\n self.data = ujson.load(f)\n\n def schema(self):\n \"\"\"\n Return a JSON with the count, dtype and nested structure\n :return:\n \"\"\"\n\n def _schema(_data, _keys):\n if isinstance(_data, dict):\n for x, y in _data.items():\n if is_dict(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n if len(y) > 0:\n _keys[x][PROPERTIES] = {}\n _schema(y, _keys[x][PROPERTIES])\n elif is_list(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n if len(y) > 0:\n _keys[x] = {ITEMS: {PROPERTIES: {}, META: {\"count\": len(y), \"dtype\": type(y)}}}\n _schema(y, _keys[x][ITEMS][PROPERTIES])\n elif is_str(y):\n _keys[x] = {META: {\"count\": len(y), \"dtype\": type(y)}}\n _schema(y, _keys[x])\n elif is_int(y):\n _keys[x] = {META: {\"dtype\": type(y)}}\n _schema(y, _keys[x])\n\n elif is_list(_data):\n for x in _data:\n _schema(x, _keys)\n\n keys = {}\n _schema(self.data, keys)\n return keys\n\n def freq(self, n=100):\n \"\"\"\n Calculate the count on every dict or list in the json\n :param n:\n :return:\n \"\"\"\n\n def _profile(keys, parent, result=None):\n for key, values in keys.items():\n if values.get(PROPERTIES):\n _meta = values.get(META)\n _properties = values.get(PROPERTIES)\n elif values.get(ITEMS):\n _meta = values.get(ITEMS).get(META)\n _properties = values.get(ITEMS).get(PROPERTIES)\n\n if values.get(PROPERTIES) or values.get(ITEMS):\n result.append([key, _meta[\"count\"], _meta[\"dtype\"], parent, len(parent)])\n _profile(_properties, parent + [key], result=result)\n\n data = []\n _profile(self.schema(), [], data)\n df = pd.DataFrame(data, columns=['key', 'count', 'dtype', 'path', COL_DEPTH])\n df = df.sort_values(by=[\"count\", COL_DEPTH], ascending=[False, True]).head(n).to_dict(orient='row')\n return df\n\n def flatten(self, path):\n \"\"\"\n Flatten a JSON from a json path\n :param path:\n :return:\n \"\"\"\n\n def _flatten_json(_values):\n out = {}\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + '_')\n elif type(x) is list:\n # i = 0\n for a in x:\n # flatten(a, name + str(i) + '_')\n flatten(a, name + '_')\n # i += 1\n else:\n out[name[:-1]] = x\n\n flatten(_values)\n return out\n\n result = []\n value = glom(self.data, path, skip_exc=KeyError)\n if 
is_list(value):\n for i in value:\n result.append((_flatten_json(i)))\n elif is_dict(value):\n for i, j in value.items():\n a = {\"col\": i}\n a.update(_flatten_json(j))\n result.append(a)\n return result\n\n def to_pandas(self, path):\n result = self.flatten(path)\n return pd.DataFrame(data=result)\n", "path": "optimus/engines/pandas/io/json.py"}]} | 1,866 | 129 |
gh_patches_debug_8107 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2643 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Transparent mode fails with a lookup failure.
##### Steps to reproduce the problem:
1. Launch a Wi-Fi access point (OS X)
2. Set up a pfctl configuration so that HTTP packets will be forwarded.
3. Launch mitmproxy ( `sudo mitmproxy -p 8080 -m transparent --showhost` )
4. Access a web page after connecting to the AP launched before.
5. See the event log.
##### Any other comments? What have you tried so far?
When I tried to use transparent mode with OS X (10.11.6), RuntimeError("Could not resolve original destination.") was raised.
I investigated this bug and found that it is caused by a difference between AF_INET's and AF_INET6's peername.
https://github.com/mitmproxy/mitmproxy/blob/de006ea8adc08b9a8a6aa94eda2b30468727c307/mitmproxy/net/tcp.py#L567
If we use AF_INET, getpeername() returns a string like `"192.168.2.5:45670"`.
But if we use AF_INET6, getpeername() returns a string like `"::ffff:192.168.2.5:45670"`.
The output of `pfctl -s state` looks like this:
`ALL tcp 192.168.2.5:45670 -> xx.xx.xx.xx:33291 -> xx.xx.xx.xx:443 ESTABLISHED:ESTABLISHED`
As you can see, the `::ffff:` prefix is not there.
So [lookup](https://github.com/mitmproxy/mitmproxy/blob/f17c0fdac636f7269f4885294e2a8d2c52c23590/mitmproxy/platform/pf.py#L4) function raises RuntimeError() because `spec in i` condition won't become true.
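For illustration, one way to normalize such an IPv4-mapped address before building the lookup key (a sketch, not necessarily how it should be fixed in mitmproxy):

```python
import re

def normalize(address):
    # Strip the IPv4-mapped IPv6 prefix so "::ffff:192.168.2.5" matches the
    # plain "192.168.2.5" that pfctl prints in its state table.
    return re.sub(r"^::ffff:(?=\d+\.\d+\.\d+\.\d+$)", "", address)

print(normalize("::ffff:192.168.2.5"))  # -> 192.168.2.5
print(normalize("192.168.2.5"))         # unchanged
```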
##### System information
Mitmproxy version: 3.0.0 (release version)
Python version: 3.6.2
Platform: Darwin-15.6.0-x86_64-i386-64bit
SSL version: OpenSSL 1.0.2l 25 May 2017
Mac version: 10.11.6 ('', '', '') x86_64
</issue>
<code>
[start of mitmproxy/platform/pf.py]
1 import sys
2
3
4 def lookup(address, port, s):
5 """
6 Parse the pfctl state output s, to look up the destination host
7 matching the client (address, port).
8
9 Returns an (address, port) tuple, or None.
10 """
11 s = s.decode()
12 spec = "%s:%s" % (address, port)
13 for i in s.split("\n"):
14 if "ESTABLISHED:ESTABLISHED" in i and spec in i:
15 s = i.split()
16 if len(s) > 4:
17 if sys.platform.startswith("freebsd"):
18 # strip parentheses for FreeBSD pfctl
19 s = s[3][1:-1].split(":")
20 else:
21 s = s[4].split(":")
22
23 if len(s) == 2:
24 return s[0], int(s[1])
25 raise RuntimeError("Could not resolve original destination.")
26
[end of mitmproxy/platform/pf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/platform/pf.py b/mitmproxy/platform/pf.py
--- a/mitmproxy/platform/pf.py
+++ b/mitmproxy/platform/pf.py
@@ -1,3 +1,4 @@
+import re
import sys
@@ -8,6 +9,9 @@
Returns an (address, port) tuple, or None.
"""
+ # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.
+ # Those still appear as "127.0.0.1" in the table, so we need to strip the prefix.
+ address = re.sub("^::ffff:(?=\d+.\d+.\d+.\d+$)", "", address)
s = s.decode()
spec = "%s:%s" % (address, port)
for i in s.split("\n"):
| {"golden_diff": "diff --git a/mitmproxy/platform/pf.py b/mitmproxy/platform/pf.py\n--- a/mitmproxy/platform/pf.py\n+++ b/mitmproxy/platform/pf.py\n@@ -1,3 +1,4 @@\n+import re\n import sys\n \n \n@@ -8,6 +9,9 @@\n \n Returns an (address, port) tuple, or None.\n \"\"\"\n+ # We may get an ipv4-mapped ipv6 address here, e.g. ::ffff:127.0.0.1.\n+ # Those still appear as \"127.0.0.1\" in the table, so we need to strip the prefix.\n+ address = re.sub(\"^::ffff:(?=\\d+.\\d+.\\d+.\\d+$)\", \"\", address)\n s = s.decode()\n spec = \"%s:%s\" % (address, port)\n for i in s.split(\"\\n\"):\n", "issue": "Transparent mode fail with looking up failure.\n##### Steps to reproduce the problem:\r\n\r\n1. Launch Wifi Access Point(OS X)\r\n2. Setup pfctl configuration so that http packet will be forwarded.\r\n3. Launch mitmproxy ( `sudo mitmproxy -p 8080 -m transparent --showhost` )\r\n4. Access web page after connecting to AP which launched before.\r\n5. See event log.\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nWhen I tried to use transparent mode with OS X(10.11.6).\r\nRuntimeError(\"Could not resolve original destination.\") raised.\r\n\r\nI investigated this bug.\r\nAnd I found that this is caused by difference between AF_INET's and AF_INET6's peername.\r\nhttps://github.com/mitmproxy/mitmproxy/blob/de006ea8adc08b9a8a6aa94eda2b30468727c307/mitmproxy/net/tcp.py#L567\r\n\r\nIf we use AF_INET, getpeername() return string like `\"192.168.2.5:45670\"`.\r\nBut if we use AF_INET6, getpeername() return string like `\"::ffff:192.168.2.5:45670\"`.\r\n\r\n`pfctl -s state` 's result is like below.\r\n`ALL tcp 192.168.2.5:45670 -> xx.xx.xx.xx:33291 -> xx.xx.xx.xx:443 ESTABLISHED:ESTABLISHED`\r\n\r\nAs you see, `::ffff:` doesn't exist.\r\n\r\nSo [lookup](https://github.com/mitmproxy/mitmproxy/blob/f17c0fdac636f7269f4885294e2a8d2c52c23590/mitmproxy/platform/pf.py#L4) function raises RuntimeError() because `spec in i` condition won't become true.\r\n\r\n##### System information\r\n\r\nMitmproxy version: 3.0.0 (release version)\r\nPython version: 3.6.2\r\nPlatform: Darwin-15.6.0-x86_64-i386-64bit\r\nSSL version: OpenSSL 1.0.2l 25 May 2017\r\nMac version: 10.11.6 ('', '', '') x86_64\n", "before_files": [{"content": "import sys\n\n\ndef lookup(address, port, s):\n \"\"\"\n Parse the pfctl state output s, to look up the destination host\n matching the client (address, port).\n\n Returns an (address, port) tuple, or None.\n \"\"\"\n s = s.decode()\n spec = \"%s:%s\" % (address, port)\n for i in s.split(\"\\n\"):\n if \"ESTABLISHED:ESTABLISHED\" in i and spec in i:\n s = i.split()\n if len(s) > 4:\n if sys.platform.startswith(\"freebsd\"):\n # strip parentheses for FreeBSD pfctl\n s = s[3][1:-1].split(\":\")\n else:\n s = s[4].split(\":\")\n\n if len(s) == 2:\n return s[0], int(s[1])\n raise RuntimeError(\"Could not resolve original destination.\")\n", "path": "mitmproxy/platform/pf.py"}]} | 1,313 | 203 |
gh_patches_debug_42436 | rasdani/github-patches | git_diff | conan-io__conan-center-index-15293 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] spix/0.5
### Package Name/Version
spix/0.5
### Changelog
https://github.com/faaxm/spix/releases/tag/v0.5
### Context about the new update
I will push a PR for this version
</issue>
<code>
[start of recipes/spix/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, rmdir, replace_in_file
4 from conan.tools.build import check_min_cppstd
5 from conan.tools.scm import Version
6 from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
7 import os
8
9
10 required_conan_version = ">=1.52.0"
11
12
13 class SpixConan(ConanFile):
14 name = "spix"
15 description = "UI test automation library for QtQuick/QML Apps"
16 license = "MIT"
17 url = "https://github.com/conan-io/conan-center-index"
18 homepage = "https://github.com/faaxm/spix"
19 topics = ("automation", "qt", "qml", "qt-quick", "qt5", "qtquick", "automated-testing", "qt-qml", "qml-applications")
20 settings = "os", "arch", "compiler", "build_type"
21 options = {
22 "shared": [True, False],
23 "fPIC": [True, False],
24 }
25 default_options = {
26 "shared": False,
27 "fPIC": True,
28 }
29
30 @property
31 def _minimum_cpp_standard(self):
32 return 14
33
34 @property
35 def _compilers_minimum_version(self):
36 return {
37 "Visual Studio": "14",
38 "gcc": "5",
39 "clang": "3.4",
40 "apple-clang": "10"
41 }
42
43 def export_sources(self):
44 export_conandata_patches(self)
45
46 def config_options(self):
47 if self.settings.os == "Windows":
48 del self.options.fPIC
49
50 def configure(self):
51 if self.options.shared:
52 try:
53 del self.options.fPIC
54 except Exception:
55 pass
56
57 def layout(self):
58 cmake_layout(self, src_folder="src")
59
60 def requirements(self):
61 self.requires("anyrpc/1.0.2")
62 self.requires("qt/6.3.1")
63 self.requires("expat/2.4.9")
64
65 def validate(self):
66 if self.info.settings.compiler.cppstd:
67 check_min_cppstd(self, self._minimum_cpp_standard)
68 minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)
69 if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:
70 raise ConanInvalidConfiguration(
71 f"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support."
72 )
73
74 if Version(self.dependencies["qt"].ref.version).major == 6 and not self.options["qt"].qtshadertools:
75 raise ConanInvalidConfiguration(f"{self.ref} requires qt:qtshadertools to get the Quick module")
76 if not (self.options["qt"].gui and self.options["qt"].qtdeclarative):
77 raise ConanInvalidConfiguration(f"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module")
78
79 def source(self):
80 get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
81
82 def generate(self):
83 tc = CMakeToolchain(self)
84 tc.variables["SPIX_BUILD_EXAMPLES"] = False
85 tc.variables["SPIX_BUILD_TESTS"] = False
86 tc.variables["SPIX_QT_MAJOR"] = Version(self.dependencies["qt"].ref.version).major
87 tc.generate()
88
89 deps = CMakeDeps(self)
90 deps.generate()
91
92 def _patch_sources(self):
93 apply_conandata_patches(self)
94 if Version(self.deps_cpp_info["qt"].version).major == 6:
95 replace_in_file(self, os.path.join(self.source_folder, "CMakeLists.txt"), "set(CMAKE_CXX_STANDARD 14)", "set(CMAKE_CXX_STANDARD 17)")
96
97 def build(self):
98 self._patch_sources()
99 cmake = CMake(self)
100 cmake.configure()
101 cmake.build()
102
103 def package(self):
104 copy(self, pattern="LICENSE.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
105 cmake = CMake(self)
106 cmake.install()
107
108 rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
109 rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
110 rmdir(self, os.path.join(self.package_folder, "share"))
111 rm(self, "*.la", os.path.join(self.package_folder, "lib"))
112 rm(self, "*.pdb", os.path.join(self.package_folder, "lib"))
113 rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
114
115 def package_info(self):
116 self.cpp_info.libs = ["Spix"]
117 self.cpp_info.set_property("cmake_file_name", "Spix")
118 self.cpp_info.set_property("cmake_target_name", "Spix::Spix")
119
120 # TODO remove once conan v2 removed cmake_find_package_*
121 self.cpp_info.names["cmake_find_package"] = "Spix"
122 self.cpp_info.names["cmake_find_package_multi"] = "Spix"
123
[end of recipes/spix/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/spix/all/conanfile.py b/recipes/spix/all/conanfile.py
--- a/recipes/spix/all/conanfile.py
+++ b/recipes/spix/all/conanfile.py
@@ -7,7 +7,7 @@
import os
-required_conan_version = ">=1.52.0"
+required_conan_version = ">=1.53.0"
class SpixConan(ConanFile):
@@ -29,16 +29,26 @@
@property
def _minimum_cpp_standard(self):
- return 14
+ return 14 if self.version == "0.4" else 17
@property
def _compilers_minimum_version(self):
- return {
- "Visual Studio": "14",
- "gcc": "5",
- "clang": "3.4",
- "apple-clang": "10"
- }
+ if self.version == "0.4":
+ return {
+ "Visual Studio": "14",
+ "msvc": "190",
+ "gcc": "5",
+ "clang": "3.4",
+ "apple-clang": "10"
+ }
+ else:
+ return {
+ "Visual Studio": "15.7",
+ "msvc": "192", # FIXME: 15.7 is actually 1914 but needs to be tested
+ "gcc": "7",
+ "clang": "5",
+ "apple-clang": "10",
+ }
def export_sources(self):
export_conandata_patches(self)
@@ -49,24 +59,20 @@
def configure(self):
if self.options.shared:
- try:
- del self.options.fPIC
- except Exception:
- pass
+ self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("anyrpc/1.0.2")
- self.requires("qt/6.3.1")
- self.requires("expat/2.4.9")
+ self.requires("qt/6.4.2")
def validate(self):
- if self.info.settings.compiler.cppstd:
+ if self.settings.compiler.cppstd:
check_min_cppstd(self, self._minimum_cpp_standard)
- minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)
- if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:
+ minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
+ if minimum_version and Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support."
)
@@ -77,7 +83,7 @@
raise ConanInvalidConfiguration(f"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module")
def source(self):
- get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
+ get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
@@ -87,11 +93,13 @@
tc.generate()
deps = CMakeDeps(self)
+ deps.set_property("anyrpc", "cmake_file_name", "AnyRPC")
+ deps.set_property("anyrpc", "cmake_target_name", "AnyRPC::anyrpc")
deps.generate()
def _patch_sources(self):
apply_conandata_patches(self)
- if Version(self.deps_cpp_info["qt"].version).major == 6:
+ if self.version == "0.4" and Version(self.dependencies["qt"].ref.version).major == 6:
replace_in_file(self, os.path.join(self.source_folder, "CMakeLists.txt"), "set(CMAKE_CXX_STANDARD 14)", "set(CMAKE_CXX_STANDARD 17)")
def build(self):
| {"golden_diff": "diff --git a/recipes/spix/all/conanfile.py b/recipes/spix/all/conanfile.py\n--- a/recipes/spix/all/conanfile.py\n+++ b/recipes/spix/all/conanfile.py\n@@ -7,7 +7,7 @@\n import os\n \n \n-required_conan_version = \">=1.52.0\"\n+required_conan_version = \">=1.53.0\"\n \n \n class SpixConan(ConanFile):\n@@ -29,16 +29,26 @@\n \n @property\n def _minimum_cpp_standard(self):\n- return 14\n+ return 14 if self.version == \"0.4\" else 17\n \n @property\n def _compilers_minimum_version(self):\n- return {\n- \"Visual Studio\": \"14\",\n- \"gcc\": \"5\",\n- \"clang\": \"3.4\",\n- \"apple-clang\": \"10\"\n- }\n+ if self.version == \"0.4\":\n+ return {\n+ \"Visual Studio\": \"14\",\n+ \"msvc\": \"190\",\n+ \"gcc\": \"5\",\n+ \"clang\": \"3.4\",\n+ \"apple-clang\": \"10\"\n+ }\n+ else:\n+ return {\n+ \"Visual Studio\": \"15.7\",\n+ \"msvc\": \"192\", # FIXME: 15.7 is actually 1914 but needs to be tested\n+ \"gcc\": \"7\",\n+ \"clang\": \"5\",\n+ \"apple-clang\": \"10\",\n+ }\n \n def export_sources(self):\n export_conandata_patches(self)\n@@ -49,24 +59,20 @@\n \n def configure(self):\n if self.options.shared:\n- try:\n- del self.options.fPIC\n- except Exception:\n- pass\n+ self.options.rm_safe(\"fPIC\")\n \n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n \n def requirements(self):\n self.requires(\"anyrpc/1.0.2\")\n- self.requires(\"qt/6.3.1\")\n- self.requires(\"expat/2.4.9\")\n+ self.requires(\"qt/6.4.2\")\n \n def validate(self):\n- if self.info.settings.compiler.cppstd:\n+ if self.settings.compiler.cppstd:\n check_min_cppstd(self, self._minimum_cpp_standard)\n- minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)\n- if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:\n+ minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n+ if minimum_version and Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n f\"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support.\"\n )\n@@ -77,7 +83,7 @@\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module\")\n \n def source(self):\n- get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n+ get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n \n def generate(self):\n tc = CMakeToolchain(self)\n@@ -87,11 +93,13 @@\n tc.generate()\n \n deps = CMakeDeps(self)\n+ deps.set_property(\"anyrpc\", \"cmake_file_name\", \"AnyRPC\")\n+ deps.set_property(\"anyrpc\", \"cmake_target_name\", \"AnyRPC::anyrpc\")\n deps.generate()\n \n def _patch_sources(self):\n apply_conandata_patches(self)\n- if Version(self.deps_cpp_info[\"qt\"].version).major == 6:\n+ if self.version == \"0.4\" and Version(self.dependencies[\"qt\"].ref.version).major == 6:\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"set(CMAKE_CXX_STANDARD 14)\", \"set(CMAKE_CXX_STANDARD 17)\")\n \n def build(self):\n", "issue": "[request] spix/0.5\n### Package Name/Version\n\nspix/0.5\n\n### Changelog\n\nhttps://github.com/faaxm/spix/releases/tag/v0.5\n\n### Context about the new update\n\nI will push a PR for this version\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, rmdir, 
replace_in_file\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.scm import Version\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nimport os\n\n\nrequired_conan_version = \">=1.52.0\"\n\n\nclass SpixConan(ConanFile):\n name = \"spix\"\n description = \"UI test automation library for QtQuick/QML Apps\"\n license = \"MIT\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/faaxm/spix\"\n topics = (\"automation\", \"qt\", \"qml\", \"qt-quick\", \"qt5\", \"qtquick\", \"automated-testing\", \"qt-qml\", \"qml-applications\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n @property\n def _minimum_cpp_standard(self):\n return 14\n\n @property\n def _compilers_minimum_version(self):\n return {\n \"Visual Studio\": \"14\",\n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"10\"\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n try:\n del self.options.fPIC\n except Exception:\n pass\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def requirements(self):\n self.requires(\"anyrpc/1.0.2\")\n self.requires(\"qt/6.3.1\")\n self.requires(\"expat/2.4.9\")\n \n def validate(self):\n if self.info.settings.compiler.cppstd:\n check_min_cppstd(self, self._minimum_cpp_standard)\n minimum_version = self._compilers_minimum_version.get(str(self.info.settings.compiler), False)\n if minimum_version and Version(self.info.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n f\"{self.ref} requires C++{self._minimum_cpp_standard}, which your compiler does not support.\"\n )\n\n if Version(self.dependencies[\"qt\"].ref.version).major == 6 and not self.options[\"qt\"].qtshadertools:\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:qtshadertools to get the Quick module\")\n if not (self.options[\"qt\"].gui and self.options[\"qt\"].qtdeclarative):\n raise ConanInvalidConfiguration(f\"{self.ref} requires qt:gui and qt:qtdeclarative to get the Quick module\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"SPIX_BUILD_EXAMPLES\"] = False\n tc.variables[\"SPIX_BUILD_TESTS\"] = False\n tc.variables[\"SPIX_QT_MAJOR\"] = Version(self.dependencies[\"qt\"].ref.version).major\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n if Version(self.deps_cpp_info[\"qt\"].version).major == 6:\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"set(CMAKE_CXX_STANDARD 14)\", \"set(CMAKE_CXX_STANDARD 17)\")\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, pattern=\"LICENSE.txt\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n cmake = CMake(self)\n cmake.install()\n\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n rm(self, \"*.la\", 
os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"lib\"))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"bin\"))\n\n def package_info(self):\n self.cpp_info.libs = [\"Spix\"]\n self.cpp_info.set_property(\"cmake_file_name\", \"Spix\") \n self.cpp_info.set_property(\"cmake_target_name\", \"Spix::Spix\")\n \n # TODO remove once conan v2 removed cmake_find_package_*\n self.cpp_info.names[\"cmake_find_package\"] = \"Spix\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Spix\"\n", "path": "recipes/spix/all/conanfile.py"}]} | 1,995 | 939 |
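The golden diff recorded for this spix/0.5 row gates both the minimum C++ standard and the compiler floor on the recipe version. Below is a minimal standalone sketch of that version-gated property pattern; the `Recipe` class and printed output are illustrative stand-ins, not Conan's real `ConanFile` API.

```python
# Version-gated requirements, mirroring the spix golden diff above:
# 0.4 keeps C++14 and the old compiler floor, 0.5+ moves to C++17.
class Recipe:
    def __init__(self, version):
        self.version = version

    @property
    def minimum_cpp_standard(self):
        return 14 if self.version == "0.4" else 17

    @property
    def compilers_minimum_version(self):
        if self.version == "0.4":
            return {"gcc": "5", "clang": "3.4", "apple-clang": "10"}
        return {"gcc": "7", "clang": "5", "apple-clang": "10"}


for version in ("0.4", "0.5"):
    recipe = Recipe(version)
    print(version, recipe.minimum_cpp_standard, recipe.compilers_minimum_version["gcc"])
```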
gh_patches_debug_28026 | rasdani/github-patches | git_diff | bridgecrewio__checkov-961 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False positive on CKV_AWS_59 - APIGatewayAuthorization ignores API keys
**Describe the bug**
**CKV_AWS_59** assumes the API is open to the public if authorization is **NONE**. However, API keys are another option on REST APIs and are configured separately. See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-setup-api-key-with-console.html.
**To Reproduce**
The following terraform code will cause a test failure, which appears to be against the spirit of the rule:
```terraform
resource "aws_api_gateway_method" "POST" {
...
authorization = NONE
api_key_required = true
...
}
```
**Expected behavior**
I would expect this configuration to be considered secure.
**Desktop (please complete the following information):**
- OS: linux/amd64 Docker image bridgecrew/checkov:1.0.833
- Checkov Version 1.0.833
</issue>
<code>
[start of checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
3
4 class APIGatewayAuthorization(BaseResourceCheck):
5
6 def __init__(self):
7 name = "Ensure there is no open access to back-end resources through API"
8 id = "CKV_AWS_59"
9 supported_resources = ['AWS::ApiGateway::Method']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 if 'Properties' in conf.keys():
15 if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():
16 if conf['Properties']['HttpMethod'] != "OPTIONS" and conf['Properties']['AuthorizationType'] == "NONE":
17 return CheckResult.FAILED
18 return CheckResult.PASSED
19
20 check = APIGatewayAuthorization()
21
[end of checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py]
[start of checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class APIGatewayAuthorization(BaseResourceCheck):
6
7 def __init__(self):
8 name = "Ensure there is no open access to back-end resources through API"
9 id = "CKV_AWS_59"
10 supported_resources = ['aws_api_gateway_method']
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 self.evaluated_keys = ['http_method', 'authorization']
16 if conf['http_method'][0] != "OPTIONS" and conf['authorization'][0] == "NONE":
17 return CheckResult.FAILED
18 return CheckResult.PASSED
19
20
21 check = APIGatewayAuthorization()
22
[end of checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py
--- a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py
+++ b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py
@@ -14,7 +14,8 @@
if 'Properties' in conf.keys():
if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():
if conf['Properties']['HttpMethod'] != "OPTIONS" and conf['Properties']['AuthorizationType'] == "NONE":
- return CheckResult.FAILED
+ if 'ApiKeyRequired' not in conf['Properties'].keys() or conf['Properties']['ApiKeyRequired'] == False:
+ return CheckResult.FAILED
return CheckResult.PASSED
check = APIGatewayAuthorization()
diff --git a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py
--- a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py
+++ b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py
@@ -12,8 +12,8 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
- self.evaluated_keys = ['http_method', 'authorization']
- if conf['http_method'][0] != "OPTIONS" and conf['authorization'][0] == "NONE":
+ self.evaluated_keys = ['http_method', 'authorization', 'api_key_required']
+ if conf['http_method'][0] != "OPTIONS" and conf['authorization'][0] == "NONE" and ('api_key_required' not in conf or conf['api_key_required'][0] == False):
return CheckResult.FAILED
return CheckResult.PASSED
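For reference, a minimal sketch of how the patched terraform check evaluates the resource from the issue. The single-element-list `conf` shape follows the check code shown above; returning plain strings instead of `CheckResult` members is a simplification for illustration.

```python
# Patched decision logic from the diff, applied to the issue's example resource.
def scan_resource_conf(conf):
    if (
        conf["http_method"][0] != "OPTIONS"
        and conf["authorization"][0] == "NONE"
        and ("api_key_required" not in conf or conf["api_key_required"][0] is False)
    ):
        return "FAILED"
    return "PASSED"


# aws_api_gateway_method from the issue: authorization NONE, but an API key is required.
conf = {"http_method": ["POST"], "authorization": ["NONE"], "api_key_required": [True]}
print(scan_resource_conf(conf))  # PASSED under the new logic; FAILED before the patch
```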
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py\n--- a/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py\n+++ b/checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py\n@@ -14,7 +14,8 @@\n if 'Properties' in conf.keys():\n if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():\n if conf['Properties']['HttpMethod'] != \"OPTIONS\" and conf['Properties']['AuthorizationType'] == \"NONE\":\n- return CheckResult.FAILED\n+ if 'ApiKeyRequired' not in conf['Properties'].keys() or conf['Properties']['ApiKeyRequired'] == False:\n+ return CheckResult.FAILED\n return CheckResult.PASSED\n \n check = APIGatewayAuthorization()\ndiff --git a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py\n--- a/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py\n+++ b/checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py\n@@ -12,8 +12,8 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf):\n- self.evaluated_keys = ['http_method', 'authorization']\n- if conf['http_method'][0] != \"OPTIONS\" and conf['authorization'][0] == \"NONE\":\n+ self.evaluated_keys = ['http_method', 'authorization', 'api_key_required']\n+ if conf['http_method'][0] != \"OPTIONS\" and conf['authorization'][0] == \"NONE\" and ('api_key_required' not in conf or conf['api_key_required'][0] == False):\n return CheckResult.FAILED\n return CheckResult.PASSED\n", "issue": "False positive on CKV_AWS_59 - APIGatewayAuthorization ignores API keys\n**Describe the bug**\r\n**CKV_AWS_59** assumes the API is open to the public if authorization is **NONE**. However, API keys are another option on REST APIs and are configured separately. See https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-setup-api-key-with-console.html.\r\n\r\n**To Reproduce**\r\nThe following terraform code will cause a test failure, which appears to be against the spirit of the rule:\r\n```terraform\r\nresource \"aws_api_gateway_method\" \"POST\" {\r\n...\r\nauthorization = NONE\r\napi_key_required = true\r\n...\r\n}\r\n````\r\n\r\n**Expected behavior**\r\nI would expect this configuration to be considered secure. 
\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: linux/amd64 Docker image bridgecrew/checkov:1.0.833\r\n - Checkov Version 1.0.833\r\n\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n\nclass APIGatewayAuthorization(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure there is no open access to back-end resources through API\"\n id = \"CKV_AWS_59\"\n supported_resources = ['AWS::ApiGateway::Method']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'Properties' in conf.keys():\n if 'HttpMethod' in conf['Properties'].keys() and 'AuthorizationType' in conf['Properties'].keys():\n if conf['Properties']['HttpMethod'] != \"OPTIONS\" and conf['Properties']['AuthorizationType'] == \"NONE\":\n return CheckResult.FAILED\n return CheckResult.PASSED\n\ncheck = APIGatewayAuthorization()\n", "path": "checkov/cloudformation/checks/resource/aws/APIGatewayAuthorization.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass APIGatewayAuthorization(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure there is no open access to back-end resources through API\"\n id = \"CKV_AWS_59\"\n supported_resources = ['aws_api_gateway_method']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n self.evaluated_keys = ['http_method', 'authorization']\n if conf['http_method'][0] != \"OPTIONS\" and conf['authorization'][0] == \"NONE\":\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = APIGatewayAuthorization()\n", "path": "checkov/terraform/checks/resource/aws/APIGatewayAuthorization.py"}]} | 1,256 | 413 |
gh_patches_debug_6386 | rasdani/github-patches | git_diff | huggingface__huggingface_hub-757 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
chore: Updated the pillow version specifier
Hello there :wave:
Following up on #755, I believe the core build requirements were meant to be updated as well. This aligns the version specifier of Pillow between the core build and the "tests" extra build!
Any feedback is welcome!
cc @osanseviero @Narsil
</issue>
<code>
[start of api-inference-community/setup.py]
1 from setuptools import setup
2
3
4 setup(
5 name="api_inference_community",
6 version="0.0.21",
7 description="A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub",
8 url="http://github.com/huggingface/api-inference-community",
9 author="Nicolas Patry",
10 author_email="[email protected]",
11 license="MIT",
12 packages=["api_inference_community"],
13 python_requires=">=3.6.0",
14 zip_safe=False,
15 install_requires=list(line for line in open("requirements.txt", "r")),
16 extras_require={
17 "test": [
18 "httpx>=0.18",
19 "Pillow>=8.2",
20 "httpx>=0.18",
21 "torch>=1.9.0",
22 "pytest>=6.2",
23 ]
24 },
25 )
26
[end of api-inference-community/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api-inference-community/setup.py b/api-inference-community/setup.py
--- a/api-inference-community/setup.py
+++ b/api-inference-community/setup.py
@@ -3,7 +3,7 @@
setup(
name="api_inference_community",
- version="0.0.21",
+ version="0.0.23",
description="A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub",
url="http://github.com/huggingface/api-inference-community",
author="Nicolas Patry",
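Note that the displayed diff only bumps the package version; the Pillow specifier discussed in the issue lives in the `test` extra of the `setup.py` listed above. A small hedged check of that extra's pin, using the third-party `packaging` library (not part of this record):

```python
# Inspect the "test" extra from the setup.py above and confirm the Pillow floor.
from packaging.requirements import Requirement

test_extra = ["httpx>=0.18", "Pillow>=8.2", "torch>=1.9.0", "pytest>=6.2"]
pillow = next(Requirement(r) for r in test_extra if Requirement(r).name == "Pillow")
print(pillow.specifier)  # >=8.2
```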
| {"golden_diff": "diff --git a/api-inference-community/setup.py b/api-inference-community/setup.py\n--- a/api-inference-community/setup.py\n+++ b/api-inference-community/setup.py\n@@ -3,7 +3,7 @@\n \n setup(\n name=\"api_inference_community\",\n- version=\"0.0.21\",\n+ version=\"0.0.23\",\n description=\"A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub\",\n url=\"http://github.com/huggingface/api-inference-community\",\n author=\"Nicolas Patry\",\n", "issue": "chore: Updated the pillow version specifier\nHello there :wave: \r\n\r\nFollowing up on #755, I believe the core build requirements were meant to be updated as well. This aligns the version specifier of Pillow between the core build and the \"tests\" extra build!\r\n\r\nAny feedback is welcome!\r\n\r\ncc @osanseviero @Narsil \n", "before_files": [{"content": "from setuptools import setup\n\n\nsetup(\n name=\"api_inference_community\",\n version=\"0.0.21\",\n description=\"A package with helper tools to build an API Inference docker app for Hugging Face API inference using huggingface_hub\",\n url=\"http://github.com/huggingface/api-inference-community\",\n author=\"Nicolas Patry\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n packages=[\"api_inference_community\"],\n python_requires=\">=3.6.0\",\n zip_safe=False,\n install_requires=list(line for line in open(\"requirements.txt\", \"r\")),\n extras_require={\n \"test\": [\n \"httpx>=0.18\",\n \"Pillow>=8.2\",\n \"httpx>=0.18\",\n \"torch>=1.9.0\",\n \"pytest>=6.2\",\n ]\n },\n)\n", "path": "api-inference-community/setup.py"}]} | 852 | 131 |
gh_patches_debug_23307 | rasdani/github-patches | git_diff | pypa__virtualenv-1730 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Broken activation in Windows for python3
virtualenv activation through `activate_this.py` is broken in Windows for python3. Check this very simple piece of code
```
import os
import subprocess
import sys
from distutils.spawn import find_executable
venv = find_executable("virtualenv")
testdir = os.path.join(os.path.curdir, 'testenv')
subprocess.check_output((venv, testdir, "-p", sys.executable))
bin_path = os.path.join(testdir, "Scripts") if sys.platform in ("win32", "cygwin") else os.path.join(testdir, "bin")
path = os.path.join(bin_path, "activate_this.py")
with open(path) as f:
exec(f.read(), {"__file__": path})
```
This generates a `AttributeError: 'str' object has no attribute 'decode'`. Taking a look at the `activate_this.py` code:
```
for lib in "..\Lib\site-packages".split(os.pathsep):
path = os.path.realpath(os.path.join(bin_dir, lib))
site.addsitedir(path.decode("utf-8") if "yes" else path)
```
it's indeed normal that we get the error because we're always calling `decode()` over a str. Question is where the `__DECODE_PATH__` from https://github.com/pypa/virtualenv/blob/master/src/virtualenv/activation/python/activate_this.py#L28 is assigned to `"yes"`?
</issue>
<code>
[start of src/virtualenv/activation/python/__init__.py]
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 from collections import OrderedDict
5
6 from virtualenv.info import WIN_CPYTHON_2
7 from virtualenv.util.path import Path
8 from virtualenv.util.six import ensure_text
9
10 from ..via_template import ViaTemplateActivator
11
12
13 class PythonActivator(ViaTemplateActivator):
14 def templates(self):
15 yield Path("activate_this.py")
16
17 def replacements(self, creator, dest_folder):
18 replacements = super(PythonActivator, self).replacements(creator, dest_folder)
19 lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
20 replacements.update(
21 {
22 "__LIB_FOLDERS__": ensure_text(os.pathsep.join(lib_folders.keys())),
23 "__DECODE_PATH__": ("yes" if WIN_CPYTHON_2 else ""),
24 }
25 )
26 return replacements
27
28 @staticmethod
29 def _repr_unicode(creator, value):
30 py2 = creator.interpreter.version_info.major == 2
31 if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals
32 value = ensure_text(repr(value.encode("utf-8"))[1:-1])
33 return value
34
[end of src/virtualenv/activation/python/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py
--- a/src/virtualenv/activation/python/__init__.py
+++ b/src/virtualenv/activation/python/__init__.py
@@ -3,7 +3,6 @@
import os
from collections import OrderedDict
-from virtualenv.info import WIN_CPYTHON_2
from virtualenv.util.path import Path
from virtualenv.util.six import ensure_text
@@ -17,10 +16,11 @@
def replacements(self, creator, dest_folder):
replacements = super(PythonActivator, self).replacements(creator, dest_folder)
lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
+ win_py2 = creator.interpreter.platform == "win32" and creator.interpreter.version_info.major == 2
replacements.update(
{
"__LIB_FOLDERS__": ensure_text(os.pathsep.join(lib_folders.keys())),
- "__DECODE_PATH__": ("yes" if WIN_CPYTHON_2 else ""),
+ "__DECODE_PATH__": ("yes" if win_py2 else ""),
}
)
return replacements
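To connect the diff back to the issue: `__DECODE_PATH__` is substituted into the `activate_this.py` template quoted in the issue, and after the fix it is only "yes" when the target interpreter is CPython 2 on Windows. A simplified rendering sketch (not virtualenv's real templating code):

```python
# How the __DECODE_PATH__ substitution changes the generated activate_this.py line.
template = 'site.addsitedir(path.decode("utf-8") if "__DECODE_PATH__" else path)'


def render(win_py2):
    # After the fix the flag is "yes" only for CPython 2 on Windows,
    # so a Python 3 venv gets an empty (falsy) string and never calls .decode().
    return template.replace("__DECODE_PATH__", "yes" if win_py2 else "")


print(render(win_py2=False))  # ... if "" else path  -> path used as-is
print(render(win_py2=True))   # ... if "yes" else path -> bytes path decoded
```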
| {"golden_diff": "diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py\n--- a/src/virtualenv/activation/python/__init__.py\n+++ b/src/virtualenv/activation/python/__init__.py\n@@ -3,7 +3,6 @@\n import os\n from collections import OrderedDict\n \n-from virtualenv.info import WIN_CPYTHON_2\n from virtualenv.util.path import Path\n from virtualenv.util.six import ensure_text\n \n@@ -17,10 +16,11 @@\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n+ win_py2 = creator.interpreter.platform == \"win32\" and creator.interpreter.version_info.major == 2\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n- \"__DECODE_PATH__\": (\"yes\" if WIN_CPYTHON_2 else \"\"),\n+ \"__DECODE_PATH__\": (\"yes\" if win_py2 else \"\"),\n }\n )\n return replacements\n", "issue": "Broken activation in Windows for python3\nvirtualenv activation through `activate_this.py` is broken in Windows for python3. Check this very simple piece of code\r\n\r\n```\r\nimport os\r\nimport subprocess\r\nimport sys\r\nfrom distutils.spawn import find_executable\r\n\r\nvenv = find_executable(\"virtualenv\")\r\ntestdir = os.path.join(os.path.curdir, 'testenv')\r\n\r\nsubprocess.check_output((venv, testdir, \"-p\", sys.executable))\r\n\r\nbin_path = os.path.join(testdir, \"Scripts\") if sys.platform in (\"win32\", \"cygwin\") else os.path.join(testdir, \"bin\")\r\n\r\npath = os.path.join(bin_path, \"activate_this.py\")\r\nwith open(path) as f:\r\n exec(f.read(), {\"__file__\": path})\r\n```\r\n\r\nThis generates a `AttributeError: 'str' object has no attribute 'decode'`. Taking a look at the `activate_this.py` code:\r\n\r\n```\r\nfor lib in \"..\\Lib\\site-packages\".split(os.pathsep):\r\n path = os.path.realpath(os.path.join(bin_dir, lib))\r\n site.addsitedir(path.decode(\"utf-8\") if \"yes\" else path)\r\n```\r\n\r\nit's indeed normal that we get the error because we're always calling `decode()` over a str. 
Question is where the `__DECODE_PATH__` from https://github.com/pypa/virtualenv/blob/master/src/virtualenv/activation/python/activate_this.py#L28 is assigned to `\"yes\"`?\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nfrom collections import OrderedDict\n\nfrom virtualenv.info import WIN_CPYTHON_2\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_text\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass PythonActivator(ViaTemplateActivator):\n def templates(self):\n yield Path(\"activate_this.py\")\n\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n \"__DECODE_PATH__\": (\"yes\" if WIN_CPYTHON_2 else \"\"),\n }\n )\n return replacements\n\n @staticmethod\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n value = ensure_text(repr(value.encode(\"utf-8\"))[1:-1])\n return value\n", "path": "src/virtualenv/activation/python/__init__.py"}]} | 1,194 | 267 |
gh_patches_debug_25720 | rasdani/github-patches | git_diff | cal-itp__benefits-1343 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Configure a Sentry denylist
Looks like we can configure a `denylist` on `EventScrubber` when calling `sentry_sdk.init`: https://docs.sentry.io/platforms/python/data-management/sensitive-data/
Another more general approach to modifying events is to configure a `before_send` function: https://docs.sentry.io/platforms/python/configuration/filtering/
_Originally posted by @angela-tran in https://github.com/cal-itp/benefits/issues/1334#issuecomment-1490416579_
</issue>
<code>
[start of benefits/sentry.py]
1 from benefits import VERSION
2 import sentry_sdk
3 from sentry_sdk.integrations.django import DjangoIntegration
4 import shutil
5 import os
6 import subprocess
7
8
9 SENTRY_ENVIRONMENT = os.environ.get("SENTRY_ENVIRONMENT", "local")
10
11
12 def git_available():
13 return bool(shutil.which("git"))
14
15
16 # https://stackoverflow.com/a/24584384/358804
17 def is_git_directory(path="."):
18 dev_null = open(os.devnull, "w")
19 return subprocess.call(["git", "-C", path, "status"], stderr=dev_null, stdout=dev_null) == 0
20
21
22 # https://stackoverflow.com/a/21901260/358804
23 def get_git_revision_hash():
24 return subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii").strip()
25
26
27 def get_sha_file_path():
28 current_file = os.path.dirname(os.path.abspath(__file__))
29 return os.path.join(current_file, "..", "static", "sha.txt")
30
31
32 def get_sha_from_file():
33 sha_path = get_sha_file_path()
34 if os.path.isfile(sha_path):
35 with open(sha_path) as f:
36 return f.read().strip()
37 else:
38 return None
39
40
41 def get_release() -> str:
42 """Returns the first available: the SHA from Git, the value from sha.txt, or the VERSION."""
43
44 if git_available() and is_git_directory():
45 return get_git_revision_hash()
46 else:
47 sha = get_sha_from_file()
48 if sha:
49 return sha
50 else:
51 # one of the above *should* always be available, but including this just in case
52 return VERSION
53
54
55 def configure():
56 SENTRY_DSN = os.environ.get("SENTRY_DSN")
57 if SENTRY_DSN:
58 release = get_release()
59 print(f"Enabling Sentry for environment '{SENTRY_ENVIRONMENT}', release '{release}'...")
60
61 # https://docs.sentry.io/platforms/python/configuration/
62 sentry_sdk.init(
63 dsn=SENTRY_DSN,
64 integrations=[
65 DjangoIntegration(),
66 ],
67 traces_sample_rate=1.0,
68 environment=SENTRY_ENVIRONMENT,
69 release=release,
70 in_app_include=["benefits"],
71 )
72 else:
73 print("SENTRY_DSN not set, so won't send events")
74
[end of benefits/sentry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/benefits/sentry.py b/benefits/sentry.py
--- a/benefits/sentry.py
+++ b/benefits/sentry.py
@@ -1,10 +1,13 @@
-from benefits import VERSION
-import sentry_sdk
-from sentry_sdk.integrations.django import DjangoIntegration
import shutil
import os
import subprocess
+import sentry_sdk
+from sentry_sdk.integrations.django import DjangoIntegration
+from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST
+
+from benefits import VERSION
+
SENTRY_ENVIRONMENT = os.environ.get("SENTRY_ENVIRONMENT", "local")
@@ -52,6 +55,12 @@
return VERSION
+def get_denylist():
+ # custom denylist
+ denylist = DEFAULT_DENYLIST + ["sub", "name"]
+ return denylist
+
+
def configure():
SENTRY_DSN = os.environ.get("SENTRY_DSN")
if SENTRY_DSN:
@@ -68,6 +77,10 @@
environment=SENTRY_ENVIRONMENT,
release=release,
in_app_include=["benefits"],
+ # send_default_pii must be False (the default) for a custom EventScrubber/denylist
+ # https://docs.sentry.io/platforms/python/data-management/sensitive-data/#event_scrubber
+ send_default_pii=False,
+ event_scrubber=EventScrubber(denylist=get_denylist()),
)
else:
print("SENTRY_DSN not set, so won't send events")
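A minimal sketch of the denylist composition the diff introduces, using the same `sentry_sdk.scrubber` imports it adds; the two extra field names come straight from the diff.

```python
from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST


def get_denylist():
    # Sentry's default sensitive-field list, extended with the app-specific fields.
    return DEFAULT_DENYLIST + ["sub", "name"]


scrubber = EventScrubber(denylist=get_denylist())
print(len(get_denylist()) - len(DEFAULT_DENYLIST))  # 2 additional denied fields
```

As the diff's own comment notes, `send_default_pii` has to stay `False` for a custom `EventScrubber` denylist to take effect.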
| {"golden_diff": "diff --git a/benefits/sentry.py b/benefits/sentry.py\n--- a/benefits/sentry.py\n+++ b/benefits/sentry.py\n@@ -1,10 +1,13 @@\n-from benefits import VERSION\n-import sentry_sdk\n-from sentry_sdk.integrations.django import DjangoIntegration\n import shutil\n import os\n import subprocess\n \n+import sentry_sdk\n+from sentry_sdk.integrations.django import DjangoIntegration\n+from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST\n+\n+from benefits import VERSION\n+\n \n SENTRY_ENVIRONMENT = os.environ.get(\"SENTRY_ENVIRONMENT\", \"local\")\n \n@@ -52,6 +55,12 @@\n return VERSION\n \n \n+def get_denylist():\n+ # custom denylist\n+ denylist = DEFAULT_DENYLIST + [\"sub\", \"name\"]\n+ return denylist\n+\n+\n def configure():\n SENTRY_DSN = os.environ.get(\"SENTRY_DSN\")\n if SENTRY_DSN:\n@@ -68,6 +77,10 @@\n environment=SENTRY_ENVIRONMENT,\n release=release,\n in_app_include=[\"benefits\"],\n+ # send_default_pii must be False (the default) for a custom EventScrubber/denylist\n+ # https://docs.sentry.io/platforms/python/data-management/sensitive-data/#event_scrubber\n+ send_default_pii=False,\n+ event_scrubber=EventScrubber(denylist=get_denylist()),\n )\n else:\n print(\"SENTRY_DSN not set, so won't send events\")\n", "issue": "Configure a Sentry denylist\nLooks like we can configure a `denylist` on `EventScrubber` when calling `sentry_sdk.init`: https://docs.sentry.io/platforms/python/data-management/sensitive-data/\r\n\r\nAnother more general approach to modifying events is to configure a `before_send` function: https://docs.sentry.io/platforms/python/configuration/filtering/\r\n\r\n_Originally posted by @angela-tran in https://github.com/cal-itp/benefits/issues/1334#issuecomment-1490416579_\r\n \n", "before_files": [{"content": "from benefits import VERSION\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\nimport shutil\nimport os\nimport subprocess\n\n\nSENTRY_ENVIRONMENT = os.environ.get(\"SENTRY_ENVIRONMENT\", \"local\")\n\n\ndef git_available():\n return bool(shutil.which(\"git\"))\n\n\n# https://stackoverflow.com/a/24584384/358804\ndef is_git_directory(path=\".\"):\n dev_null = open(os.devnull, \"w\")\n return subprocess.call([\"git\", \"-C\", path, \"status\"], stderr=dev_null, stdout=dev_null) == 0\n\n\n# https://stackoverflow.com/a/21901260/358804\ndef get_git_revision_hash():\n return subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"]).decode(\"ascii\").strip()\n\n\ndef get_sha_file_path():\n current_file = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(current_file, \"..\", \"static\", \"sha.txt\")\n\n\ndef get_sha_from_file():\n sha_path = get_sha_file_path()\n if os.path.isfile(sha_path):\n with open(sha_path) as f:\n return f.read().strip()\n else:\n return None\n\n\ndef get_release() -> str:\n \"\"\"Returns the first available: the SHA from Git, the value from sha.txt, or the VERSION.\"\"\"\n\n if git_available() and is_git_directory():\n return get_git_revision_hash()\n else:\n sha = get_sha_from_file()\n if sha:\n return sha\n else:\n # one of the above *should* always be available, but including this just in case\n return VERSION\n\n\ndef configure():\n SENTRY_DSN = os.environ.get(\"SENTRY_DSN\")\n if SENTRY_DSN:\n release = get_release()\n print(f\"Enabling Sentry for environment '{SENTRY_ENVIRONMENT}', release '{release}'...\")\n\n # https://docs.sentry.io/platforms/python/configuration/\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[\n DjangoIntegration(),\n ],\n 
traces_sample_rate=1.0,\n environment=SENTRY_ENVIRONMENT,\n release=release,\n in_app_include=[\"benefits\"],\n )\n else:\n print(\"SENTRY_DSN not set, so won't send events\")\n", "path": "benefits/sentry.py"}]} | 1,308 | 356 |
gh_patches_debug_14180 | rasdani/github-patches | git_diff | pre-commit__pre-commit-622 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unstaged files + never ran pre-commit => "No such file or directory: .../.cache/pre-commit/patch..."
```
$ pre-commit run
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to /home/asottile/.cache/pre-commit/patch1505686307.
An unexpected error has occurred: IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'
Check the log at /home/asottile/.cache/pre-commit/pre-commit.log
```
Stacktrace:
```python
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 44, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 231, in main
return run(runner, args)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 249, in run
with ctx:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/home/asottile/workspace/pre-commit/pre_commit/staged_files_only.py", line 46, in staged_files_only
with io.open(patch_filename, 'wb') as patch_file:
IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'
```
</issue>
<code>
[start of pre_commit/staged_files_only.py]
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import os.path
7 import time
8
9 from pre_commit.util import CalledProcessError
10 from pre_commit.util import cmd_output
11
12
13 logger = logging.getLogger('pre_commit')
14
15
16 def _git_apply(patch):
17 args = ('apply', '--whitespace=nowarn', patch)
18 try:
19 cmd_output('git', *args, encoding=None)
20 except CalledProcessError:
21 # Retry with autocrlf=false -- see #570
22 cmd_output('git', '-c', 'core.autocrlf=false', *args, encoding=None)
23
24
25 @contextlib.contextmanager
26 def staged_files_only(patch_dir):
27 """Clear any unstaged changes from the git working directory inside this
28 context.
29 """
30 # Determine if there are unstaged files
31 tree = cmd_output('git', 'write-tree')[1].strip()
32 retcode, diff_stdout_binary, _ = cmd_output(
33 'git', 'diff-index', '--ignore-submodules', '--binary',
34 '--exit-code', '--no-color', '--no-ext-diff', tree, '--',
35 retcode=None,
36 encoding=None,
37 )
38 if retcode and diff_stdout_binary.strip():
39 patch_filename = 'patch{}'.format(int(time.time()))
40 patch_filename = os.path.join(patch_dir, patch_filename)
41 logger.warning('Unstaged files detected.')
42 logger.info(
43 'Stashing unstaged files to {}.'.format(patch_filename),
44 )
45 # Save the current unstaged changes as a patch
46 with io.open(patch_filename, 'wb') as patch_file:
47 patch_file.write(diff_stdout_binary)
48
49 # Clear the working directory of unstaged changes
50 cmd_output('git', 'checkout', '--', '.')
51 try:
52 yield
53 finally:
54 # Try to apply the patch we saved
55 try:
56 _git_apply(patch_filename)
57 except CalledProcessError:
58 logger.warning(
59 'Stashed changes conflicted with hook auto-fixes... '
60 'Rolling back fixes...',
61 )
62 # We failed to apply the patch, presumably due to fixes made
63 # by hooks.
64 # Roll back the changes made by hooks.
65 cmd_output('git', 'checkout', '--', '.')
66 _git_apply(patch_filename)
67 logger.info('Restored changes from {}.'.format(patch_filename))
68 else:
69 # There weren't any staged files so we don't need to do anything
70 # special
71 yield
72
[end of pre_commit/staged_files_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -8,6 +8,7 @@
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
+from pre_commit.util import mkdirp
logger = logging.getLogger('pre_commit')
@@ -43,6 +44,7 @@
'Stashing unstaged files to {}.'.format(patch_filename),
)
# Save the current unstaged changes as a patch
+ mkdirp(patch_dir)
with io.open(patch_filename, 'wb') as patch_file:
patch_file.write(diff_stdout_binary)
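The fix is simply to create the patch directory before the first write. `pre_commit.util.mkdirp` is not shown in this record, so the stand-in below assumes the usual "makedirs unless it already exists" behaviour.

```python
import errno
import os


def mkdirp(path):
    # Create the directory tree, ignoring the error if it is already there.
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


mkdirp("/tmp/pre-commit-demo/patches")  # first call creates the directory
mkdirp("/tmp/pre-commit-demo/patches")  # second call is a no-op instead of an error
```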
| {"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -8,6 +8,7 @@\n \n from pre_commit.util import CalledProcessError\n from pre_commit.util import cmd_output\n+from pre_commit.util import mkdirp\n \n \n logger = logging.getLogger('pre_commit')\n@@ -43,6 +44,7 @@\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n+ mkdirp(patch_dir)\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n", "issue": "Unstaged files + never ran pre-commit => \"No such file or directory: .../.cache/pre-commit/patch...\"\n```\r\n$ pre-commit run\r\n[WARNING] Unstaged files detected.\r\n[INFO] Stashing unstaged files to /home/asottile/.cache/pre-commit/patch1505686307.\r\nAn unexpected error has occurred: IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'\r\nCheck the log at /home/asottile/.cache/pre-commit/pre-commit.log\r\n```\r\n\r\nStacktrace:\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/error_handler.py\", line 44, in error_handler\r\n yield\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/main.py\", line 231, in main\r\n return run(runner, args)\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/commands/run.py\", line 249, in run\r\n with ctx:\r\n File \"/usr/lib/python2.7/contextlib.py\", line 17, in __enter__\r\n return self.gen.next()\r\n File \"/home/asottile/workspace/pre-commit/pre_commit/staged_files_only.py\", line 46, in staged_files_only\r\n with io.open(patch_filename, 'wb') as patch_file:\r\nIOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os.path\nimport time\n\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _git_apply(patch):\n args = ('apply', '--whitespace=nowarn', patch)\n try:\n cmd_output('git', *args, encoding=None)\n except CalledProcessError:\n # Retry with autocrlf=false -- see #570\n cmd_output('git', '-c', 'core.autocrlf=false', *args, encoding=None)\n\n\[email protected]\ndef staged_files_only(patch_dir):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n \"\"\"\n # Determine if there are unstaged files\n tree = cmd_output('git', 'write-tree')[1].strip()\n retcode, diff_stdout_binary, _ = cmd_output(\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = 'patch{}'.format(int(time.time()))\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_output('git', 'checkout', '--', '.')\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n _git_apply(patch_filename)\n except CalledProcessError:\n logger.warning(\n 'Stashed 
changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_output('git', 'checkout', '--', '.')\n _git_apply(patch_filename)\n logger.info('Restored changes from {}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]} | 1,554 | 156 |
gh_patches_debug_36746 | rasdani/github-patches | git_diff | DataDog__dd-agent-2965 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support ECDSA for ssh_check
ssh_check.py does not support ECDSA ssh keys.
paramiko supports ECDSA ssh keys.
http://docs.paramiko.org/en/1.17/api/keys.html#module-paramiko.ecdsakey
I changed ssh_check.py, but it's not working.
```
2016-05-08 17:56:27 JST | ERROR | dd.collector | checks.ssh_check(__init__.py:763) | Check 'ssh_check' instance #0 failed
Traceback (most recent call last):
File "/opt/datadog-agent/agent/checks/__init__.py", line 746, in run
self.check(copy.deepcopy(instance))
File "/opt/datadog-agent/agent/checks.d/ssh_check.py", line 70, in check
password=conf.password, pkey=private_key)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py", line 307, in connect
look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host)
File "/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py", line 519, in _auth
raise saved_exception
AuthenticationException: Authentication failed.
```
</issue>
<code>
[start of checks.d/ssh_check.py]
1 # (C) Datadog, Inc. 2010-2016
2 # All rights reserved
3 # Licensed under Simplified BSD License (see LICENSE)
4
5 # stdlib
6 from collections import namedtuple
7 import time
8
9 # 3p
10 import paramiko
11
12 # project
13 from checks import AgentCheck
14
15
16 class CheckSSH(AgentCheck):
17
18 OPTIONS = [
19 ('host', True, None, str),
20 ('port', False, 22, int),
21 ('username', True, None, str),
22 ('password', False, None, str),
23 ('private_key_file', False, None, str),
24 ('sftp_check', False, True, bool),
25 ('add_missing_keys', False, False, bool),
26 ]
27
28 Config = namedtuple('Config', [
29 'host',
30 'port',
31 'username',
32 'password',
33 'private_key_file',
34 'sftp_check',
35 'add_missing_keys',
36 ])
37
38 def _load_conf(self, instance):
39 params = []
40 for option, required, default, expected_type in self.OPTIONS:
41 value = instance.get(option)
42 if required and (not value or type(value)) != expected_type :
43 raise Exception("Please specify a valid {0}".format(option))
44
45 if value is None or type(value) != expected_type:
46 self.log.debug("Bad or missing value for {0} parameter. Using default".format(option))
47 value = default
48
49 params.append(value)
50 return self.Config._make(params)
51
52 def check(self, instance):
53 conf = self._load_conf(instance)
54 tags = ["instance:{0}-{1}".format(conf.host, conf.port)]
55
56 private_key = None
57 try:
58 private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)
59 except IOError:
60 self.warning("Unable to find private key file: {}".format(conf.private_key_file))
61 except paramiko.ssh_exception.PasswordRequiredException:
62 self.warning("Private key file is encrypted but no password was given")
63 except paramiko.ssh_exception.SSHException:
64 self.warning("Private key file is invalid")
65
66 client = paramiko.SSHClient()
67 if conf.add_missing_keys:
68 client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
69 client.load_system_host_keys()
70
71 exception_message = None
72 #Service Availability to check status of SSH
73 try:
74 client.connect(conf.host, port=conf.port, username=conf.username,
75 password=conf.password, pkey=private_key)
76 self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,
77 message=exception_message)
78
79 except Exception as e:
80 exception_message = str(e)
81 status = AgentCheck.CRITICAL
82 self.service_check('ssh.can_connect', status, tags=tags,
83 message=exception_message)
84 if conf.sftp_check:
85 self.service_check('sftp.can_connect', status, tags=tags,
86 message=exception_message)
87 raise
88
89 #Service Availability to check status of SFTP
90 if conf.sftp_check:
91 try:
92 sftp = client.open_sftp()
93 #Check response time of SFTP
94 start_time = time.time()
95 sftp.listdir('.')
96 status = AgentCheck.OK
97 end_time = time.time()
98 time_taken = end_time - start_time
99 self.gauge('sftp.response_time', time_taken, tags=tags)
100
101 except Exception as e:
102 exception_message = str(e)
103 status = AgentCheck.CRITICAL
104
105 if exception_message is None:
106 exception_message = "No errors occured"
107
108 self.service_check('sftp.can_connect', status, tags=tags,
109 message=exception_message)
110
[end of checks.d/ssh_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checks.d/ssh_check.py b/checks.d/ssh_check.py
--- a/checks.d/ssh_check.py
+++ b/checks.d/ssh_check.py
@@ -21,6 +21,7 @@
('username', True, None, str),
('password', False, None, str),
('private_key_file', False, None, str),
+ ('private_key_type', False, 'rsa', str),
('sftp_check', False, True, bool),
('add_missing_keys', False, False, bool),
]
@@ -31,6 +32,7 @@
'username',
'password',
'private_key_file',
+ 'private_key_type',
'sftp_check',
'add_missing_keys',
])
@@ -55,7 +57,10 @@
private_key = None
try:
- private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)
+ if conf.private_key_type == 'ecdsa':
+ private_key = paramiko.ECDSAKey.from_private_key_file(conf.private_key_file)
+ else:
+ private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)
except IOError:
self.warning("Unable to find private key file: {}".format(conf.private_key_file))
except paramiko.ssh_exception.PasswordRequiredException:
@@ -69,11 +74,11 @@
client.load_system_host_keys()
exception_message = None
- #Service Availability to check status of SSH
+ # Service Availability to check status of SSH
try:
client.connect(conf.host, port=conf.port, username=conf.username,
password=conf.password, pkey=private_key)
- self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,
+ self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,
message=exception_message)
except Exception as e:
@@ -86,7 +91,7 @@
message=exception_message)
raise
- #Service Availability to check status of SFTP
+ # Service Availability to check status of SFTP
if conf.sftp_check:
try:
sftp = client.open_sftp()
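The heart of the diff is a key-type dispatch. Below is a short sketch of that branch using paramiko's real loaders; the wrapper function and the example path are illustrative, not part of the check.

```python
import paramiko


def load_private_key(private_key_file, private_key_type="rsa"):
    # "ecdsa" selects paramiko.ECDSAKey, anything else falls back to RSA,
    # mirroring the patched check's default of 'rsa'.
    if private_key_type == "ecdsa":
        return paramiko.ECDSAKey.from_private_key_file(private_key_file)
    return paramiko.RSAKey.from_private_key_file(private_key_file)


# e.g. load_private_key("/home/dd-agent/.ssh/id_ecdsa", private_key_type="ecdsa")
```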
| {"golden_diff": "diff --git a/checks.d/ssh_check.py b/checks.d/ssh_check.py\n--- a/checks.d/ssh_check.py\n+++ b/checks.d/ssh_check.py\n@@ -21,6 +21,7 @@\n ('username', True, None, str),\n ('password', False, None, str),\n ('private_key_file', False, None, str),\n+ ('private_key_type', False, 'rsa', str),\n ('sftp_check', False, True, bool),\n ('add_missing_keys', False, False, bool),\n ]\n@@ -31,6 +32,7 @@\n 'username',\n 'password',\n 'private_key_file',\n+ 'private_key_type',\n 'sftp_check',\n 'add_missing_keys',\n ])\n@@ -55,7 +57,10 @@\n \n private_key = None\n try:\n- private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)\n+ if conf.private_key_type == 'ecdsa':\n+ private_key = paramiko.ECDSAKey.from_private_key_file(conf.private_key_file)\n+ else:\n+ private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)\n except IOError:\n self.warning(\"Unable to find private key file: {}\".format(conf.private_key_file))\n except paramiko.ssh_exception.PasswordRequiredException:\n@@ -69,11 +74,11 @@\n client.load_system_host_keys()\n \n exception_message = None\n- #Service Availability to check status of SSH\n+ # Service Availability to check status of SSH\n try:\n client.connect(conf.host, port=conf.port, username=conf.username,\n password=conf.password, pkey=private_key)\n- self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n+ self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n message=exception_message)\n \n except Exception as e:\n@@ -86,7 +91,7 @@\n message=exception_message)\n raise\n \n- #Service Availability to check status of SFTP\n+ # Service Availability to check status of SFTP\n if conf.sftp_check:\n try:\n sftp = client.open_sftp()\n", "issue": "Support ECDSA for ssh_check\nssh_check.py is not support ECDSA ssh key.\nparamiko is support ECDSA ssh key.\nhttp://docs.paramiko.org/en/1.17/api/keys.html#module-paramiko.ecdsakey\n\nI changes ssh_key.py, but It's not working.\n\n```\n2016-05-08 17:56:27 JST | ERROR | dd.collector | checks.ssh_check(__init__.py:763) | Check 'ssh_check' instance #0 failed\nTraceback (most recent call last):\n File \"/opt/datadog-agent/agent/checks/__init__.py\", line 746, in run\n self.check(copy.deepcopy(instance))\n File \"/opt/datadog-agent/agent/checks.d/ssh_check.py\", line 70, in check\n password=conf.password, pkey=private_key)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py\", line 307, in connect\n look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host)\n File \"/opt/datadog-agent/embedded/lib/python2.7/site-packages/paramiko/client.py\", line 519, in _auth\n raise saved_exception\nAuthenticationException: Authentication failed.\n```\n\n", "before_files": [{"content": "# (C) Datadog, Inc. 
2010-2016\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n# stdlib\nfrom collections import namedtuple\nimport time\n\n# 3p\nimport paramiko\n\n# project\nfrom checks import AgentCheck\n\n\nclass CheckSSH(AgentCheck):\n\n OPTIONS = [\n ('host', True, None, str),\n ('port', False, 22, int),\n ('username', True, None, str),\n ('password', False, None, str),\n ('private_key_file', False, None, str),\n ('sftp_check', False, True, bool),\n ('add_missing_keys', False, False, bool),\n ]\n\n Config = namedtuple('Config', [\n 'host',\n 'port',\n 'username',\n 'password',\n 'private_key_file',\n 'sftp_check',\n 'add_missing_keys',\n ])\n\n def _load_conf(self, instance):\n params = []\n for option, required, default, expected_type in self.OPTIONS:\n value = instance.get(option)\n if required and (not value or type(value)) != expected_type :\n raise Exception(\"Please specify a valid {0}\".format(option))\n\n if value is None or type(value) != expected_type:\n self.log.debug(\"Bad or missing value for {0} parameter. Using default\".format(option))\n value = default\n\n params.append(value)\n return self.Config._make(params)\n\n def check(self, instance):\n conf = self._load_conf(instance)\n tags = [\"instance:{0}-{1}\".format(conf.host, conf.port)]\n\n private_key = None\n try:\n private_key = paramiko.RSAKey.from_private_key_file(conf.private_key_file)\n except IOError:\n self.warning(\"Unable to find private key file: {}\".format(conf.private_key_file))\n except paramiko.ssh_exception.PasswordRequiredException:\n self.warning(\"Private key file is encrypted but no password was given\")\n except paramiko.ssh_exception.SSHException:\n self.warning(\"Private key file is invalid\")\n\n client = paramiko.SSHClient()\n if conf.add_missing_keys:\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.load_system_host_keys()\n\n exception_message = None\n #Service Availability to check status of SSH\n try:\n client.connect(conf.host, port=conf.port, username=conf.username,\n password=conf.password, pkey=private_key)\n self.service_check('ssh.can_connect', AgentCheck.OK, tags=tags,\n message=exception_message)\n\n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n self.service_check('ssh.can_connect', status, tags=tags,\n message=exception_message)\n if conf.sftp_check:\n self.service_check('sftp.can_connect', status, tags=tags,\n message=exception_message)\n raise\n\n #Service Availability to check status of SFTP\n if conf.sftp_check:\n try:\n sftp = client.open_sftp()\n #Check response time of SFTP\n start_time = time.time()\n sftp.listdir('.')\n status = AgentCheck.OK\n end_time = time.time()\n time_taken = end_time - start_time\n self.gauge('sftp.response_time', time_taken, tags=tags)\n\n except Exception as e:\n exception_message = str(e)\n status = AgentCheck.CRITICAL\n\n if exception_message is None:\n exception_message = \"No errors occured\"\n\n self.service_check('sftp.can_connect', status, tags=tags,\n message=exception_message)\n", "path": "checks.d/ssh_check.py"}]} | 1,859 | 497 |
gh_patches_debug_2494 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-951 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Projectcontainer active projects count broken
https://mein.berlin.de/projects/stadtforum-berlin-wohnen/
shows `7 of 4` active projects.
</issue>
<code>
[start of meinberlin/apps/projectcontainers/models.py]
1 from django.db import models
2 from django.utils import timezone
3 from django.utils.translation import ugettext_lazy as _
4
5 from adhocracy4.projects import models as project_models
6
7
8 class ProjectContainer(project_models.Project):
9 projects = models.ManyToManyField(
10 project_models.Project,
11 related_name='containers',
12 verbose_name=_('Projects')
13 )
14
15 @property
16 def not_archived_projects(self):
17 return self.projects.filter(is_archived=False)
18
19 @property
20 def active_projects(self):
21 now = timezone.now()
22 return self.projects.filter(
23 module__phase__start_date__lte=now,
24 module__phase__end_date__gt=now)
25
26 @property
27 def phases(self):
28 from adhocracy4.phases import models as phase_models
29 return phase_models.Phase.objects\
30 .filter(module__project__containers__id=self.id)
31
[end of meinberlin/apps/projectcontainers/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/projectcontainers/models.py b/meinberlin/apps/projectcontainers/models.py
--- a/meinberlin/apps/projectcontainers/models.py
+++ b/meinberlin/apps/projectcontainers/models.py
@@ -21,7 +21,7 @@
now = timezone.now()
return self.projects.filter(
module__phase__start_date__lte=now,
- module__phase__end_date__gt=now)
+ module__phase__end_date__gt=now).distinct()
@property
def phases(self):
| {"golden_diff": "diff --git a/meinberlin/apps/projectcontainers/models.py b/meinberlin/apps/projectcontainers/models.py\n--- a/meinberlin/apps/projectcontainers/models.py\n+++ b/meinberlin/apps/projectcontainers/models.py\n@@ -21,7 +21,7 @@\n now = timezone.now()\n return self.projects.filter(\n module__phase__start_date__lte=now,\n- module__phase__end_date__gt=now)\n+ module__phase__end_date__gt=now).distinct()\n \n @property\n def phases(self):\n", "issue": "Projectcontainer active projects count broken\nhttps://mein.berlin.de/projects/stadtforum-berlin-wohnen/\r\n\r\nshows `7 of 4` active projects.\n", "before_files": [{"content": "from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.projects import models as project_models\n\n\nclass ProjectContainer(project_models.Project):\n projects = models.ManyToManyField(\n project_models.Project,\n related_name='containers',\n verbose_name=_('Projects')\n )\n\n @property\n def not_archived_projects(self):\n return self.projects.filter(is_archived=False)\n\n @property\n def active_projects(self):\n now = timezone.now()\n return self.projects.filter(\n module__phase__start_date__lte=now,\n module__phase__end_date__gt=now)\n\n @property\n def phases(self):\n from adhocracy4.phases import models as phase_models\n return phase_models.Phase.objects\\\n .filter(module__project__containers__id=self.id)\n", "path": "meinberlin/apps/projectcontainers/models.py"}]} | 816 | 123 |
gh_patches_debug_589 | rasdani/github-patches | git_diff | pex-tool__pex-1377 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.43
On the docket:
+ [x] Support more verbose output for interpreter info. (#1347)
+ [x] Fix Pex emitting warnings about its Pip PEX venv. (#1351)
+ [x] Fix execution modes. (#1353)
+ [x] Warn for PEX env vars unsupported by venv. (#1354)
+ [x] Do not suppress pex output in bidst_pex (#1358)
+ [x] Using --platform manylinux2010 includes pyarrow wheel for manylinux2014 #1355
+ [x] Fix --no-manylinux. #1365
+ [x] Environment markers are incorrectly evaluated for --platform resolves. #1366
+ [x] Pex probes wheel metadata incorrectly. #1375
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.42"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.42"
+__version__ = "2.1.43"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.42\"\n+__version__ = \"2.1.43\"\n", "issue": "Release 2.1.43\nOn the docket:\r\n+ [x] Support more verbose output for interpreter info. (#1347) \r\n+ [x] Fix Pex emitting warnings about its Pip PEX venv. (#1351)\r\n+ [x] Fix execution modes. (#1353) \r\n+ [x] Warn for PEX env vars unsupported by venv. (#1354)\r\n+ [x] Do not suppress pex output in bidst_pex (#1358)\r\n+ [x] Using --platform manylinux2010 includes pyarrow wheel for manylinux2014 #1355\r\n+ [x] Fix --no-manylinux. #1365\r\n+ [x] Environment markers are incorrectly evaluated for --platform resolves. #1366\r\n+ [x] Pex probes wheel metadata incorrectly. #1375\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.42\"\n", "path": "pex/version.py"}]} | 779 | 97 |
gh_patches_debug_16783 | rasdani/github-patches | git_diff | kivy__python-for-android-2842 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
libzmq recipy build fail
### Logs
```
[DEBUG]: 	 CXX src/src_libzmq_la-router.lo
[DEBUG]: 	In file included from src/mtrie.cpp:32:
[DEBUG]: 	./src/generic_mtrie_impl.hpp:52:46: error: ISO C++ requires the name after '::~' to be found in the same scope as the name before '::~' [-Werror,-Wdtor-name]
[DEBUG]: 	template <typename T> zmq::generic_mtrie_t<T>::~generic_mtrie_t ()
[DEBUG]: 	                       ~~~~~~~~~~~~~~~~~~~~~~~^~
[DEBUG]: 	                       ::generic_mtrie_t
[DEBUG]: 	 CXX src/src_libzmq_la-scatter.lo
[DEBUG]: 	1 error generated.
```
</issue>
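The failing line is clang's `-Wdtor-name` diagnostic, promoted to a hard error by `-Werror` in the zeromq 4.3.2 sources. A minimal sketch of a configure invocation that demotes it back to a warning, assuming libzmq's autoconf script accepts a `--disable-Werror` switch; the host triple below is a placeholder, not taken from the recipe:

```python
# Sketch only: builds the argument list the recipe would hand to ./configure.
configure_args = [
    './configure',
    '--host=arm-linux-androideabi',
    '--without-documentation',
    '--with-libsodium=no',
    '--disable-libunwind',
    '--disable-Werror',  # keep -Wdtor-name as a warning instead of a hard error
]
print(' '.join(configure_args))
```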
<code>
[start of pythonforandroid/recipes/libzmq/__init__.py]
1 from pythonforandroid.recipe import Recipe
2 from pythonforandroid.logger import shprint
3 from pythonforandroid.util import current_directory
4 from os.path import join
5 import sh
6
7
8 class LibZMQRecipe(Recipe):
9 version = '4.3.2'
10 url = 'https://github.com/zeromq/libzmq/releases/download/v{version}/zeromq-{version}.zip'
11 depends = []
12 built_libraries = {'libzmq.so': 'src/.libs'}
13 need_stl_shared = True
14
15 def build_arch(self, arch):
16 env = self.get_recipe_env(arch)
17 #
18 # libsodium_recipe = Recipe.get_recipe('libsodium', self.ctx)
19 # libsodium_dir = libsodium_recipe.get_build_dir(arch.arch)
20 # env['sodium_CFLAGS'] = '-I{}'.format(join(
21 # libsodium_dir, 'src'))
22 # env['sodium_LDLAGS'] = '-L{}'.format(join(
23 # libsodium_dir, 'src', 'libsodium', '.libs'))
24
25 curdir = self.get_build_dir(arch.arch)
26 prefix = join(curdir, "install")
27
28 with current_directory(curdir):
29 bash = sh.Command('sh')
30 shprint(
31 bash, './configure',
32 '--host={}'.format(arch.command_prefix),
33 '--without-documentation',
34 '--prefix={}'.format(prefix),
35 '--with-libsodium=no',
36 '--disable-libunwind',
37 _env=env)
38 shprint(sh.make, _env=env)
39 shprint(sh.make, 'install', _env=env)
40
41
42 recipe = LibZMQRecipe()
43
[end of pythonforandroid/recipes/libzmq/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/libzmq/__init__.py b/pythonforandroid/recipes/libzmq/__init__.py
--- a/pythonforandroid/recipes/libzmq/__init__.py
+++ b/pythonforandroid/recipes/libzmq/__init__.py
@@ -6,7 +6,7 @@
class LibZMQRecipe(Recipe):
- version = '4.3.2'
+ version = '4.3.4'
url = 'https://github.com/zeromq/libzmq/releases/download/v{version}/zeromq-{version}.zip'
depends = []
built_libraries = {'libzmq.so': 'src/.libs'}
@@ -34,6 +34,7 @@
'--prefix={}'.format(prefix),
'--with-libsodium=no',
'--disable-libunwind',
+ '--disable-Werror',
_env=env)
shprint(sh.make, _env=env)
shprint(sh.make, 'install', _env=env)
| {"golden_diff": "diff --git a/pythonforandroid/recipes/libzmq/__init__.py b/pythonforandroid/recipes/libzmq/__init__.py\n--- a/pythonforandroid/recipes/libzmq/__init__.py\n+++ b/pythonforandroid/recipes/libzmq/__init__.py\n@@ -6,7 +6,7 @@\n \n \n class LibZMQRecipe(Recipe):\n- version = '4.3.2'\n+ version = '4.3.4'\n url = 'https://github.com/zeromq/libzmq/releases/download/v{version}/zeromq-{version}.zip'\n depends = []\n built_libraries = {'libzmq.so': 'src/.libs'}\n@@ -34,6 +34,7 @@\n '--prefix={}'.format(prefix),\n '--with-libsodium=no',\n '--disable-libunwind',\n+ '--disable-Werror',\n _env=env)\n shprint(sh.make, _env=env)\n shprint(sh.make, 'install', _env=env)\n", "issue": "libzmq recipy build fail\n\r\n\r\n### Logs\r\n\r\n```\r\n[1m[90m[DEBUG][39m[0m: \t CXX src/src_libzmq_la-router.lo\r\n[1m[90m[DEBUG][39m[0m: \tIn file included from src/mtrie.cpp:32:\r\n[1m[90m[DEBUG][39m[0m: \t./src/generic_mtrie_impl.hpp:52:46: error: ISO C++ requires the name after '::~' to be found in the same scope as the name before '::~' [-Werror,-Wdtor-name]\r\n[1m[90m[DEBUG][39m[0m: \ttemplate <typename T> zmq::generic_mtrie_t<T>::~generic_mtrie_t ()\r\n[1m[90m[DEBUG][39m[0m: \t ~~~~~~~~~~~~~~~~~~~~~~~^~\r\n[1m[90m[DEBUG][39m[0m: \t ::generic_mtrie_t\r\n[1m[90m[DEBUG][39m[0m: \t CXX src/src_libzmq_la-scatter.lo\r\n[1m[90m[DEBUG][39m[0m: \t1 error generated.\r\n```\r\n\n", "before_files": [{"content": "from pythonforandroid.recipe import Recipe\nfrom pythonforandroid.logger import shprint\nfrom pythonforandroid.util import current_directory\nfrom os.path import join\nimport sh\n\n\nclass LibZMQRecipe(Recipe):\n version = '4.3.2'\n url = 'https://github.com/zeromq/libzmq/releases/download/v{version}/zeromq-{version}.zip'\n depends = []\n built_libraries = {'libzmq.so': 'src/.libs'}\n need_stl_shared = True\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n #\n # libsodium_recipe = Recipe.get_recipe('libsodium', self.ctx)\n # libsodium_dir = libsodium_recipe.get_build_dir(arch.arch)\n # env['sodium_CFLAGS'] = '-I{}'.format(join(\n # libsodium_dir, 'src'))\n # env['sodium_LDLAGS'] = '-L{}'.format(join(\n # libsodium_dir, 'src', 'libsodium', '.libs'))\n\n curdir = self.get_build_dir(arch.arch)\n prefix = join(curdir, \"install\")\n\n with current_directory(curdir):\n bash = sh.Command('sh')\n shprint(\n bash, './configure',\n '--host={}'.format(arch.command_prefix),\n '--without-documentation',\n '--prefix={}'.format(prefix),\n '--with-libsodium=no',\n '--disable-libunwind',\n _env=env)\n shprint(sh.make, _env=env)\n shprint(sh.make, 'install', _env=env)\n\n\nrecipe = LibZMQRecipe()\n", "path": "pythonforandroid/recipes/libzmq/__init__.py"}]} | 1,271 | 224 |
gh_patches_debug_10174 | rasdani/github-patches | git_diff | pre-commit__pre-commit-96 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
System hooks with spaces in entry are not runnable
It's pretty reasonable to have a system hook that looks like this:
```
- id: foo
name: foo
entry: python -m bar
language: system
```
Currently this fails:
```
$ pre-commit run foo --all-files
foo...................................................Failed
xargs: python -m bar: No such file or directory
```
</issue>
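The failure comes from handing the whole entry string to `xargs` as a single argument, so it is treated as one executable name. A minimal sketch of the difference, keeping `bar` as the hypothetical module from the example above:

```python
import shlex

entry = 'python -m bar'
as_one_token = ['xargs', entry]            # xargs looks for a program named "python -m bar"
as_argv = ['xargs'] + shlex.split(entry)   # ['xargs', 'python', '-m', 'bar']
print(as_one_token)
print(as_argv)
```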
<code>
[start of pre_commit/languages/system.py]
1 ENVIRONMENT_DIR = None
2
3
4 def install_environment(repo_cmd_runner):
5 """Installation for system type is a noop."""
6
7
8 def run_hook(repo_cmd_runner, hook, file_args):
9 return repo_cmd_runner.run(
10 ['xargs', hook['entry']] + hook['args'],
11 # TODO: this is duplicated in pre_commit/languages/helpers.py
12 stdin='\n'.join(list(file_args) + ['']),
13 retcode=None,
14 )
15
[end of pre_commit/languages/system.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/languages/system.py b/pre_commit/languages/system.py
--- a/pre_commit/languages/system.py
+++ b/pre_commit/languages/system.py
@@ -1,3 +1,6 @@
+import shlex
+
+
ENVIRONMENT_DIR = None
@@ -7,7 +10,7 @@
def run_hook(repo_cmd_runner, hook, file_args):
return repo_cmd_runner.run(
- ['xargs', hook['entry']] + hook['args'],
+ ['xargs'] + shlex.split(hook['entry']) + hook['args'],
# TODO: this is duplicated in pre_commit/languages/helpers.py
stdin='\n'.join(list(file_args) + ['']),
retcode=None,
| {"golden_diff": "diff --git a/pre_commit/languages/system.py b/pre_commit/languages/system.py\n--- a/pre_commit/languages/system.py\n+++ b/pre_commit/languages/system.py\n@@ -1,3 +1,6 @@\n+import shlex\n+\n+\n ENVIRONMENT_DIR = None\n \n \n@@ -7,7 +10,7 @@\n \n def run_hook(repo_cmd_runner, hook, file_args):\n return repo_cmd_runner.run(\n- ['xargs', hook['entry']] + hook['args'],\n+ ['xargs'] + shlex.split(hook['entry']) + hook['args'],\n # TODO: this is duplicated in pre_commit/languages/helpers.py\n stdin='\\n'.join(list(file_args) + ['']),\n retcode=None,\n", "issue": "System hooks with spaces in entry are not runnable\nIt's pretty reasonable to have a system hook that looks like this:\n\n```\n- id: foo\n name: foo\n entry: python -m bar\n language: system\n```\n\nCurrently this fails:\n\n```\n$ pre-commit run foo --all-files\nfoo...................................................Failed\n\nxargs: python -m bar: No such file or directory\n```\n\n", "before_files": [{"content": "ENVIRONMENT_DIR = None\n\n\ndef install_environment(repo_cmd_runner):\n \"\"\"Installation for system type is a noop.\"\"\"\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n return repo_cmd_runner.run(\n ['xargs', hook['entry']] + hook['args'],\n # TODO: this is duplicated in pre_commit/languages/helpers.py\n stdin='\\n'.join(list(file_args) + ['']),\n retcode=None,\n )\n", "path": "pre_commit/languages/system.py"}]} | 743 | 160 |
gh_patches_debug_19377 | rasdani/github-patches | git_diff | scrapy__scrapy-5526 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Response.headers loses data on multiple values
https://github.com/scrapy/scrapy/issues/1262 reported that by default `response.headers` would only expose the first value of a header e.g. when casted as a `dict`, acknowledging that `response.headers.getlist` could be used instead to get all values.
I have just found out that the latter is not true:
```python
>>> from scrapy.http import Response
>>> response = Response("https://example.com", headers=(("a", "b"), ("a", "c")))
>>> response.headers.getlist("a")
[b'c']
```
I could verify the issue happening as far back as Scrapy 1.6, so it does not look like a recent bug.
</issue>
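A plain-dict sketch of the suspected mechanism, assuming the headers mapping normalises every incoming pair to a fresh one-element list: the second `("a", "c")` pair then replaces the first instead of extending it, which is consistent with `getlist` returning only `[b'c']`:

```python
pairs = (("a", "b"), ("a", "c"))

replacing = {}
for key, value in pairs:
    replacing[key] = [value.encode()]                         # later pair wins -> [b'c']

accumulating = {}
for key, value in pairs:
    accumulating.setdefault(key, []).append(value.encode())   # -> [b'b', b'c']

print(replacing["a"], accumulating["a"])
```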
<code>
[start of scrapy/http/headers.py]
1 from w3lib.http import headers_dict_to_raw
2 from scrapy.utils.datatypes import CaselessDict
3 from scrapy.utils.python import to_unicode
4
5
6 class Headers(CaselessDict):
7 """Case insensitive http headers dictionary"""
8
9 def __init__(self, seq=None, encoding='utf-8'):
10 self.encoding = encoding
11 super().__init__(seq)
12
13 def normkey(self, key):
14 """Normalize key to bytes"""
15 return self._tobytes(key.title())
16
17 def normvalue(self, value):
18 """Normalize values to bytes"""
19 if value is None:
20 value = []
21 elif isinstance(value, (str, bytes)):
22 value = [value]
23 elif not hasattr(value, '__iter__'):
24 value = [value]
25
26 return [self._tobytes(x) for x in value]
27
28 def _tobytes(self, x):
29 if isinstance(x, bytes):
30 return x
31 elif isinstance(x, str):
32 return x.encode(self.encoding)
33 elif isinstance(x, int):
34 return str(x).encode(self.encoding)
35 else:
36 raise TypeError(f'Unsupported value type: {type(x)}')
37
38 def __getitem__(self, key):
39 try:
40 return super().__getitem__(key)[-1]
41 except IndexError:
42 return None
43
44 def get(self, key, def_val=None):
45 try:
46 return super().get(key, def_val)[-1]
47 except IndexError:
48 return None
49
50 def getlist(self, key, def_val=None):
51 try:
52 return super().__getitem__(key)
53 except KeyError:
54 if def_val is not None:
55 return self.normvalue(def_val)
56 return []
57
58 def setlist(self, key, list_):
59 self[key] = list_
60
61 def setlistdefault(self, key, default_list=()):
62 return self.setdefault(key, default_list)
63
64 def appendlist(self, key, value):
65 lst = self.getlist(key)
66 lst.extend(self.normvalue(value))
67 self[key] = lst
68
69 def items(self):
70 return ((k, self.getlist(k)) for k in self.keys())
71
72 def values(self):
73 return [self[k] for k in self.keys()]
74
75 def to_string(self):
76 return headers_dict_to_raw(self)
77
78 def to_unicode_dict(self):
79 """ Return headers as a CaselessDict with unicode keys
80 and unicode values. Multiple values are joined with ','.
81 """
82 return CaselessDict(
83 (to_unicode(key, encoding=self.encoding),
84 to_unicode(b','.join(value), encoding=self.encoding))
85 for key, value in self.items())
86
87 def __copy__(self):
88 return self.__class__(self)
89 copy = __copy__
90
[end of scrapy/http/headers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/http/headers.py b/scrapy/http/headers.py
--- a/scrapy/http/headers.py
+++ b/scrapy/http/headers.py
@@ -1,3 +1,5 @@
+from collections.abc import Mapping
+
from w3lib.http import headers_dict_to_raw
from scrapy.utils.datatypes import CaselessDict
from scrapy.utils.python import to_unicode
@@ -10,6 +12,13 @@
self.encoding = encoding
super().__init__(seq)
+ def update(self, seq):
+ seq = seq.items() if isinstance(seq, Mapping) else seq
+ iseq = {}
+ for k, v in seq:
+ iseq.setdefault(self.normkey(k), []).extend(self.normvalue(v))
+ super().update(iseq)
+
def normkey(self, key):
"""Normalize key to bytes"""
return self._tobytes(key.title())
@@ -86,4 +95,5 @@
def __copy__(self):
return self.__class__(self)
+
copy = __copy__
| {"golden_diff": "diff --git a/scrapy/http/headers.py b/scrapy/http/headers.py\n--- a/scrapy/http/headers.py\n+++ b/scrapy/http/headers.py\n@@ -1,3 +1,5 @@\n+from collections.abc import Mapping\n+\n from w3lib.http import headers_dict_to_raw\n from scrapy.utils.datatypes import CaselessDict\n from scrapy.utils.python import to_unicode\n@@ -10,6 +12,13 @@\n self.encoding = encoding\n super().__init__(seq)\n \n+ def update(self, seq):\n+ seq = seq.items() if isinstance(seq, Mapping) else seq\n+ iseq = {}\n+ for k, v in seq:\n+ iseq.setdefault(self.normkey(k), []).extend(self.normvalue(v))\n+ super().update(iseq)\n+\n def normkey(self, key):\n \"\"\"Normalize key to bytes\"\"\"\n return self._tobytes(key.title())\n@@ -86,4 +95,5 @@\n \n def __copy__(self):\n return self.__class__(self)\n+\n copy = __copy__\n", "issue": "Response.headers loses data on multiple values\nhttps://github.com/scrapy/scrapy/issues/1262 reported that by default `response.headers` would only expose the first value of a header e.g. when casted as a `dict`, acknowledging that `response.headers.getlist` could be used instead to get all values.\r\n\r\nI have just found out that the latter is not true:\r\n\r\n```python\r\n>>> from scrapy.http import Response\r\n>>> response = Response(\"https://example.com\", headers=((\"a\", \"b\"), (\"a\", \"c\")))\r\n>>> response.headers.getlist(\"a\")\r\n[b'c']\r\n```\r\n\r\nI could verify the issue happening as far back as Scrapy 1.6, so it does not look like a recent bug.\n", "before_files": [{"content": "from w3lib.http import headers_dict_to_raw\nfrom scrapy.utils.datatypes import CaselessDict\nfrom scrapy.utils.python import to_unicode\n\n\nclass Headers(CaselessDict):\n \"\"\"Case insensitive http headers dictionary\"\"\"\n\n def __init__(self, seq=None, encoding='utf-8'):\n self.encoding = encoding\n super().__init__(seq)\n\n def normkey(self, key):\n \"\"\"Normalize key to bytes\"\"\"\n return self._tobytes(key.title())\n\n def normvalue(self, value):\n \"\"\"Normalize values to bytes\"\"\"\n if value is None:\n value = []\n elif isinstance(value, (str, bytes)):\n value = [value]\n elif not hasattr(value, '__iter__'):\n value = [value]\n\n return [self._tobytes(x) for x in value]\n\n def _tobytes(self, x):\n if isinstance(x, bytes):\n return x\n elif isinstance(x, str):\n return x.encode(self.encoding)\n elif isinstance(x, int):\n return str(x).encode(self.encoding)\n else:\n raise TypeError(f'Unsupported value type: {type(x)}')\n\n def __getitem__(self, key):\n try:\n return super().__getitem__(key)[-1]\n except IndexError:\n return None\n\n def get(self, key, def_val=None):\n try:\n return super().get(key, def_val)[-1]\n except IndexError:\n return None\n\n def getlist(self, key, def_val=None):\n try:\n return super().__getitem__(key)\n except KeyError:\n if def_val is not None:\n return self.normvalue(def_val)\n return []\n\n def setlist(self, key, list_):\n self[key] = list_\n\n def setlistdefault(self, key, default_list=()):\n return self.setdefault(key, default_list)\n\n def appendlist(self, key, value):\n lst = self.getlist(key)\n lst.extend(self.normvalue(value))\n self[key] = lst\n\n def items(self):\n return ((k, self.getlist(k)) for k in self.keys())\n\n def values(self):\n return [self[k] for k in self.keys()]\n\n def to_string(self):\n return headers_dict_to_raw(self)\n\n def to_unicode_dict(self):\n \"\"\" Return headers as a CaselessDict with unicode keys\n and unicode values. 
Multiple values are joined with ','.\n \"\"\"\n return CaselessDict(\n (to_unicode(key, encoding=self.encoding),\n to_unicode(b','.join(value), encoding=self.encoding))\n for key, value in self.items())\n\n def __copy__(self):\n return self.__class__(self)\n copy = __copy__\n", "path": "scrapy/http/headers.py"}]} | 1,445 | 238 |
gh_patches_debug_29067 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1008 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Chunked uploads not attempting retries
I forgot to implement the actual retry-portion for the jQuery file uploading. As such, jQuery file upload will only try to upload a given chunk once. See here:
https://github.com/blueimp/jQuery-File-Upload/wiki/Chunked-file-uploads
</issue>
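For a retried chunk to be useful, the client has to learn how much of the file the server already holds in one contiguous run. A minimal sketch of that server-side calculation, assuming chunks carry `start_byte`/`end_byte` fields as in the staged-file records used below:

```python
def last_contiguous_end_byte(chunks):
    last_end = -1
    for chunk in sorted(chunks, key=lambda c: c["start_byte"]):
        if chunk["start_byte"] != last_end + 1:
            break                        # gap: an earlier chunk failed and must be resent
        last_end = chunk["end_byte"]
    return last_end

print(last_contiguous_end_byte([
    {"start_byte": 0, "end_byte": 9},
    {"start_byte": 10, "end_byte": 19},
    {"start_byte": 30, "end_byte": 39},  # gap starting at byte 20
]))  # -> 19
```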
<code>
[start of app/grandchallenge/jqfileupload/views.py]
1 import re
2 from datetime import timedelta
3
4 from django.utils.timezone import now
5 from rest_framework import mixins
6 from rest_framework.parsers import FormParser, MultiPartParser
7 from rest_framework.response import Response
8 from rest_framework.status import HTTP_400_BAD_REQUEST
9 from rest_framework.viewsets import GenericViewSet
10 from rest_framework_guardian.filters import ObjectPermissionsFilter
11
12 from grandchallenge.core.permissions.rest_framework import (
13 DjangoObjectOnlyPermissions,
14 )
15 from grandchallenge.jqfileupload.models import StagedFile
16 from grandchallenge.jqfileupload.serializers import StagedFileSerializer
17
18
19 class StagedFileViewSet(
20 mixins.CreateModelMixin,
21 mixins.RetrieveModelMixin,
22 mixins.ListModelMixin,
23 GenericViewSet,
24 ):
25 serializer_class = StagedFileSerializer
26 queryset = StagedFile.objects.all()
27 parser_classes = (FormParser, MultiPartParser)
28 permission_classes = (DjangoObjectOnlyPermissions,)
29 filter_backends = (ObjectPermissionsFilter,)
30
31 def create(self, request, *args, **kwargs):
32 if "HTTP_CONTENT_RANGE" in self.request.META:
33 if not self.range_header or not self.range_match:
34 return Response(
35 {"status": "Client did not supply valid Content-Range"},
36 status=HTTP_400_BAD_REQUEST,
37 )
38
39 return super().create(request, *args, **kwargs)
40
41 def get_serializer(self, *args, **kwargs):
42 data = [
43 self._handle_file(uploaded_file)
44 for uploaded_file in self.request.FILES.values()
45 ]
46
47 if data:
48 kwargs.update({"many": True, "data": data})
49
50 return super().get_serializer(*args, **kwargs)
51
52 @property
53 def user_pk_str(self):
54 return str(self.request.user.pk)
55
56 @property
57 def client_id(self):
58 return self.request.POST.get("X-Upload-ID")
59
60 @property
61 def range_header(self):
62 return self.request.META.get("HTTP_CONTENT_RANGE")
63
64 @property
65 def range_match(self):
66 return re.match(
67 r"bytes (?P<start>[0-9]{1,32})-(?P<end>[0-9]{1,32})/(?P<length>\*|[0-9]{1,32})",
68 self.range_header,
69 )
70
71 def _handle_file(self, uploaded_file):
72 if "HTTP_CONTENT_RANGE" in self.request.META:
73 start_byte = int(self.range_match.group("start"))
74 end_byte = int(self.range_match.group("end"))
75 if (self.range_match.group("length") is None) or (
76 self.range_match.group("length") == "*"
77 ):
78 total_size = None
79 else:
80 total_size = int(self.range_match.group("length"))
81 else:
82 start_byte = 0
83 end_byte = uploaded_file.size - 1
84 total_size = uploaded_file.size
85
86 return {
87 "client_id": self.client_id,
88 "end_byte": end_byte,
89 "file": uploaded_file,
90 "filename": uploaded_file.name,
91 "start_byte": start_byte if start_byte is not None else 0,
92 "timeout": now() + timedelta(hours=6),
93 "total_size": total_size,
94 "user_pk_str": self.user_pk_str,
95 }
96
[end of app/grandchallenge/jqfileupload/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/jqfileupload/views.py b/app/grandchallenge/jqfileupload/views.py
--- a/app/grandchallenge/jqfileupload/views.py
+++ b/app/grandchallenge/jqfileupload/views.py
@@ -3,6 +3,7 @@
from django.utils.timezone import now
from rest_framework import mixins
+from rest_framework.decorators import action
from rest_framework.parsers import FormParser, MultiPartParser
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
@@ -35,7 +36,6 @@
{"status": "Client did not supply valid Content-Range"},
status=HTTP_400_BAD_REQUEST,
)
-
return super().create(request, *args, **kwargs)
def get_serializer(self, *args, **kwargs):
@@ -93,3 +93,21 @@
"total_size": total_size,
"user_pk_str": self.user_pk_str,
}
+
+ def _find_last_end_byte(self, files):
+ last_end_byte = -1
+ for file in files:
+ if file["start_byte"] != last_end_byte + 1:
+ return last_end_byte
+ last_end_byte = file["end_byte"]
+ return last_end_byte
+
+ @action(detail=False, methods=["get"])
+ def get_current_file_size(self, request):
+ client_id = request.GET.get("file", None)
+ files = (
+ StagedFile.objects.filter(client_id=client_id)
+ .order_by("start_byte")
+ .values("start_byte", "end_byte")
+ )
+ return Response({"current_size": self._find_last_end_byte(files)})
| {"golden_diff": "diff --git a/app/grandchallenge/jqfileupload/views.py b/app/grandchallenge/jqfileupload/views.py\n--- a/app/grandchallenge/jqfileupload/views.py\n+++ b/app/grandchallenge/jqfileupload/views.py\n@@ -3,6 +3,7 @@\n \n from django.utils.timezone import now\n from rest_framework import mixins\n+from rest_framework.decorators import action\n from rest_framework.parsers import FormParser, MultiPartParser\n from rest_framework.response import Response\n from rest_framework.status import HTTP_400_BAD_REQUEST\n@@ -35,7 +36,6 @@\n {\"status\": \"Client did not supply valid Content-Range\"},\n status=HTTP_400_BAD_REQUEST,\n )\n-\n return super().create(request, *args, **kwargs)\n \n def get_serializer(self, *args, **kwargs):\n@@ -93,3 +93,21 @@\n \"total_size\": total_size,\n \"user_pk_str\": self.user_pk_str,\n }\n+\n+ def _find_last_end_byte(self, files):\n+ last_end_byte = -1\n+ for file in files:\n+ if file[\"start_byte\"] != last_end_byte + 1:\n+ return last_end_byte\n+ last_end_byte = file[\"end_byte\"]\n+ return last_end_byte\n+\n+ @action(detail=False, methods=[\"get\"])\n+ def get_current_file_size(self, request):\n+ client_id = request.GET.get(\"file\", None)\n+ files = (\n+ StagedFile.objects.filter(client_id=client_id)\n+ .order_by(\"start_byte\")\n+ .values(\"start_byte\", \"end_byte\")\n+ )\n+ return Response({\"current_size\": self._find_last_end_byte(files)})\n", "issue": "Chunked uploads not attempting retries\nI forgot to implement the actual retry-portion for the jQuery file uploading. As such, jQuery file upload will only try to upload a given chunk once. See here:\r\n\r\nhttps://github.com/blueimp/jQuery-File-Upload/wiki/Chunked-file-uploads\n", "before_files": [{"content": "import re\nfrom datetime import timedelta\n\nfrom django.utils.timezone import now\nfrom rest_framework import mixins\nfrom rest_framework.parsers import FormParser, MultiPartParser\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_400_BAD_REQUEST\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework_guardian.filters import ObjectPermissionsFilter\n\nfrom grandchallenge.core.permissions.rest_framework import (\n DjangoObjectOnlyPermissions,\n)\nfrom grandchallenge.jqfileupload.models import StagedFile\nfrom grandchallenge.jqfileupload.serializers import StagedFileSerializer\n\n\nclass StagedFileViewSet(\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n GenericViewSet,\n):\n serializer_class = StagedFileSerializer\n queryset = StagedFile.objects.all()\n parser_classes = (FormParser, MultiPartParser)\n permission_classes = (DjangoObjectOnlyPermissions,)\n filter_backends = (ObjectPermissionsFilter,)\n\n def create(self, request, *args, **kwargs):\n if \"HTTP_CONTENT_RANGE\" in self.request.META:\n if not self.range_header or not self.range_match:\n return Response(\n {\"status\": \"Client did not supply valid Content-Range\"},\n status=HTTP_400_BAD_REQUEST,\n )\n\n return super().create(request, *args, **kwargs)\n\n def get_serializer(self, *args, **kwargs):\n data = [\n self._handle_file(uploaded_file)\n for uploaded_file in self.request.FILES.values()\n ]\n\n if data:\n kwargs.update({\"many\": True, \"data\": data})\n\n return super().get_serializer(*args, **kwargs)\n\n @property\n def user_pk_str(self):\n return str(self.request.user.pk)\n\n @property\n def client_id(self):\n return self.request.POST.get(\"X-Upload-ID\")\n\n @property\n def range_header(self):\n return 
self.request.META.get(\"HTTP_CONTENT_RANGE\")\n\n @property\n def range_match(self):\n return re.match(\n r\"bytes (?P<start>[0-9]{1,32})-(?P<end>[0-9]{1,32})/(?P<length>\\*|[0-9]{1,32})\",\n self.range_header,\n )\n\n def _handle_file(self, uploaded_file):\n if \"HTTP_CONTENT_RANGE\" in self.request.META:\n start_byte = int(self.range_match.group(\"start\"))\n end_byte = int(self.range_match.group(\"end\"))\n if (self.range_match.group(\"length\") is None) or (\n self.range_match.group(\"length\") == \"*\"\n ):\n total_size = None\n else:\n total_size = int(self.range_match.group(\"length\"))\n else:\n start_byte = 0\n end_byte = uploaded_file.size - 1\n total_size = uploaded_file.size\n\n return {\n \"client_id\": self.client_id,\n \"end_byte\": end_byte,\n \"file\": uploaded_file,\n \"filename\": uploaded_file.name,\n \"start_byte\": start_byte if start_byte is not None else 0,\n \"timeout\": now() + timedelta(hours=6),\n \"total_size\": total_size,\n \"user_pk_str\": self.user_pk_str,\n }\n", "path": "app/grandchallenge/jqfileupload/views.py"}]} | 1,495 | 381 |
gh_patches_debug_23282 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-2170 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: Can't pickle local object 'build_dataloader.<locals>.worker_init_fn'
Recently, I updated PyTorch to 1.4. When running `tools/dist_train.sh submitted/faster_giou_train_config.py 1 --validate --autoscale-lr --seed 512`, this error was raised: "AttributeError: Can't pickle local object 'build_dataloader.<locals>.worker_init_fn'". I found that the random seed causes this error; when running without `--seed`, it works fine.
I haven't run the script with the `--seed` option on PyTorch 1.3, so I don't know whether it's the PyTorch version.
I wonder if this is a bug or a feature. Thank you in advance!
</issue>
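The error is about picklability, not the seed itself: `worker_init_fn` is defined inside `build_dataloader`, and a local function cannot be pickled when DataLoader workers are spawned. A minimal sketch of the difference, using hypothetical names rather than the real mmdet signatures:

```python
import pickle
from functools import partial

def _init_fn(worker_id, num_workers, rank, seed):    # module level: picklable
    return num_workers * rank + worker_id + seed

def build(num_workers=2, rank=0, seed=512):
    def closure(worker_id):                           # local object: not picklable
        return num_workers * rank + worker_id + seed
    bound = partial(_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    return closure, bound

closure, bound = build()
print(len(pickle.dumps(bound)) > 0)                   # True
try:
    pickle.dumps(closure)
except (AttributeError, pickle.PicklingError) as exc:
    print(exc)                                        # Can't pickle local object ...
```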
<code>
[start of mmdet/datasets/loader/build_loader.py]
1 import platform
2 import random
3 from functools import partial
4
5 import numpy as np
6 from mmcv.parallel import collate
7 from mmcv.runner import get_dist_info
8 from torch.utils.data import DataLoader
9
10 from .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler
11
12 if platform.system() != 'Windows':
13 # https://github.com/pytorch/pytorch/issues/973
14 import resource
15 rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
16 resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
17
18
19 def build_dataloader(dataset,
20 imgs_per_gpu,
21 workers_per_gpu,
22 num_gpus=1,
23 dist=True,
24 shuffle=True,
25 seed=None,
26 **kwargs):
27 """Build PyTorch DataLoader.
28
29 In distributed training, each GPU/process has a dataloader.
30 In non-distributed training, there is only one dataloader for all GPUs.
31
32 Args:
33 dataset (Dataset): A PyTorch dataset.
34 imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of
35 each GPU.
36 workers_per_gpu (int): How many subprocesses to use for data loading
37 for each GPU.
38 num_gpus (int): Number of GPUs. Only used in non-distributed training.
39 dist (bool): Distributed training/test or not. Default: True.
40 shuffle (bool): Whether to shuffle the data at every epoch.
41 Default: True.
42 kwargs: any keyword argument to be used to initialize DataLoader
43
44 Returns:
45 DataLoader: A PyTorch dataloader.
46 """
47 rank, world_size = get_dist_info()
48 if dist:
49 # DistributedGroupSampler will definitely shuffle the data to satisfy
50 # that images on each GPU are in the same group
51 if shuffle:
52 sampler = DistributedGroupSampler(dataset, imgs_per_gpu,
53 world_size, rank)
54 else:
55 sampler = DistributedSampler(
56 dataset, world_size, rank, shuffle=False)
57 batch_size = imgs_per_gpu
58 num_workers = workers_per_gpu
59 else:
60 sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None
61 batch_size = num_gpus * imgs_per_gpu
62 num_workers = num_gpus * workers_per_gpu
63
64 def worker_init_fn(worker_id):
65 # The seed of each worker equals to
66 # num_worker * rank + worker_id + user_seed
67 worker_seed = num_workers * rank + worker_id + seed
68 np.random.seed(worker_seed)
69 random.seed(worker_seed)
70
71 data_loader = DataLoader(
72 dataset,
73 batch_size=batch_size,
74 sampler=sampler,
75 num_workers=num_workers,
76 collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
77 pin_memory=False,
78 worker_init_fn=worker_init_fn if seed is not None else None,
79 **kwargs)
80
81 return data_loader
82
[end of mmdet/datasets/loader/build_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmdet/datasets/loader/build_loader.py b/mmdet/datasets/loader/build_loader.py
--- a/mmdet/datasets/loader/build_loader.py
+++ b/mmdet/datasets/loader/build_loader.py
@@ -61,12 +61,9 @@
batch_size = num_gpus * imgs_per_gpu
num_workers = num_gpus * workers_per_gpu
- def worker_init_fn(worker_id):
- # The seed of each worker equals to
- # num_worker * rank + worker_id + user_seed
- worker_seed = num_workers * rank + worker_id + seed
- np.random.seed(worker_seed)
- random.seed(worker_seed)
+ init_fn = partial(
+ worker_init_fn, num_workers=num_workers, rank=rank,
+ seed=seed) if seed is not None else None
data_loader = DataLoader(
dataset,
@@ -75,7 +72,15 @@
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),
pin_memory=False,
- worker_init_fn=worker_init_fn if seed is not None else None,
+ worker_init_fn=init_fn,
**kwargs)
return data_loader
+
+
+def worker_init_fn(worker_id, num_workers, rank, seed):
+ # The seed of each worker equals to
+ # num_worker * rank + worker_id + user_seed
+ worker_seed = num_workers * rank + worker_id + seed
+ np.random.seed(worker_seed)
+ random.seed(worker_seed)
| {"golden_diff": "diff --git a/mmdet/datasets/loader/build_loader.py b/mmdet/datasets/loader/build_loader.py\n--- a/mmdet/datasets/loader/build_loader.py\n+++ b/mmdet/datasets/loader/build_loader.py\n@@ -61,12 +61,9 @@\n batch_size = num_gpus * imgs_per_gpu\n num_workers = num_gpus * workers_per_gpu\n \n- def worker_init_fn(worker_id):\n- # The seed of each worker equals to\n- # num_worker * rank + worker_id + user_seed\n- worker_seed = num_workers * rank + worker_id + seed\n- np.random.seed(worker_seed)\n- random.seed(worker_seed)\n+ init_fn = partial(\n+ worker_init_fn, num_workers=num_workers, rank=rank,\n+ seed=seed) if seed is not None else None\n \n data_loader = DataLoader(\n dataset,\n@@ -75,7 +72,15 @@\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),\n pin_memory=False,\n- worker_init_fn=worker_init_fn if seed is not None else None,\n+ worker_init_fn=init_fn,\n **kwargs)\n \n return data_loader\n+\n+\n+def worker_init_fn(worker_id, num_workers, rank, seed):\n+ # The seed of each worker equals to\n+ # num_worker * rank + worker_id + user_seed\n+ worker_seed = num_workers * rank + worker_id + seed\n+ np.random.seed(worker_seed)\n+ random.seed(worker_seed)\n", "issue": "AttributeError: Can't pickle local object 'build_dataloader.<locals>.worker_init_fn'\nRecently, I update pytorch to 1.4. When running `tools/dist_train.sh submitted/faster_giou_train_config.py 1 --validate --autoscale-lr --seed 512`, one error raised: \"AttributeError: Can't pickle local object 'build_dataloader.<locals>.worker_init_fn'\". I found random seed caused this error, when running without `--seed`, it's ok.\r\n\r\nI haven't ran script with `--seed` option in pytorch 1.3, so i don't know if it's the pytorch verion.\r\n\r\nI wonder if this is a bug or a feature, Thank you in advance!\n", "before_files": [{"content": "import platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom torch.utils.data import DataLoader\n\nfrom .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))\n\n\ndef build_dataloader(dataset,\n imgs_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of\n each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n # DistributedGroupSampler will definitely shuffle the data to satisfy\n # that images on each GPU are in the same group\n if shuffle:\n sampler = DistributedGroupSampler(dataset, imgs_per_gpu,\n world_size, rank)\n else:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=False)\n batch_size = imgs_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = GroupSampler(dataset, imgs_per_gpu) if shuffle else None\n batch_size = num_gpus * imgs_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n def worker_init_fn(worker_id):\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=imgs_per_gpu),\n pin_memory=False,\n worker_init_fn=worker_init_fn if seed is not None else None,\n **kwargs)\n\n return data_loader\n", "path": "mmdet/datasets/loader/build_loader.py"}]} | 1,478 | 344 |
gh_patches_debug_25026 | rasdani/github-patches | git_diff | dask__distributed-5822 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
importing distributed runs 4 `git` subprocesses in CI (when installed with -e)
I noticed that tests that run a dask subprocess are often flakey on CI (especially so on low performance macos runners)
https://github.com/dask/distributed/runs/4922796526?check_suite_focus=true#step:12:1849
This is an example of a process taking more than 5 seconds to boot on a mac in `test_dask_worker::test_memory_limit`:
```pytb
Traceback (most recent call last):
File "/Users/runner/miniconda3/envs/dask-distributed/bin/dask-worker", line 33, in <module>
sys.exit(load_entry_point('distributed', 'console_scripts', 'dask-worker')())
File "/Users/runner/miniconda3/envs/dask-distributed/bin/dask-worker", line 25, in importlib_load_entry_point
return next(matches).load()
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/importlib/metadata.py", line 77, in load
module = import_module(match.group('module'))
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/Users/runner/work/distributed/distributed/distributed/__init__.py", line 49, in <module>
versions = get_versions()
File "/Users/runner/work/distributed/distributed/distributed/_version.py", line 534, in get_versions
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
File "/Users/runner/work/distributed/distributed/distributed/_version.py", line 265, in git_pieces_from_vcs
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
File "/Users/runner/work/distributed/distributed/distributed/_version.py", line 78, in run_command
p = subprocess.Popen(
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/subprocess.py", line 951, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/subprocess.py", line 1777, in _execute_child
part = os.read(errpipe_read, 50000)
KeyboardInterrupt
```
</issue>
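The subprocesses come from versioneer computing the version eagerly at import time in an editable install. A minimal sketch of deferring that work with a module-level `__getattr__` (PEP 562, Python 3.7+), shown standalone rather than as the real `distributed/__init__.py`:

```python
def __getattr__(name):
    if name == "__version__":
        from importlib.metadata import version   # resolved only on first access
        return version("distributed")
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```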
<code>
[start of distributed/__init__.py]
1 from . import config # isort:skip; load distributed configuration first
2 from . import widgets # isort:skip; load distributed widgets second
3 import dask
4 from dask.config import config # type: ignore
5
6 from ._version import get_versions
7 from .actor import Actor, BaseActorFuture
8 from .client import (
9 Client,
10 CompatibleExecutor,
11 Executor,
12 Future,
13 as_completed,
14 default_client,
15 fire_and_forget,
16 futures_of,
17 get_task_metadata,
18 get_task_stream,
19 performance_report,
20 wait,
21 )
22 from .core import Status, connect, rpc
23 from .deploy import Adaptive, LocalCluster, SpecCluster, SSHCluster
24 from .diagnostics.plugin import (
25 Environ,
26 NannyPlugin,
27 PipInstall,
28 SchedulerPlugin,
29 UploadDirectory,
30 UploadFile,
31 WorkerPlugin,
32 )
33 from .diagnostics.progressbar import progress
34 from .event import Event
35 from .lock import Lock
36 from .multi_lock import MultiLock
37 from .nanny import Nanny
38 from .pubsub import Pub, Sub
39 from .queues import Queue
40 from .scheduler import Scheduler
41 from .security import Security
42 from .semaphore import Semaphore
43 from .threadpoolexecutor import rejoin
44 from .utils import CancelledError, TimeoutError, sync
45 from .variable import Variable
46 from .worker import Reschedule, Worker, get_client, get_worker, print, secede, warn
47 from .worker_client import local_client, worker_client
48
49 versions = get_versions()
50 __version__ = versions["version"]
51 __git_revision__ = versions["full-revisionid"]
52 del get_versions, versions
53
[end of distributed/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/__init__.py b/distributed/__init__.py
--- a/distributed/__init__.py
+++ b/distributed/__init__.py
@@ -1,10 +1,12 @@
from . import config # isort:skip; load distributed configuration first
from . import widgets # isort:skip; load distributed widgets second
+
+
import dask
from dask.config import config # type: ignore
from ._version import get_versions
-from .actor import Actor, BaseActorFuture
+from .actor import Actor, ActorFuture, BaseActorFuture
from .client import (
Client,
CompatibleExecutor,
@@ -46,7 +48,20 @@
from .worker import Reschedule, Worker, get_client, get_worker, print, secede, warn
from .worker_client import local_client, worker_client
-versions = get_versions()
-__version__ = versions["version"]
-__git_revision__ = versions["full-revisionid"]
-del get_versions, versions
+
+def __getattr__(name):
+ global __version__, __git_revision__
+
+ if name == "__version__":
+ from importlib.metadata import version
+
+ __version__ = version("distributed")
+ return __version__
+
+ if name == "__git_revision__":
+ from ._version import get_versions
+
+ __git_revision__ = get_versions()["full-revisionid"]
+ return __git_revision__
+
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
| {"golden_diff": "diff --git a/distributed/__init__.py b/distributed/__init__.py\n--- a/distributed/__init__.py\n+++ b/distributed/__init__.py\n@@ -1,10 +1,12 @@\n from . import config # isort:skip; load distributed configuration first\n from . import widgets # isort:skip; load distributed widgets second\n+\n+\n import dask\n from dask.config import config # type: ignore\n \n from ._version import get_versions\n-from .actor import Actor, BaseActorFuture\n+from .actor import Actor, ActorFuture, BaseActorFuture\n from .client import (\n Client,\n CompatibleExecutor,\n@@ -46,7 +48,20 @@\n from .worker import Reschedule, Worker, get_client, get_worker, print, secede, warn\n from .worker_client import local_client, worker_client\n \n-versions = get_versions()\n-__version__ = versions[\"version\"]\n-__git_revision__ = versions[\"full-revisionid\"]\n-del get_versions, versions\n+\n+def __getattr__(name):\n+ global __version__, __git_revision__\n+\n+ if name == \"__version__\":\n+ from importlib.metadata import version\n+\n+ __version__ = version(\"distributed\")\n+ return __version__\n+\n+ if name == \"__git_revision__\":\n+ from ._version import get_versions\n+\n+ __git_revision__ = get_versions()[\"full-revisionid\"]\n+ return __git_revision__\n+\n+ raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n", "issue": "importing distributed runs 4 `git` subprocesses in CI (when installed with -e)\nI noticed that tests that run a dask subprocess are often flakey on CI (especially so on low performance macos runners)\r\n\r\nhttps://github.com/dask/distributed/runs/4922796526?check_suite_focus=true#step:12:1849\r\nThis is an example of a process taking more than 5 seconds to boot on a mac in `test_dask_worker::test_memory_limit`:\r\n\r\n```pytb\r\nTraceback (most recent call last):\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/bin/dask-worker\", line 33, in <module>\r\n sys.exit(load_entry_point('distributed', 'console_scripts', 'dask-worker')())\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/bin/dask-worker\", line 25, in importlib_load_entry_point\r\n return next(matches).load()\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/importlib/metadata.py\", line 77, in load\r\n module = import_module(match.group('module'))\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 972, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\n File \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 972, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\n File \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 986, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 680, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 850, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 228, in 
_call_with_frames_removed\r\n File \"/Users/runner/work/distributed/distributed/distributed/__init__.py\", line 49, in <module>\r\n versions = get_versions()\r\n File \"/Users/runner/work/distributed/distributed/distributed/_version.py\", line 534, in get_versions\r\n pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)\r\n File \"/Users/runner/work/distributed/distributed/distributed/_version.py\", line 265, in git_pieces_from_vcs\r\n full_out, rc = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\r\n File \"/Users/runner/work/distributed/distributed/distributed/_version.py\", line 78, in run_command\r\n p = subprocess.Popen(\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/subprocess.py\", line 951, in __init__\r\n self._execute_child(args, executable, preexec_fn, close_fds,\r\n File \"/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/subprocess.py\", line 1777, in _execute_child\r\n part = os.read(errpipe_read, 50000)\r\nKeyboardInterrupt\r\n```\n", "before_files": [{"content": "from . import config # isort:skip; load distributed configuration first\nfrom . import widgets # isort:skip; load distributed widgets second\nimport dask\nfrom dask.config import config # type: ignore\n\nfrom ._version import get_versions\nfrom .actor import Actor, BaseActorFuture\nfrom .client import (\n Client,\n CompatibleExecutor,\n Executor,\n Future,\n as_completed,\n default_client,\n fire_and_forget,\n futures_of,\n get_task_metadata,\n get_task_stream,\n performance_report,\n wait,\n)\nfrom .core import Status, connect, rpc\nfrom .deploy import Adaptive, LocalCluster, SpecCluster, SSHCluster\nfrom .diagnostics.plugin import (\n Environ,\n NannyPlugin,\n PipInstall,\n SchedulerPlugin,\n UploadDirectory,\n UploadFile,\n WorkerPlugin,\n)\nfrom .diagnostics.progressbar import progress\nfrom .event import Event\nfrom .lock import Lock\nfrom .multi_lock import MultiLock\nfrom .nanny import Nanny\nfrom .pubsub import Pub, Sub\nfrom .queues import Queue\nfrom .scheduler import Scheduler\nfrom .security import Security\nfrom .semaphore import Semaphore\nfrom .threadpoolexecutor import rejoin\nfrom .utils import CancelledError, TimeoutError, sync\nfrom .variable import Variable\nfrom .worker import Reschedule, Worker, get_client, get_worker, print, secede, warn\nfrom .worker_client import local_client, worker_client\n\nversions = get_versions()\n__version__ = versions[\"version\"]\n__git_revision__ = versions[\"full-revisionid\"]\ndel get_versions, versions\n", "path": "distributed/__init__.py"}]} | 1,880 | 340 |
gh_patches_debug_19677 | rasdani/github-patches | git_diff | conda__conda-7178 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Channel pins in "environment.yaml" files are not saved to package specs
Channel pins specified in environment files are not respected. For example, run ```conda env create``` with this environment file:
```yaml
name: channel-not-written-to-user-specs
dependencies:
- defaults::six
```
If we look at conda-meta/six*.json we can see that the channel pin has not been added to specs. Where we should read ```"requested_spec": "defaults::six"```, we only find ```"requested_spec": "six"```.
This is with conda 4.4.0rc2.
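A quick way to confirm what got recorded (a minimal sketch; the environment prefix is a placeholder for wherever `conda env create` put the environment):
```python
import glob
import json

# Placeholder prefix -- point this at the environment created from the YAML above.
prefix = "/opt/conda/envs/channel-not-written-to-user-specs"

for path in glob.glob(prefix + "/conda-meta/six*.json"):
    with open(path) as fh:
        meta = json.load(fh)
    # Expect "defaults::six" here; currently the channel part is missing.
    print(path, meta.get("requested_spec"))
```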
</issue>
<code>
[start of conda_env/installers/conda.py]
1 from __future__ import absolute_import
2
3 from os.path import basename
4
5 from conda._vendor.boltons.setutils import IndexedSet
6 from conda.base.context import context
7 from conda.core.solve import Solver
8 from conda.models.channel import Channel, prioritize_channels
9
10
11 def install(prefix, specs, args, env, *_, **kwargs):
12 # TODO: support all various ways this happens
13 # Including 'nodefaults' in the channels list disables the defaults
14 new_specs = []
15 channel_urls = set()
16 for elem in specs:
17 if "::" in elem:
18 channel_urls.add(elem.split("::")[0])
19 new_specs.append(elem.split("::")[-1])
20 else:
21 new_specs.append(elem)
22 specs = new_specs
23 channel_urls = list(channel_urls)
24 # TODO: support all various ways this happens
25 # Including 'nodefaults' in the channels list disables the defaults
26 channel_urls = channel_urls + [chan for chan in env.channels if chan != 'nodefaults']
27 if 'nodefaults' not in env.channels:
28 channel_urls.extend(context.channels)
29 _channel_priority_map = prioritize_channels(channel_urls)
30
31 channels = IndexedSet(Channel(url) for url in _channel_priority_map)
32 subdirs = IndexedSet(basename(url) for url in _channel_priority_map)
33
34 solver = Solver(prefix, channels, subdirs, specs_to_add=specs)
35 unlink_link_transaction = solver.solve_for_transaction(prune=getattr(args, 'prune', False))
36
37 pfe = unlink_link_transaction._get_pfe()
38 pfe.execute()
39 unlink_link_transaction.execute()
40
[end of conda_env/installers/conda.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_env/installers/conda.py b/conda_env/installers/conda.py
--- a/conda_env/installers/conda.py
+++ b/conda_env/installers/conda.py
@@ -11,19 +11,8 @@
def install(prefix, specs, args, env, *_, **kwargs):
# TODO: support all various ways this happens
# Including 'nodefaults' in the channels list disables the defaults
- new_specs = []
- channel_urls = set()
- for elem in specs:
- if "::" in elem:
- channel_urls.add(elem.split("::")[0])
- new_specs.append(elem.split("::")[-1])
- else:
- new_specs.append(elem)
- specs = new_specs
- channel_urls = list(channel_urls)
- # TODO: support all various ways this happens
- # Including 'nodefaults' in the channels list disables the defaults
- channel_urls = channel_urls + [chan for chan in env.channels if chan != 'nodefaults']
+ channel_urls = [chan for chan in env.channels if chan != 'nodefaults']
+
if 'nodefaults' not in env.channels:
channel_urls.extend(context.channels)
_channel_priority_map = prioritize_channels(channel_urls)
| {"golden_diff": "diff --git a/conda_env/installers/conda.py b/conda_env/installers/conda.py\n--- a/conda_env/installers/conda.py\n+++ b/conda_env/installers/conda.py\n@@ -11,19 +11,8 @@\n def install(prefix, specs, args, env, *_, **kwargs):\n # TODO: support all various ways this happens\n # Including 'nodefaults' in the channels list disables the defaults\n- new_specs = []\n- channel_urls = set()\n- for elem in specs:\n- if \"::\" in elem:\n- channel_urls.add(elem.split(\"::\")[0])\n- new_specs.append(elem.split(\"::\")[-1])\n- else:\n- new_specs.append(elem)\n- specs = new_specs\n- channel_urls = list(channel_urls)\n- # TODO: support all various ways this happens\n- # Including 'nodefaults' in the channels list disables the defaults\n- channel_urls = channel_urls + [chan for chan in env.channels if chan != 'nodefaults']\n+ channel_urls = [chan for chan in env.channels if chan != 'nodefaults']\n+\n if 'nodefaults' not in env.channels:\n channel_urls.extend(context.channels)\n _channel_priority_map = prioritize_channels(channel_urls)\n", "issue": "Channel pins in \"environment.yaml\" files are not saved to package specs\nChannel pins specified in environment files are not respected. For example,run ```conda env create``` with this environment file:\r\n\r\n```yaml\r\nname: channel-not-written-to-user-specs\r\n\r\ndependencies:\r\n - defaults::six\r\n```\r\n\r\nIf we look at conda-meta/six*.json we can see that the channel pin has not been added to specs. Where we should read ```\"requested_spec\": \"defaults::six\"```, we only find ```\"requested_spec\": \"six\"```.\r\n\r\nThis is with conda 4.4.0rc2.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom os.path import basename\n\nfrom conda._vendor.boltons.setutils import IndexedSet\nfrom conda.base.context import context\nfrom conda.core.solve import Solver\nfrom conda.models.channel import Channel, prioritize_channels\n\n\ndef install(prefix, specs, args, env, *_, **kwargs):\n # TODO: support all various ways this happens\n # Including 'nodefaults' in the channels list disables the defaults\n new_specs = []\n channel_urls = set()\n for elem in specs:\n if \"::\" in elem:\n channel_urls.add(elem.split(\"::\")[0])\n new_specs.append(elem.split(\"::\")[-1])\n else:\n new_specs.append(elem)\n specs = new_specs\n channel_urls = list(channel_urls)\n # TODO: support all various ways this happens\n # Including 'nodefaults' in the channels list disables the defaults\n channel_urls = channel_urls + [chan for chan in env.channels if chan != 'nodefaults']\n if 'nodefaults' not in env.channels:\n channel_urls.extend(context.channels)\n _channel_priority_map = prioritize_channels(channel_urls)\n\n channels = IndexedSet(Channel(url) for url in _channel_priority_map)\n subdirs = IndexedSet(basename(url) for url in _channel_priority_map)\n\n solver = Solver(prefix, channels, subdirs, specs_to_add=specs)\n unlink_link_transaction = solver.solve_for_transaction(prune=getattr(args, 'prune', False))\n\n pfe = unlink_link_transaction._get_pfe()\n pfe.execute()\n unlink_link_transaction.execute()\n", "path": "conda_env/installers/conda.py"}]} | 1,092 | 283 |
gh_patches_debug_1923 | rasdani/github-patches | git_diff | ivy-llc__ivy-18252 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
broadcast_to
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/manipulation.py]
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6 from ivy.func_wrapper import (
7 with_unsupported_dtypes,
8 with_supported_dtypes,
9 )
10
11
12 @to_ivy_arrays_and_back
13 def reshape(x, shape):
14 return ivy.reshape(x, shape)
15
16
17 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
18 @to_ivy_arrays_and_back
19 def abs(x, name=None):
20 return ivy.abs(x)
21
22
23 absolute = abs
24
25
26 @to_ivy_arrays_and_back
27 def stack(x, axis=0, name=None):
28 return ivy.stack(x, axis=axis)
29
30
31 @with_unsupported_dtypes({"2.5.0 and below": ("int8", "int16")}, "paddle")
32 @to_ivy_arrays_and_back
33 def concat(x, axis, name=None):
34 return ivy.concat(x, axis=axis)
35
36
37 @with_unsupported_dtypes(
38 {"2.5.0 and below": ("int8", "uint8", "int16", "float16")},
39 "paddle",
40 )
41 @to_ivy_arrays_and_back
42 def tile(x, repeat_times, name=None):
43 return ivy.tile(x, repeats=repeat_times)
44
45
46 @with_unsupported_dtypes(
47 {"2.5.0 and below": ("int16", "complex64", "complex128")},
48 "paddle",
49 )
50 @to_ivy_arrays_and_back
51 def split(x, num_or_sections, axis=0, name=None):
52 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)
53
54
55 @with_unsupported_dtypes(
56 {"2.5.0 and below": ("float16", "bfloat16", "int8", "int16")},
57 "paddle",
58 )
59 @to_ivy_arrays_and_back
60 def squeeze(x, axis=None, name=None):
61 return ivy.squeeze(x, axis=axis)
62
63
64 @with_supported_dtypes(
65 {
66 "2.5.0 and below": (
67 "bool",
68 "float16",
69 "float32",
70 "float64",
71 "int32",
72 "int64",
73 "uint8",
74 )
75 },
76 "paddle",
77 )
78 @to_ivy_arrays_and_back
79 def cast(x, dtype):
80 return ivy.astype(x, dtype)
81
[end of ivy/functional/frontends/paddle/tensor/manipulation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py
--- a/ivy/functional/frontends/paddle/tensor/manipulation.py
+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py
@@ -78,3 +78,12 @@
@to_ivy_arrays_and_back
def cast(x, dtype):
return ivy.astype(x, dtype)
+
+
+@with_supported_dtypes(
+ {"2.5.0 and below": ("bool", "float32", "float64", "int32", "int64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def broadcast_to(x, shape, name=None):
+ return ivy.broadcast_to(x, shape)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py\n--- a/ivy/functional/frontends/paddle/tensor/manipulation.py\n+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py\n@@ -78,3 +78,12 @@\n @to_ivy_arrays_and_back\n def cast(x, dtype):\n return ivy.astype(x, dtype)\n+\n+\n+@with_supported_dtypes(\n+ {\"2.5.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def broadcast_to(x, shape, name=None):\n+ return ivy.broadcast_to(x, shape)\n", "issue": "broadcast_to\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py"}]} | 1,259 | 187 |
gh_patches_debug_25597 | rasdani/github-patches | git_diff | litestar-org__litestar-1404 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
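For reference, the adapter being attempted looks roughly like this (a sketch only; the exact ``FileSystemProtocol`` surface expected by Starlite is an assumption here, and ``my_package.assets`` is a made-up package name):
```python
from importlib_resources import files


class PackageFileSystem:
    """Loads files bundled as package data, even when the package ships zipped."""

    def __init__(self, package: str) -> None:
        # files() returns a Traversable that may live inside a zip archive,
        # so there is no on-disk directory for pydantic's DirectoryPath to validate.
        self._root = files(package)

    def open(self, path: str, mode: str = "rb"):
        # Resolves the resource through importlib_resources rather than the local filesystem.
        return self._root.joinpath(path).open(mode)


# Hypothetical usage that the current DirectoryPath validation rejects, because
# "assets" does not exist as a real directory on disk:
# StaticFilesConfig(path="/static", directories=["assets"], file_system=PackageFileSystem("my_package.assets"))
```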
</issue>
<code>
[start of docs/examples/stores/registry_configure_integrations.py]
1 from pathlib import Path
2
3 from starlite import Starlite
4 from starlite.middleware.session.server_side import ServerSideSessionConfig
5 from starlite.stores.file import FileStore
6 from starlite.stores.redis import RedisStore
7
8 app = Starlite(
9 stores={
10 "sessions": RedisStore.with_client(),
11 "request_cache": FileStore(Path("request-cache")),
12 },
13 middleware=[ServerSideSessionConfig().middleware],
14 )
15
[end of docs/examples/stores/registry_configure_integrations.py]
[start of starlite/config/response_cache.py]
1 from __future__ import annotations
2
3 from dataclasses import dataclass, field
4 from typing import TYPE_CHECKING, Any
5 from urllib.parse import urlencode
6
7 __all__ = ("ResponseCacheConfig", "default_cache_key_builder")
8
9
10 if TYPE_CHECKING:
11 from starlite import Starlite
12 from starlite.connection import Request
13 from starlite.stores.base import Store
14 from starlite.types import CacheKeyBuilder
15
16
17 def default_cache_key_builder(request: Request[Any, Any, Any]) -> str:
18 """Given a request object, returns a cache key by combining the path with the sorted query params.
19
20 Args:
21 request: request used to generate cache key.
22
23 Returns:
24 A combination of url path and query parameters
25 """
26 query_params: list[tuple[str, Any]] = list(request.query_params.dict().items())
27 query_params.sort(key=lambda x: x[0])
28 return request.url.path + urlencode(query_params, doseq=True)
29
30
31 @dataclass
32 class ResponseCacheConfig:
33 """Configuration for response caching.
34
35 To enable response caching, pass an instance of this class to :class:`Starlite <.app.Starlite>` using the
36 ``response_cache_config`` key.
37 """
38
39 default_expiration: int = field(default=60)
40 """Default cache expiration in seconds."""
41 key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)
42 """:class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. Defaults to :func:`default_cache_key_builder`."""
43 store: str = "request_cache"
44 """Name of the :class:`Store <.stores.base.Store>` to use."""
45
46 def get_store_from_app(self, app: Starlite) -> Store:
47 """Get the store defined in :attr:`store` from an :class:`Starlite <.app.Starlite>` instance."""
48 return app.stores.get(self.store)
49
[end of starlite/config/response_cache.py]
[start of docs/examples/stores/registry_default_factory_namespacing.py]
1 from starlite import Starlite, get
2 from starlite.middleware.rate_limit import RateLimitConfig
3 from starlite.middleware.session.server_side import ServerSideSessionConfig
4 from starlite.stores.redis import RedisStore
5 from starlite.stores.registry import StoreRegistry
6
7 root_store = RedisStore.with_client()
8
9
10 @get(cache=True)
11 def cached_handler() -> str:
12 # this will use app.stores.get("request_cache")
13 return "Hello, world!"
14
15
16 app = Starlite(
17 [cached_handler],
18 stores=StoreRegistry(default_factory=root_store.with_namespace),
19 middleware=[
20 RateLimitConfig(("second", 1)).middleware,
21 ServerSideSessionConfig().middleware,
22 ],
23 )
24
[end of docs/examples/stores/registry_default_factory_namespacing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/stores/registry_configure_integrations.py b/docs/examples/stores/registry_configure_integrations.py
--- a/docs/examples/stores/registry_configure_integrations.py
+++ b/docs/examples/stores/registry_configure_integrations.py
@@ -8,7 +8,7 @@
app = Starlite(
stores={
"sessions": RedisStore.with_client(),
- "request_cache": FileStore(Path("request-cache")),
+ "response_cache": FileStore(Path("response-cache")),
},
middleware=[ServerSideSessionConfig().middleware],
)
diff --git a/docs/examples/stores/registry_default_factory_namespacing.py b/docs/examples/stores/registry_default_factory_namespacing.py
--- a/docs/examples/stores/registry_default_factory_namespacing.py
+++ b/docs/examples/stores/registry_default_factory_namespacing.py
@@ -9,7 +9,7 @@
@get(cache=True)
def cached_handler() -> str:
- # this will use app.stores.get("request_cache")
+ # this will use app.stores.get("response_cache")
return "Hello, world!"
diff --git a/starlite/config/response_cache.py b/starlite/config/response_cache.py
--- a/starlite/config/response_cache.py
+++ b/starlite/config/response_cache.py
@@ -40,7 +40,7 @@
"""Default cache expiration in seconds."""
key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)
""":class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. Defaults to :func:`default_cache_key_builder`."""
- store: str = "request_cache"
+ store: str = "response_cache"
"""Name of the :class:`Store <.stores.base.Store>` to use."""
def get_store_from_app(self, app: Starlite) -> Store:
| {"golden_diff": "diff --git a/docs/examples/stores/registry_configure_integrations.py b/docs/examples/stores/registry_configure_integrations.py\n--- a/docs/examples/stores/registry_configure_integrations.py\n+++ b/docs/examples/stores/registry_configure_integrations.py\n@@ -8,7 +8,7 @@\n app = Starlite(\n stores={\n \"sessions\": RedisStore.with_client(),\n- \"request_cache\": FileStore(Path(\"request-cache\")),\n+ \"response_cache\": FileStore(Path(\"response-cache\")),\n },\n middleware=[ServerSideSessionConfig().middleware],\n )\ndiff --git a/docs/examples/stores/registry_default_factory_namespacing.py b/docs/examples/stores/registry_default_factory_namespacing.py\n--- a/docs/examples/stores/registry_default_factory_namespacing.py\n+++ b/docs/examples/stores/registry_default_factory_namespacing.py\n@@ -9,7 +9,7 @@\n \n @get(cache=True)\n def cached_handler() -> str:\n- # this will use app.stores.get(\"request_cache\")\n+ # this will use app.stores.get(\"response_cache\")\n return \"Hello, world!\"\n \n \ndiff --git a/starlite/config/response_cache.py b/starlite/config/response_cache.py\n--- a/starlite/config/response_cache.py\n+++ b/starlite/config/response_cache.py\n@@ -40,7 +40,7 @@\n \"\"\"Default cache expiration in seconds.\"\"\"\n key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)\n \"\"\":class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. Defaults to :func:`default_cache_key_builder`.\"\"\"\n- store: str = \"request_cache\"\n+ store: str = \"response_cache\"\n \"\"\"Name of the :class:`Store <.stores.base.Store>` to use.\"\"\"\n \n def get_store_from_app(self, app: Starlite) -> Store:\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from pathlib import Path\n\nfrom starlite import Starlite\nfrom starlite.middleware.session.server_side import ServerSideSessionConfig\nfrom starlite.stores.file import FileStore\nfrom starlite.stores.redis import RedisStore\n\napp = Starlite(\n stores={\n \"sessions\": RedisStore.with_client(),\n \"request_cache\": FileStore(Path(\"request-cache\")),\n },\n middleware=[ServerSideSessionConfig().middleware],\n)\n", "path": "docs/examples/stores/registry_configure_integrations.py"}, {"content": "from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Any\nfrom urllib.parse import urlencode\n\n__all__ = (\"ResponseCacheConfig\", \"default_cache_key_builder\")\n\n\nif TYPE_CHECKING:\n from starlite import Starlite\n from starlite.connection import Request\n from starlite.stores.base import Store\n from starlite.types import CacheKeyBuilder\n\n\ndef default_cache_key_builder(request: Request[Any, Any, Any]) -> str:\n \"\"\"Given a request object, returns a cache key by combining the path with the sorted query params.\n\n Args:\n request: request used to generate cache key.\n\n Returns:\n A combination of url path and query parameters\n \"\"\"\n query_params: list[tuple[str, Any]] = list(request.query_params.dict().items())\n query_params.sort(key=lambda x: x[0])\n return request.url.path + urlencode(query_params, doseq=True)\n\n\n@dataclass\nclass ResponseCacheConfig:\n \"\"\"Configuration for response caching.\n\n To enable response caching, pass an instance of this class to :class:`Starlite <.app.Starlite>` using the\n ``response_cache_config`` key.\n \"\"\"\n\n default_expiration: int = field(default=60)\n \"\"\"Default cache expiration in seconds.\"\"\"\n key_builder: CacheKeyBuilder = field(default=default_cache_key_builder)\n \"\"\":class:`CacheKeyBuilder <.types.CacheKeyBuilder>`. Defaults to :func:`default_cache_key_builder`.\"\"\"\n store: str = \"request_cache\"\n \"\"\"Name of the :class:`Store <.stores.base.Store>` to use.\"\"\"\n\n def get_store_from_app(self, app: Starlite) -> Store:\n \"\"\"Get the store defined in :attr:`store` from an :class:`Starlite <.app.Starlite>` instance.\"\"\"\n return app.stores.get(self.store)\n", "path": "starlite/config/response_cache.py"}, {"content": "from starlite import Starlite, get\nfrom starlite.middleware.rate_limit import RateLimitConfig\nfrom starlite.middleware.session.server_side import ServerSideSessionConfig\nfrom starlite.stores.redis import RedisStore\nfrom starlite.stores.registry import StoreRegistry\n\nroot_store = RedisStore.with_client()\n\n\n@get(cache=True)\ndef cached_handler() -> str:\n # this will use app.stores.get(\"request_cache\")\n return \"Hello, world!\"\n\n\napp = Starlite(\n [cached_handler],\n stores=StoreRegistry(default_factory=root_store.with_namespace),\n middleware=[\n RateLimitConfig((\"second\", 1)).middleware,\n ServerSideSessionConfig().middleware,\n ],\n)\n", "path": "docs/examples/stores/registry_default_factory_namespacing.py"}]} | 1,558 | 395 |
gh_patches_debug_12286 | rasdani/github-patches | git_diff | comic__grand-challenge.org-2551 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flatpages and Redirects broken when using ASGI
Examples:
- https://grand-challenge.org/about/ (flatpage, exists)
- https://grand-challenge.org/about (should redirect to https://grand-challenge.org/about/)
- https://parse2022.grand-challenge.org/Participation (should redirect to https://parse2022.grand-challenge.org/Participation/)
- https://parse2022.grand-challenge.org/gfsdfgdfdsg (should redirect to https://parse2022.grand-challenge.org/gfsdfgdfdsg/, which should 404).
Error occurs in the clickjacking middleware:
https://sentry.io/organizations/grand-challenge/issues/3374906811/?project=303639&query=is%3Aignored
</issue>
<code>
[start of app/grandchallenge/core/middleware.py]
1 from allauth_2fa.middleware import BaseRequire2FAMiddleware
2 from django.urls import Resolver404, get_resolver
3 from django.utils.deprecation import MiddlewareMixin
4
5
6 class RequireStaffAndSuperuser2FAMiddleware(BaseRequire2FAMiddleware):
7 def require_2fa(self, request):
8 # Staff users and superusers are required to have 2FA.
9 return request.user.is_staff or request.user.is_superuser
10
11
12 class TwoFactorMiddleware(MiddlewareMixin):
13 """
14 Reset the login flow if another page is loaded halfway through the login.
15 (I.e. if the user has logged in with a username/password, but not yet
16 entered their two-factor credentials.) This makes sure a user does not stay
17 half logged in by mistake.
18 """
19
20 def __init__(self, get_response):
21 self.get_response = get_response
22
23 def process_request(self, request):
24 try:
25 match = get_resolver(request.urlconf).resolve(request.path)
26 if (
27 match
28 and not match.url_name
29 or not match.url_name.startswith("two-factor-authenticate")
30 ):
31 try:
32 del request.session["allauth_2fa_user_id"]
33 except KeyError:
34 pass
35 except Resolver404:
36 return self.get_response(request)
37
[end of app/grandchallenge/core/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/core/middleware.py b/app/grandchallenge/core/middleware.py
--- a/app/grandchallenge/core/middleware.py
+++ b/app/grandchallenge/core/middleware.py
@@ -17,9 +17,6 @@
half logged in by mistake.
"""
- def __init__(self, get_response):
- self.get_response = get_response
-
def process_request(self, request):
try:
match = get_resolver(request.urlconf).resolve(request.path)
@@ -33,4 +30,4 @@
except KeyError:
pass
except Resolver404:
- return self.get_response(request)
+ pass
| {"golden_diff": "diff --git a/app/grandchallenge/core/middleware.py b/app/grandchallenge/core/middleware.py\n--- a/app/grandchallenge/core/middleware.py\n+++ b/app/grandchallenge/core/middleware.py\n@@ -17,9 +17,6 @@\n half logged in by mistake.\r\n \"\"\"\r\n \r\n- def __init__(self, get_response):\r\n- self.get_response = get_response\r\n-\r\n def process_request(self, request):\r\n try:\r\n match = get_resolver(request.urlconf).resolve(request.path)\r\n@@ -33,4 +30,4 @@\n except KeyError:\r\n pass\r\n except Resolver404:\r\n- return self.get_response(request)\r\n+ pass\n", "issue": "Flatpages and Redirects broken when using ASGI\nExamples:\r\n\r\n- https://grand-challenge.org/about/ (flatpage, exists)\r\n- https://grand-challenge.org/about (should redirect to https://grand-challenge.org/about/)\r\n- https://parse2022.grand-challenge.org/Participation (should redirect to https://parse2022.grand-challenge.org/Participation/)\r\n- https://parse2022.grand-challenge.org/gfsdfgdfdsg (should redirect to https://parse2022.grand-challenge.org/gfsdfgdfdsg/, which should 404).\r\n\r\nError occurs in the clickjacking middleware:\r\n\r\nhttps://sentry.io/organizations/grand-challenge/issues/3374906811/?project=303639&query=is%3Aignored\n", "before_files": [{"content": "from allauth_2fa.middleware import BaseRequire2FAMiddleware\r\nfrom django.urls import Resolver404, get_resolver\r\nfrom django.utils.deprecation import MiddlewareMixin\r\n\r\n\r\nclass RequireStaffAndSuperuser2FAMiddleware(BaseRequire2FAMiddleware):\r\n def require_2fa(self, request):\r\n # Staff users and superusers are required to have 2FA.\r\n return request.user.is_staff or request.user.is_superuser\r\n\r\n\r\nclass TwoFactorMiddleware(MiddlewareMixin):\r\n \"\"\"\r\n Reset the login flow if another page is loaded halfway through the login.\r\n (I.e. if the user has logged in with a username/password, but not yet\r\n entered their two-factor credentials.) This makes sure a user does not stay\r\n half logged in by mistake.\r\n \"\"\"\r\n\r\n def __init__(self, get_response):\r\n self.get_response = get_response\r\n\r\n def process_request(self, request):\r\n try:\r\n match = get_resolver(request.urlconf).resolve(request.path)\r\n if (\r\n match\r\n and not match.url_name\r\n or not match.url_name.startswith(\"two-factor-authenticate\")\r\n ):\r\n try:\r\n del request.session[\"allauth_2fa_user_id\"]\r\n except KeyError:\r\n pass\r\n except Resolver404:\r\n return self.get_response(request)\r\n", "path": "app/grandchallenge/core/middleware.py"}]} | 1,066 | 150 |
gh_patches_debug_39502 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5311 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
toolstation spider now not returning any UK stores
The most recent run of the toolstation.py spider from 2023-05-15 has lost about 550 stores compared to the previous run from 2023-04-15. This corresponds to all the UK branches (on the toolstation.com website).
It looks like toolstation.com has changed its mapping provider (from Google to Woosmap, which ironically uses an OSM basemap) and as a result the machine readable store info formatting has changed. The ATP spider now fails to find the expected JS script fragment, throws an error, and doesn't return the branch. The .fr and .nl branches still use a Google map on their branch pages, so the spider still works for them (at least for now).
I think the data we need for the UK branches is still there in a structured form, see e.g. a UK branch page at https://www.toolstation.com/branches/aldridge , but it will need some custom code to parse it out.
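Something along these lines can pull that structured blob back out (a rough sketch; the `window.__NUXT__` variable and the payload layout are assumptions from eyeballing one branch page, and a real spider would still have to map it onto ATP fields):
```python
import re

import chompjs  # tolerant parser for JavaScript object literals
import requests

html = requests.get("https://www.toolstation.com/branches/aldridge", timeout=30).text

# The UK pages now embed branch data in the Nuxt state instead of the old
# Google Maps "var store = ..." snippet the spider matches on today.
match = re.search(r"window\.__NUXT__\s*=\s*(.+?);?\s*</script>", html, re.DOTALL)
if match:
    state = chompjs.parse_js_object(match.group(1))
    print(state)  # inspect this to find the branch name, coordinates and opening hours
```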
</issue>
<code>
[start of locations/spiders/toolstation.py]
1 import json
2 import re
3
4 import scrapy
5
6 from locations.dict_parser import DictParser
7
8
9 class ToolstationSpider(scrapy.spiders.SitemapSpider):
10 name = "toolstation"
11 item_attributes = {"brand": "Toolstation", "brand_wikidata": "Q7824103"}
12 sitemap_urls = [
13 "https://www.toolstation.com/sitemap/branches.xml",
14 "https://www.toolstation.fr/sitemap/branches.xml",
15 "https://www.toolstation.nl/sitemap/branches.xml",
16 ]
17
18 def parse(self, response):
19 pattern = re.compile(r"var store = (.*?)\n", re.MULTILINE | re.DOTALL)
20 store = json.loads(response.xpath('//script[contains(., "var store")]/text()').re(pattern)[0])[0]
21 item = DictParser.parse(store)
22 item["website"] = response.url
23 item["addr_full"] = store["address_text"].split("<br /><br />")[0]
24 yield item
25
[end of locations/spiders/toolstation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/toolstation.py b/locations/spiders/toolstation.py
--- a/locations/spiders/toolstation.py
+++ b/locations/spiders/toolstation.py
@@ -1,9 +1,12 @@
import json
import re
+import chompjs
import scrapy
from locations.dict_parser import DictParser
+from locations.hours import OpeningHours, day_range, sanitise_day
+from locations.spiders.vapestore_gb import clean_address
class ToolstationSpider(scrapy.spiders.SitemapSpider):
@@ -14,11 +17,64 @@
"https://www.toolstation.fr/sitemap/branches.xml",
"https://www.toolstation.nl/sitemap/branches.xml",
]
+ gm_pattern = re.compile(r"var store = (.*?)\n", re.MULTILINE | re.DOTALL)
+ params_pattern = re.compile(r"function\(([_$\w,\s]+)\)")
+ values_pattern = re.compile(r"}\((.+)\)\);")
+ stores_pattern = re.compile(r"data:(\[.+\]),fe")
def parse(self, response):
- pattern = re.compile(r"var store = (.*?)\n", re.MULTILINE | re.DOTALL)
- store = json.loads(response.xpath('//script[contains(., "var store")]/text()').re(pattern)[0])[0]
- item = DictParser.parse(store)
- item["website"] = response.url
- item["addr_full"] = store["address_text"].split("<br /><br />")[0]
- yield item
+ if js := response.xpath('//script[contains(., "var store")]/text()').get():
+ store = json.loads(re.search(self.gm_pattern, js).group(1))[0]
+ item = DictParser.parse(store)
+ item["website"] = response.url
+ item["addr_full"] = clean_address(store["address_text"].split("<br /><br />")[0])
+ yield item
+ elif js := response.xpath('//script[contains(text(), "__NUXT__")]/text()').get():
+ # stores is actually a JS function, so we have to parse the parameters and values
+ params = re.search(self.params_pattern, js).group(1).split(",")
+ values = chompjs.parse_js_object("[" + re.search(self.values_pattern, js).group(1) + "]")
+ args = {}
+ for i in range(0, len(params)):
+ args[params[i]] = values[i]
+
+ store = chompjs.parse_js_object(re.search(self.stores_pattern, js).group(1))[0]["branch"]
+ self.populate(store, args)
+
+ if store["status"] != 1:
+ return
+
+ item = DictParser.parse(store)
+ item["website"] = response.url
+ item["addr_full"] = store["address_text"]
+
+ item["opening_hours"] = OpeningHours()
+ for rule in store["opening_hours"]:
+ days, times = rule.split(": ", 1)
+ if "-" in days:
+ start_day, end_day = days.split("-")
+ else:
+ start_day = end_day = days
+ start_day = sanitise_day(start_day)
+ end_day = sanitise_day(end_day)
+ if start_day and end_day:
+ start_time, end_time = times.strip().split("-")
+ item["opening_hours"].add_days_range(
+ day_range(start_day, end_day), start_time, end_time, time_format="%H%M"
+ )
+
+ yield item
+
+ @staticmethod
+ def populate(data: dict, args: dict):
+ for key, value in data.items():
+ if isinstance(value, str):
+ if value in args:
+ data[key] = args[value]
+ elif isinstance(value, list):
+ for i, x in enumerate(value):
+ if isinstance(x, dict):
+ ToolstationSpider.populate(x, args)
+ elif x in args:
+ value[i] = args[x]
+ elif isinstance(value, dict):
+ ToolstationSpider.populate(value, args)
| {"golden_diff": "diff --git a/locations/spiders/toolstation.py b/locations/spiders/toolstation.py\n--- a/locations/spiders/toolstation.py\n+++ b/locations/spiders/toolstation.py\n@@ -1,9 +1,12 @@\n import json\n import re\n \n+import chompjs\n import scrapy\n \n from locations.dict_parser import DictParser\n+from locations.hours import OpeningHours, day_range, sanitise_day\n+from locations.spiders.vapestore_gb import clean_address\n \n \n class ToolstationSpider(scrapy.spiders.SitemapSpider):\n@@ -14,11 +17,64 @@\n \"https://www.toolstation.fr/sitemap/branches.xml\",\n \"https://www.toolstation.nl/sitemap/branches.xml\",\n ]\n+ gm_pattern = re.compile(r\"var store = (.*?)\\n\", re.MULTILINE | re.DOTALL)\n+ params_pattern = re.compile(r\"function\\(([_$\\w,\\s]+)\\)\")\n+ values_pattern = re.compile(r\"}\\((.+)\\)\\);\")\n+ stores_pattern = re.compile(r\"data:(\\[.+\\]),fe\")\n \n def parse(self, response):\n- pattern = re.compile(r\"var store = (.*?)\\n\", re.MULTILINE | re.DOTALL)\n- store = json.loads(response.xpath('//script[contains(., \"var store\")]/text()').re(pattern)[0])[0]\n- item = DictParser.parse(store)\n- item[\"website\"] = response.url\n- item[\"addr_full\"] = store[\"address_text\"].split(\"<br /><br />\")[0]\n- yield item\n+ if js := response.xpath('//script[contains(., \"var store\")]/text()').get():\n+ store = json.loads(re.search(self.gm_pattern, js).group(1))[0]\n+ item = DictParser.parse(store)\n+ item[\"website\"] = response.url\n+ item[\"addr_full\"] = clean_address(store[\"address_text\"].split(\"<br /><br />\")[0])\n+ yield item\n+ elif js := response.xpath('//script[contains(text(), \"__NUXT__\")]/text()').get():\n+ # stores is actually a JS function, so we have to parse the parameters and values\n+ params = re.search(self.params_pattern, js).group(1).split(\",\")\n+ values = chompjs.parse_js_object(\"[\" + re.search(self.values_pattern, js).group(1) + \"]\")\n+ args = {}\n+ for i in range(0, len(params)):\n+ args[params[i]] = values[i]\n+\n+ store = chompjs.parse_js_object(re.search(self.stores_pattern, js).group(1))[0][\"branch\"]\n+ self.populate(store, args)\n+\n+ if store[\"status\"] != 1:\n+ return\n+\n+ item = DictParser.parse(store)\n+ item[\"website\"] = response.url\n+ item[\"addr_full\"] = store[\"address_text\"]\n+\n+ item[\"opening_hours\"] = OpeningHours()\n+ for rule in store[\"opening_hours\"]:\n+ days, times = rule.split(\": \", 1)\n+ if \"-\" in days:\n+ start_day, end_day = days.split(\"-\")\n+ else:\n+ start_day = end_day = days\n+ start_day = sanitise_day(start_day)\n+ end_day = sanitise_day(end_day)\n+ if start_day and end_day:\n+ start_time, end_time = times.strip().split(\"-\")\n+ item[\"opening_hours\"].add_days_range(\n+ day_range(start_day, end_day), start_time, end_time, time_format=\"%H%M\"\n+ )\n+\n+ yield item\n+\n+ @staticmethod\n+ def populate(data: dict, args: dict):\n+ for key, value in data.items():\n+ if isinstance(value, str):\n+ if value in args:\n+ data[key] = args[value]\n+ elif isinstance(value, list):\n+ for i, x in enumerate(value):\n+ if isinstance(x, dict):\n+ ToolstationSpider.populate(x, args)\n+ elif x in args:\n+ value[i] = args[x]\n+ elif isinstance(value, dict):\n+ ToolstationSpider.populate(value, args)\n", "issue": "toolstation spider now not returning any UK stores\nThe most recent run of the toolstation.py spider from 2023-05-15 has lost about 550 stores compared to the previous run from 2023-04-15. 
This corresponds to all the UK branches (on the toolstation.com website).\r\n\r\nIt looks like toolstation.com has changed its mapping provider (from Google to Woosmap, which ironically uses an OSM basemap) and as a result the machine readable store info formatting has changed. The ATP spider now fails to find the expected JS script fragment, throws an error, and doesn't return the branch. The .fr and .nl branches still use a Google map on their branch pages, so the spider still works for them (at least for now).\r\n\r\nI think the data we need for the UK branches is still there in a structured form, see e.g. a UK branch page at https://www.toolstation.com/branches/aldridge , but it will need some custom code to parse it out.\n", "before_files": [{"content": "import json\nimport re\n\nimport scrapy\n\nfrom locations.dict_parser import DictParser\n\n\nclass ToolstationSpider(scrapy.spiders.SitemapSpider):\n name = \"toolstation\"\n item_attributes = {\"brand\": \"Toolstation\", \"brand_wikidata\": \"Q7824103\"}\n sitemap_urls = [\n \"https://www.toolstation.com/sitemap/branches.xml\",\n \"https://www.toolstation.fr/sitemap/branches.xml\",\n \"https://www.toolstation.nl/sitemap/branches.xml\",\n ]\n\n def parse(self, response):\n pattern = re.compile(r\"var store = (.*?)\\n\", re.MULTILINE | re.DOTALL)\n store = json.loads(response.xpath('//script[contains(., \"var store\")]/text()').re(pattern)[0])[0]\n item = DictParser.parse(store)\n item[\"website\"] = response.url\n item[\"addr_full\"] = store[\"address_text\"].split(\"<br /><br />\")[0]\n yield item\n", "path": "locations/spiders/toolstation.py"}]} | 1,018 | 915 |
gh_patches_debug_6240 | rasdani/github-patches | git_diff | hylang__hy-2554 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
An exemplar Hy program
When you have a new programming language, it really helps—for the purposes of advertising the language, of helping people learn how to use it, and of giving the language a broader and more practical test than unit tests—to have some kind of nontrivial program written in the language. I think I speak for many of us when I say that I write a lot of small programs in Hy, as well as a lot of not-so-small programs that are of little general interest (in my case, code for the data analysis of specific scientific studies). What I don't have is a program that is both not small and of general interest.
I propose we consider writing and maintaining an exemplar program. It doesn't have to be kept under the hylang organization and probably shouldn't be considered a responsibility of the Hy core team; it could be one person's passion project, so long as it's free software. It should be something that's useful to end users of some kind, rather than Hy programmers—we want to show something that is done in Hy, not something you can use to write other programs in Hy. It should offer something that doesn't already exist, rather than being a Hy rewrite (or stripped-down equivalent) of an existing program. And it shouldn't be too specialized. A text editor, paint program, or process manager isn't too specialized, whereas a program for managing reservations at a hotel or for designing aircraft is.
One genre of program that fits a lot of these criteria is games. [Rogue TV](https://github.com/kodiologist/rogue-tv) has the potential to be a good exemplar program, although it would need considerable work to get running on the latest Hy, and it's still missing a lot of content elements to be a good game. Also, Rogue TV is arguably too big and complex to be good for this purpose. Ironically, I quit development of it largely to work on Hy itself.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Set both `setup_requires` and `install_requires` with our
4 # dependencies, since we need to compile Hy files during setup. And
5 # put this as the first statement in the file so it's easy to parse
6 # out without executing the file.
7 requires = [
8 "funcparserlib ~= 1.0",
9 'astor>=0.8 ; python_version < "3.9"',
10 ]
11
12 import os
13
14 import fastentrypoints # Monkey-patches setuptools.
15 from get_version import __version__
16 from setuptools import find_packages, setup
17 from setuptools.command.install import install
18
19 os.chdir(os.path.split(os.path.abspath(__file__))[0])
20
21 PKG = "hy"
22
23 long_description = """Hy is a Lisp dialect that's embedded in Python.
24 Since Hy transforms its Lisp code into Python abstract syntax tree (AST)
25 objects, you have the whole beautiful world of Python at your fingertips,
26 in Lisp form."""
27
28
29 class install(install):
30 def run(self):
31 super().run()
32 import py_compile
33
34 import hy # for compile hooks
35
36 for path in set(self.get_outputs()):
37 if path.endswith(".hy"):
38 py_compile.compile(
39 path,
40 invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH,
41 )
42
43 setup(
44 name=PKG,
45 version=(
46 None
47 if __version__ == "unknown"
48 else __version__
49 ),
50 setup_requires=["wheel"] + requires,
51 install_requires=requires,
52 python_requires=">= 3.8, < 3.13",
53 entry_points={
54 "console_scripts": [
55 "hy = hy.cmdline:hy_main",
56 "hyc = hy.cmdline:hyc_main",
57 "hy2py = hy.cmdline:hy2py_main"
58 ]
59 },
60 packages=find_packages(exclude=["tests*"]),
61 package_data={
62 "": ["*.hy"],
63 },
64 data_files=[("get_version", ["get_version.py"])],
65 author="Paul Tagliamonte",
66 author_email="[email protected]",
67 long_description=long_description,
68 description="A Lisp dialect embedded in Python",
69 license="Expat",
70 url="http://hylang.org/",
71 platforms=["any"],
72 classifiers=[
73 "Development Status :: 4 - Beta",
74 "Intended Audience :: Developers",
75 "License :: DFSG approved",
76 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
77 "Operating System :: OS Independent",
78 "Programming Language :: Lisp",
79 "Programming Language :: Python",
80 "Programming Language :: Python :: 3",
81 "Programming Language :: Python :: 3.8",
82 "Programming Language :: Python :: 3.9",
83 "Programming Language :: Python :: 3.10",
84 "Programming Language :: Python :: 3.11",
85 "Programming Language :: Python :: 3.12",
86 "Programming Language :: Python :: Implementation :: PyPy",
87 "Environment :: WebAssembly :: Emscripten",
88 "Topic :: Software Development :: Code Generators",
89 "Topic :: Software Development :: Compilers",
90 "Topic :: Software Development :: Libraries",
91 ],
92 project_urls={
93 "Documentation": "https://docs.hylang.org/",
94 "Source": "https://github.com/hylang/hy",
95 },
96 cmdclass={
97 "install": install,
98 },
99 )
100
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -75,6 +75,7 @@
"License :: DFSG approved",
"License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
"Operating System :: OS Independent",
+ "Programming Language :: Hy",
"Programming Language :: Lisp",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -75,6 +75,7 @@\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n+ \"Programming Language :: Hy\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n", "issue": "An exemplar Hy program\nWhen you have a new programming language, it really helps\u2014for the purposes of advertising the language, of helping people learn how to use it, and of giving the language a broader and more practical test than unit tests\u2014to have some kind of nontrivial program written in the language. I think I speak for many of us when I say that write a lot of small programs in Hy, as well as a lot of not-so-small programs that are of little general interest (in my case, code for the data analysis of specific scientific studies). What I don't have is a program that is both not small and of general interest.\r\n\r\nI propose we consider writing and maintaining an exemplar program. It doesn't have to be kept under the hylang organization and probably shouldn't be considered a responsibility of the Hy core team; it could be one person's passion project, so long as it's free software. It should be something that's useful to end users of some kind, rather than Hy programmers\u2014we want to show something that is done in Hy, not something you can use to write other programs in Hy. It should offer something that doesn't already exist, rather than being a Hy rewrite (or stripped-down equivalent) of an existing program. And it shouldn't be too specialized. A text editor, paint program, or process manager isn't too specialized, whereas a program for managing reservations at a hotel or for designing aircraft is.\r\n\r\nOne genre of program that fits a lot of these criteria is games. [Rogue TV](https://github.com/kodiologist/rogue-tv) has the potential to be a good exemplar program, although it would need considerable work to get running on the latest Hy, and it's still missing a lot of content elements to be a good game. Also, Rogue TV is arguably too big and complex to be good for this purpose. Ironically, I quit development of it largely to work on Hy itself.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Set both `setup_requires` and `install_requires` with our\n# dependencies, since we need to compile Hy files during setup. 
And\n# put this as the first statement in the file so it's easy to parse\n# out without executing the file.\nrequires = [\n \"funcparserlib ~= 1.0\",\n 'astor>=0.8 ; python_version < \"3.9\"',\n]\n\nimport os\n\nimport fastentrypoints # Monkey-patches setuptools.\nfrom get_version import __version__\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Lisp dialect that's embedded in Python.\nSince Hy transforms its Lisp code into Python abstract syntax tree (AST)\nobjects, you have the whole beautiful world of Python at your fingertips,\nin Lisp form.\"\"\"\n\n\nclass install(install):\n def run(self):\n super().run()\n import py_compile\n\n import hy # for compile hooks\n\n for path in set(self.get_outputs()):\n if path.endswith(\".hy\"):\n py_compile.compile(\n path,\n invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH,\n )\n\nsetup(\n name=PKG,\n version=(\n None\n if __version__ == \"unknown\"\n else __version__\n ),\n setup_requires=[\"wheel\"] + requires,\n install_requires=requires,\n python_requires=\">= 3.8, < 3.13\",\n entry_points={\n \"console_scripts\": [\n \"hy = hy.cmdline:hy_main\",\n \"hyc = hy.cmdline:hyc_main\",\n \"hy2py = hy.cmdline:hy2py_main\"\n ]\n },\n packages=find_packages(exclude=[\"tests*\"]),\n package_data={\n \"\": [\"*.hy\"],\n },\n data_files=[(\"get_version\", [\"get_version.py\"])],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description=\"A Lisp dialect embedded in Python\",\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=[\"any\"],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Environment :: WebAssembly :: Emscripten\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ],\n project_urls={\n \"Documentation\": \"https://docs.hylang.org/\",\n \"Source\": \"https://github.com/hylang/hy\",\n },\n cmdclass={\n \"install\": install,\n },\n)\n", "path": "setup.py"}]} | 1,853 | 101 |
gh_patches_debug_15849 | rasdani/github-patches | git_diff | iterative__dvc-2693 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pull: wrong warning on pulling import stage
Script to reproduce:
```
mkdir test
cd test
git init
dvc init
dvc import https://github.com/iterative/example-get-started model.pkl
git add .
git commit -am "init"
cd ..
git clone test test1
cd test1
dvc pull
```
outputs only:
```
WARNING: DVC-file 'model.pkl.dvc' is locked. Its dependencies are not going to be checked out.
```
I think there should be no warning, especially considering that it has done the job - pulled the `model.pkl`. Like we discussed in #2667 there should be some statistics instead.
</issue>
<code>
[start of dvc/repo/checkout.py]
1 from __future__ import unicode_literals
2
3 import logging
4
5 from dvc.exceptions import CheckoutErrorSuggestGit, CheckoutError
6 from dvc.progress import Tqdm
7
8
9 logger = logging.getLogger(__name__)
10
11
12 def _cleanup_unused_links(self, all_stages):
13 used = [
14 out.fspath
15 for stage in all_stages
16 for out in stage.outs
17 if out.scheme == "local"
18 ]
19 self.state.remove_unused_links(used)
20
21
22 def get_all_files_numbers(stages):
23 return sum(stage.get_all_files_number() for stage in stages)
24
25
26 def _checkout(
27 self, targets=None, with_deps=False, force=False, recursive=False
28 ):
29 from dvc.stage import StageFileDoesNotExistError, StageFileBadNameError
30
31 stages = set()
32 targets = targets or [None]
33 for target in targets:
34 try:
35 new = self.collect(
36 target, with_deps=with_deps, recursive=recursive
37 )
38 stages.update(new)
39 except (StageFileDoesNotExistError, StageFileBadNameError) as exc:
40 if not target:
41 raise
42 raise CheckoutErrorSuggestGit(target, exc)
43
44 _cleanup_unused_links(self, self.stages)
45 total = get_all_files_numbers(stages)
46 if total == 0:
47 logger.info("Nothing to do")
48 failed = []
49 with Tqdm(
50 total=total, unit="file", desc="Checkout", disable=total == 0
51 ) as pbar:
52 for stage in stages:
53 if stage.locked:
54 logger.warning(
55 "DVC-file '{path}' is locked. Its dependencies are"
56 " not going to be checked out.".format(path=stage.relpath)
57 )
58
59 failed.extend(
60 stage.checkout(force=force, progress_callback=pbar.update_desc)
61 )
62 if failed:
63 raise CheckoutError(failed)
64
[end of dvc/repo/checkout.py]
[start of dvc/repo/pull.py]
1 from __future__ import unicode_literals
2
3 from . import locked
4
5
6 @locked
7 def pull(
8 self,
9 targets=None,
10 jobs=None,
11 remote=None,
12 all_branches=False,
13 with_deps=False,
14 all_tags=False,
15 force=False,
16 recursive=False,
17 ):
18 processed_files_count = self._fetch(
19 targets,
20 jobs,
21 remote=remote,
22 all_branches=all_branches,
23 all_tags=all_tags,
24 with_deps=with_deps,
25 recursive=recursive,
26 )
27 self._checkout(
28 targets=targets, with_deps=with_deps, force=force, recursive=recursive
29 )
30 return processed_files_count
31
[end of dvc/repo/pull.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/repo/checkout.py b/dvc/repo/checkout.py
--- a/dvc/repo/checkout.py
+++ b/dvc/repo/checkout.py
@@ -50,12 +50,6 @@
total=total, unit="file", desc="Checkout", disable=total == 0
) as pbar:
for stage in stages:
- if stage.locked:
- logger.warning(
- "DVC-file '{path}' is locked. Its dependencies are"
- " not going to be checked out.".format(path=stage.relpath)
- )
-
failed.extend(
stage.checkout(force=force, progress_callback=pbar.update_desc)
)
diff --git a/dvc/repo/pull.py b/dvc/repo/pull.py
--- a/dvc/repo/pull.py
+++ b/dvc/repo/pull.py
@@ -1,6 +1,11 @@
from __future__ import unicode_literals
-from . import locked
+import logging
+
+from dvc.repo import locked
+
+
+logger = logging.getLogger(__name__)
@locked
| {"golden_diff": "diff --git a/dvc/repo/checkout.py b/dvc/repo/checkout.py\n--- a/dvc/repo/checkout.py\n+++ b/dvc/repo/checkout.py\n@@ -50,12 +50,6 @@\n total=total, unit=\"file\", desc=\"Checkout\", disable=total == 0\n ) as pbar:\n for stage in stages:\n- if stage.locked:\n- logger.warning(\n- \"DVC-file '{path}' is locked. Its dependencies are\"\n- \" not going to be checked out.\".format(path=stage.relpath)\n- )\n-\n failed.extend(\n stage.checkout(force=force, progress_callback=pbar.update_desc)\n )\ndiff --git a/dvc/repo/pull.py b/dvc/repo/pull.py\n--- a/dvc/repo/pull.py\n+++ b/dvc/repo/pull.py\n@@ -1,6 +1,11 @@\n from __future__ import unicode_literals\n \n-from . import locked\n+import logging\n+\n+from dvc.repo import locked\n+\n+\n+logger = logging.getLogger(__name__)\n \n \n @locked\n", "issue": "pull: wrong warning on pulling import stage\nScript to reproduce:\r\n\r\n```\r\nmkdir test\r\ndvc import https://github.com/iterative/example-get-started model.pkl\r\ngit add .\r\ngit commit -am \"init\"\r\ncd ..\r\ngit clone test test1\r\ncd test1\r\ndvc pull\r\n```\r\n\r\noutputs only:\r\n\r\n```\r\nWARNING: DVC-file 'model.pkl.dvc' is locked. Its dependencies are not going to be checked out.\r\n```\r\n\r\nI think there should be no warning, especially considering that it has done the job - pulled the `model.pkl`. Like we discussed in #2667 there should be some statistics instead.\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\n\nfrom dvc.exceptions import CheckoutErrorSuggestGit, CheckoutError\nfrom dvc.progress import Tqdm\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _cleanup_unused_links(self, all_stages):\n used = [\n out.fspath\n for stage in all_stages\n for out in stage.outs\n if out.scheme == \"local\"\n ]\n self.state.remove_unused_links(used)\n\n\ndef get_all_files_numbers(stages):\n return sum(stage.get_all_files_number() for stage in stages)\n\n\ndef _checkout(\n self, targets=None, with_deps=False, force=False, recursive=False\n):\n from dvc.stage import StageFileDoesNotExistError, StageFileBadNameError\n\n stages = set()\n targets = targets or [None]\n for target in targets:\n try:\n new = self.collect(\n target, with_deps=with_deps, recursive=recursive\n )\n stages.update(new)\n except (StageFileDoesNotExistError, StageFileBadNameError) as exc:\n if not target:\n raise\n raise CheckoutErrorSuggestGit(target, exc)\n\n _cleanup_unused_links(self, self.stages)\n total = get_all_files_numbers(stages)\n if total == 0:\n logger.info(\"Nothing to do\")\n failed = []\n with Tqdm(\n total=total, unit=\"file\", desc=\"Checkout\", disable=total == 0\n ) as pbar:\n for stage in stages:\n if stage.locked:\n logger.warning(\n \"DVC-file '{path}' is locked. Its dependencies are\"\n \" not going to be checked out.\".format(path=stage.relpath)\n )\n\n failed.extend(\n stage.checkout(force=force, progress_callback=pbar.update_desc)\n )\n if failed:\n raise CheckoutError(failed)\n", "path": "dvc/repo/checkout.py"}, {"content": "from __future__ import unicode_literals\n\nfrom . 
import locked\n\n\n@locked\ndef pull(\n self,\n targets=None,\n jobs=None,\n remote=None,\n all_branches=False,\n with_deps=False,\n all_tags=False,\n force=False,\n recursive=False,\n):\n processed_files_count = self._fetch(\n targets,\n jobs,\n remote=remote,\n all_branches=all_branches,\n all_tags=all_tags,\n with_deps=with_deps,\n recursive=recursive,\n )\n self._checkout(\n targets=targets, with_deps=with_deps, force=force, recursive=recursive\n )\n return processed_files_count\n", "path": "dvc/repo/pull.py"}]} | 1,419 | 247 |
gh_patches_debug_22798 | rasdani/github-patches | git_diff | svthalia__concrexit-3184 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow people to delete albums without pushnotifications permissions
### Describe the bug
The paparazcie cannot delete albums because they don't (didn't; I temporarily gave them permissions) have delete permissions on message, scheduledmessage and facedetectionphoto.
### How to reproduce
Steps to reproduce the behaviour:
1. Have delete_album and delete_photo permission but no other delete permissions.
2. Try to delete an album
3. Get error screen telling you you need some more permissions.
### Expected behaviour
Cascade deletes are allowed when deleting an album or photo regardless of permissions on the related items.
### Additional context
The delete permissions are needed only for related models that have a ModelAdmin registered in the admin site. Models without an admin are ignored already by default.
Here the missing permissions are gathered: https://github.com/django/django/blob/7cc138a58f73c17f07cfaf459ef8e7677ac41ac0/django/contrib/admin/utils.py#LL147C8-L149C52.
We can probably drop them in `ModelAdmin.get_deleted_objects`.
As we split up some models (e.g. BlacklistedThabloidUser), there may be more admins that need something like this.
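For concreteness, something along these lines in `website/photos/admin.py` might be enough (untested sketch; the signature follows Django's `ModelAdmin.get_deleted_objects`):
```
class AlbumAdmin(admin.ModelAdmin):
    # ... existing configuration ...

    def get_deleted_objects(self, objs, request):
        deleted_objects, model_count, perms_needed, protected = super().get_deleted_objects(
            objs, request
        )
        # Drop the missing delete permissions on cascaded models (push
        # notification messages, face detection photos, ...): having
        # delete_album should be enough to cascade-delete them.
        return deleted_objects, model_count, set(), protected
```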
</issue>
<code>
[start of website/photos/admin.py]
1 from django.contrib import admin, messages
2 from django.db.models import Count
3 from django.dispatch import Signal
4 from django.utils.translation import gettext_lazy as _
5
6 from django_filepond_widget.fields import FilePondFile
7
8 from .forms import AlbumForm
9 from .models import Album, Like, Photo
10 from .services import extract_archive, save_photo
11
12 album_uploaded = Signal()
13
14
15 @admin.register(Album)
16 class AlbumAdmin(admin.ModelAdmin):
17 """Model for Album admin page."""
18
19 list_display = ("title", "date", "num_photos", "hidden", "shareable")
20 fields = (
21 "title",
22 "slug",
23 "date",
24 "event",
25 "hidden",
26 "shareable",
27 "album_archive",
28 "_cover",
29 )
30 search_fields = ("title", "date")
31 list_filter = ("hidden", "shareable")
32 date_hierarchy = "date"
33 prepopulated_fields = {
34 "slug": (
35 "date",
36 "title",
37 )
38 }
39 form = AlbumForm
40
41 def get_queryset(self, request):
42 """Get Albums and add the amount of photos as an annotation."""
43 return Album.objects.annotate(photos_count=Count("photo"))
44
45 def num_photos(self, obj):
46 """Pretty-print the number of photos."""
47 return obj.photos_count
48
49 num_photos.short_description = _("Number of photos")
50 num_photos.admin_order_field = "photos_count"
51
52 def save_model(self, request, obj, form, change):
53 """Save the new Album by extracting the archive."""
54 super().save_model(request, obj, form, change)
55
56 archive = form.cleaned_data.get("album_archive", None)
57 if archive is not None:
58 try:
59 extract_archive(request, obj, archive)
60 album_uploaded.send(sender=None, album=obj)
61 except Exception as e:
62 raise e
63 finally:
64 if isinstance(archive, FilePondFile):
65 archive.remove()
66
67 messages.add_message(
68 request,
69 messages.WARNING,
70 _("Full-sized photos will not be saved on the Thalia-website."),
71 )
72
73
74 class LikeInline(admin.StackedInline):
75 model = Like
76 extra = 0
77
78
79 @admin.register(Photo)
80 class PhotoAdmin(admin.ModelAdmin):
81 """Model for Photo admin page."""
82
83 list_display = (
84 "__str__",
85 "album",
86 "hidden",
87 "num_likes",
88 )
89 search_fields = ("file",)
90 list_filter = ("album", "hidden")
91 exclude = ("_digest",)
92
93 inlines = [
94 LikeInline,
95 ]
96
97 def save_model(self, request, obj, form, change):
98 """Save new Photo."""
99 super().save_model(request, obj, form, change)
100 if change and obj.original_file == obj.file.name:
101 return
102
103 if save_photo(obj, obj.file, obj.file.name):
104 messages.add_message(
105 request,
106 messages.WARNING,
107 _("Full-sized photos will not be saved on the Thalia-website."),
108 )
109 else:
110 messages.add_message(
111 request, messages.ERROR, _("This photo already exists in the album.")
112 )
113
[end of website/photos/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/photos/admin.py b/website/photos/admin.py
--- a/website/photos/admin.py
+++ b/website/photos/admin.py
@@ -70,6 +70,18 @@
_("Full-sized photos will not be saved on the Thalia-website."),
)
+ def get_deleted_objects(self, objs, request):
+ (
+ deleted_objects,
+ model_count,
+ perms_needed,
+ protected,
+ ) = super().get_deleted_objects(objs, request)
+
+ # Drop any missing delete permissions. If the user has `delete_album` permission,
+ # they should automatically be allowed to cascade e.g. related pushnotifications.
+ return deleted_objects, model_count, set(), protected
+
class LikeInline(admin.StackedInline):
model = Like
@@ -94,6 +106,16 @@
LikeInline,
]
+ def get_deleted_objects(self, objs, request):
+ (
+ deleted_objects,
+ model_count,
+ perms_needed,
+ protected,
+ ) = super().get_deleted_objects(objs, request)
+
+ return deleted_objects, model_count, set(), protected
+
def save_model(self, request, obj, form, change):
"""Save new Photo."""
super().save_model(request, obj, form, change)
| {"golden_diff": "diff --git a/website/photos/admin.py b/website/photos/admin.py\n--- a/website/photos/admin.py\n+++ b/website/photos/admin.py\n@@ -70,6 +70,18 @@\n _(\"Full-sized photos will not be saved on the Thalia-website.\"),\n )\n \n+ def get_deleted_objects(self, objs, request):\n+ (\n+ deleted_objects,\n+ model_count,\n+ perms_needed,\n+ protected,\n+ ) = super().get_deleted_objects(objs, request)\n+\n+ # Drop any missing delete permissions. If the user has `delete_album` permission,\n+ # they should automatically be allowed to cascade e.g. related pushnotifications.\n+ return deleted_objects, model_count, set(), protected\n+\n \n class LikeInline(admin.StackedInline):\n model = Like\n@@ -94,6 +106,16 @@\n LikeInline,\n ]\n \n+ def get_deleted_objects(self, objs, request):\n+ (\n+ deleted_objects,\n+ model_count,\n+ perms_needed,\n+ protected,\n+ ) = super().get_deleted_objects(objs, request)\n+\n+ return deleted_objects, model_count, set(), protected\n+\n def save_model(self, request, obj, form, change):\n \"\"\"Save new Photo.\"\"\"\n super().save_model(request, obj, form, change)\n", "issue": "Allow people to delete albums without pushnotifications permissions\n### Describe the bug\r\nThe paparazcie cannot delete albums because they don't (didn't, I temporarily gave them permissions) have delete permissions on message and scheduledmessage and facedetectionphoto.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Have delete_album and delete_photo permission but no other delete permissions.\r\n2. Try to delete an album\r\n3. Get error screen telling you you need some more permissions.\r\n\r\n### Expected behaviour\r\nCascade deletes are allowed when deleting an album or photo regardless of permissions on the related items.\r\n\r\n### Additional context\r\nThe delete permissions are needed only for related models that have a ModelAdmin registered in the admin site. Models without an admin are ignored already by default.\r\n\r\nHere the missing permissions are gathered: https://github.com/django/django/blob/7cc138a58f73c17f07cfaf459ef8e7677ac41ac0/django/contrib/admin/utils.py#LL147C8-L149C52. \r\n\r\nWe can probably drop them in `ModelAdmin.get_deleted_objects`.\r\n\r\nWith splitting up some models (e.g. BlacklistedThabloidUser, etc.) 
there may be more admins that need something like this.\n", "before_files": [{"content": "from django.contrib import admin, messages\nfrom django.db.models import Count\nfrom django.dispatch import Signal\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django_filepond_widget.fields import FilePondFile\n\nfrom .forms import AlbumForm\nfrom .models import Album, Like, Photo\nfrom .services import extract_archive, save_photo\n\nalbum_uploaded = Signal()\n\n\[email protected](Album)\nclass AlbumAdmin(admin.ModelAdmin):\n \"\"\"Model for Album admin page.\"\"\"\n\n list_display = (\"title\", \"date\", \"num_photos\", \"hidden\", \"shareable\")\n fields = (\n \"title\",\n \"slug\",\n \"date\",\n \"event\",\n \"hidden\",\n \"shareable\",\n \"album_archive\",\n \"_cover\",\n )\n search_fields = (\"title\", \"date\")\n list_filter = (\"hidden\", \"shareable\")\n date_hierarchy = \"date\"\n prepopulated_fields = {\n \"slug\": (\n \"date\",\n \"title\",\n )\n }\n form = AlbumForm\n\n def get_queryset(self, request):\n \"\"\"Get Albums and add the amount of photos as an annotation.\"\"\"\n return Album.objects.annotate(photos_count=Count(\"photo\"))\n\n def num_photos(self, obj):\n \"\"\"Pretty-print the number of photos.\"\"\"\n return obj.photos_count\n\n num_photos.short_description = _(\"Number of photos\")\n num_photos.admin_order_field = \"photos_count\"\n\n def save_model(self, request, obj, form, change):\n \"\"\"Save the new Album by extracting the archive.\"\"\"\n super().save_model(request, obj, form, change)\n\n archive = form.cleaned_data.get(\"album_archive\", None)\n if archive is not None:\n try:\n extract_archive(request, obj, archive)\n album_uploaded.send(sender=None, album=obj)\n except Exception as e:\n raise e\n finally:\n if isinstance(archive, FilePondFile):\n archive.remove()\n\n messages.add_message(\n request,\n messages.WARNING,\n _(\"Full-sized photos will not be saved on the Thalia-website.\"),\n )\n\n\nclass LikeInline(admin.StackedInline):\n model = Like\n extra = 0\n\n\[email protected](Photo)\nclass PhotoAdmin(admin.ModelAdmin):\n \"\"\"Model for Photo admin page.\"\"\"\n\n list_display = (\n \"__str__\",\n \"album\",\n \"hidden\",\n \"num_likes\",\n )\n search_fields = (\"file\",)\n list_filter = (\"album\", \"hidden\")\n exclude = (\"_digest\",)\n\n inlines = [\n LikeInline,\n ]\n\n def save_model(self, request, obj, form, change):\n \"\"\"Save new Photo.\"\"\"\n super().save_model(request, obj, form, change)\n if change and obj.original_file == obj.file.name:\n return\n\n if save_photo(obj, obj.file, obj.file.name):\n messages.add_message(\n request,\n messages.WARNING,\n _(\"Full-sized photos will not be saved on the Thalia-website.\"),\n )\n else:\n messages.add_message(\n request, messages.ERROR, _(\"This photo already exists in the album.\")\n )\n", "path": "website/photos/admin.py"}]} | 1,701 | 295 |
gh_patches_debug_23986 | rasdani/github-patches | git_diff | PokemonGoF__PokemonGo-Bot-4724 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FollowPath] position_update log frequency
position_update log entries for FollowPath appear too often, roughly every 1.5 seconds, with distance changes of anywhere from 1 to 5 meters depending on the random speed. It unnecessarily spams the terminal.
An interval of 5 seconds or 10 meters would be more reasonable.
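One possible approach (rough sketch, untested): throttle `emit_event` in `BaseTask` with a configurable minimum interval between log entries, e.g. a new `log_interval` task option:
```
import time

class BaseTask(object):
    def __init__(self, bot, config):
        # ... existing init ...
        self.last_log_time = time.time()

    def emit_event(self, event, sender=None, level='info', formatted='', data={}):
        if not sender:
            sender = self
        # Only emit if enough time has passed since the previous entry
        # ('log_interval' would default to 0, i.e. the current behaviour).
        if (time.time() - self.last_log_time) > self.config.get('log_interval', 0):
            self.last_log_time = time.time()
            self.bot.event_manager.emit(
                event,
                sender=sender,
                level=level,
                formatted=formatted,
                data=data,
            )
```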
</issue>
<code>
[start of pokemongo_bot/base_task.py]
1 import logging
2
3
4 class BaseTask(object):
5 TASK_API_VERSION = 1
6
7 def __init__(self, bot, config):
8 """
9
10 :param bot:
11 :type bot: pokemongo_bot.PokemonGoBot
12 :param config:
13 :return:
14 """
15 self.bot = bot
16 self.config = config
17 self._validate_work_exists()
18 self.logger = logging.getLogger(type(self).__name__)
19 self.enabled = config.get('enabled', True)
20 self.initialize()
21
22 def _validate_work_exists(self):
23 method = getattr(self, 'work', None)
24 if not method or not callable(method):
25 raise NotImplementedError('Missing "work" method')
26
27 def emit_event(self, event, sender=None, level='info', formatted='', data={}):
28 if not sender:
29 sender=self
30 self.bot.event_manager.emit(
31 event,
32 sender=sender,
33 level=level,
34 formatted=formatted,
35 data=data
36 )
37
38 def initialize(self):
39 pass
40
[end of pokemongo_bot/base_task.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pokemongo_bot/base_task.py b/pokemongo_bot/base_task.py
--- a/pokemongo_bot/base_task.py
+++ b/pokemongo_bot/base_task.py
@@ -1,5 +1,7 @@
import logging
+import time
+
class BaseTask(object):
TASK_API_VERSION = 1
@@ -17,6 +19,7 @@
self._validate_work_exists()
self.logger = logging.getLogger(type(self).__name__)
self.enabled = config.get('enabled', True)
+ self.last_log_time = time.time()
self.initialize()
def _validate_work_exists(self):
@@ -27,13 +30,17 @@
def emit_event(self, event, sender=None, level='info', formatted='', data={}):
if not sender:
sender=self
- self.bot.event_manager.emit(
- event,
- sender=sender,
- level=level,
- formatted=formatted,
- data=data
- )
+
+ # Print log only if X seconds are passed from last log
+ if (time.time() - self.last_log_time) > self.config.get('log_interval', 0):
+ self.last_log_time = time.time()
+ self.bot.event_manager.emit(
+ event,
+ sender=sender,
+ level=level,
+ formatted=formatted,
+ data=data
+ )
def initialize(self):
pass
| {"golden_diff": "diff --git a/pokemongo_bot/base_task.py b/pokemongo_bot/base_task.py\n--- a/pokemongo_bot/base_task.py\n+++ b/pokemongo_bot/base_task.py\n@@ -1,5 +1,7 @@\n import logging\n \n+import time\n+\n \n class BaseTask(object):\n TASK_API_VERSION = 1\n@@ -17,6 +19,7 @@\n self._validate_work_exists()\n self.logger = logging.getLogger(type(self).__name__)\n self.enabled = config.get('enabled', True)\n+ self.last_log_time = time.time()\n self.initialize()\n \n def _validate_work_exists(self):\n@@ -27,13 +30,17 @@\n def emit_event(self, event, sender=None, level='info', formatted='', data={}):\n if not sender:\n sender=self\n- self.bot.event_manager.emit(\n- event,\n- sender=sender,\n- level=level,\n- formatted=formatted,\n- data=data\n- )\n+\n+ # Print log only if X seconds are passed from last log\n+ if (time.time() - self.last_log_time) > self.config.get('log_interval', 0):\n+ self.last_log_time = time.time()\n+ self.bot.event_manager.emit(\n+ event,\n+ sender=sender,\n+ level=level,\n+ formatted=formatted,\n+ data=data\n+ )\n \n def initialize(self):\n pass\n", "issue": "[FollowPath] position_update log frequency\nposition_update log entries for FollowPath appear too often, like every 1.5 seconds, with distance changes from 1 meter to 5 meter depend on the random speed. It's kind of unnecessarily spam the terminal.\n\nAn interval of 5 seconds or 10 meter should be more relevant.\n\n", "before_files": [{"content": "import logging\n\n\nclass BaseTask(object):\n TASK_API_VERSION = 1\n\n def __init__(self, bot, config):\n \"\"\"\n\n :param bot:\n :type bot: pokemongo_bot.PokemonGoBot\n :param config:\n :return:\n \"\"\"\n self.bot = bot\n self.config = config\n self._validate_work_exists()\n self.logger = logging.getLogger(type(self).__name__)\n self.enabled = config.get('enabled', True)\n self.initialize()\n\n def _validate_work_exists(self):\n method = getattr(self, 'work', None)\n if not method or not callable(method):\n raise NotImplementedError('Missing \"work\" method')\n\n def emit_event(self, event, sender=None, level='info', formatted='', data={}):\n if not sender:\n sender=self\n self.bot.event_manager.emit(\n event,\n sender=sender,\n level=level,\n formatted=formatted,\n data=data\n )\n\n def initialize(self):\n pass\n", "path": "pokemongo_bot/base_task.py"}]} | 898 | 319 |
gh_patches_debug_3090 | rasdani/github-patches | git_diff | docker__docker-py-1671 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue with port option in 2.4.0 version
Hi,
I updated to 2.4.0 today and I ran into an issue with docker-compose when I tried to add the following line to my configuration file (docker-compose.yml):
`ports:
- "127.0.0.1:9292:9090"`
I got the following error:
`
ERROR: for ContainerName expected string or buffer
Traceback (most recent call last):
File "/usr/local/bin/docker-compose", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 68, in main
command()
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 118, in perform_command
handler(command, command_options)
File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 926, in up
scale_override=parse_scale_args(options['--scale']),
File "/usr/local/lib/python2.7/dist-packages/compose/project.py", line 424, in up
get_deps
File "/usr/local/lib/python2.7/dist-packages/compose/parallel.py", line 69, in parallel_execute
raise error_to_reraise
TypeError: expected string or buffer
`
I have no issue when I downgrade to version 2.3 of the package.
To reproduce the issue, I use the following configuration (it doesn't seem to depend on the image):
```
version: '2'
services :
ContainerName:
image: bae2d441e03a
ports:
- "127.0.0.1:9292:9090"
```
I run on Ubuntu 14.04.5 LTS with the following packages:
```
docker==2.4.0
docker-compose==1.14.0
docker-pycreds==0.2.1
dockerpty==0.4.1
Python 2.7.6
Client:
Version: 17.05.0-ce
API version: 1.29
Go version: go1.7.5
Git commit: 89658be
Built: Thu May 4 22:06:06 2017
OS/Arch: linux/amd64
Server:
Version: 17.05.0-ce
API version: 1.29 (minimum version 1.12)
Go version: go1.7.5
Git commit: 89658be
Built: Thu May 4 22:06:06 2017
OS/Arch: linux/amd64
Experimental: false
```
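Looking at the traceback together with `docker/utils/ports.py`, it seems docker-compose 1.14 may be handing `split_port()` something that is not a plain string. A defensive coercion along these lines (untested sketch, and I'm not sure it's the right long-term fix) is what I had in mind:
```
def split_port(port):
    # docker-compose 1.14 appears to pass a port object rather than a plain
    # string for the new ports syntax; fall back to its legacy string form
    # if it provides one (assumption, not verified against compose code).
    if hasattr(port, 'legacy_repr'):
        port = port.legacy_repr()
    port = str(port)
    match = PORT_SPEC.match(port)
    if match is None:
        _raise_invalid_port(port)
    # ... rest unchanged ...
```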
</issue>
<code>
[start of docker/utils/ports.py]
1 import re
2
3 PORT_SPEC = re.compile(
4 "^" # Match full string
5 "(" # External part
6 "((?P<host>[a-fA-F\d.:]+):)?" # Address
7 "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
8 ")?"
9 "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
10 "(?P<proto>/(udp|tcp))?" # Protocol
11 "$" # Match full string
12 )
13
14
15 def add_port_mapping(port_bindings, internal_port, external):
16 if internal_port in port_bindings:
17 port_bindings[internal_port].append(external)
18 else:
19 port_bindings[internal_port] = [external]
20
21
22 def add_port(port_bindings, internal_port_range, external_range):
23 if external_range is None:
24 for internal_port in internal_port_range:
25 add_port_mapping(port_bindings, internal_port, None)
26 else:
27 ports = zip(internal_port_range, external_range)
28 for internal_port, external_port in ports:
29 add_port_mapping(port_bindings, internal_port, external_port)
30
31
32 def build_port_bindings(ports):
33 port_bindings = {}
34 for port in ports:
35 internal_port_range, external_range = split_port(port)
36 add_port(port_bindings, internal_port_range, external_range)
37 return port_bindings
38
39
40 def _raise_invalid_port(port):
41 raise ValueError('Invalid port "%s", should be '
42 '[[remote_ip:]remote_port[-remote_port]:]'
43 'port[/protocol]' % port)
44
45
46 def port_range(start, end, proto, randomly_available_port=False):
47 if not start:
48 return start
49 if not end:
50 return [start + proto]
51 if randomly_available_port:
52 return ['{}-{}'.format(start, end) + proto]
53 return [str(port) + proto for port in range(int(start), int(end) + 1)]
54
55
56 def split_port(port):
57 port = str(port)
58 match = PORT_SPEC.match(port)
59 if match is None:
60 _raise_invalid_port(port)
61 parts = match.groupdict()
62
63 host = parts['host']
64 proto = parts['proto'] or ''
65 internal = port_range(parts['int'], parts['int_end'], proto)
66 external = port_range(
67 parts['ext'], parts['ext_end'], '', len(internal) == 1)
68
69 if host is None:
70 if external is not None and len(internal) != len(external):
71 raise ValueError('Port ranges don\'t match in length')
72 return internal, external
73 else:
74 if not external:
75 external = [None] * len(internal)
76 elif len(internal) != len(external):
77 raise ValueError('Port ranges don\'t match in length')
78 return internal, [(host, ext_port) for ext_port in external]
79
[end of docker/utils/ports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/utils/ports.py b/docker/utils/ports.py
--- a/docker/utils/ports.py
+++ b/docker/utils/ports.py
@@ -54,6 +54,11 @@
def split_port(port):
+ if hasattr(port, 'legacy_repr'):
+ # This is the worst hack, but it prevents a bug in Compose 1.14.0
+ # https://github.com/docker/docker-py/issues/1668
+ # TODO: remove once fixed in Compose stable
+ port = port.legacy_repr()
port = str(port)
match = PORT_SPEC.match(port)
if match is None:
| {"golden_diff": "diff --git a/docker/utils/ports.py b/docker/utils/ports.py\n--- a/docker/utils/ports.py\n+++ b/docker/utils/ports.py\n@@ -54,6 +54,11 @@\n \n \n def split_port(port):\n+ if hasattr(port, 'legacy_repr'):\n+ # This is the worst hack, but it prevents a bug in Compose 1.14.0\n+ # https://github.com/docker/docker-py/issues/1668\n+ # TODO: remove once fixed in Compose stable\n+ port = port.legacy_repr()\n port = str(port)\n match = PORT_SPEC.match(port)\n if match is None:\n", "issue": "Issue with port option in 2.4.0 version\nHi,\r\nI update to the 2.4 today and i got issue with docker-compose when i try to add the following line to my configuration file (docker-compose.yml) : \r\n`ports:\r\n - \"127.0.0.1:9292:9090\"`\r\n\r\nI got the following error:\r\n\r\n`\r\nERROR: for ContainerName expected string or buffer\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/docker-compose\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 68, in main\r\n command()\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 118, in perform_command\r\n handler(command, command_options)\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 926, in up\r\n scale_override=parse_scale_args(options['--scale']),\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/project.py\", line 424, in up\r\n get_deps\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/parallel.py\", line 69, in parallel_execute\r\n raise error_to_reraise\r\nTypeError: expected string or buffer\r\n`\r\n\r\nI have no issue when i downgrade again to the 2.3 version of the package\r\n\r\nTo reproduce the issue, i use the following configuration ( it doesn't seem to depend on the image):\r\n```\r\nversion: '2'\r\n\r\nservices :\r\n ContainerName:\r\n image: bae2d441e03a\r\n ports:\r\n - \"127.0.0.1:9292:9090\"\r\n```\r\n\r\nI run on Ubuntu 14.04.5 LTS with the following package:\r\n```\r\ndocker==2.4.0\r\ndocker-compose==1.14.0\r\ndocker-pycreds==0.2.1\r\ndockerpty==0.4.1\r\nPython 2.7.6\r\nClient:\r\n Version: 17.05.0-ce\r\n API version: 1.29\r\n Go version: go1.7.5\r\n Git commit: 89658be\r\n Built: Thu May 4 22:06:06 2017\r\n OS/Arch: linux/amd64\r\n\r\nServer:\r\n Version: 17.05.0-ce\r\n API version: 1.29 (minimum version 1.12)\r\n Go version: go1.7.5\r\n Git commit: 89658be\r\n Built: Thu May 4 22:06:06 2017\r\n OS/Arch: linux/amd64\r\n Experimental: false\r\n```\n", "before_files": [{"content": "import re\n\nPORT_SPEC = re.compile(\n \"^\" # Match full string\n \"(\" # External part\n \"((?P<host>[a-fA-F\\d.:]+):)?\" # Address\n \"(?P<ext>[\\d]*)(-(?P<ext_end>[\\d]+))?:\" # External range\n \")?\"\n \"(?P<int>[\\d]+)(-(?P<int_end>[\\d]+))?\" # Internal range\n \"(?P<proto>/(udp|tcp))?\" # Protocol\n \"$\" # Match full string\n)\n\n\ndef add_port_mapping(port_bindings, internal_port, external):\n if internal_port in port_bindings:\n port_bindings[internal_port].append(external)\n else:\n port_bindings[internal_port] = [external]\n\n\ndef add_port(port_bindings, internal_port_range, external_range):\n if external_range is None:\n for internal_port in internal_port_range:\n add_port_mapping(port_bindings, internal_port, None)\n else:\n ports = zip(internal_port_range, external_range)\n for internal_port, external_port in ports:\n add_port_mapping(port_bindings, internal_port, external_port)\n\n\ndef build_port_bindings(ports):\n port_bindings = {}\n for port in ports:\n 
internal_port_range, external_range = split_port(port)\n add_port(port_bindings, internal_port_range, external_range)\n return port_bindings\n\n\ndef _raise_invalid_port(port):\n raise ValueError('Invalid port \"%s\", should be '\n '[[remote_ip:]remote_port[-remote_port]:]'\n 'port[/protocol]' % port)\n\n\ndef port_range(start, end, proto, randomly_available_port=False):\n if not start:\n return start\n if not end:\n return [start + proto]\n if randomly_available_port:\n return ['{}-{}'.format(start, end) + proto]\n return [str(port) + proto for port in range(int(start), int(end) + 1)]\n\n\ndef split_port(port):\n port = str(port)\n match = PORT_SPEC.match(port)\n if match is None:\n _raise_invalid_port(port)\n parts = match.groupdict()\n\n host = parts['host']\n proto = parts['proto'] or ''\n internal = port_range(parts['int'], parts['int_end'], proto)\n external = port_range(\n parts['ext'], parts['ext_end'], '', len(internal) == 1)\n\n if host is None:\n if external is not None and len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, external\n else:\n if not external:\n external = [None] * len(internal)\n elif len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, [(host, ext_port) for ext_port in external]\n", "path": "docker/utils/ports.py"}]} | 1,957 | 147 |
gh_patches_debug_20319 | rasdani/github-patches | git_diff | pantsbuild__pants-12885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`DigestEntries` returns a `Directory` instead of an empty vector for non-matching digest
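To make the impact concrete: callers like `digest_to_file_digest` below filter on `isinstance(entry, FileEntry)`, so the stray `Directory` entry is silently dropped and "file not found" becomes indistinguishable from "got a directory". A sketch of what I'd expect versus what happens (not a real test):
```
# inside some @rule (sketch only)
subset = await Get(Digest, DigestSubset(input_digest, PathGlobs(["no/such/file"])))
entries = await Get(DigestEntries, Digest, subset)
# expected: an empty tuple of entries
# observed: a single Directory entry instead of an empty result
```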
</issue>
<code>
[start of src/python/pants/jvm/util_rules.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from dataclasses import dataclass
7
8 from pants.engine.fs import Digest, DigestEntries, DigestSubset, FileDigest, FileEntry, PathGlobs
9 from pants.engine.rules import Get, collect_rules, rule
10
11
12 @dataclass(frozen=True)
13 class ExtractFileDigest:
14 digest: Digest
15 file_path: str
16
17
18 @rule
19 async def digest_to_file_digest(request: ExtractFileDigest) -> FileDigest:
20 digest = await Get(Digest, DigestSubset(request.digest, PathGlobs([request.file_path])))
21 files_or_directories = await Get(DigestEntries, Digest, digest)
22 digest_entries = [entry for entry in files_or_directories if isinstance(entry, FileEntry)]
23
24 if len(digest_entries) == 0:
25 raise Exception(f"ExtractFileDigest: '{request.file_path}' not found in {request.digest}.")
26 elif len(digest_entries) > 1:
27 raise Exception(
28 f"ExtractFileDigest: Unexpected error: '{request.file_path}' found multiple times in {request.digest}"
29 )
30
31 file_info = digest_entries[0]
32 return file_info.file_digest
33
34
35 def rules():
36 return [*collect_rules()]
37
[end of src/python/pants/jvm/util_rules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/jvm/util_rules.py b/src/python/pants/jvm/util_rules.py
--- a/src/python/pants/jvm/util_rules.py
+++ b/src/python/pants/jvm/util_rules.py
@@ -18,8 +18,7 @@
@rule
async def digest_to_file_digest(request: ExtractFileDigest) -> FileDigest:
digest = await Get(Digest, DigestSubset(request.digest, PathGlobs([request.file_path])))
- files_or_directories = await Get(DigestEntries, Digest, digest)
- digest_entries = [entry for entry in files_or_directories if isinstance(entry, FileEntry)]
+ digest_entries = await Get(DigestEntries, Digest, digest)
if len(digest_entries) == 0:
raise Exception(f"ExtractFileDigest: '{request.file_path}' not found in {request.digest}.")
@@ -29,6 +28,12 @@
)
file_info = digest_entries[0]
+
+ if not isinstance(file_info, FileEntry):
+ raise AssertionError(
+ f"Unexpected error: '{request.file_path}' refers to a directory, not a file."
+ )
+
return file_info.file_digest
| {"golden_diff": "diff --git a/src/python/pants/jvm/util_rules.py b/src/python/pants/jvm/util_rules.py\n--- a/src/python/pants/jvm/util_rules.py\n+++ b/src/python/pants/jvm/util_rules.py\n@@ -18,8 +18,7 @@\n @rule\n async def digest_to_file_digest(request: ExtractFileDigest) -> FileDigest:\n digest = await Get(Digest, DigestSubset(request.digest, PathGlobs([request.file_path])))\n- files_or_directories = await Get(DigestEntries, Digest, digest)\n- digest_entries = [entry for entry in files_or_directories if isinstance(entry, FileEntry)]\n+ digest_entries = await Get(DigestEntries, Digest, digest)\n \n if len(digest_entries) == 0:\n raise Exception(f\"ExtractFileDigest: '{request.file_path}' not found in {request.digest}.\")\n@@ -29,6 +28,12 @@\n )\n \n file_info = digest_entries[0]\n+\n+ if not isinstance(file_info, FileEntry):\n+ raise AssertionError(\n+ f\"Unexpected error: '{request.file_path}' refers to a directory, not a file.\"\n+ )\n+\n return file_info.file_digest\n", "issue": "`DigestEntries` returns a `Directory` instead of an empty vector for non-matching digest\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nfrom pants.engine.fs import Digest, DigestEntries, DigestSubset, FileDigest, FileEntry, PathGlobs\nfrom pants.engine.rules import Get, collect_rules, rule\n\n\n@dataclass(frozen=True)\nclass ExtractFileDigest:\n digest: Digest\n file_path: str\n\n\n@rule\nasync def digest_to_file_digest(request: ExtractFileDigest) -> FileDigest:\n digest = await Get(Digest, DigestSubset(request.digest, PathGlobs([request.file_path])))\n files_or_directories = await Get(DigestEntries, Digest, digest)\n digest_entries = [entry for entry in files_or_directories if isinstance(entry, FileEntry)]\n\n if len(digest_entries) == 0:\n raise Exception(f\"ExtractFileDigest: '{request.file_path}' not found in {request.digest}.\")\n elif len(digest_entries) > 1:\n raise Exception(\n f\"ExtractFileDigest: Unexpected error: '{request.file_path}' found multiple times in {request.digest}\"\n )\n\n file_info = digest_entries[0]\n return file_info.file_digest\n\n\ndef rules():\n return [*collect_rules()]\n", "path": "src/python/pants/jvm/util_rules.py"}]} | 917 | 262 |
gh_patches_debug_29212 | rasdani/github-patches | git_diff | NVIDIA-Merlin__NVTabular-1414 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEA] Simplify AddMetadata Tag for NVTabular
**Is your feature request related to a problem? Please describe.**
Currently, we provide the functionality for the user to tag columns with the operator `AddMetadata`.
The use case is that users will use the operator mainly for adding tags.
Should we provide a wrapper called `AddTag` or `TagAs` to simplify the AddMetadata operator?
Should we provide multiple wrappers for common tags - e.g.
`TagAsUserID()`, `TagAsItemID()`, `TagAsUserFeatures()`, `TagAsItemFeatures()`, etc.
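If we go this way, the wrappers could stay very thin, e.g. (sketch only, naming open for discussion; assumes the usual `Tags` constants from `merlin.schema.tags`):
```
from merlin.schema.tags import Tags


class AddTags(AddMetadata):
    def __init__(self, tags=None):
        super().__init__(tags=tags)


class TagAsUserID(Operator):
    @property
    def output_tags(self):
        return [Tags.USER_ID]


class TagAsUserFeatures(Operator):
    @property
    def output_tags(self):
        return [Tags.USER]
```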
</issue>
<code>
[start of nvtabular/ops/add_metadata.py]
1 #
2 # Copyright (c) 2021, NVIDIA CORPORATION.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 from nvtabular.dispatch import DataFrameType
17
18 from .operator import ColumnSelector, Operator
19
20
21 class AddMetadata(Operator):
22 """
23 This operator will add user defined tags and properties
24 to a Schema.
25 """
26
27 def __init__(self, tags=None, properties=None):
28 super().__init__()
29 self.tags = tags or []
30 self.properties = properties or {}
31
32 def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
33 return df
34
35 @property
36 def output_tags(self):
37 return self.tags
38
39 @property
40 def output_properties(self):
41 return self.properties
42
[end of nvtabular/ops/add_metadata.py]
[start of nvtabular/ops/__init__.py]
1 #
2 # Copyright (c) 2021, NVIDIA CORPORATION.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16
17 # alias submodules here to avoid breaking everything with moving to submodules
18 # flake8: noqa
19 from .add_metadata import AddMetadata
20 from .bucketize import Bucketize
21 from .categorify import Categorify, get_embedding_sizes
22 from .clip import Clip
23 from .column_similarity import ColumnSimilarity
24 from .data_stats import DataStats
25 from .difference_lag import DifferenceLag
26 from .drop_low_cardinality import DropLowCardinality
27 from .dropna import Dropna
28 from .fill import FillMedian, FillMissing
29 from .filter import Filter
30 from .groupby import Groupby
31 from .hash_bucket import HashBucket
32 from .hashed_cross import HashedCross
33 from .join_external import JoinExternal
34 from .join_groupby import JoinGroupby
35 from .lambdaop import LambdaOp
36 from .list_slice import ListSlice
37 from .logop import LogOp
38 from .normalize import Normalize, NormalizeMinMax
39 from .operator import ColumnSelector, Operator
40 from .reduce_dtype_size import ReduceDtypeSize
41 from .rename import Rename
42 from .stat_operator import StatOperator
43 from .target_encoding import TargetEncoding
44 from .value_counts import ValueCount
45
[end of nvtabular/ops/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nvtabular/ops/__init__.py b/nvtabular/ops/__init__.py
--- a/nvtabular/ops/__init__.py
+++ b/nvtabular/ops/__init__.py
@@ -16,7 +16,15 @@
# alias submodules here to avoid breaking everything with moving to submodules
# flake8: noqa
-from .add_metadata import AddMetadata
+from .add_metadata import (
+ AddMetadata,
+ AddProperties,
+ AddTags,
+ TagAsItemFeatures,
+ TagAsItemID,
+ TagAsUserFeatures,
+ TagAsUserID,
+)
from .bucketize import Bucketize
from .categorify import Categorify, get_embedding_sizes
from .clip import Clip
diff --git a/nvtabular/ops/add_metadata.py b/nvtabular/ops/add_metadata.py
--- a/nvtabular/ops/add_metadata.py
+++ b/nvtabular/ops/add_metadata.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+from merlin.schema.tags import Tags
from nvtabular.dispatch import DataFrameType
from .operator import ColumnSelector, Operator
@@ -39,3 +40,38 @@
@property
def output_properties(self):
return self.properties
+
+
+class AddTags(AddMetadata):
+ def __init__(self, tags=None):
+ super().__init__(tags=tags)
+
+
+class AddProperties(AddMetadata):
+ def __init__(self, properties=None):
+ super().__init__(properties=properties)
+
+
+# Wrappers for common features
+class TagAsUserID(Operator):
+ @property
+ def output_tags(self):
+ return [Tags.USER_ID]
+
+
+class TagAsItemID(Operator):
+ @property
+ def output_tags(self):
+ return [Tags.ITEM_ID]
+
+
+class TagAsUserFeatures(Operator):
+ @property
+ def output_tags(self):
+ return [Tags.USER]
+
+
+class TagAsItemFeatures(Operator):
+ @property
+ def output_tags(self):
+ return [Tags.ITEM]
| {"golden_diff": "diff --git a/nvtabular/ops/__init__.py b/nvtabular/ops/__init__.py\n--- a/nvtabular/ops/__init__.py\n+++ b/nvtabular/ops/__init__.py\n@@ -16,7 +16,15 @@\n \n # alias submodules here to avoid breaking everything with moving to submodules\n # flake8: noqa\n-from .add_metadata import AddMetadata\n+from .add_metadata import (\n+ AddMetadata,\n+ AddProperties,\n+ AddTags,\n+ TagAsItemFeatures,\n+ TagAsItemID,\n+ TagAsUserFeatures,\n+ TagAsUserID,\n+)\n from .bucketize import Bucketize\n from .categorify import Categorify, get_embedding_sizes\n from .clip import Clip\ndiff --git a/nvtabular/ops/add_metadata.py b/nvtabular/ops/add_metadata.py\n--- a/nvtabular/ops/add_metadata.py\n+++ b/nvtabular/ops/add_metadata.py\n@@ -13,6 +13,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n #\n+from merlin.schema.tags import Tags\n from nvtabular.dispatch import DataFrameType\n \n from .operator import ColumnSelector, Operator\n@@ -39,3 +40,38 @@\n @property\n def output_properties(self):\n return self.properties\n+\n+\n+class AddTags(AddMetadata):\n+ def __init__(self, tags=None):\n+ super().__init__(tags=tags)\n+\n+\n+class AddProperties(AddMetadata):\n+ def __init__(self, properties=None):\n+ super().__init__(properties=properties)\n+\n+\n+# Wrappers for common features\n+class TagAsUserID(Operator):\n+ @property\n+ def output_tags(self):\n+ return [Tags.USER_ID]\n+\n+\n+class TagAsItemID(Operator):\n+ @property\n+ def output_tags(self):\n+ return [Tags.ITEM_ID]\n+\n+\n+class TagAsUserFeatures(Operator):\n+ @property\n+ def output_tags(self):\n+ return [Tags.USER]\n+\n+\n+class TagAsItemFeatures(Operator):\n+ @property\n+ def output_tags(self):\n+ return [Tags.ITEM]\n", "issue": "[FEA] Simplify AddMetadata Tag for NVTabular\n**Is your feature request related to a problem? Please describe.**\r\nCurrently, we provide the functionality for the user to tag columns with the operator `AddMetadata`.\r\nThe use case is that users will use the operator mainly for adding tags. 
\r\n\r\nShould we provide a wrapper called `AddTag` or `TagAs` to simplify the AddMetadata operator?\r\nShould we provide multiple wrappers for common tags - e.g.\r\n\r\n`TagAsUserID()`, `TagAsItemID()`, `TagAsUserFeatures()`, `TagAsItemFeatures()`, etc.\r\n\r\n\n", "before_files": [{"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom nvtabular.dispatch import DataFrameType\n\nfrom .operator import ColumnSelector, Operator\n\n\nclass AddMetadata(Operator):\n \"\"\"\n This operator will add user defined tags and properties\n to a Schema.\n \"\"\"\n\n def __init__(self, tags=None, properties=None):\n super().__init__()\n self.tags = tags or []\n self.properties = properties or {}\n\n def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:\n return df\n\n @property\n def output_tags(self):\n return self.tags\n\n @property\n def output_properties(self):\n return self.properties\n", "path": "nvtabular/ops/add_metadata.py"}, {"content": "#\n# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# alias submodules here to avoid breaking everything with moving to submodules\n# flake8: noqa\nfrom .add_metadata import AddMetadata\nfrom .bucketize import Bucketize\nfrom .categorify import Categorify, get_embedding_sizes\nfrom .clip import Clip\nfrom .column_similarity import ColumnSimilarity\nfrom .data_stats import DataStats\nfrom .difference_lag import DifferenceLag\nfrom .drop_low_cardinality import DropLowCardinality\nfrom .dropna import Dropna\nfrom .fill import FillMedian, FillMissing\nfrom .filter import Filter\nfrom .groupby import Groupby\nfrom .hash_bucket import HashBucket\nfrom .hashed_cross import HashedCross\nfrom .join_external import JoinExternal\nfrom .join_groupby import JoinGroupby\nfrom .lambdaop import LambdaOp\nfrom .list_slice import ListSlice\nfrom .logop import LogOp\nfrom .normalize import Normalize, NormalizeMinMax\nfrom .operator import ColumnSelector, Operator\nfrom .reduce_dtype_size import ReduceDtypeSize\nfrom .rename import Rename\nfrom .stat_operator import StatOperator\nfrom .target_encoding import TargetEncoding\nfrom .value_counts import ValueCount\n", "path": "nvtabular/ops/__init__.py"}]} | 1,509 | 500 |
gh_patches_debug_17762 | rasdani/github-patches | git_diff | pytorch__TensorRT-2505 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`aten.arange.start_step`
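Presumably this just needs a small evaluator registered next to `generic_evaluator` below; a sketch of what I have in mind (untested, evaluates the op eagerly as a constant):
```
import numpy as np


@dynamo_tensorrt_converter(torch.ops.aten.arange.start_step)
def aten_ops_arange_start_step(
    ctx: ConversionContext,
    target: Target,
    args: Tuple[Argument, ...],
    kwargs: Dict[str, Argument],
    name: str,
) -> Union[TRTTensor, Sequence[TRTTensor]]:
    # arange(start, end, step) takes no tensor inputs here, so it can be
    # evaluated eagerly and folded into a constant.
    return np.arange(*args)
```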
</issue>
<code>
[start of py/torch_tensorrt/dynamo/conversion/ops_evaluators.py]
1 import logging
2 import operator
3 from typing import Dict, Sequence, Tuple, Union
4
5 import torch
6 from torch.fx.node import Argument, Node, Target
7 from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext
8 from torch_tensorrt.dynamo.conversion._ConverterRegistry import (
9 ConverterRegistry,
10 dynamo_tensorrt_converter,
11 )
12 from torch_tensorrt.fx.types import TRTTensor
13
14 _LOGGER: logging.Logger = logging.getLogger(__name__)
15
16
17 def getitem_validator(getitem_node: Node) -> bool:
18 from torch_tensorrt.dynamo.conversion._ConverterRegistry import DYNAMO_CONVERTERS
19
20 # Getitem nodes can only be converted if their parent node also can
21 return getitem_node.args[0] in DYNAMO_CONVERTERS
22
23
24 # TODO: Subsequent evaluators should be registered here with their own validators
25 @dynamo_tensorrt_converter(operator.getitem, capability_validator=getitem_validator)
26 @dynamo_tensorrt_converter(torch.ops.aten.detach.default)
27 def generic_evaluator(
28 ctx: ConversionContext,
29 target: Target,
30 args: Tuple[Argument, ...],
31 kwargs: Dict[str, Argument],
32 name: str,
33 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
34 _LOGGER.debug(
35 f"Evaluating {ConverterRegistry.qualified_name_or_str(target)} on object with name: {name}"
36 )
37 return target(*args)
38
[end of py/torch_tensorrt/dynamo/conversion/ops_evaluators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py b/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py
--- a/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py
+++ b/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py
@@ -2,6 +2,7 @@
import operator
from typing import Dict, Sequence, Tuple, Union
+import numpy as np
import torch
from torch.fx.node import Argument, Node, Target
from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext
@@ -35,3 +36,14 @@
f"Evaluating {ConverterRegistry.qualified_name_or_str(target)} on object with name: {name}"
)
return target(*args)
+
+
+@dynamo_tensorrt_converter(torch.ops.aten.arange.start_step)
+def aten_ops_arange_start_step(
+ ctx: ConversionContext,
+ target: Target,
+ args: Tuple[Argument, ...],
+ kwargs: Dict[str, Argument],
+ name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+ return np.arange(*args)
| {"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py b/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py\n--- a/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py\n+++ b/py/torch_tensorrt/dynamo/conversion/ops_evaluators.py\n@@ -2,6 +2,7 @@\n import operator\n from typing import Dict, Sequence, Tuple, Union\n \n+import numpy as np\n import torch\n from torch.fx.node import Argument, Node, Target\n from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext\n@@ -35,3 +36,14 @@\n f\"Evaluating {ConverterRegistry.qualified_name_or_str(target)} on object with name: {name}\"\n )\n return target(*args)\n+\n+\n+@dynamo_tensorrt_converter(torch.ops.aten.arange.start_step)\n+def aten_ops_arange_start_step(\n+ ctx: ConversionContext,\n+ target: Target,\n+ args: Tuple[Argument, ...],\n+ kwargs: Dict[str, Argument],\n+ name: str,\n+) -> Union[TRTTensor, Sequence[TRTTensor]]:\n+ return np.arange(*args)\n", "issue": "`aten.arange.start_step`\n\n", "before_files": [{"content": "import logging\nimport operator\nfrom typing import Dict, Sequence, Tuple, Union\n\nimport torch\nfrom torch.fx.node import Argument, Node, Target\nfrom torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext\nfrom torch_tensorrt.dynamo.conversion._ConverterRegistry import (\n ConverterRegistry,\n dynamo_tensorrt_converter,\n)\nfrom torch_tensorrt.fx.types import TRTTensor\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\n\ndef getitem_validator(getitem_node: Node) -> bool:\n from torch_tensorrt.dynamo.conversion._ConverterRegistry import DYNAMO_CONVERTERS\n\n # Getitem nodes can only be converted if their parent node also can\n return getitem_node.args[0] in DYNAMO_CONVERTERS\n\n\n# TODO: Subsequent evaluators should be registered here with their own validators\n@dynamo_tensorrt_converter(operator.getitem, capability_validator=getitem_validator)\n@dynamo_tensorrt_converter(torch.ops.aten.detach.default)\ndef generic_evaluator(\n ctx: ConversionContext,\n target: Target,\n args: Tuple[Argument, ...],\n kwargs: Dict[str, Argument],\n name: str,\n) -> Union[TRTTensor, Sequence[TRTTensor]]:\n _LOGGER.debug(\n f\"Evaluating {ConverterRegistry.qualified_name_or_str(target)} on object with name: {name}\"\n )\n return target(*args)\n", "path": "py/torch_tensorrt/dynamo/conversion/ops_evaluators.py"}]} | 927 | 263 |
gh_patches_debug_7041 | rasdani/github-patches | git_diff | mozilla__bugbug-2806 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change needsdiagnosis model to consider moved milestone as `needsdiagnosis = True`
We have observed that the needsdiagnosis model classifies certain issues that potentially need diagnosis as `needsdiagnosis = False`. While this is expected, I think it might be getting worse, as the issues data is unbalanced and has many more data points for `needsdiagnosis = False`. We've started a discussion in https://github.com/mozilla/webcompat-team-okrs/issues/256
It's worth mentioning that in the [recent sample of 22 issues](https://docs.google.com/spreadsheets/d/1F9vcSpLQ_hNBeZinsytGXlfXpJLW6vh7C0BJYtd9hIY/edit?pli=1#gid=1640243023) most of the issues that looked like false negatives didn't end up needing diagnosis (not reproducible, out of the scope of the project, etc.), so they're true negatives (prediction was correct for 21 of them). We'll continue tracking them to get a more representative sample.
As an example, issues that looked like they needed diagnosis but in the end didn't (prediction was correct):
https://github.com/webcompat/web-bugs/issues/100746
https://github.com/webcompat/web-bugs/issues/100676
https://github.com/webcompat/web-bugs/issues/100687
Issues that are false negatives (prediction was incorrect):
https://github.com/webcompat/web-bugs/issues/100495
https://github.com/webcompat/web-bugs/issues/100645
I was thinking of including an additional set of issues that will contribute to `needsdiagnosis = True` pool. We have recently added a `moved` [milestone](https://github.com/webcompat/web-bugs/issues?q=is%3Aissue+milestone%3Amoved+is%3Aclosed). These issues often don't need diagnosis and are moved to bugzilla or elsewhere, but their content should be contributed to `needsdiagnosis = True` rather than false.
</issue>
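For readers skimming this record, here is a minimal, self-contained sketch of the labeling change the reporter is proposing; the event/milestone structure is assumed to match what `get_labels` in the code below iterates over, and the helper function is illustrative rather than part of bugbug itself.

```python
# Illustrative sketch only: issues milestoned as either "needsdiagnosis" or
# "moved" fall into the same class (label 0), which is what the accepted
# patch further down folds into the existing loop with an `or` condition.
LABEL_MILESTONES = {"needsdiagnosis", "moved"}  # assumed milestone titles


def label_for_issue(issue):
    """Return 0 if the issue was ever milestoned as needsdiagnosis or moved, else 1."""
    for event in issue.get("events", []):
        if (
            event.get("event") == "milestoned"
            and event.get("milestone", {}).get("title") in LABEL_MILESTONES
        ):
            return 0
    return 1


# Tiny usage example with a fabricated issue payload:
example = {"number": 1, "events": [{"event": "milestoned", "milestone": {"title": "moved"}}]}
assert label_for_issue(example) == 0
```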
<code>
[start of bugbug/models/needsdiagnosis.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import logging
7
8 import xgboost
9 from sklearn.compose import ColumnTransformer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import feature_cleanup, issue_features, utils
13 from bugbug.model import IssueModel
14
15 logger = logging.getLogger(__name__)
16
17
18 class NeedsDiagnosisModel(IssueModel):
19 def __init__(self, lemmatization=False):
20 IssueModel.__init__(
21 self, owner="webcompat", repo="web-bugs", lemmatization=lemmatization
22 )
23
24 self.calculate_importance = False
25
26 feature_extractors = []
27
28 cleanup_functions = [
29 feature_cleanup.fileref(),
30 feature_cleanup.url(),
31 feature_cleanup.synonyms(),
32 ]
33
34 self.extraction_pipeline = Pipeline(
35 [
36 (
37 "issue_extractor",
38 issue_features.IssueExtractor(
39 feature_extractors, cleanup_functions, rollback=True
40 ),
41 ),
42 (
43 "union",
44 ColumnTransformer(
45 [
46 ("title", self.text_vectorizer(min_df=0.0001), "title"),
47 (
48 "first_comment",
49 self.text_vectorizer(min_df=0.0001),
50 "first_comment",
51 ),
52 ]
53 ),
54 ),
55 ]
56 )
57
58 self.clf = xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())
59 self.clf.set_params(predictor="cpu_predictor")
60
61 def get_labels(self):
62 classes = {}
63
64 for issue in self.github.get_issues():
65 # Skip issues with empty title or body
66 if issue["title"] is None or issue["body"] is None:
67 continue
68
69 # Skip issues that are not moderated yet as they don't have a meaningful title or body
70 if issue["title"] == "In the moderation queue.":
71 continue
72
73 for event in issue["events"]:
74 if (
75 event["event"] == "milestoned"
76 and event["milestone"]["title"] == "needsdiagnosis"
77 ):
78 classes[issue["number"]] = 0
79
80 if issue["number"] not in classes:
81 classes[issue["number"]] = 1
82
83 logger.info(
84 f"{sum(1 for label in classes.values() if label == 1)} issues have not been moved to needsdiagnosis"
85 )
86 logger.info(
87 f"{sum(1 for label in classes.values() if label == 0)} issues have been moved to needsdiagnosis"
88 )
89
90 return classes, [0, 1]
91
92 def get_feature_names(self):
93 return self.extraction_pipeline.named_steps["union"].get_feature_names()
94
[end of bugbug/models/needsdiagnosis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bugbug/models/needsdiagnosis.py b/bugbug/models/needsdiagnosis.py
--- a/bugbug/models/needsdiagnosis.py
+++ b/bugbug/models/needsdiagnosis.py
@@ -71,9 +71,9 @@
continue
for event in issue["events"]:
- if (
- event["event"] == "milestoned"
- and event["milestone"]["title"] == "needsdiagnosis"
+ if event["event"] == "milestoned" and (
+ event["milestone"]["title"] == "needsdiagnosis"
+ or event["milestone"]["title"] == "moved"
):
classes[issue["number"]] = 0
| {"golden_diff": "diff --git a/bugbug/models/needsdiagnosis.py b/bugbug/models/needsdiagnosis.py\n--- a/bugbug/models/needsdiagnosis.py\n+++ b/bugbug/models/needsdiagnosis.py\n@@ -71,9 +71,9 @@\n continue\n \n for event in issue[\"events\"]:\n- if (\n- event[\"event\"] == \"milestoned\"\n- and event[\"milestone\"][\"title\"] == \"needsdiagnosis\"\n+ if event[\"event\"] == \"milestoned\" and (\n+ event[\"milestone\"][\"title\"] == \"needsdiagnosis\"\n+ or event[\"milestone\"][\"title\"] == \"moved\"\n ):\n classes[issue[\"number\"]] = 0\n", "issue": "Change needsdiagnosis model to consider moved milestone as `needsdiagnosis = True`\nWe have observed that needsdiagnosis model classifies certain issues that potentially need diagnosis as `needsdiagnosis = False`. While this is expected, I think it might be getting worse, as the issues data is unbalanced and has much more data points for `needsdiagnosis = False`. We've started a discussion in https://github.com/mozilla/webcompat-team-okrs/issues/256 \r\n\r\nIt's worth mentioning that in the [recent sample of 22 issues](https://docs.google.com/spreadsheets/d/1F9vcSpLQ_hNBeZinsytGXlfXpJLW6vh7C0BJYtd9hIY/edit?pli=1#gid=1640243023) most of the issues that looked like false negatives didn't end up needing diagnosis (not reproducible, out of the scope of the project, etc.), so they're true negatives (prediction was correct for 21 of them). We'll continue tracking them to get a more representative sample.\r\n\r\nAs an example, issues that looked like they need diagnosis, but in the end, they didn't (prediction was correct):\r\nhttps://github.com/webcompat/web-bugs/issues/100746\r\nhttps://github.com/webcompat/web-bugs/issues/100676\r\nhttps://github.com/webcompat/web-bugs/issues/100687\r\n\r\nIssues that are false negatives (prediction was incorrect): \r\nhttps://github.com/webcompat/web-bugs/issues/100495\r\nhttps://github.com/webcompat/web-bugs/issues/100645\r\n\r\nI was thinking of including an additional set of issues that will contribute to `needsdiagnosis = True` pool. We have recently added a `moved` [milestone](https://github.com/webcompat/web-bugs/issues?q=is%3Aissue+milestone%3Amoved+is%3Aclosed). These issues often don't need diagnosis and are moved to bugzilla or elsewhere, but their content should be contributed to `needsdiagnosis = True` rather than false. \r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\n\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import feature_cleanup, issue_features, utils\nfrom bugbug.model import IssueModel\n\nlogger = logging.getLogger(__name__)\n\n\nclass NeedsDiagnosisModel(IssueModel):\n def __init__(self, lemmatization=False):\n IssueModel.__init__(\n self, owner=\"webcompat\", repo=\"web-bugs\", lemmatization=lemmatization\n )\n\n self.calculate_importance = False\n\n feature_extractors = []\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"issue_extractor\",\n issue_features.IssueExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"title\", self.text_vectorizer(min_df=0.0001), \"title\"),\n (\n \"first_comment\",\n self.text_vectorizer(min_df=0.0001),\n \"first_comment\",\n ),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for issue in self.github.get_issues():\n # Skip issues with empty title or body\n if issue[\"title\"] is None or issue[\"body\"] is None:\n continue\n\n # Skip issues that are not moderated yet as they don't have a meaningful title or body\n if issue[\"title\"] == \"In the moderation queue.\":\n continue\n\n for event in issue[\"events\"]:\n if (\n event[\"event\"] == \"milestoned\"\n and event[\"milestone\"][\"title\"] == \"needsdiagnosis\"\n ):\n classes[issue[\"number\"]] = 0\n\n if issue[\"number\"] not in classes:\n classes[issue[\"number\"]] = 1\n\n logger.info(\n f\"{sum(1 for label in classes.values() if label == 1)} issues have not been moved to needsdiagnosis\"\n )\n logger.info(\n f\"{sum(1 for label in classes.values() if label == 0)} issues have been moved to needsdiagnosis\"\n )\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/needsdiagnosis.py"}]} | 1,801 | 161 |
gh_patches_debug_6599 | rasdani/github-patches | git_diff | svthalia__concrexit-2585 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'Event' object has no attribute 'number_regs'
Sentry Issue: [CONCREXIT-HC](https://sentry.io/organizations/thalia/issues/3639420824/?referrer=github_integration)
```
AttributeError: 'Event' object has no attribute 'number_regs'
(11 additional frame(s) were not displayed)
...
File "rest_framework/serializers.py", line 253, in data
self._data = self.to_representation(self.instance)
File "rest_framework/serializers.py", line 522, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "rest_framework/serializers.py", line 522, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "rest_framework/fields.py", line 1838, in to_representation
return method(value)
File "events/api/v2/serializers/event.py", line 86, in _num_participants
participant_count = instance.number_regs
```
</issue>
<code>
[start of website/events/api/v2/serializers/event.py]
1 from rest_framework import serializers
2
3 from activemembers.api.v2.serializers.member_group import MemberGroupSerializer
4 from announcements.api.v2.serializers import SlideSerializer
5 from documents.api.v2.serializers.document import DocumentSerializer
6 from events import services
7 from events.api.v2.serializers.event_registration import EventRegistrationSerializer
8 from events.models import Event, EventRegistration
9 from payments.api.v2.serializers.payment_amount import PaymentAmountSerializer
10 from thaliawebsite.api.v2.serializers import CleanedHTMLSerializer
11 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
12 CleanedModelSerializer,
13 )
14 from utils.snippets import create_google_maps_url
15
16
17 class EventSerializer(CleanedModelSerializer):
18 """Serializer for events."""
19
20 class Meta:
21 model = Event
22 fields = (
23 "pk",
24 "title",
25 "description",
26 "caption",
27 "start",
28 "end",
29 "category",
30 "registration_start",
31 "registration_end",
32 "cancel_deadline",
33 "optional_registrations",
34 "location",
35 "price",
36 "fine",
37 "num_participants",
38 "max_participants",
39 "no_registration_message",
40 "cancel_too_late_message",
41 "has_fields",
42 "food_event",
43 "maps_url",
44 "user_permissions",
45 "user_registration",
46 "organisers",
47 "slide",
48 "documents",
49 )
50
51 description = CleanedHTMLSerializer()
52 organisers = MemberGroupSerializer(many=True)
53 user_registration = serializers.SerializerMethodField("_user_registration")
54 num_participants = serializers.SerializerMethodField("_num_participants")
55 maps_url = serializers.SerializerMethodField("_maps_url")
56 price = PaymentAmountSerializer()
57 fine = PaymentAmountSerializer()
58 slide = SlideSerializer()
59 documents = DocumentSerializer(many=True)
60 user_permissions = serializers.SerializerMethodField("_user_permissions")
61
62 def _user_registration(self, instance):
63 try:
64 if self.context["request"].member:
65 reg = instance.eventregistration_set.get(
66 member=self.context["request"].member
67 )
68 return EventRegistrationSerializer(
69 reg,
70 context=self.context,
71 fields=(
72 "pk",
73 "present",
74 "queue_position",
75 "is_cancelled",
76 "is_late_cancellation",
77 "date",
78 "payment",
79 ),
80 ).data
81 except EventRegistration.DoesNotExist:
82 pass
83 return None
84
85 def _num_participants(self, instance):
86 participant_count = instance.number_regs
87 if instance.max_participants and participant_count > instance.max_participants:
88 return instance.max_participants
89 return participant_count
90
91 def _user_permissions(self, instance):
92 member = self.context["request"].member
93 return services.event_permissions(member, instance)
94
95 def _maps_url(self, instance):
96 return create_google_maps_url(instance.map_location, zoom=13, size="450x250")
97
[end of website/events/api/v2/serializers/event.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py
--- a/website/events/api/v2/serializers/event.py
+++ b/website/events/api/v2/serializers/event.py
@@ -83,10 +83,7 @@
return None
def _num_participants(self, instance):
- participant_count = instance.number_regs
- if instance.max_participants and participant_count > instance.max_participants:
- return instance.max_participants
- return participant_count
+ return instance.participants.count()
def _user_permissions(self, instance):
member = self.context["request"].member
| {"golden_diff": "diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py\n--- a/website/events/api/v2/serializers/event.py\n+++ b/website/events/api/v2/serializers/event.py\n@@ -83,10 +83,7 @@\n return None\n \n def _num_participants(self, instance):\n- participant_count = instance.number_regs\n- if instance.max_participants and participant_count > instance.max_participants:\n- return instance.max_participants\n- return participant_count\n+ return instance.participants.count()\n \n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n", "issue": "AttributeError: 'Event' object has no attribute 'number_regs'\nSentry Issue: [CONCREXIT-HC](https://sentry.io/organizations/thalia/issues/3639420824/?referrer=github_integration)\n\n```\nAttributeError: 'Event' object has no attribute 'number_regs'\n(11 additional frame(s) were not displayed)\n...\n File \"rest_framework/serializers.py\", line 253, in data\n self._data = self.to_representation(self.instance)\n File \"rest_framework/serializers.py\", line 522, in to_representation\n ret[field.field_name] = field.to_representation(attribute)\n File \"rest_framework/serializers.py\", line 522, in to_representation\n ret[field.field_name] = field.to_representation(attribute)\n File \"rest_framework/fields.py\", line 1838, in to_representation\n return method(value)\n File \"events/api/v2/serializers/event.py\", line 86, in _num_participants\n participant_count = instance.number_regs\n```\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group import MemberGroupSerializer\nfrom announcements.api.v2.serializers import SlideSerializer\nfrom documents.api.v2.serializers.document import DocumentSerializer\nfrom events import services\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.models import Event, EventRegistration\nfrom payments.api.v2.serializers.payment_amount import PaymentAmountSerializer\nfrom thaliawebsite.api.v2.serializers import CleanedHTMLSerializer\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\nfrom utils.snippets import create_google_maps_url\n\n\nclass EventSerializer(CleanedModelSerializer):\n \"\"\"Serializer for events.\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"caption\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"cancel_too_late_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"organisers\",\n \"slide\",\n \"documents\",\n )\n\n description = CleanedHTMLSerializer()\n organisers = MemberGroupSerializer(many=True)\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n maps_url = serializers.SerializerMethodField(\"_maps_url\")\n price = PaymentAmountSerializer()\n fine = PaymentAmountSerializer()\n slide = SlideSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n\n def _user_registration(self, instance):\n try:\n if self.context[\"request\"].member:\n reg = 
instance.eventregistration_set.get(\n member=self.context[\"request\"].member\n )\n return EventRegistrationSerializer(\n reg,\n context=self.context,\n fields=(\n \"pk\",\n \"present\",\n \"queue_position\",\n \"is_cancelled\",\n \"is_late_cancellation\",\n \"date\",\n \"payment\",\n ),\n ).data\n except EventRegistration.DoesNotExist:\n pass\n return None\n\n def _num_participants(self, instance):\n participant_count = instance.number_regs\n if instance.max_participants and participant_count > instance.max_participants:\n return instance.max_participants\n return participant_count\n\n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance)\n\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n", "path": "website/events/api/v2/serializers/event.py"}]} | 1,583 | 151 |
gh_patches_debug_2279 | rasdani/github-patches | git_diff | geopandas__geopandas-648 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Descartes dependency
In the docs, geopandas lists descartes and matplotlib as optional dependencies. However, descartes is listed as an install_requires in the setup.py.
One of the two should be updated. I'd prefer to be able to pip install geopandas without installing matplotlib.
</issue>
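As a hedged sketch of what "one of the two should be updated" could look like in practice: the plotting-related packages can either be dropped from `install_requires` (which is what the accepted patch below does) or exposed as a setuptools extra. The `plotting` extra name and the exact package list here are illustrative assumptions, not something this setup.py defines.

```python
# Hypothetical excerpt of a setup() call, shown only to contrast the two options.
from setuptools import setup

setup(
    name="geopandas",
    install_requires=["pandas", "shapely", "fiona", "pyproj"],  # no descartes/matplotlib
    extras_require={"plotting": ["matplotlib", "descartes"]},   # opt-in plotting support
)
```

With an extra like this, `pip install geopandas` stays lean while `pip install geopandas[plotting]` pulls in the optional packages.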
<code>
[start of setup.py]
1 #!/usr/bin/env/python
2 """Installation script
3
4 """
5
6 import os
7
8 try:
9 from setuptools import setup
10 except ImportError:
11 from distutils.core import setup
12
13 import versioneer
14
15 LONG_DESCRIPTION = """GeoPandas is a project to add support for geographic data to
16 `pandas`_ objects.
17
18 The goal of GeoPandas is to make working with geospatial data in
19 python easier. It combines the capabilities of `pandas`_ and `shapely`_,
20 providing geospatial operations in pandas and a high-level interface
21 to multiple geometries to shapely. GeoPandas enables you to easily do
22 operations in python that would otherwise require a spatial database
23 such as PostGIS.
24
25 .. _pandas: http://pandas.pydata.org
26 .. _shapely: http://toblerity.github.io/shapely
27 """
28
29 if os.environ.get('READTHEDOCS', False) == 'True':
30 INSTALL_REQUIRES = []
31 else:
32 INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'descartes', 'pyproj']
33
34 # get all data dirs in the datasets module
35 data_files = []
36
37 for item in os.listdir("geopandas/datasets"):
38 if not item.startswith('__'):
39 if os.path.isdir(os.path.join("geopandas/datasets/", item)):
40 data_files.append(os.path.join("datasets", item, '*'))
41 elif item.endswith('.zip'):
42 data_files.append(os.path.join("datasets", item))
43
44
45 setup(name='geopandas',
46 version=versioneer.get_version(),
47 description='Geographic pandas extensions',
48 license='BSD',
49 author='GeoPandas contributors',
50 author_email='[email protected]',
51 url='http://geopandas.org',
52 long_description=LONG_DESCRIPTION,
53 packages=['geopandas', 'geopandas.io', 'geopandas.tools',
54 'geopandas.datasets',
55 'geopandas.tests', 'geopandas.tools.tests'],
56 package_data={'geopandas': data_files},
57 install_requires=INSTALL_REQUIRES,
58 cmdclass=versioneer.get_cmdclass())
59
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@
if os.environ.get('READTHEDOCS', False) == 'True':
INSTALL_REQUIRES = []
else:
- INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'descartes', 'pyproj']
+ INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'pyproj']
# get all data dirs in the datasets module
data_files = []
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,7 @@\n if os.environ.get('READTHEDOCS', False) == 'True':\n INSTALL_REQUIRES = []\n else:\n- INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'descartes', 'pyproj']\n+ INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'pyproj']\n \n # get all data dirs in the datasets module\n data_files = []\n", "issue": "Descartes dependency\nIn the docs, geopandas lists descartes and matplotlib as optional dependencies. However, descartes is listed as an install_requires in the setup.py.\r\n\r\nOne of the two should be updated. I'd prefer to be able to pip install geopandas without installing matplotlib.\n", "before_files": [{"content": "#!/usr/bin/env/python\n\"\"\"Installation script\n\n\"\"\"\n\nimport os\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nimport versioneer\n\nLONG_DESCRIPTION = \"\"\"GeoPandas is a project to add support for geographic data to\n`pandas`_ objects.\n\nThe goal of GeoPandas is to make working with geospatial data in\npython easier. It combines the capabilities of `pandas`_ and `shapely`_,\nproviding geospatial operations in pandas and a high-level interface\nto multiple geometries to shapely. GeoPandas enables you to easily do\noperations in python that would otherwise require a spatial database\nsuch as PostGIS.\n\n.. _pandas: http://pandas.pydata.org\n.. _shapely: http://toblerity.github.io/shapely\n\"\"\"\n\nif os.environ.get('READTHEDOCS', False) == 'True':\n INSTALL_REQUIRES = []\nelse:\n INSTALL_REQUIRES = ['pandas', 'shapely', 'fiona', 'descartes', 'pyproj']\n\n# get all data dirs in the datasets module\ndata_files = []\n\nfor item in os.listdir(\"geopandas/datasets\"):\n if not item.startswith('__'):\n if os.path.isdir(os.path.join(\"geopandas/datasets/\", item)):\n data_files.append(os.path.join(\"datasets\", item, '*'))\n elif item.endswith('.zip'):\n data_files.append(os.path.join(\"datasets\", item))\n\n\nsetup(name='geopandas',\n version=versioneer.get_version(),\n description='Geographic pandas extensions',\n license='BSD',\n author='GeoPandas contributors',\n author_email='[email protected]',\n url='http://geopandas.org',\n long_description=LONG_DESCRIPTION,\n packages=['geopandas', 'geopandas.io', 'geopandas.tools',\n 'geopandas.datasets',\n 'geopandas.tests', 'geopandas.tools.tests'],\n package_data={'geopandas': data_files},\n install_requires=INSTALL_REQUIRES,\n cmdclass=versioneer.get_cmdclass())\n", "path": "setup.py"}]} | 1,162 | 124 |
gh_patches_debug_38231 | rasdani/github-patches | git_diff | pyro-ppl__pyro-365 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect result from Delta's batch_log_pdf
It looks like there may be a bug in Delta's `batch_log_pdf` method. When the value we're computing the log prob of doesn't match the parameter I expect `batch_log_pdf` to return `-inf` but it doesn't. For example:
```
x = Variable(torch.Tensor([[1.0]]))
y = Variable(torch.Tensor([[2.0]]))
# This is OK, it returns zero as expected:
print(Delta(x).batch_log_pdf(x))
# Here I expect -inf, but get 2.5500e+08
print(Delta(x).batch_log_pdf(y))
```
`log_pdf` works as expected.
(This isn't high priority for me.)
</issue>
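To make the expected arithmetic concrete, here is a small sketch based on the accepted fix shown later in this record (not on the buggy `batch_log_pdf` below): elementwise equality maps to 1.0 or 0.0, taking the log maps those to 0.0 or `-inf`, and summing over the event dimension keeps 0.0 only if every element matches. Modern `torch.tensor` is used here instead of the old `Variable` wrapper for brevity.

```python
import torch

v = torch.tensor([[1.0]])  # the Delta's support point
x = torch.tensor([[1.0]])  # matches v
y = torch.tensor([[2.0]])  # does not match v


def delta_batch_log_pdf(value, point):
    # eq -> 1.0 where equal, 0.0 where not; log -> 0.0 or -inf; summing over
    # the last (event) dimension yields 0.0 only when every element matches.
    return torch.sum(torch.eq(value, point).float().log(), -1)


print(delta_batch_log_pdf(x, v))  # tensor([0.])
print(delta_batch_log_pdf(y, v))  # tensor([-inf])
```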
<code>
[start of pyro/distributions/__init__.py]
1 # abstract base class
2 from pyro.distributions.bernoulli import Bernoulli
3 from pyro.distributions.beta import Beta
4 from pyro.distributions.categorical import Categorical
5 from pyro.distributions.cauchy import Cauchy
6 from pyro.distributions.half_cauchy import HalfCauchy
7 from pyro.distributions.delta import Delta
8 from pyro.distributions.distribution import Distribution # noqa: F401
9 # specific distributions
10 from pyro.distributions.diag_normal import DiagNormal
11 from pyro.distributions.dirichlet import Dirichlet
12 from pyro.distributions.exponential import Exponential
13 from pyro.distributions.gamma import Gamma
14 from pyro.distributions.log_normal import LogNormal
15 from pyro.distributions.multinomial import Multinomial
16 from pyro.distributions.poisson import Poisson
17 from pyro.distributions.random_primitive import RandomPrimitive
18 from pyro.distributions.uniform import Uniform
19
20 # function aliases
21 diagnormal = DiagNormal()
22 lognormal = RandomPrimitive(LogNormal)
23 categorical = Categorical()
24 bernoulli = RandomPrimitive(Bernoulli)
25 beta = RandomPrimitive(Beta)
26 delta = Delta()
27 exponential = RandomPrimitive(Exponential)
28 gamma = RandomPrimitive(Gamma)
29 multinomial = RandomPrimitive(Multinomial)
30 poisson = RandomPrimitive(Poisson)
31 uniform = RandomPrimitive(Uniform)
32 dirichlet = RandomPrimitive(Dirichlet)
33 cauchy = RandomPrimitive(Cauchy)
34 halfcauchy = RandomPrimitive(HalfCauchy)
35
[end of pyro/distributions/__init__.py]
[start of pyro/distributions/delta.py]
1 import torch
2 from torch.autograd import Variable
3
4 from pyro.distributions.distribution import Distribution
5
6
7 class Delta(Distribution):
8 """
9 :param v: support element *(any)*
10
11 Discrete distribution that assigns probability one to the single element in
12 its support. Delta distribution parameterized by a random choice should not
13 be used with MCMC based inference, as doing so produces incorrect results.
14 """
15 enumerable = True
16
17 def _sanitize_input(self, v):
18 if v is not None:
19 # stateless distribution
20 return v
21 elif self.v is not None:
22 # stateful distribution
23 return self.v
24 else:
25 raise ValueError("Parameter(s) were None")
26
27 def __init__(self, v=None, batch_size=1, *args, **kwargs):
28 """
29 Params:
30 `v` - value
31 """
32 self.v = v
33 if v is not None:
34 if v.dim() == 1 and batch_size > 1:
35 self.v = v.expand(v, v.size(0))
36 super(Delta, self).__init__(*args, **kwargs)
37
38 def sample(self, v=None):
39 v = self._sanitize_input(v)
40 if isinstance(v, Variable):
41 return v
42 return Variable(v)
43
44 def batch_log_pdf(self, x, v=None, batch_size=1):
45 v = self._sanitize_input(v)
46 if x.dim == 1:
47 x = x.expand(batch_size, x.size(0))
48 return (torch.eq(x, v.expand_as(x)) - 1).float() * 999999
49
50 def log_pdf(self, x, v=None, *args, **kwargs):
51 v = self._sanitize_input(v)
52 if torch.equal(x.data, v.data.expand_as(x.data)):
53 return Variable(torch.zeros(1).type_as(v.data))
54 return Variable(torch.Tensor([-float("inf")]).type_as(v.data))
55
56 def support(self, v=None):
57 """
58 Returns the delta distribution's support, as a tensor along the first dimension.
59
60 :param v: torch variable where each element of the tensor represents the point at
61 which the delta distribution is concentrated.
62 :return: torch variable enumerating the support of the delta distribution.
63 :rtype: torch.autograd.Variable.
64 """
65 v = self._sanitize_input(v)
66 # univariate case
67 return Variable(v.data)
68
[end of pyro/distributions/delta.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyro/distributions/__init__.py b/pyro/distributions/__init__.py
--- a/pyro/distributions/__init__.py
+++ b/pyro/distributions/__init__.py
@@ -23,7 +23,7 @@
categorical = Categorical()
bernoulli = RandomPrimitive(Bernoulli)
beta = RandomPrimitive(Beta)
-delta = Delta()
+delta = RandomPrimitive(Delta)
exponential = RandomPrimitive(Exponential)
gamma = RandomPrimitive(Gamma)
multinomial = RandomPrimitive(Multinomial)
diff --git a/pyro/distributions/delta.py b/pyro/distributions/delta.py
--- a/pyro/distributions/delta.py
+++ b/pyro/distributions/delta.py
@@ -14,44 +14,40 @@
"""
enumerable = True
- def _sanitize_input(self, v):
- if v is not None:
- # stateless distribution
- return v
- elif self.v is not None:
- # stateful distribution
- return self.v
- else:
- raise ValueError("Parameter(s) were None")
-
- def __init__(self, v=None, batch_size=1, *args, **kwargs):
+ def __init__(self, v, batch_size=None, *args, **kwargs):
"""
Params:
`v` - value
"""
self.v = v
- if v is not None:
- if v.dim() == 1 and batch_size > 1:
- self.v = v.expand(v, v.size(0))
+ if not isinstance(self.v, Variable):
+ self.v = Variable(self.v)
+ if v.dim() == 1 and batch_size is not None:
+ self.v = v.expand(v, v.size(0))
super(Delta, self).__init__(*args, **kwargs)
- def sample(self, v=None):
- v = self._sanitize_input(v)
- if isinstance(v, Variable):
- return v
- return Variable(v)
+ def batch_shape(self, x=None):
+ event_dim = 1
+ v = self.v
+ if x is not None and x.size() != v.size():
+ v = self.v.expand_as(x)
+ return v.size()[:-event_dim]
+
+ def event_shape(self):
+ event_dim = 1
+ return self.v.size()[-event_dim:]
+
+ def shape(self, x=None):
+ return self.batch_shape(x) + self.event_shape()
- def batch_log_pdf(self, x, v=None, batch_size=1):
- v = self._sanitize_input(v)
- if x.dim == 1:
- x = x.expand(batch_size, x.size(0))
- return (torch.eq(x, v.expand_as(x)) - 1).float() * 999999
+ def sample(self):
+ return self.v
- def log_pdf(self, x, v=None, *args, **kwargs):
- v = self._sanitize_input(v)
- if torch.equal(x.data, v.data.expand_as(x.data)):
- return Variable(torch.zeros(1).type_as(v.data))
- return Variable(torch.Tensor([-float("inf")]).type_as(v.data))
+ def batch_log_pdf(self, x):
+ v = self.v
+ if x.size() != v.size():
+ v = v.expand_as(x)
+ return torch.sum(torch.eq(x, v).float().log(), -1)
def support(self, v=None):
"""
@@ -62,6 +58,4 @@
:return: torch variable enumerating the support of the delta distribution.
:rtype: torch.autograd.Variable.
"""
- v = self._sanitize_input(v)
- # univariate case
- return Variable(v.data)
+ return Variable(self.v.data)
| {"golden_diff": "diff --git a/pyro/distributions/__init__.py b/pyro/distributions/__init__.py\n--- a/pyro/distributions/__init__.py\n+++ b/pyro/distributions/__init__.py\n@@ -23,7 +23,7 @@\n categorical = Categorical()\n bernoulli = RandomPrimitive(Bernoulli)\n beta = RandomPrimitive(Beta)\n-delta = Delta()\n+delta = RandomPrimitive(Delta)\n exponential = RandomPrimitive(Exponential)\n gamma = RandomPrimitive(Gamma)\n multinomial = RandomPrimitive(Multinomial)\ndiff --git a/pyro/distributions/delta.py b/pyro/distributions/delta.py\n--- a/pyro/distributions/delta.py\n+++ b/pyro/distributions/delta.py\n@@ -14,44 +14,40 @@\n \"\"\"\n enumerable = True\n \n- def _sanitize_input(self, v):\n- if v is not None:\n- # stateless distribution\n- return v\n- elif self.v is not None:\n- # stateful distribution\n- return self.v\n- else:\n- raise ValueError(\"Parameter(s) were None\")\n-\n- def __init__(self, v=None, batch_size=1, *args, **kwargs):\n+ def __init__(self, v, batch_size=None, *args, **kwargs):\n \"\"\"\n Params:\n `v` - value\n \"\"\"\n self.v = v\n- if v is not None:\n- if v.dim() == 1 and batch_size > 1:\n- self.v = v.expand(v, v.size(0))\n+ if not isinstance(self.v, Variable):\n+ self.v = Variable(self.v)\n+ if v.dim() == 1 and batch_size is not None:\n+ self.v = v.expand(v, v.size(0))\n super(Delta, self).__init__(*args, **kwargs)\n \n- def sample(self, v=None):\n- v = self._sanitize_input(v)\n- if isinstance(v, Variable):\n- return v\n- return Variable(v)\n+ def batch_shape(self, x=None):\n+ event_dim = 1\n+ v = self.v\n+ if x is not None and x.size() != v.size():\n+ v = self.v.expand_as(x)\n+ return v.size()[:-event_dim]\n+\n+ def event_shape(self):\n+ event_dim = 1\n+ return self.v.size()[-event_dim:]\n+\n+ def shape(self, x=None):\n+ return self.batch_shape(x) + self.event_shape()\n \n- def batch_log_pdf(self, x, v=None, batch_size=1):\n- v = self._sanitize_input(v)\n- if x.dim == 1:\n- x = x.expand(batch_size, x.size(0))\n- return (torch.eq(x, v.expand_as(x)) - 1).float() * 999999\n+ def sample(self):\n+ return self.v\n \n- def log_pdf(self, x, v=None, *args, **kwargs):\n- v = self._sanitize_input(v)\n- if torch.equal(x.data, v.data.expand_as(x.data)):\n- return Variable(torch.zeros(1).type_as(v.data))\n- return Variable(torch.Tensor([-float(\"inf\")]).type_as(v.data))\n+ def batch_log_pdf(self, x):\n+ v = self.v\n+ if x.size() != v.size():\n+ v = v.expand_as(x)\n+ return torch.sum(torch.eq(x, v).float().log(), -1)\n \n def support(self, v=None):\n \"\"\"\n@@ -62,6 +58,4 @@\n :return: torch variable enumerating the support of the delta distribution.\n :rtype: torch.autograd.Variable.\n \"\"\"\n- v = self._sanitize_input(v)\n- # univariate case\n- return Variable(v.data)\n+ return Variable(self.v.data)\n", "issue": "Incorrect result from Delta's batch_log_pdf\nIt looks like there may be a bug in Delta's `batch_log_pdf` method. When the value we're computing the log prob of doesn't match the parameter I expect `batch_log_pdf` to return `-inf` but it doesn't. 
For example:\r\n\r\n```\r\nx = Variable(torch.Tensor([[1.0]]))\r\ny = Variable(torch.Tensor([[2.0]]))\r\n\r\n# This is OK, it returns zero as expected:\r\nprint(Delta(x).batch_log_pdf(x))\r\n\r\n# Here I expect -inf, but get 2.5500e+08\r\nprint(Delta(x).batch_log_pdf(y))\r\n```\r\n\r\n`log_pdf` works as expected.\r\n\r\n(This isn't high priority for me.)\n", "before_files": [{"content": "# abstract base class\nfrom pyro.distributions.bernoulli import Bernoulli\nfrom pyro.distributions.beta import Beta\nfrom pyro.distributions.categorical import Categorical\nfrom pyro.distributions.cauchy import Cauchy\nfrom pyro.distributions.half_cauchy import HalfCauchy\nfrom pyro.distributions.delta import Delta\nfrom pyro.distributions.distribution import Distribution # noqa: F401\n# specific distributions\nfrom pyro.distributions.diag_normal import DiagNormal\nfrom pyro.distributions.dirichlet import Dirichlet\nfrom pyro.distributions.exponential import Exponential\nfrom pyro.distributions.gamma import Gamma\nfrom pyro.distributions.log_normal import LogNormal\nfrom pyro.distributions.multinomial import Multinomial\nfrom pyro.distributions.poisson import Poisson\nfrom pyro.distributions.random_primitive import RandomPrimitive\nfrom pyro.distributions.uniform import Uniform\n\n# function aliases\ndiagnormal = DiagNormal()\nlognormal = RandomPrimitive(LogNormal)\ncategorical = Categorical()\nbernoulli = RandomPrimitive(Bernoulli)\nbeta = RandomPrimitive(Beta)\ndelta = Delta()\nexponential = RandomPrimitive(Exponential)\ngamma = RandomPrimitive(Gamma)\nmultinomial = RandomPrimitive(Multinomial)\npoisson = RandomPrimitive(Poisson)\nuniform = RandomPrimitive(Uniform)\ndirichlet = RandomPrimitive(Dirichlet)\ncauchy = RandomPrimitive(Cauchy)\nhalfcauchy = RandomPrimitive(HalfCauchy)\n", "path": "pyro/distributions/__init__.py"}, {"content": "import torch\nfrom torch.autograd import Variable\n\nfrom pyro.distributions.distribution import Distribution\n\n\nclass Delta(Distribution):\n \"\"\"\n :param v: support element *(any)*\n\n Discrete distribution that assigns probability one to the single element in\n its support. 
Delta distribution parameterized by a random choice should not\n be used with MCMC based inference, as doing so produces incorrect results.\n \"\"\"\n enumerable = True\n\n def _sanitize_input(self, v):\n if v is not None:\n # stateless distribution\n return v\n elif self.v is not None:\n # stateful distribution\n return self.v\n else:\n raise ValueError(\"Parameter(s) were None\")\n\n def __init__(self, v=None, batch_size=1, *args, **kwargs):\n \"\"\"\n Params:\n `v` - value\n \"\"\"\n self.v = v\n if v is not None:\n if v.dim() == 1 and batch_size > 1:\n self.v = v.expand(v, v.size(0))\n super(Delta, self).__init__(*args, **kwargs)\n\n def sample(self, v=None):\n v = self._sanitize_input(v)\n if isinstance(v, Variable):\n return v\n return Variable(v)\n\n def batch_log_pdf(self, x, v=None, batch_size=1):\n v = self._sanitize_input(v)\n if x.dim == 1:\n x = x.expand(batch_size, x.size(0))\n return (torch.eq(x, v.expand_as(x)) - 1).float() * 999999\n\n def log_pdf(self, x, v=None, *args, **kwargs):\n v = self._sanitize_input(v)\n if torch.equal(x.data, v.data.expand_as(x.data)):\n return Variable(torch.zeros(1).type_as(v.data))\n return Variable(torch.Tensor([-float(\"inf\")]).type_as(v.data))\n\n def support(self, v=None):\n \"\"\"\n Returns the delta distribution's support, as a tensor along the first dimension.\n\n :param v: torch variable where each element of the tensor represents the point at\n which the delta distribution is concentrated.\n :return: torch variable enumerating the support of the delta distribution.\n :rtype: torch.autograd.Variable.\n \"\"\"\n v = self._sanitize_input(v)\n # univariate case\n return Variable(v.data)\n", "path": "pyro/distributions/delta.py"}]} | 1,755 | 860 |
gh_patches_debug_7945 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4253 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/booster/mixed_precision/mixed_precision_base.py]
1 from abc import ABC, abstractmethod
2 from typing import Callable, Optional, Tuple
3
4 import torch.nn as nn
5 from torch.optim import Optimizer
6
7 from colossalai.interface import OptimizerWrapper
8
9
10 class MixedPrecision(ABC):
11 """
12 An abstract class for mixed precision training.
13 """
14
15 @abstractmethod
16 def configure(self,
17 model: nn.Module,
18 optimizer: Optional[Optimizer] = None,
19 criterion: Optional[Callable] = None,
20 ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:
21 # TODO: implement this method
22 pass
23
[end of colossalai/booster/mixed_precision/mixed_precision_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/booster/mixed_precision/mixed_precision_base.py b/colossalai/booster/mixed_precision/mixed_precision_base.py
--- a/colossalai/booster/mixed_precision/mixed_precision_base.py
+++ b/colossalai/booster/mixed_precision/mixed_precision_base.py
@@ -13,10 +13,11 @@
"""
@abstractmethod
- def configure(self,
- model: nn.Module,
- optimizer: Optional[Optimizer] = None,
- criterion: Optional[Callable] = None,
- ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:
+ def configure(
+ self,
+ model: nn.Module,
+ optimizer: Optional[Optimizer] = None,
+ criterion: Optional[Callable] = None,
+ ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:
# TODO: implement this method
pass
| {"golden_diff": "diff --git a/colossalai/booster/mixed_precision/mixed_precision_base.py b/colossalai/booster/mixed_precision/mixed_precision_base.py\n--- a/colossalai/booster/mixed_precision/mixed_precision_base.py\n+++ b/colossalai/booster/mixed_precision/mixed_precision_base.py\n@@ -13,10 +13,11 @@\n \"\"\"\n \n @abstractmethod\n- def configure(self,\n- model: nn.Module,\n- optimizer: Optional[Optimizer] = None,\n- criterion: Optional[Callable] = None,\n- ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:\n+ def configure(\n+ self,\n+ model: nn.Module,\n+ optimizer: Optional[Optimizer] = None,\n+ criterion: Optional[Callable] = None,\n+ ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:\n # TODO: implement this method\n pass\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom typing import Callable, Optional, Tuple\n\nimport torch.nn as nn\nfrom torch.optim import Optimizer\n\nfrom colossalai.interface import OptimizerWrapper\n\n\nclass MixedPrecision(ABC):\n \"\"\"\n An abstract class for mixed precision training.\n \"\"\"\n\n @abstractmethod\n def configure(self,\n model: nn.Module,\n optimizer: Optional[Optimizer] = None,\n criterion: Optional[Callable] = None,\n ) -> Tuple[nn.Module, OptimizerWrapper, Callable]:\n # TODO: implement this method\n pass\n", "path": "colossalai/booster/mixed_precision/mixed_precision_base.py"}]} | 736 | 208 |
gh_patches_debug_16262 | rasdani/github-patches | git_diff | web2py__web2py-1459 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
using web2py with Passenger
I'm using web2py on [Dreamhost](https://www.dreamhost.com/) with the recommended [Phusion Passenger](https://www.phusionpassenger.com/) as the deployment web server. I configured the web2py [handlers/wsgihandler.py](https://github.com/web2py/web2py/blob/master/handlers/wsgihandler.py) as recommended by Dreamhost users (adding lines to launch Python in a virtual environment), creating passenger_wsgi.py. With just these edits, Passenger reported a timeout error from the application (web2py).
The Passenger [debugging startup page](https://github.com/phusion/passenger/wiki/Debugging-application-startup-problems) notes that Passenger uses stdout for communication with the application and assumes that it is not "closed, overwritten, or redirected" by the application.
Web2py overwrites stdout with stderr on line 31 in wsgihandler.py. When I comment out this line, web2py (and Passenger) start up just fine and seem to work, although I haven't done much testing beyond startup.
So, my question is, is this line necessary? If so, is there a fix to make this setup work properly? If web2py requires both file descriptors pointing to the same place, maybe set stderr = stdout instead?
</issue>
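For context, the reporter's own suggestion at the end of the issue — point stderr at stdout rather than the reverse — would be a one-line change in the handler shown below; this is an illustrative sketch, while the accepted patch further down simply removes the reassignment altogether.

```python
import sys

# Illustrative alternative only: keep both streams writing to the same place
# without clobbering stdout, which Passenger uses as its control channel.
sys.stderr = sys.stdout
```

Whether that alternative is safe is the reporter's open question; the accepted patch sidesteps it by leaving both streams untouched.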
<code>
[start of handlers/wsgihandler.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 This file is part of the web2py Web Framework
6 Copyrighted by Massimo Di Pierro <[email protected]>
7 License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
8
9
10 This is a WSGI handler for Apache
11 Requires apache+mod_wsgi.
12
13 In httpd.conf put something like:
14
15 LoadModule wsgi_module modules/mod_wsgi.so
16 WSGIScriptAlias / /path/to/wsgihandler.py
17
18 """
19
20 # change these parameters as required
21 LOGGING = False
22 SOFTCRON = False
23
24 import sys
25 import os
26
27 path = os.path.dirname(os.path.abspath(__file__))
28 os.chdir(path)
29
30 if not os.path.isdir('applications'):
31 raise RuntimeError('Running from the wrong folder')
32
33 sys.path = [path] + [p for p in sys.path if not p == path]
34
35 sys.stdout = sys.stderr
36
37 import gluon.main
38
39 if LOGGING:
40 application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
41 logfilename='httpserver.log',
42 profiler_dir=None)
43 else:
44 application = gluon.main.wsgibase
45
46 if SOFTCRON:
47 from gluon.settings import global_settings
48 global_settings.web2py_crontype = 'soft'
49
[end of handlers/wsgihandler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/handlers/wsgihandler.py b/handlers/wsgihandler.py
--- a/handlers/wsgihandler.py
+++ b/handlers/wsgihandler.py
@@ -7,22 +7,16 @@
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
-This is a WSGI handler for Apache
-Requires apache+mod_wsgi.
-
-In httpd.conf put something like:
-
- LoadModule wsgi_module modules/mod_wsgi.so
- WSGIScriptAlias / /path/to/wsgihandler.py
-
+This is a WSGI handler
"""
+import sys
+import os
+
# change these parameters as required
LOGGING = False
SOFTCRON = False
-import sys
-import os
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
@@ -32,8 +26,6 @@
sys.path = [path] + [p for p in sys.path if not p == path]
-sys.stdout = sys.stderr
-
import gluon.main
if LOGGING:
| {"golden_diff": "diff --git a/handlers/wsgihandler.py b/handlers/wsgihandler.py\n--- a/handlers/wsgihandler.py\n+++ b/handlers/wsgihandler.py\n@@ -7,22 +7,16 @@\n License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n \n \n-This is a WSGI handler for Apache\n-Requires apache+mod_wsgi.\n-\n-In httpd.conf put something like:\n-\n- LoadModule wsgi_module modules/mod_wsgi.so\n- WSGIScriptAlias / /path/to/wsgihandler.py\n-\n+This is a WSGI handler\n \"\"\"\n \n+import sys\n+import os\n+\n # change these parameters as required\n LOGGING = False\n SOFTCRON = False\n \n-import sys\n-import os\n \n path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(path)\n@@ -32,8 +26,6 @@\n \n sys.path = [path] + [p for p in sys.path if not p == path]\n \n-sys.stdout = sys.stderr\n-\n import gluon.main\n \n if LOGGING:\n", "issue": "using web2py with Passenger\nI'm using web2py on [Dreamhost ](https://www.dreamhost.com/)with the recommended [Phusion Passenger](https://www.phusionpassenger.com/) as the deployment web server. I configured the web2py [handlers/wsgihandler.py](https://github.com/web2py/web2py/blob/master/handlers/wsgihandler.py) as recommended by Dreamhost users (adding lines to launch python in a virtual environment), creating passenger_wsgi.py. With just these edits, Passenger reported a timeout error from the application (web2py). \n\nThe Passenger [debugging startup page](https://github.com/phusion/passenger/wiki/Debugging-application-startup-problems) notes that Passenger uses stdout for communication with the application and assumes that it is not \"closed, overwritten, or redirected\" by the application. \n\nWeb2py overwrites stdout with stderr on line 31 in wsgihandler.py. When I comment out this line, web2py (and Passenger) startup just fine and seem to work, although I haven't done much testing beyond startup.\n\nSo, my question is, is this line necessary? If so, is there a fix to make this setup work properly? If web2py requires both file descriptors pointing to the same place, maybe set stderr = stdout instead?\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis file is part of the web2py Web Framework\nCopyrighted by Massimo Di Pierro <[email protected]>\nLicense: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\n\nThis is a WSGI handler for Apache\nRequires apache+mod_wsgi.\n\nIn httpd.conf put something like:\n\n LoadModule wsgi_module modules/mod_wsgi.so\n WSGIScriptAlias / /path/to/wsgihandler.py\n\n\"\"\"\n\n# change these parameters as required\nLOGGING = False\nSOFTCRON = False\n\nimport sys\nimport os\n\npath = os.path.dirname(os.path.abspath(__file__))\nos.chdir(path)\n\nif not os.path.isdir('applications'):\n raise RuntimeError('Running from the wrong folder')\n\nsys.path = [path] + [p for p in sys.path if not p == path]\n\nsys.stdout = sys.stderr\n\nimport gluon.main\n\nif LOGGING:\n application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,\n logfilename='httpserver.log',\n profiler_dir=None)\nelse:\n application = gluon.main.wsgibase\n\nif SOFTCRON:\n from gluon.settings import global_settings\n global_settings.web2py_crontype = 'soft'\n", "path": "handlers/wsgihandler.py"}]} | 1,210 | 244 |
gh_patches_debug_24420 | rasdani/github-patches | git_diff | nautobot__nautobot-1381 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nautobot-scheduler can't apply celery_backend_cleanup_1
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Python version: 3.9
* Nautobot version: 1.2.5
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Run the scheduler as systemd-service
2.
3.
<!-- What did you expect to happen? -->
### Expected Behavior
Internal jobs run without an error
<!-- What happened instead? -->
### Observed Behavior
```
Feb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: [2022-02-08 04:00:00,000: INFO/MainProcess] Scheduler: Sending due task celery.backend_cleanup_1 (celery.backend_cleanup)
Feb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: [2022-02-08 04:00:00,001: ERROR/MainProcess] Message Error: Couldn't apply scheduled task celery.backend_cleanup_1: 'str' object has no attribute 'items'
Feb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: [' File "/opt/nautobot/bin/nautobot-server", line 8, in <module>\n sys.exit(main())\n', ' File "/opt/nautobot/lib/python3.9/site-packages/nautobot/cor>
Feb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: Traceback (most recent call last):
Feb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: File "/opt/nautobot/lib/python3.9/site-packages/celery/beat.py", line 402, in apply_async
Feb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: entry_kwargs = _evaluate_entry_kwargs(entry.kwargs)
Feb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: File "/opt/nautobot/lib/python3.9/site-packages/celery/beat.py", line 220, in _evaluate_entry_kwargs
Feb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: for k, v in entry_kwargs.items()
Feb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: AttributeError: 'str' object has no attribute 'items'
```
I see this error every day.
</issue>
<code>
[start of nautobot/core/celery/schedulers.py]
1 import logging
2
3 from celery import current_app
4 from django_celery_beat.schedulers import ModelEntry, DatabaseScheduler
5
6 from nautobot.extras.models import ScheduledJob, ScheduledJobs
7
8
9 logger = logging.getLogger(__name__)
10
11
12 class NautobotScheduleEntry(ModelEntry):
13 """
14 Nautobot variant of the django-celery-beat ModelEntry which uses the
15 nautobot.extras.models.ScheduledJob model
16 """
17
18 def __init__(self, model, app=None):
19 """Initialize the model entry."""
20 self.app = app or current_app._get_current_object()
21 self.name = "{}_{}".format(model.name, model.pk)
22 self.task = model.task
23 self.args = model.args
24 self.kwargs = model.kwargs
25 try:
26 self.schedule = model.schedule
27 except model.DoesNotExist:
28 logger.error(
29 "Disabling schedule %s that was removed from database",
30 self.name,
31 )
32 self._disable(model)
33
34 self.options = {}
35 if model.queue:
36 self.options["queue"] = model.queue
37
38 self.options["headers"] = {}
39 self.total_run_count = model.total_run_count
40 self.model = model
41
42 if not model.last_run_at:
43 model.last_run_at = self._default_now()
44
45 self.last_run_at = model.last_run_at
46
47
48 class NautobotDatabaseScheduler(DatabaseScheduler):
49 """
50 Nautobot variant of the django-celery-beat DatabaseScheduler which uses the
51 nautobot.extras.models.ScheduledJob model
52 """
53
54 Entry = NautobotScheduleEntry
55 Model = ScheduledJob
56 Changes = ScheduledJobs
57
[end of nautobot/core/celery/schedulers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/core/celery/schedulers.py b/nautobot/core/celery/schedulers.py
--- a/nautobot/core/celery/schedulers.py
+++ b/nautobot/core/celery/schedulers.py
@@ -2,6 +2,7 @@
from celery import current_app
from django_celery_beat.schedulers import ModelEntry, DatabaseScheduler
+from kombu.utils.json import loads
from nautobot.extras.models import ScheduledJob, ScheduledJobs
@@ -20,8 +21,14 @@
self.app = app or current_app._get_current_object()
self.name = "{}_{}".format(model.name, model.pk)
self.task = model.task
- self.args = model.args
- self.kwargs = model.kwargs
+ try:
+ # Nautobot scheduled jobs pass args/kwargs as constructed objects,
+ # but Celery built-in jobs such as celery.backend_cleanup pass them as JSON to be parsed
+ self.args = model.args if isinstance(model.args, (tuple, list)) else loads(model.args or "[]")
+ self.kwargs = model.kwargs if isinstance(model.kwargs, dict) else loads(model.kwargs or "{}")
+ except (TypeError, ValueError) as exc:
+ logger.exception("Removing schedule %s for argument deserialization error: %s", self.name, exc)
+ self._disable(model)
try:
self.schedule = model.schedule
except model.DoesNotExist:
| {"golden_diff": "diff --git a/nautobot/core/celery/schedulers.py b/nautobot/core/celery/schedulers.py\n--- a/nautobot/core/celery/schedulers.py\n+++ b/nautobot/core/celery/schedulers.py\n@@ -2,6 +2,7 @@\n \n from celery import current_app\n from django_celery_beat.schedulers import ModelEntry, DatabaseScheduler\n+from kombu.utils.json import loads\n \n from nautobot.extras.models import ScheduledJob, ScheduledJobs\n \n@@ -20,8 +21,14 @@\n self.app = app or current_app._get_current_object()\n self.name = \"{}_{}\".format(model.name, model.pk)\n self.task = model.task\n- self.args = model.args\n- self.kwargs = model.kwargs\n+ try:\n+ # Nautobot scheduled jobs pass args/kwargs as constructed objects,\n+ # but Celery built-in jobs such as celery.backend_cleanup pass them as JSON to be parsed\n+ self.args = model.args if isinstance(model.args, (tuple, list)) else loads(model.args or \"[]\")\n+ self.kwargs = model.kwargs if isinstance(model.kwargs, dict) else loads(model.kwargs or \"{}\")\n+ except (TypeError, ValueError) as exc:\n+ logger.exception(\"Removing schedule %s for argument deserialization error: %s\", self.name, exc)\n+ self._disable(model)\n try:\n self.schedule = model.schedule\n except model.DoesNotExist:\n", "issue": "nautobot-scheduler can't apply celery_backend_cleanup_1\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: 3.9\r\n* Nautobot version: 1.2.5\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Run the scheduler as systemd-service\r\n2.\r\n3.\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nInternal jobs run without an error\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\n```\r\nFeb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: [2022-02-08 04:00:00,000: INFO/MainProcess] Scheduler: Sending due task celery.backend_cleanup_1 (celery.backend_cleanup)\r\nFeb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: [2022-02-08 04:00:00,001: ERROR/MainProcess] Message Error: Couldn't apply scheduled task celery.backend_cleanup_1: 'str' object has no attribute 'items'\r\nFeb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: [' File \"/opt/nautobot/bin/nautobot-server\", line 8, in <module>\\n sys.exit(main())\\n', ' File \"/opt/nautobot/lib/python3.9/site-packages/nautobot/cor>\r\nFeb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: Traceback (most recent call last):\r\nFeb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: File \"/opt/nautobot/lib/python3.9/site-packages/celery/beat.py\", line 402, in apply_async\r\nFeb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: entry_kwargs = _evaluate_entry_kwargs(entry.kwargs)\r\nFeb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: File \"/opt/nautobot/lib/python3.9/site-packages/celery/beat.py\", line 220, in _evaluate_entry_kwargs\r\nFeb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: for k, v in entry_kwargs.items()\r\nFeb 08 04:00:00 dh01-a-06-18 nautobot-server[3033678]: AttributeError: 'str' object has no attribute 'items'\r\n```\r\n\r\nI see this error for every day.\n", "before_files": [{"content": "import logging\n\nfrom celery import current_app\nfrom django_celery_beat.schedulers import ModelEntry, DatabaseScheduler\n\nfrom nautobot.extras.models import ScheduledJob, ScheduledJobs\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass NautobotScheduleEntry(ModelEntry):\n \"\"\"\n Nautobot variant of the django-celery-beat ModelEntry which uses the\n nautobot.extras.models.ScheduledJob model\n \"\"\"\n\n def __init__(self, model, app=None):\n \"\"\"Initialize the model entry.\"\"\"\n self.app = app or current_app._get_current_object()\n self.name = \"{}_{}\".format(model.name, model.pk)\n self.task = model.task\n self.args = model.args\n self.kwargs = model.kwargs\n try:\n self.schedule = model.schedule\n except model.DoesNotExist:\n logger.error(\n \"Disabling schedule %s that was removed from database\",\n self.name,\n )\n self._disable(model)\n\n self.options = {}\n if model.queue:\n self.options[\"queue\"] = model.queue\n\n self.options[\"headers\"] = {}\n self.total_run_count = model.total_run_count\n self.model = model\n\n if not model.last_run_at:\n model.last_run_at = self._default_now()\n\n self.last_run_at = model.last_run_at\n\n\nclass NautobotDatabaseScheduler(DatabaseScheduler):\n \"\"\"\n Nautobot variant of the django-celery-beat DatabaseScheduler which uses the\n nautobot.extras.models.ScheduledJob model\n \"\"\"\n\n Entry = NautobotScheduleEntry\n Model = ScheduledJob\n Changes = ScheduledJobs\n", "path": "nautobot/core/celery/schedulers.py"}]} | 1,897 | 315 |
gh_patches_debug_601 | rasdani/github-patches | git_diff | pex-tool__pex-1288 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.35
On the docket:
+ [x] Ensure venv pex does not enter a re-exec loop. #1286
+ [x] Improve resolve error information. #1287
+ [x] Expose Pex tools via a pex-tools console script. #1279
+ [x] Fix auto-created `--venv` core scripts. (#1278)
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.34"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.34"
+__version__ = "2.1.35"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.34\"\n+__version__ = \"2.1.35\"\n", "issue": "Release 2.1.35\nOn the docket:\r\n+ [x] Ensure venv pex does not enter a re-exec loop. #1286\r\n+ [x] Improve resolve error information. #1287 \r\n+ [x] Expose Pex tools via a pex-tools console script. #1279\r\n+ [x] Fix auto-created `--venv` core scripts. (#1278)\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.34\"\n", "path": "pex/version.py"}]} | 682 | 97 |
gh_patches_debug_17947 | rasdani/github-patches | git_diff | conda__conda-build-690 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Entry point pattern doesn't match entrypoints:with.dots
Hi,
Could you please have a look at this. I define an entry point as
``` python
entry_points={
'console_scripts': [
'poultry = poultry.main:dispatcher.dispatch',
],
},
```
https://github.com/dimazest/poultry/blob/e0f39277f0a219a4d3cd461b69ce5dd4422fc9dd/setup.py#L62
which the entry point pattern at https://github.com/conda/conda-build/blob/master/conda_build/scripts.py#L27 doesn't match.
Thanks.
</issue>
<code>
[start of conda_build/scripts.py]
1 '''
2 Module for creating entry points and scripts for PyPI packages.
3 '''
4
5 from __future__ import absolute_import, division, print_function
6
7 import re
8 import os
9 import sys
10 import shutil
11 from os.path import dirname, isdir, join
12
13 import conda.config as cc
14
15 from conda_build.config import config
16
17
18 PY_TMPL = """\
19 if __name__ == '__main__':
20 import sys
21 from %s import %s
22
23 sys.exit(%s())
24 """
25
26 bin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'
27
28 entry_pat = re.compile('\s*([\w\-\.]+)\s*=\s*([\w.]+):(\w+)\s*$')
29
30
31 def iter_entry_points(items):
32 for item in items:
33 m = entry_pat.match(item)
34 if m is None:
35 sys.exit("Error cound not match entry point: %r" % item)
36 yield m.groups()
37
38
39 def create_entry_point(path, module, func):
40 pyscript = PY_TMPL % (module, func, func)
41 if sys.platform == 'win32':
42 with open(path + '-script.py', 'w') as fo:
43 fo.write(pyscript)
44 shutil.copyfile(join(dirname(__file__), 'cli-%d.exe' % cc.bits),
45 path + '.exe')
46 else:
47 with open(path, 'w') as fo:
48 fo.write('#!%s\n' % config.build_python)
49 fo.write(pyscript)
50 os.chmod(path, int('755', 8))
51
52
53 def create_entry_points(items):
54 if not items:
55 return
56 bin_dir = join(config.build_prefix, bin_dirname)
57 if not isdir(bin_dir):
58 os.mkdir(bin_dir)
59 for cmd, module, func in iter_entry_points(items):
60 create_entry_point(join(bin_dir, cmd), module, func)
61
62
63 def prepend_bin_path(env, prefix, prepend_prefix=False):
64 env['PATH'] = join(prefix, bin_dirname) + os.pathsep + env['PATH']
65 if sys.platform == "win32":
66 env['PATH'] = join(prefix, "Library", "bin") + os.pathsep + env['PATH']
67 if prepend_prefix:
68 env['PATH'] = prefix + os.pathsep + env['PATH']
69 return env
70
[end of conda_build/scripts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_build/scripts.py b/conda_build/scripts.py
--- a/conda_build/scripts.py
+++ b/conda_build/scripts.py
@@ -18,14 +18,14 @@
PY_TMPL = """\
if __name__ == '__main__':
import sys
- from %s import %s
+ import %(module)s
- sys.exit(%s())
+ sys.exit(%(module)s.%(func)s())
"""
bin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'
-entry_pat = re.compile('\s*([\w\-\.]+)\s*=\s*([\w.]+):(\w+)\s*$')
+entry_pat = re.compile('\s*([\w\-\.]+)\s*=\s*([\w.]+):([\w.]+)\s*$')
def iter_entry_points(items):
@@ -37,7 +37,7 @@
def create_entry_point(path, module, func):
- pyscript = PY_TMPL % (module, func, func)
+ pyscript = PY_TMPL % {'module': module, 'func': func}
if sys.platform == 'win32':
with open(path + '-script.py', 'w') as fo:
fo.write(pyscript)
| {"golden_diff": "diff --git a/conda_build/scripts.py b/conda_build/scripts.py\n--- a/conda_build/scripts.py\n+++ b/conda_build/scripts.py\n@@ -18,14 +18,14 @@\n PY_TMPL = \"\"\"\\\n if __name__ == '__main__':\n import sys\n- from %s import %s\n+ import %(module)s\n \n- sys.exit(%s())\n+ sys.exit(%(module)s.%(func)s())\n \"\"\"\n \n bin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'\n \n-entry_pat = re.compile('\\s*([\\w\\-\\.]+)\\s*=\\s*([\\w.]+):(\\w+)\\s*$')\n+entry_pat = re.compile('\\s*([\\w\\-\\.]+)\\s*=\\s*([\\w.]+):([\\w.]+)\\s*$')\n \n \n def iter_entry_points(items):\n@@ -37,7 +37,7 @@\n \n \n def create_entry_point(path, module, func):\n- pyscript = PY_TMPL % (module, func, func)\n+ pyscript = PY_TMPL % {'module': module, 'func': func}\n if sys.platform == 'win32':\n with open(path + '-script.py', 'w') as fo:\n fo.write(pyscript)\n", "issue": "Entry point pattern doesn't match entrypoints:with.dots\nHi,\n\nCould you please have a look to this. I define entry point as\n\n``` python\nentry_points={\n 'console_scripts': [\n 'poultry = poultry.main:dispatcher.dispatch',\n ],\n},\n```\n\nhttps://github.com/dimazest/poultry/blob/e0f39277f0a219a4d3cd461b69ce5dd4422fc9dd/setup.py#L62\n\nwhich entry point pattern at https://github.com/conda/conda-build/blob/master/conda_build/scripts.py#L27 doesn't match.\n\nThanks.\n\n", "before_files": [{"content": "'''\nModule for creating entry points and scripts for PyPI packages.\n'''\n\nfrom __future__ import absolute_import, division, print_function\n\nimport re\nimport os\nimport sys\nimport shutil\nfrom os.path import dirname, isdir, join\n\nimport conda.config as cc\n\nfrom conda_build.config import config\n\n\nPY_TMPL = \"\"\"\\\nif __name__ == '__main__':\n import sys\n from %s import %s\n\n sys.exit(%s())\n\"\"\"\n\nbin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'\n\nentry_pat = re.compile('\\s*([\\w\\-\\.]+)\\s*=\\s*([\\w.]+):(\\w+)\\s*$')\n\n\ndef iter_entry_points(items):\n for item in items:\n m = entry_pat.match(item)\n if m is None:\n sys.exit(\"Error cound not match entry point: %r\" % item)\n yield m.groups()\n\n\ndef create_entry_point(path, module, func):\n pyscript = PY_TMPL % (module, func, func)\n if sys.platform == 'win32':\n with open(path + '-script.py', 'w') as fo:\n fo.write(pyscript)\n shutil.copyfile(join(dirname(__file__), 'cli-%d.exe' % cc.bits),\n path + '.exe')\n else:\n with open(path, 'w') as fo:\n fo.write('#!%s\\n' % config.build_python)\n fo.write(pyscript)\n os.chmod(path, int('755', 8))\n\n\ndef create_entry_points(items):\n if not items:\n return\n bin_dir = join(config.build_prefix, bin_dirname)\n if not isdir(bin_dir):\n os.mkdir(bin_dir)\n for cmd, module, func in iter_entry_points(items):\n create_entry_point(join(bin_dir, cmd), module, func)\n\n\ndef prepend_bin_path(env, prefix, prepend_prefix=False):\n env['PATH'] = join(prefix, bin_dirname) + os.pathsep + env['PATH']\n if sys.platform == \"win32\":\n env['PATH'] = join(prefix, \"Library\", \"bin\") + os.pathsep + env['PATH']\n if prepend_prefix:\n env['PATH'] = prefix + os.pathsep + env['PATH']\n return env\n", "path": "conda_build/scripts.py"}]} | 1,317 | 284 |
gh_patches_debug_22050 | rasdani/github-patches | git_diff | kartoza__prj.app-301 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
In the pending approval menu, only show menu items that have pending approvals
The **Pending approval** menu should be filtered to show only items in the active project where there are pending approvals.
</issue>
<code>
[start of django_project/core/custom_middleware.py]
1 # coding=utf-8
2 # flake8: noqa
3 """
4 core.custom_middleware
5 """
6 from base.models import Project
7
8
9 class NavContextMiddleware(object):
10 """
11 Adds the required navigation variables to each response
12 """
13
14 def __init__(self):
15 pass
16
17 @staticmethod
18 def process_template_response(request, response):
19 """
20 Add 'the_project', 'the_entry', 'the_version' to context for the
21 navigation.
22
23 Justification: To make the navigation functional, we need to know
24 which Project (or Version, Committee etc) the current context
25 relates to. This is required for URLs. Rather than include lots of
26 if/else in the navigation template, it seems cleaner to add the
27 above variables to the context here.
28
29 :param request: Http Request obj
30 :param response: Http Response obj
31 :return: context :rtype: dict
32 """
33 context = response.context_data
34
35 if context.get('project', None):
36 context['the_project'] = context.get('project')
37 else:
38 if request.user.is_staff:
39 context['the_projects'] = Project.objects.all()
40 else:
41 context['the_projects'] = Project.approved_objects.filter(
42 private=False
43 )
44
45 if context.get('version', None):
46 context['the_version'] = context.get('version')
47 context['the_project'] = context.get('version').project
48
49 if context.get('committee', None):
50 context['the_committee'] = context.get('committee')
51 context['the_project'] = context.get('committee').project
52
53 if context.get('ballot', None):
54 context['the_committee'] = context.get('ballot').committee
55 context['the_project'] = context.get('ballot').committee.project
56
57 if context.get('category', None):
58 context['the_project'] = context.get('category').project
59
60 if context.get('ballots', None):
61 try:
62 context['the_project'] = \
63 context.get('ballots')[0].committee.project
64 except (KeyError, IndexError):
65 pass
66
67 if context.get('entry', None):
68 context['the_entry'] = context.get('entry')
69 context['the_version'] = context.get('entry').version
70 context['the_project'] = context.get('entry').version.project
71
72 if context.get('committees', None):
73 try:
74 context['the_project'] = context.get('committees')[0].project
75 except (KeyError, IndexError):
76 pass
77
78 if context.get('versions', None):
79 try:
80 context['the_project'] = context.get('versions')[0].project
81 except (KeyError, IndexError):
82 pass
83
84 if context.get('entries', None):
85 try:
86 context['the_version'] = context.get('entries')[0].version
87 context['the_project'] = \
88 context.get('entries')[0].version.project
89 except (KeyError, IndexError):
90 pass
91
92 if context.get('categories', None):
93 try:
94 context['the_project'] = \
95 context.get('categories')[0].project
96 except (KeyError, IndexError):
97 pass
98
99 return response
100
[end of django_project/core/custom_middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/core/custom_middleware.py b/django_project/core/custom_middleware.py
--- a/django_project/core/custom_middleware.py
+++ b/django_project/core/custom_middleware.py
@@ -3,7 +3,8 @@
"""
core.custom_middleware
"""
-from base.models import Project
+from base.models import Project, Version
+from changes.models import Category, SponsorshipLevel, SponsorshipPeriod
class NavContextMiddleware(object):
@@ -34,6 +35,15 @@
if context.get('project', None):
context['the_project'] = context.get('project')
+ context['has_pending_versions'] = Version.unapproved_objects.filter(
+ project=context.get('project')).exists()
+ context['has_pending_categories'] = Category.unapproved_objects.filter(
+ project=context.get('project')).exists()
+ context['has_pending_sponsor_lvl'] = SponsorshipLevel.unapproved_objects.filter(
+ project=context.get('project')).exists()
+ context['has_pending_sponsor_period'] = SponsorshipPeriod.unapproved_objects.filter(
+ project=context.get('project')).exists()
+
else:
if request.user.is_staff:
context['the_projects'] = Project.objects.all()
| {"golden_diff": "diff --git a/django_project/core/custom_middleware.py b/django_project/core/custom_middleware.py\n--- a/django_project/core/custom_middleware.py\n+++ b/django_project/core/custom_middleware.py\n@@ -3,7 +3,8 @@\n \"\"\"\n core.custom_middleware\n \"\"\"\n-from base.models import Project\n+from base.models import Project, Version\n+from changes.models import Category, SponsorshipLevel, SponsorshipPeriod\n \n \n class NavContextMiddleware(object):\n@@ -34,6 +35,15 @@\n \n if context.get('project', None):\n context['the_project'] = context.get('project')\n+ context['has_pending_versions'] = Version.unapproved_objects.filter(\n+ project=context.get('project')).exists()\n+ context['has_pending_categories'] = Category.unapproved_objects.filter(\n+ project=context.get('project')).exists()\n+ context['has_pending_sponsor_lvl'] = SponsorshipLevel.unapproved_objects.filter(\n+ project=context.get('project')).exists()\n+ context['has_pending_sponsor_period'] = SponsorshipPeriod.unapproved_objects.filter(\n+ project=context.get('project')).exists()\n+\n else:\n if request.user.is_staff:\n context['the_projects'] = Project.objects.all()\n", "issue": "In the pending approval menu, only show menu items that have pending approvals\nThe **Pending approval** menu should be filtered to show only items in the active project where there are pending approvals.\n\n", "before_files": [{"content": "# coding=utf-8\n# flake8: noqa\n\"\"\"\ncore.custom_middleware\n\"\"\"\nfrom base.models import Project\n\n\nclass NavContextMiddleware(object):\n \"\"\"\n Adds the required navigation variables to each response\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def process_template_response(request, response):\n \"\"\"\n Add 'the_project', 'the_entry', 'the_version' to context for the\n navigation.\n\n Justification: To make the navigation functional, we need to know\n which Project (or Version, Committee etc) the current context\n relates to. This is required for URLs. 
Rather than include lots of\n if/else in the navigation template, it seems cleaner to add the\n above variables to the context here.\n\n :param request: Http Request obj\n :param response: Http Response obj\n :return: context :rtype: dict\n \"\"\"\n context = response.context_data\n\n if context.get('project', None):\n context['the_project'] = context.get('project')\n else:\n if request.user.is_staff:\n context['the_projects'] = Project.objects.all()\n else:\n context['the_projects'] = Project.approved_objects.filter(\n private=False\n )\n\n if context.get('version', None):\n context['the_version'] = context.get('version')\n context['the_project'] = context.get('version').project\n\n if context.get('committee', None):\n context['the_committee'] = context.get('committee')\n context['the_project'] = context.get('committee').project\n\n if context.get('ballot', None):\n context['the_committee'] = context.get('ballot').committee\n context['the_project'] = context.get('ballot').committee.project\n\n if context.get('category', None):\n context['the_project'] = context.get('category').project\n\n if context.get('ballots', None):\n try:\n context['the_project'] = \\\n context.get('ballots')[0].committee.project\n except (KeyError, IndexError):\n pass\n\n if context.get('entry', None):\n context['the_entry'] = context.get('entry')\n context['the_version'] = context.get('entry').version\n context['the_project'] = context.get('entry').version.project\n\n if context.get('committees', None):\n try:\n context['the_project'] = context.get('committees')[0].project\n except (KeyError, IndexError):\n pass\n\n if context.get('versions', None):\n try:\n context['the_project'] = context.get('versions')[0].project\n except (KeyError, IndexError):\n pass\n\n if context.get('entries', None):\n try:\n context['the_version'] = context.get('entries')[0].version\n context['the_project'] = \\\n context.get('entries')[0].version.project\n except (KeyError, IndexError):\n pass\n\n if context.get('categories', None):\n try:\n context['the_project'] = \\\n context.get('categories')[0].project\n except (KeyError, IndexError):\n pass\n\n return response\n", "path": "django_project/core/custom_middleware.py"}]} | 1,465 | 264 |
gh_patches_debug_18 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-2012 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Title of the Colombia page should be "Colombia Country Page" or alike
Right now the title of the [Colombia country page](https://data.hdx.rwlabs.org/group/col) is "Colombia crisis page". I think it should read "Colombia Country Page" or similar. Any ideas?

</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
1 hdx_version = 'v0.5.10'
2
[end of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.5.10'
+hdx_version = 'v0.5.11'
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.5.10'\n+hdx_version = 'v0.5.11'\n", "issue": "Title of the Colombia page should be \"Colombia Country Page\" or alike\nRight now the title of the [Colombia country page](https://data.hdx.rwlabs.org/group/col) is \"Colombia crisis page\". I think it should read \"Colombia Country Page\" or similar. Any ideas? \n\n\n\n", "before_files": [{"content": "hdx_version = 'v0.5.10'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 724 | 109 |
gh_patches_debug_43071 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1917 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: rctcbc_gov_uk returning extra incorrect dates
### I Have A Problem With:
A specific source
### What's Your Problem
I've been using this integration for a while without issue.
I've been away for a few weeks, and come home to find 3 entries for each entry for each week, when there should only be one.
See attached screenshot.
The Wednesday entries are the only valid ones. Not sure why they're being duplicated though. :-/
<img width="1184" alt="Screenshot 2024-03-28 at 03 44 55" src="https://github.com/mampfes/hacs_waste_collection_schedule/assets/63560223/7cecf6a6-9ee5-42cb-875f-50d91877ceeb">
### Source (if relevant)
rctcbc_gov_uk
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: rctcbc_gov_uk
args:
uprn: "200003766278"
calendar_title: "Bin Collection"
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py]
1 from datetime import datetime
2
3 import requests
4 from bs4 import BeautifulSoup
5 from waste_collection_schedule import Collection
6
7 TITLE = "Rhondda Cynon Taf County Borough Council"
8 DESCRIPTION = "Source for rctcbc.gov.uk services for Rhondda Cynon Taf County Borough Council, Wales, UK"
9 URL = "rctcbc.gov.uk"
10 TEST_CASES = {
11 "Test_001": {"uprn": "10024274791"},
12 "Test_002": {"uprn": "100100718352"},
13 "Test_003": {"uprn": 100100733093},
14 }
15 ICON_MAP = {
16 "BLACK BAGS": "mdi:trash-can",
17 "RECYCLING": "mdi:recycle",
18 "FOOD WASTE": "mdi:food",
19 "GARDEN WASTE": "mdi:leaf",
20 }
21
22
23 class Source:
24 def __init__(self, uprn):
25 self._uprn = str(uprn)
26
27 def fetch(self):
28 s = requests.Session()
29 # website appears to display ~4 months worth of collections, so iterate through those pages
30 entries = []
31 for month in range(0, 4):
32 r = s.get(
33 f"https://www.rctcbc.gov.uk/EN/Resident/RecyclingandWaste/RecyclingandWasteCollectionDays.aspx?uprn={self._uprn}&month={month}"
34 )
35 soup = BeautifulSoup(r.text, "html.parser")
36 calendar_month = soup.find("div", {"class": "calendar-month"})
37 calendar_day = soup.find_all(
38 "div", {"class": "card-body card-body-padding"}
39 )
40 for day in calendar_day:
41 pickups = day.find_all("a")
42 if len(pickups) != 0:
43 d = day.find("div", {"class": "card-title"})
44 dt = d.text.strip() + " " + calendar_month.text.strip()
45 for pickup in pickups:
46 entries.append(
47 Collection(
48 date=datetime.strptime(
49 dt,
50 "%d %B %Y",
51 ).date(),
52 t=pickup.text,
53 icon=ICON_MAP.get(pickup.text.upper()),
54 )
55 )
56
57 return entries
58
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py
@@ -1,8 +1,8 @@
from datetime import datetime
import requests
-from bs4 import BeautifulSoup
-from waste_collection_schedule import Collection
+from bs4 import BeautifulSoup, Tag
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "Rhondda Cynon Taf County Borough Council"
DESCRIPTION = "Source for rctcbc.gov.uk services for Rhondda Cynon Taf County Borough Council, Wales, UK"
@@ -24,34 +24,73 @@
def __init__(self, uprn):
self._uprn = str(uprn)
- def fetch(self):
+ def extract_collections(self, calendar: Tag | BeautifulSoup) -> list[Collection]:
+ calendar_month = calendar.find("div", {"class": "calendar-month"})
+ if not calendar_month or not isinstance(calendar_month, Tag):
+ return []
+ month = calendar_month.text.strip()
+ calendar_days = calendar.find_all(
+ "div", {"class": "card-body card-body-padding"}
+ )
+
+ entries = []
+ for day in calendar_days:
+ pickups = day.find_all("a")
+ if len(pickups) != 0:
+ d = day.find("div", {"class": "card-title"})
+ if not d or not isinstance(d, Tag):
+ continue
+ dt = d.text.strip() + " " + month
+ for pickup in pickups:
+ entries.append(
+ Collection(
+ date=datetime.strptime(
+ dt,
+ "%d %B %Y",
+ ).date(),
+ t=pickup.text,
+ icon=ICON_MAP.get(pickup.text.upper()),
+ )
+ )
+ return entries
+
+ def extract_from_printable_calendar(
+ self, soup: BeautifulSoup
+ ) -> list[Collection] | None:
+ entries = []
+ printable_calendar = soup.find("div", {"class": "printableCalendar"})
+ if not printable_calendar or not isinstance(printable_calendar, Tag):
+ return None
+
+ calendars = printable_calendar.find_all(
+ "div", {"class": "calendar-wrap onlyPrint"}
+ )
+ if not calendars:
+ return None
+
+ for calendar in calendars:
+ if not calendar or not isinstance(calendar, Tag):
+ continue
+ entries += self.extract_collections(calendar)
+ return entries or None
+
+ def fetch(self) -> list[Collection]:
s = requests.Session()
# website appears to display ~4 months worth of collections, so iterate through those pages
- entries = []
+ entries: list[Collection] = []
for month in range(0, 4):
r = s.get(
f"https://www.rctcbc.gov.uk/EN/Resident/RecyclingandWaste/RecyclingandWasteCollectionDays.aspx?uprn={self._uprn}&month={month}"
)
soup = BeautifulSoup(r.text, "html.parser")
- calendar_month = soup.find("div", {"class": "calendar-month"})
- calendar_day = soup.find_all(
- "div", {"class": "card-body card-body-padding"}
- )
- for day in calendar_day:
- pickups = day.find_all("a")
- if len(pickups) != 0:
- d = day.find("div", {"class": "card-title"})
- dt = d.text.strip() + " " + calendar_month.text.strip()
- for pickup in pickups:
- entries.append(
- Collection(
- date=datetime.strptime(
- dt,
- "%d %B %Y",
- ).date(),
- t=pickup.text,
- icon=ICON_MAP.get(pickup.text.upper()),
- )
- )
+ printable_calendar_entries = self.extract_from_printable_calendar(soup)
+ if printable_calendar_entries:
+ return printable_calendar_entries
+
+ # OLD METHOD IF THEY EVER REMOVE THE PRINTABLE CALENDAR AGAIN:
+ calendar = soup.find("div", {"class": "monthlyCalendar"}) or soup
+ if not isinstance(calendar, Tag):
+ continue
+ entries += self.extract_collections(calendar)
return entries
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py\n@@ -1,8 +1,8 @@\n from datetime import datetime\n \n import requests\n-from bs4 import BeautifulSoup\n-from waste_collection_schedule import Collection\n+from bs4 import BeautifulSoup, Tag\n+from waste_collection_schedule import Collection # type: ignore[attr-defined]\n \n TITLE = \"Rhondda Cynon Taf County Borough Council\"\n DESCRIPTION = \"Source for rctcbc.gov.uk services for Rhondda Cynon Taf County Borough Council, Wales, UK\"\n@@ -24,34 +24,73 @@\n def __init__(self, uprn):\n self._uprn = str(uprn)\n \n- def fetch(self):\n+ def extract_collections(self, calendar: Tag | BeautifulSoup) -> list[Collection]:\n+ calendar_month = calendar.find(\"div\", {\"class\": \"calendar-month\"})\n+ if not calendar_month or not isinstance(calendar_month, Tag):\n+ return []\n+ month = calendar_month.text.strip()\n+ calendar_days = calendar.find_all(\n+ \"div\", {\"class\": \"card-body card-body-padding\"}\n+ )\n+\n+ entries = []\n+ for day in calendar_days:\n+ pickups = day.find_all(\"a\")\n+ if len(pickups) != 0:\n+ d = day.find(\"div\", {\"class\": \"card-title\"})\n+ if not d or not isinstance(d, Tag):\n+ continue\n+ dt = d.text.strip() + \" \" + month\n+ for pickup in pickups:\n+ entries.append(\n+ Collection(\n+ date=datetime.strptime(\n+ dt,\n+ \"%d %B %Y\",\n+ ).date(),\n+ t=pickup.text,\n+ icon=ICON_MAP.get(pickup.text.upper()),\n+ )\n+ )\n+ return entries\n+\n+ def extract_from_printable_calendar(\n+ self, soup: BeautifulSoup\n+ ) -> list[Collection] | None:\n+ entries = []\n+ printable_calendar = soup.find(\"div\", {\"class\": \"printableCalendar\"})\n+ if not printable_calendar or not isinstance(printable_calendar, Tag):\n+ return None\n+\n+ calendars = printable_calendar.find_all(\n+ \"div\", {\"class\": \"calendar-wrap onlyPrint\"}\n+ )\n+ if not calendars:\n+ return None\n+\n+ for calendar in calendars:\n+ if not calendar or not isinstance(calendar, Tag):\n+ continue\n+ entries += self.extract_collections(calendar)\n+ return entries or None\n+\n+ def fetch(self) -> list[Collection]:\n s = requests.Session()\n # website appears to display ~4 months worth of collections, so iterate through those pages\n- entries = []\n+ entries: list[Collection] = []\n for month in range(0, 4):\n r = s.get(\n f\"https://www.rctcbc.gov.uk/EN/Resident/RecyclingandWaste/RecyclingandWasteCollectionDays.aspx?uprn={self._uprn}&month={month}\"\n )\n soup = BeautifulSoup(r.text, \"html.parser\")\n- calendar_month = soup.find(\"div\", {\"class\": \"calendar-month\"})\n- calendar_day = soup.find_all(\n- \"div\", {\"class\": \"card-body card-body-padding\"}\n- )\n- for day in calendar_day:\n- pickups = day.find_all(\"a\")\n- if len(pickups) != 0:\n- d = day.find(\"div\", {\"class\": \"card-title\"})\n- dt = d.text.strip() + \" \" + calendar_month.text.strip()\n- for pickup in pickups:\n- entries.append(\n- Collection(\n- date=datetime.strptime(\n- dt,\n- \"%d %B %Y\",\n- ).date(),\n- t=pickup.text,\n- icon=ICON_MAP.get(pickup.text.upper()),\n- )\n- )\n+ printable_calendar_entries = self.extract_from_printable_calendar(soup)\n+ if printable_calendar_entries:\n+ return printable_calendar_entries\n+\n+ 
# OLD METHOD IF THEY EVER REMOVE THE PRINTABLE CALENDAR AGAIN:\n+ calendar = soup.find(\"div\", {\"class\": \"monthlyCalendar\"}) or soup\n+ if not isinstance(calendar, Tag):\n+ continue\n+ entries += self.extract_collections(calendar)\n \n return entries\n", "issue": "[Bug]: rctcbc_gov_uk returning extra incorrect dates \n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nI've been using this integration for a while without issue.\r\nI've been away for a few weeks, and come home to find 3 entries for each entry for each week, when there should only be one.\r\n\r\nSee attached screenshot.\r\nThe Wednesday entries are the only valid ones. Not sure why they're being duplicated though. :-/\r\n\r\n<img width=\"1184\" alt=\"Screenshot 2024-03-28 at 03 44 55\" src=\"https://github.com/mampfes/hacs_waste_collection_schedule/assets/63560223/7cecf6a6-9ee5-42cb-875f-50d91877ceeb\">\r\n\n\n### Source (if relevant)\n\nrctcbc_gov_uk\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: rctcbc_gov_uk\r\n args:\r\n uprn: \"200003766278\"\r\n calendar_title: \"Bin Collection\"\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Rhondda Cynon Taf County Borough Council\"\nDESCRIPTION = \"Source for rctcbc.gov.uk services for Rhondda Cynon Taf County Borough Council, Wales, UK\"\nURL = \"rctcbc.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"10024274791\"},\n \"Test_002\": {\"uprn\": \"100100718352\"},\n \"Test_003\": {\"uprn\": 100100733093},\n}\nICON_MAP = {\n \"BLACK BAGS\": \"mdi:trash-can\",\n \"RECYCLING\": \"mdi:recycle\",\n \"FOOD WASTE\": \"mdi:food\",\n \"GARDEN WASTE\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = str(uprn)\n\n def fetch(self):\n s = requests.Session()\n # website appears to display ~4 months worth of collections, so iterate through those pages\n entries = []\n for month in range(0, 4):\n r = s.get(\n f\"https://www.rctcbc.gov.uk/EN/Resident/RecyclingandWaste/RecyclingandWasteCollectionDays.aspx?uprn={self._uprn}&month={month}\"\n )\n soup = BeautifulSoup(r.text, \"html.parser\")\n calendar_month = soup.find(\"div\", {\"class\": \"calendar-month\"})\n calendar_day = soup.find_all(\n \"div\", {\"class\": \"card-body card-body-padding\"}\n )\n for day in calendar_day:\n pickups = day.find_all(\"a\")\n if len(pickups) != 0:\n d = day.find(\"div\", 
{\"class\": \"card-title\"})\n dt = d.text.strip() + \" \" + calendar_month.text.strip()\n for pickup in pickups:\n entries.append(\n Collection(\n date=datetime.strptime(\n dt,\n \"%d %B %Y\",\n ).date(),\n t=pickup.text,\n icon=ICON_MAP.get(pickup.text.upper()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/rctcbc_gov_uk.py"}]} | 1,662 | 1,014 |
gh_patches_debug_9314 | rasdani/github-patches | git_diff | ansible__ansible-lint-110 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False positive detecting OctalPermissionsRule
When trying to set setgid permissions
```
[ANSIBLE0008] Octal file permissions must contain leading zero
mode: 02775
```
Looks like the regex requires exactly 3 digits, which is not always correct.
```
# At least an indent, "mode:", optional whitespace, any digits, EOL
mode_regex = re.compile(r'^\s+mode:\s*[0-9]+\s*$')
# Same as above, but with a leading zero before three digits
valid_mode_regex = re.compile(r'^\s+mode:\s*0[0-7]{3}\s*$')
```
</issue>
<code>
[start of lib/ansiblelint/rules/OctalPermissionsRule.py]
1 # Copyright (c) 2013-2014 Will Thames <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 # THE SOFTWARE.
20
21 from ansiblelint import AnsibleLintRule
22 import re
23
24
25 class OctalPermissionsRule(AnsibleLintRule):
26 id = 'ANSIBLE0008'
27 shortdesc = 'Octal file permissions must contain leading zero'
28 description = 'Numeric file permissions without leading zero can behave' + \
29 'in unexpected ways. See ' + \
30 'http://docs.ansible.com/ansible/file_module.html'
31 tags = ['formatting']
32
33 # At least an indent, "mode:", optional whitespace, any digits, EOL
34 mode_regex = re.compile(r'^\s+mode:\s*[0-9]+\s*$')
35 # Same as above, but with a leading zero before three digits
36 valid_mode_regex = re.compile(r'^\s+mode:\s*0[0-7]{3}\s*$')
37
38 def match(self, file, line):
39 if re.match(self.mode_regex, line):
40 return not re.match(self.valid_mode_regex, line)
41
[end of lib/ansiblelint/rules/OctalPermissionsRule.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansiblelint/rules/OctalPermissionsRule.py b/lib/ansiblelint/rules/OctalPermissionsRule.py
--- a/lib/ansiblelint/rules/OctalPermissionsRule.py
+++ b/lib/ansiblelint/rules/OctalPermissionsRule.py
@@ -33,7 +33,7 @@
# At least an indent, "mode:", optional whitespace, any digits, EOL
mode_regex = re.compile(r'^\s+mode:\s*[0-9]+\s*$')
# Same as above, but with a leading zero before three digits
- valid_mode_regex = re.compile(r'^\s+mode:\s*0[0-7]{3}\s*$')
+ valid_mode_regex = re.compile(r'^\s+mode:\s*0[0-7]{3,4}\s*$')
def match(self, file, line):
if re.match(self.mode_regex, line):
| {"golden_diff": "diff --git a/lib/ansiblelint/rules/OctalPermissionsRule.py b/lib/ansiblelint/rules/OctalPermissionsRule.py\n--- a/lib/ansiblelint/rules/OctalPermissionsRule.py\n+++ b/lib/ansiblelint/rules/OctalPermissionsRule.py\n@@ -33,7 +33,7 @@\n # At least an indent, \"mode:\", optional whitespace, any digits, EOL\n mode_regex = re.compile(r'^\\s+mode:\\s*[0-9]+\\s*$')\n # Same as above, but with a leading zero before three digits\n- valid_mode_regex = re.compile(r'^\\s+mode:\\s*0[0-7]{3}\\s*$')\n+ valid_mode_regex = re.compile(r'^\\s+mode:\\s*0[0-7]{3,4}\\s*$')\n \n def match(self, file, line):\n if re.match(self.mode_regex, line):\n", "issue": "False positive detecting OctalPermissionsRule\nWhen trying to setgid permissions\n\n```\n[ANSIBLE0008] Octal file permissions must contain leading zero\nmode: 02775\n```\n\nLooks like the regex requires exactly 3 digits, which is not always correct.\n\n```\n# At least an indent, \"mode:\", optional whitespace, any digits, EOL\nmode_regex = re.compile(r'^\\s+mode:\\s*[0-9]+\\s*$')\n# Same as above, but with a leading zero before three digits\nvalid_mode_regex = re.compile(r'^\\s+mode:\\s*0[0-7]{3}\\s*$')\n```\n\n", "before_files": [{"content": "# Copyright (c) 2013-2014 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom ansiblelint import AnsibleLintRule\nimport re\n\n\nclass OctalPermissionsRule(AnsibleLintRule):\n id = 'ANSIBLE0008'\n shortdesc = 'Octal file permissions must contain leading zero'\n description = 'Numeric file permissions without leading zero can behave' + \\\n 'in unexpected ways. See ' + \\\n 'http://docs.ansible.com/ansible/file_module.html'\n tags = ['formatting']\n\n # At least an indent, \"mode:\", optional whitespace, any digits, EOL\n mode_regex = re.compile(r'^\\s+mode:\\s*[0-9]+\\s*$')\n # Same as above, but with a leading zero before three digits\n valid_mode_regex = re.compile(r'^\\s+mode:\\s*0[0-7]{3}\\s*$')\n\n def match(self, file, line):\n if re.match(self.mode_regex, line):\n return not re.match(self.valid_mode_regex, line)\n", "path": "lib/ansiblelint/rules/OctalPermissionsRule.py"}]} | 1,221 | 199 |
gh_patches_debug_10929 | rasdani/github-patches | git_diff | crytic__slither-403 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix embark and etherscan GH actions
- Embark has been failing on all our github actions (including crytic-compile) for ~1 month. The tests work locally, but it seems that some recent changes in the latest embark make it fail on CI
- Etherscan now has a constraint on the number of requests per IP, which makes the CI fail from time to time. We need to add an API key to prevent it
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3 setup(
4 name='slither-analyzer',
5 description='Slither is a Solidity static analysis framework written in Python 3.',
6 url='https://github.com/crytic/slither',
7 author='Trail of Bits',
8 version='0.6.9',
9 packages=find_packages(),
10 python_requires='>=3.6',
11 install_requires=['prettytable>=0.7.2',
12 'pysha3>=1.0.2',
13 'crytic-compile>=0.1.6'],
14 # 'crytic-compile'],
15 # dependency_links=['git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile'],
16 license='AGPL-3.0',
17 long_description=open('README.md').read(),
18 entry_points={
19 'console_scripts': [
20 'slither = slither.__main__:main',
21 'slither-check-upgradeability = slither.tools.upgradeability.__main__:main',
22 'slither-find-paths = slither.tools.possible_paths.__main__:main',
23 'slither-simil = slither.tools.similarity.__main__:main',
24 'slither-flat = slither.tools.flattening.__main__:main',
25 'slither-format = slither.tools.slither_format.__main__:main',
26 'slither-check-erc = slither.tools.erc_conformance.__main__:main',
27 'slither-check-kspec = slither.tools.kspec_coverage.__main__:main'
28 ]
29 }
30 )
31
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,9 +10,9 @@
python_requires='>=3.6',
install_requires=['prettytable>=0.7.2',
'pysha3>=1.0.2',
- 'crytic-compile>=0.1.6'],
-# 'crytic-compile'],
-# dependency_links=['git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile'],
+# 'crytic-compile>=0.1.6'],
+ 'crytic-compile'],
+ dependency_links=['git+https://github.com/crytic/crytic-compile.git@dev#egg=crytic-compile'],
license='AGPL-3.0',
long_description=open('README.md').read(),
entry_points={
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,9 +10,9 @@\n python_requires='>=3.6',\n install_requires=['prettytable>=0.7.2',\n 'pysha3>=1.0.2',\n- 'crytic-compile>=0.1.6'],\n-# 'crytic-compile'],\n-# dependency_links=['git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile'],\n+# 'crytic-compile>=0.1.6'],\n+ 'crytic-compile'],\n+ dependency_links=['git+https://github.com/crytic/crytic-compile.git@dev#egg=crytic-compile'],\n license='AGPL-3.0',\n long_description=open('README.md').read(),\n entry_points={\n", "issue": "Fix embark and etherscan GH actions\n- Embark fails on all our github actions (including crytic-compile) since ~1 month. The tests work locally, but it seems that some recent changes in the latest embark makes it fails on a CI\r\n- Etherscan has now a constraint on the number of request per IP, which makes the CI fails from time to time. We need to add an API key to prevent it\nFix embark and etherscan GH actions\n- Embark fails on all our github actions (including crytic-compile) since ~1 month. The tests work locally, but it seems that some recent changes in the latest embark makes it fails on a CI\r\n- Etherscan has now a constraint on the number of request per IP, which makes the CI fails from time to time. We need to add an API key to prevent it\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='slither-analyzer',\n description='Slither is a Solidity static analysis framework written in Python 3.',\n url='https://github.com/crytic/slither',\n author='Trail of Bits',\n version='0.6.9',\n packages=find_packages(),\n python_requires='>=3.6',\n install_requires=['prettytable>=0.7.2',\n 'pysha3>=1.0.2',\n 'crytic-compile>=0.1.6'],\n# 'crytic-compile'],\n# dependency_links=['git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile'],\n license='AGPL-3.0',\n long_description=open('README.md').read(),\n entry_points={\n 'console_scripts': [\n 'slither = slither.__main__:main',\n 'slither-check-upgradeability = slither.tools.upgradeability.__main__:main',\n 'slither-find-paths = slither.tools.possible_paths.__main__:main',\n 'slither-simil = slither.tools.similarity.__main__:main',\n 'slither-flat = slither.tools.flattening.__main__:main',\n 'slither-format = slither.tools.slither_format.__main__:main',\n 'slither-check-erc = slither.tools.erc_conformance.__main__:main',\n 'slither-check-kspec = slither.tools.kspec_coverage.__main__:main'\n ]\n }\n)\n", "path": "setup.py"}]} | 1,096 | 197 |
gh_patches_debug_8216 | rasdani/github-patches | git_diff | huggingface__accelerate-50 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mismatch between `accelerate config` cli and `default_config.yaml`
The generated `default_config.yaml` does not match the answers given to `accelerate config`.
Here are my cli outputs and `default_config.yaml`
cli outputs
```
In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 0
Which type of machine are you using? ([0] No distributed training, [1] multi-GPU, [2] TPU): 1
How many different machines will you use (use more than 1 for multi-node training)? [1]: 2
What is the rank of this machine (from 0 to the number of machines - 1 )? [0]: 1
What is the IP address of the machine that will host the main process? 10.29.150.50
What is the port you will use to communicate with the main process? 2333
How many processes in total will you use? [1]: 6
Do you wish to use FP16 (mixed precision)? [yes/NO]: yes
```
`default_config.yaml`
```
compute_environment: LOCAL_MACHINE
distributed_type: MULTI_GPU
fp16: true
machine_rank: 1
main_process_ip: 2333
main_process_port: null
main_training_function: main
num_machines: 2
num_processes: 6
```
</issue>
<code>
[start of src/accelerate/commands/config/cluster.py]
1 #!/usr/bin/env python
2
3 # Copyright 2021 The HuggingFace Team. All rights reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from accelerate.state import ComputeEnvironment, DistributedType
18
19 from .config_args import ClusterConfig
20 from .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool
21
22
23 def get_cluster_input():
24 distributed_type = _ask_field(
25 "Which type of machine are you using? ([0] No distributed training, [1] multi-GPU, [2] TPU): ",
26 _convert_distributed_mode,
27 error_message="Please enter 0, 1 or 2.",
28 )
29
30 machine_rank = 0
31 num_machines = 1
32 main_process_ip = None
33 main_process_port = None
34 if distributed_type == DistributedType.MULTI_GPU:
35 num_machines = _ask_field(
36 "How many different machines will you use (use more than 1 for multi-node training)? [1]: ",
37 lambda x: int(x),
38 default=1,
39 )
40 if num_machines > 1:
41 machine_rank = _ask_field(
42 "What is the rank of this machine (from 0 to the number of machines - 1 )? [0]: ",
43 lambda x: int(x),
44 default=0,
45 )
46 main_process_ip = _ask_field(
47 "What is the IP address of the machine that will host the main process? ",
48 )
49 main_process_ip = _ask_field(
50 "What is the port you will use to communicate with the main process? ",
51 lambda x: int(x),
52 )
53 if distributed_type == DistributedType.TPU:
54 main_training_function = _ask_field(
55 "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ",
56 default="main",
57 )
58 else:
59 main_training_function = "main"
60
61 num_processes = _ask_field(
62 "How many processes in total will you use? [1]: ",
63 lambda x: int(x),
64 default=1,
65 error_message="Please enter an integer.",
66 )
67
68 if distributed_type != DistributedType.TPU:
69 fp16 = _ask_field(
70 "Do you wish to use FP16 (mixed precision)? [yes/NO]: ",
71 _convert_yes_no_to_bool,
72 default=False,
73 error_message="Please enter yes or no.",
74 )
75 else:
76 fp16 = False
77
78 return ClusterConfig(
79 compute_environment=ComputeEnvironment.LOCAL_MACHINE,
80 distributed_type=distributed_type,
81 num_processes=num_processes,
82 fp16=fp16,
83 machine_rank=machine_rank,
84 num_machines=num_machines,
85 main_process_ip=main_process_ip,
86 main_process_port=main_process_port,
87 main_training_function=main_training_function,
88 )
89
[end of src/accelerate/commands/config/cluster.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py
--- a/src/accelerate/commands/config/cluster.py
+++ b/src/accelerate/commands/config/cluster.py
@@ -46,7 +46,7 @@
main_process_ip = _ask_field(
"What is the IP address of the machine that will host the main process? ",
)
- main_process_ip = _ask_field(
+ main_process_port = _ask_field(
"What is the port you will use to communicate with the main process? ",
lambda x: int(x),
)
| {"golden_diff": "diff --git a/src/accelerate/commands/config/cluster.py b/src/accelerate/commands/config/cluster.py\n--- a/src/accelerate/commands/config/cluster.py\n+++ b/src/accelerate/commands/config/cluster.py\n@@ -46,7 +46,7 @@\n main_process_ip = _ask_field(\n \"What is the IP address of the machine that will host the main process? \",\n )\n- main_process_ip = _ask_field(\n+ main_process_port = _ask_field(\n \"What is the port you will use to communicate with the main process? \",\n lambda x: int(x),\n )\n", "issue": "Mismatch between `accelerate config` cli and `default_config.yaml`\nThe generated `default_config.yaml` is mismatch with `accelerate config`.\r\n\r\nHere are my cli outputs and `default_config.yaml`\r\n\r\ncli outputs\r\n\r\n```\r\nIn which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 0\r\nWhich type of machine are you using? ([0] No distributed training, [1] multi-GPU, [2] TPU): 1\r\nHow many different machines will you use (use more than 1 for multi-node training)? [1]: 2\r\nWhat is the rank of this machine (from 0 to the number of machines - 1 )? [0]: 1\r\nWhat is the IP address of the machine that will host the main process? 10.29.150.50\r\nWhat is the port you will use to communicate with the main process? 2333\r\nHow many processes in total will you use? [1]: 6\r\nDo you wish to use FP16 (mixed precision)? [yes/NO]: yes\r\n\r\n```\r\n\r\n`default_config.yaml`\r\n```\r\ncompute_environment: LOCAL_MACHINE\r\ndistributed_type: MULTI_GPU\r\nfp16: true\r\nmachine_rank: 1\r\nmain_process_ip: 2333\r\nmain_process_port: null\r\nmain_training_function: main\r\nnum_machines: 2\r\nnum_processes: 6\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom accelerate.state import ComputeEnvironment, DistributedType\n\nfrom .config_args import ClusterConfig\nfrom .config_utils import _ask_field, _convert_distributed_mode, _convert_yes_no_to_bool\n\n\ndef get_cluster_input():\n distributed_type = _ask_field(\n \"Which type of machine are you using? ([0] No distributed training, [1] multi-GPU, [2] TPU): \",\n _convert_distributed_mode,\n error_message=\"Please enter 0, 1 or 2.\",\n )\n\n machine_rank = 0\n num_machines = 1\n main_process_ip = None\n main_process_port = None\n if distributed_type == DistributedType.MULTI_GPU:\n num_machines = _ask_field(\n \"How many different machines will you use (use more than 1 for multi-node training)? [1]: \",\n lambda x: int(x),\n default=1,\n )\n if num_machines > 1:\n machine_rank = _ask_field(\n \"What is the rank of this machine (from 0 to the number of machines - 1 )? [0]: \",\n lambda x: int(x),\n default=0,\n )\n main_process_ip = _ask_field(\n \"What is the IP address of the machine that will host the main process? \",\n )\n main_process_ip = _ask_field(\n \"What is the port you will use to communicate with the main process? 
\",\n lambda x: int(x),\n )\n if distributed_type == DistributedType.TPU:\n main_training_function = _ask_field(\n \"What is the name of the function in your script that should be launched in all parallel scripts? [main]: \",\n default=\"main\",\n )\n else:\n main_training_function = \"main\"\n\n num_processes = _ask_field(\n \"How many processes in total will you use? [1]: \",\n lambda x: int(x),\n default=1,\n error_message=\"Please enter an integer.\",\n )\n\n if distributed_type != DistributedType.TPU:\n fp16 = _ask_field(\n \"Do you wish to use FP16 (mixed precision)? [yes/NO]: \",\n _convert_yes_no_to_bool,\n default=False,\n error_message=\"Please enter yes or no.\",\n )\n else:\n fp16 = False\n\n return ClusterConfig(\n compute_environment=ComputeEnvironment.LOCAL_MACHINE,\n distributed_type=distributed_type,\n num_processes=num_processes,\n fp16=fp16,\n machine_rank=machine_rank,\n num_machines=num_machines,\n main_process_ip=main_process_ip,\n main_process_port=main_process_port,\n main_training_function=main_training_function,\n )\n", "path": "src/accelerate/commands/config/cluster.py"}]} | 1,757 | 140 |
gh_patches_debug_11831 | rasdani/github-patches | git_diff | beeware__toga-1485 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RuntimeError in toga-demo
**Describe the bug**
When you run the toga-demo app and click on "Action 2" and then either Yes or No at the dialog, you get ```RuntimeError: Can't check dialog result directly; use await or an on_result handler``` printed on the console and no follow-up dialog.
**To Reproduce**
Steps to reproduce the behavior:
1. Install toga-core, toga-gtk, toga and demo using `pip install -e`
2. Run toga-demo
3. Click on Action 2
4. Click on either Yes or No in popup dialog
5. Note it logs a RuntimeError in the terminal you ran it from.
**Expected behavior**
Should not throw an error and instead it should pop up an enthusiastic response dialog.
**Screenshots**
```
Traceback (most recent call last):
File "/home/nick/Work/beeware/toga/src/core/toga/handlers.py", line 66, in _handler
result = handler(interface, *args, **kwargs)
File "/home/nick/Work/beeware/toga/src/core/toga/handlers.py", line 66, in _handler
result = handler(interface, *args, **kwargs)
File "/home/nick/Work/beeware/toga/demo/toga_demo/app.py", line 91, in action2
if self.main_window.question_dialog('Toga', 'Is this cool or what?'):
File "/home/nick/Work/beeware/toga/src/gtk/toga_gtk/dialogs.py", line 16, in __bool__
raise RuntimeError("Can't check dialog result directly; use await or an on_result handler")
RuntimeError: Can't check dialog result directly; use await or an on_result handler
```
If applicable, add screenshots to help explain your problem.
**Environment:**
- Operating System: Ubuntu 20.04.4
- Python version: 3.8.10
- Software versions:
- Toga: 0.3.0.dev34 400b6935c4689bedb134324b38eb1286af5b5ec6
**Suggested Fix**
It works if you make the `action2` function async, and make it await the question_dialog.
I'll submit an extremely trivial PR for this soon :-)
</issue>
<code>
[start of demo/toga_demo/app.py]
1 import toga
2 from toga.constants import COLUMN
3 from toga.style import Pack
4
5
6 class TogaDemo(toga.App):
7
8 def startup(self):
9 # Create the main window
10 self.main_window = toga.MainWindow(self.name)
11
12 left_container = toga.OptionContainer()
13
14 left_table = toga.Table(
15 headings=['Hello', 'World'],
16 data=[
17 ('root1', 'value1'),
18 ('root2', 'value2'),
19 ('root3', 'value3'),
20 ('root4', 'value4'),
21 ]
22 )
23
24 left_tree = toga.Tree(
25 headings=['Navigate'],
26 data={
27 ('root1',): {
28 },
29 ('root2',): {
30 ('root2.1',): None,
31 ('root2.2',): [
32 ('root2.2.1',),
33 ('root2.2.2',),
34 ('root2.2.3',),
35 ]
36 }
37 }
38 )
39
40 left_container.add('Table', left_table)
41 left_container.add('Tree', left_tree)
42
43 right_content = toga.Box(style=Pack(direction=COLUMN))
44 for b in range(0, 10):
45 right_content.add(
46 toga.Button(
47 'Hello world %s' % b,
48 on_press=self.button_handler,
49 style=Pack(padding=20)
50 )
51 )
52
53 right_container = toga.ScrollContainer()
54
55 right_container.content = right_content
56
57 split = toga.SplitContainer()
58
59 split.content = [left_container, right_container]
60
61 cmd1 = toga.Command(
62 self.action1,
63 'Action 1',
64 tooltip='Perform action 1',
65 icon='resources/brutus',
66 )
67 cmd2 = toga.Command(
68 self.action2,
69 'Action 2',
70 tooltip='Perform action 2',
71 icon=toga.Icon.TOGA_ICON
72 )
73
74 self.main_window.toolbar.add(cmd1, cmd2)
75
76 self.main_window.content = split
77
78 # Show the main window
79 self.main_window.show()
80
81 def button_handler(self, widget):
82 print("button press")
83 for i in range(0, 10):
84 yield 1
85 print('still running... (iteration %s)' % i)
86
87 def action1(self, widget):
88 self.main_window.info_dialog('Toga', 'THIS! IS! TOGA!!')
89
90 def action2(self, widget):
91 if self.main_window.question_dialog('Toga', 'Is this cool or what?'):
92 self.main_window.info_dialog('Happiness', 'I know, right! :-)')
93 else:
94 self.main_window.info_dialog('Shucks...', "Well aren't you a spoilsport... :-(")
95
96
97 def main():
98 return TogaDemo('Toga Demo', 'org.beeware.toga-demo')
99
[end of demo/toga_demo/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/demo/toga_demo/app.py b/demo/toga_demo/app.py
--- a/demo/toga_demo/app.py
+++ b/demo/toga_demo/app.py
@@ -87,8 +87,8 @@
def action1(self, widget):
self.main_window.info_dialog('Toga', 'THIS! IS! TOGA!!')
- def action2(self, widget):
- if self.main_window.question_dialog('Toga', 'Is this cool or what?'):
+ async def action2(self, widget):
+ if await self.main_window.question_dialog('Toga', 'Is this cool or what?'):
self.main_window.info_dialog('Happiness', 'I know, right! :-)')
else:
self.main_window.info_dialog('Shucks...', "Well aren't you a spoilsport... :-(")
| {"golden_diff": "diff --git a/demo/toga_demo/app.py b/demo/toga_demo/app.py\n--- a/demo/toga_demo/app.py\n+++ b/demo/toga_demo/app.py\n@@ -87,8 +87,8 @@\n def action1(self, widget):\n self.main_window.info_dialog('Toga', 'THIS! IS! TOGA!!')\n \n- def action2(self, widget):\n- if self.main_window.question_dialog('Toga', 'Is this cool or what?'):\n+ async def action2(self, widget):\n+ if await self.main_window.question_dialog('Toga', 'Is this cool or what?'):\n self.main_window.info_dialog('Happiness', 'I know, right! :-)')\n else:\n self.main_window.info_dialog('Shucks...', \"Well aren't you a spoilsport... :-(\")\n", "issue": "RuntimeError in toga-demo\n**Describe the bug**\r\nwhen you run the toga-demo app and click on \"Action 2\" and then either Yes or No at the dialog, you get ```RuntimeError: Can't check dialog result directly; use await or an on_result handler``` printed on the console and no followup dialog.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Install toga-core, toga-gtk, toga and demo using `pip install -e`\r\n2. Run toga-demo\r\n3. Click on Action 2\r\n4. Click on either Yes or No in popup dialog\r\n5. Note it logs a RuntimeError in the terminal you ran it from.\r\n\r\n**Expected behavior**\r\nShould not throw an error and instead it should pop up an enthusiastic response dialog.\r\n\r\n**Screenshots**\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/nick/Work/beeware/toga/src/core/toga/handlers.py\", line 66, in _handler\r\n result = handler(interface, *args, **kwargs)\r\n File \"/home/nick/Work/beeware/toga/src/core/toga/handlers.py\", line 66, in _handler\r\n result = handler(interface, *args, **kwargs)\r\n File \"/home/nick/Work/beeware/toga/demo/toga_demo/app.py\", line 91, in action2\r\n if self.main_window.question_dialog('Toga', 'Is this cool or what?'):\r\n File \"/home/nick/Work/beeware/toga/src/gtk/toga_gtk/dialogs.py\", line 16, in __bool__\r\n raise RuntimeError(\"Can't check dialog result directly; use await or an on_result handler\")\r\nRuntimeError: Can't check dialog result directly; use await or an on_result handler\r\n```\r\n\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Environment:**\r\n - Operating System: Ubuntu 20.04.4\r\n - Python version: 3.8.10\r\n - Software versions:\r\n - Toga: 0.3.0.dev34 400b6935c4689bedb134324b38eb1286af5b5ec6\r\n \r\n**Suggested Fix**\r\nIt works if you make the `action2` function async, and make it await the question_dialog.\r\nI'll submit an extremely trivial PR for this soon :-)\r\n\n", "before_files": [{"content": "import toga\nfrom toga.constants import COLUMN\nfrom toga.style import Pack\n\n\nclass TogaDemo(toga.App):\n\n def startup(self):\n # Create the main window\n self.main_window = toga.MainWindow(self.name)\n\n left_container = toga.OptionContainer()\n\n left_table = toga.Table(\n headings=['Hello', 'World'],\n data=[\n ('root1', 'value1'),\n ('root2', 'value2'),\n ('root3', 'value3'),\n ('root4', 'value4'),\n ]\n )\n\n left_tree = toga.Tree(\n headings=['Navigate'],\n data={\n ('root1',): {\n },\n ('root2',): {\n ('root2.1',): None,\n ('root2.2',): [\n ('root2.2.1',),\n ('root2.2.2',),\n ('root2.2.3',),\n ]\n }\n }\n )\n\n left_container.add('Table', left_table)\n left_container.add('Tree', left_tree)\n\n right_content = toga.Box(style=Pack(direction=COLUMN))\n for b in range(0, 10):\n right_content.add(\n toga.Button(\n 'Hello world %s' % b,\n on_press=self.button_handler,\n style=Pack(padding=20)\n )\n )\n\n right_container = 
toga.ScrollContainer()\n\n right_container.content = right_content\n\n split = toga.SplitContainer()\n\n split.content = [left_container, right_container]\n\n cmd1 = toga.Command(\n self.action1,\n 'Action 1',\n tooltip='Perform action 1',\n icon='resources/brutus',\n )\n cmd2 = toga.Command(\n self.action2,\n 'Action 2',\n tooltip='Perform action 2',\n icon=toga.Icon.TOGA_ICON\n )\n\n self.main_window.toolbar.add(cmd1, cmd2)\n\n self.main_window.content = split\n\n # Show the main window\n self.main_window.show()\n\n def button_handler(self, widget):\n print(\"button press\")\n for i in range(0, 10):\n yield 1\n print('still running... (iteration %s)' % i)\n\n def action1(self, widget):\n self.main_window.info_dialog('Toga', 'THIS! IS! TOGA!!')\n\n def action2(self, widget):\n if self.main_window.question_dialog('Toga', 'Is this cool or what?'):\n self.main_window.info_dialog('Happiness', 'I know, right! :-)')\n else:\n self.main_window.info_dialog('Shucks...', \"Well aren't you a spoilsport... :-(\")\n\n\ndef main():\n return TogaDemo('Toga Demo', 'org.beeware.toga-demo')\n", "path": "demo/toga_demo/app.py"}]} | 1,867 | 181 |
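The fix in this row is to make the command handler a coroutine and await the dialog rather than truthiness-testing its result object, which is what raised the `RuntimeError`. The same pattern applies to any Toga dialog whose answer is needed inline; a sketch of the patched handler from the demo app:

```python
async def action2(self, widget):
    # question_dialog returns an awaitable result wrapper; checking it with a
    # plain `if` raises "Can't check dialog result directly", so await it first.
    answered_yes = await self.main_window.question_dialog('Toga', 'Is this cool or what?')
    if answered_yes:
        self.main_window.info_dialog('Happiness', 'I know, right! :-)')
    else:
        self.main_window.info_dialog('Shucks...', "Well aren't you a spoilsport... :-(")
```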
gh_patches_debug_37486 | rasdani/github-patches | git_diff | PokemonGoF__PokemonGo-Bot-4443 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create small random change of position to resume the Bot after some time off
When you stop the Bot (for more than X hours, for example), it should not resume at the exact same position. It is strange to stay a few hours without running the bot and then have it resume in exactly the same position as before.
</issue>
<code>
[start of pokemongo_bot/cell_workers/sleep_schedule.py]
1 from datetime import datetime, timedelta
2 from time import sleep
3 from random import uniform
4 from pokemongo_bot.base_task import BaseTask
5
6
7 class SleepSchedule(BaseTask):
8 """Pauses the execution of the bot every day for some time
9
10 Simulates the user going to sleep every day for some time, the sleep time
11 and the duration is changed every day by a random offset defined in the
12 config file
13 Example Config:
14 {
15 "type": "SleepSchedule",
16 "config": {
17 "time": "12:00",
18 "duration":"5:30",
19 "time_random_offset": "00:30",
20 "duration_random_offset": "00:30"
21 }
22 }
23 time: (HH:MM) local time that the bot should sleep
24 duration: (HH:MM) the duration of sleep
25 time_random_offset: (HH:MM) random offset of time that the sleep will start
26 for this example the possible start time is 11:30-12:30
27 duration_random_offset: (HH:MM) random offset of duration of sleep
28 for this example the possible duration is 5:00-6:00
29 """
30 SUPPORTED_TASK_API_VERSION = 1
31
32 LOG_INTERVAL_SECONDS = 600
33 SCHEDULING_MARGIN = timedelta(minutes=10) # Skip if next sleep is RESCHEDULING_MARGIN from now
34
35 def initialize(self):
36 # self.bot.event_manager.register_event('sleeper_scheduled', parameters=('datetime',))
37 self._process_config()
38 self._schedule_next_sleep()
39 self._calculate_current_sleep()
40
41 def work(self):
42 if self._should_sleep_now():
43 self._sleep()
44 self._schedule_next_sleep()
45 self.bot.login()
46
47 def _process_config(self):
48 self.time = datetime.strptime(self.config.get('time', '01:00'), '%H:%M')
49
50 # Using datetime for easier stripping of timedeltas
51 duration = datetime.strptime(self.config.get('duration', '07:00'), '%H:%M')
52 self.duration = int(timedelta(hours=duration.hour, minutes=duration.minute).total_seconds())
53
54 time_random_offset = datetime.strptime(self.config.get('time_random_offset', '01:00'), '%H:%M')
55 self.time_random_offset = int(
56 timedelta(
57 hours=time_random_offset.hour, minutes=time_random_offset.minute).total_seconds())
58
59 duration_random_offset = datetime.strptime(self.config.get('duration_random_offset', '00:30'), '%H:%M')
60 self.duration_random_offset = int(
61 timedelta(
62 hours=duration_random_offset.hour, minutes=duration_random_offset.minute).total_seconds())
63
64 def _schedule_next_sleep(self):
65 self._next_sleep = self._get_next_sleep_schedule()
66 self._next_duration = self._get_next_duration()
67 self.emit_event(
68 'next_sleep',
69 formatted="Next sleep at {time}",
70 data={
71 'time': str(self._next_sleep)
72 }
73 )
74
75 def _calculate_current_sleep(self):
76 self._current_sleep = self._next_sleep - timedelta(days=1)
77 current_duration = self._get_next_duration()
78 self._current_end = self._current_sleep + timedelta(seconds = current_duration)
79
80 def _should_sleep_now(self):
81 if datetime.now() >= self._next_sleep:
82 return True
83 if datetime.now() >= self._current_sleep and datetime.now() < self._current_end:
84 self._next_duration = (self._current_end - datetime.now()).total_seconds()
85 return True
86
87 return False
88
89 def _get_next_sleep_schedule(self):
90 now = datetime.now() + self.SCHEDULING_MARGIN
91 next_time = now.replace(hour=self.time.hour, minute=self.time.minute)
92
93 next_time += timedelta(seconds=self._get_random_offset(self.time_random_offset))
94
95 # If sleep time is passed add one day
96 if next_time <= now:
97 next_time += timedelta(days=1)
98
99 return next_time
100
101 def _get_next_duration(self):
102 duration = self.duration + self._get_random_offset(self.duration_random_offset)
103 return duration
104
105 def _get_random_offset(self, max_offset):
106 offset = uniform(-max_offset, max_offset)
107 return int(offset)
108
109 def _sleep(self):
110 sleep_to_go = self._next_duration
111
112 sleep_m, sleep_s = divmod(sleep_to_go, 60)
113 sleep_h, sleep_m = divmod(sleep_m, 60)
114 sleep_hms = '%02d:%02d:%02d' % (sleep_h, sleep_m, sleep_s)
115
116 now = datetime.now()
117 wake = str(now + timedelta(seconds=sleep_to_go))
118
119 self.emit_event(
120 'bot_sleep',
121 formatted="Sleeping for {time_hms}, wake at {wake}",
122 data={
123 'time_hms': sleep_hms,
124 'wake': wake
125 }
126 )
127 while sleep_to_go > 0:
128 if sleep_to_go < self.LOG_INTERVAL_SECONDS:
129 sleep(sleep_to_go)
130 sleep_to_go = 0
131 else:
132 sleep(self.LOG_INTERVAL_SECONDS)
133 sleep_to_go -= self.LOG_INTERVAL_SECONDS
134
[end of pokemongo_bot/cell_workers/sleep_schedule.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pokemongo_bot/cell_workers/sleep_schedule.py b/pokemongo_bot/cell_workers/sleep_schedule.py
--- a/pokemongo_bot/cell_workers/sleep_schedule.py
+++ b/pokemongo_bot/cell_workers/sleep_schedule.py
@@ -18,6 +18,7 @@
"duration":"5:30",
"time_random_offset": "00:30",
"duration_random_offset": "00:30"
+ "wake_up_at_location": ""
}
}
time: (HH:MM) local time that the bot should sleep
@@ -26,7 +27,8 @@
for this example the possible start time is 11:30-12:30
duration_random_offset: (HH:MM) random offset of duration of sleep
for this example the possible duration is 5:00-6:00
- """
+ wake_up_at_location: (lat, long | lat, long, alt | "") the location at which the bot wake up
+ *Note that an empty string ("") will not change the location*. """
SUPPORTED_TASK_API_VERSION = 1
LOG_INTERVAL_SECONDS = 600
@@ -42,6 +44,9 @@
if self._should_sleep_now():
self._sleep()
self._schedule_next_sleep()
+ wake_up_at_location = self.config.get("wake_up_at_location", "")
+ if wake_up_at_location:
+ self.bot.api.set_position(self.wake_up_at_location[0],self.wake_up_at_location[1],self.wake_up_at_location[2])
self.bot.login()
def _process_config(self):
@@ -60,6 +65,21 @@
self.duration_random_offset = int(
timedelta(
hours=duration_random_offset.hour, minutes=duration_random_offset.minute).total_seconds())
+
+ wake_up_at_location = self.config.get("wake_up_at_location", "")
+ if wake_up_at_location:
+ try:
+ wake_up_at_location = wake_up_at_location.split(',',2)
+ lat=float(wake_up_at_location[0])
+ lng=float(wake_up_at_location[1])
+ if len(wake_up_at_location) == 3:
+ alt=float(wake_up_at_location[2])
+ else:
+ alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)
+ except ValueError:
+ raise ValueError('SleepSchedule wake_up_at_location, parsing error in location') #TODO there must be a more elegant way to do it...
+
+ self.wake_up_at_location = [lat, lng, alt]
def _schedule_next_sleep(self):
self._next_sleep = self._get_next_sleep_schedule()
| {"golden_diff": "diff --git a/pokemongo_bot/cell_workers/sleep_schedule.py b/pokemongo_bot/cell_workers/sleep_schedule.py\n--- a/pokemongo_bot/cell_workers/sleep_schedule.py\n+++ b/pokemongo_bot/cell_workers/sleep_schedule.py\n@@ -18,6 +18,7 @@\n \"duration\":\"5:30\",\n \"time_random_offset\": \"00:30\",\n \"duration_random_offset\": \"00:30\"\n+ \"wake_up_at_location\": \"\"\n }\n }\n time: (HH:MM) local time that the bot should sleep\n@@ -26,7 +27,8 @@\n for this example the possible start time is 11:30-12:30\n duration_random_offset: (HH:MM) random offset of duration of sleep\n for this example the possible duration is 5:00-6:00\n- \"\"\"\n+ wake_up_at_location: (lat, long | lat, long, alt | \"\") the location at which the bot wake up \n+ *Note that an empty string (\"\") will not change the location*. \"\"\"\n SUPPORTED_TASK_API_VERSION = 1\n \n LOG_INTERVAL_SECONDS = 600\n@@ -42,6 +44,9 @@\n if self._should_sleep_now():\n self._sleep()\n self._schedule_next_sleep()\n+ wake_up_at_location = self.config.get(\"wake_up_at_location\", \"\")\n+ if wake_up_at_location:\n+ self.bot.api.set_position(self.wake_up_at_location[0],self.wake_up_at_location[1],self.wake_up_at_location[2])\n self.bot.login()\n \n def _process_config(self):\n@@ -60,6 +65,21 @@\n self.duration_random_offset = int(\n timedelta(\n hours=duration_random_offset.hour, minutes=duration_random_offset.minute).total_seconds())\n+ \n+ wake_up_at_location = self.config.get(\"wake_up_at_location\", \"\")\n+ if wake_up_at_location:\n+ try:\n+ wake_up_at_location = wake_up_at_location.split(',',2) \n+ lat=float(wake_up_at_location[0])\n+ lng=float(wake_up_at_location[1])\n+ if len(wake_up_at_location) == 3:\n+ alt=float(wake_up_at_location[2])\n+ else:\n+ alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)\n+ except ValueError:\n+ raise ValueError('SleepSchedule wake_up_at_location, parsing error in location') #TODO there must be a more elegant way to do it...\n+\n+ self.wake_up_at_location = [lat, lng, alt]\n \n def _schedule_next_sleep(self):\n self._next_sleep = self._get_next_sleep_schedule()\n", "issue": "Create small random change of position to resume the Bot after some time off\nWhen you stop the Bot (by more than X hours for example) does not return the exact same position. 
It is strange to stay a few hours without running the bot and it resumes in the same previous position.\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom time import sleep\nfrom random import uniform\nfrom pokemongo_bot.base_task import BaseTask\n\n\nclass SleepSchedule(BaseTask):\n \"\"\"Pauses the execution of the bot every day for some time\n\n Simulates the user going to sleep every day for some time, the sleep time\n and the duration is changed every day by a random offset defined in the\n config file\n Example Config:\n {\n \"type\": \"SleepSchedule\",\n \"config\": {\n \"time\": \"12:00\",\n \"duration\":\"5:30\",\n \"time_random_offset\": \"00:30\",\n \"duration_random_offset\": \"00:30\"\n }\n }\n time: (HH:MM) local time that the bot should sleep\n duration: (HH:MM) the duration of sleep\n time_random_offset: (HH:MM) random offset of time that the sleep will start\n for this example the possible start time is 11:30-12:30\n duration_random_offset: (HH:MM) random offset of duration of sleep\n for this example the possible duration is 5:00-6:00\n \"\"\"\n SUPPORTED_TASK_API_VERSION = 1\n\n LOG_INTERVAL_SECONDS = 600\n SCHEDULING_MARGIN = timedelta(minutes=10) # Skip if next sleep is RESCHEDULING_MARGIN from now\n\n def initialize(self):\n # self.bot.event_manager.register_event('sleeper_scheduled', parameters=('datetime',))\n self._process_config()\n self._schedule_next_sleep()\n self._calculate_current_sleep()\n\n def work(self):\n if self._should_sleep_now():\n self._sleep()\n self._schedule_next_sleep()\n self.bot.login()\n\n def _process_config(self):\n self.time = datetime.strptime(self.config.get('time', '01:00'), '%H:%M')\n\n # Using datetime for easier stripping of timedeltas\n duration = datetime.strptime(self.config.get('duration', '07:00'), '%H:%M')\n self.duration = int(timedelta(hours=duration.hour, minutes=duration.minute).total_seconds())\n\n time_random_offset = datetime.strptime(self.config.get('time_random_offset', '01:00'), '%H:%M')\n self.time_random_offset = int(\n timedelta(\n hours=time_random_offset.hour, minutes=time_random_offset.minute).total_seconds())\n\n duration_random_offset = datetime.strptime(self.config.get('duration_random_offset', '00:30'), '%H:%M')\n self.duration_random_offset = int(\n timedelta(\n hours=duration_random_offset.hour, minutes=duration_random_offset.minute).total_seconds())\n\n def _schedule_next_sleep(self):\n self._next_sleep = self._get_next_sleep_schedule()\n self._next_duration = self._get_next_duration()\n self.emit_event(\n 'next_sleep',\n formatted=\"Next sleep at {time}\",\n data={\n 'time': str(self._next_sleep)\n }\n )\n\n def _calculate_current_sleep(self):\n self._current_sleep = self._next_sleep - timedelta(days=1)\n current_duration = self._get_next_duration()\n self._current_end = self._current_sleep + timedelta(seconds = current_duration)\n\n def _should_sleep_now(self):\n if datetime.now() >= self._next_sleep:\n return True\n if datetime.now() >= self._current_sleep and datetime.now() < self._current_end:\n self._next_duration = (self._current_end - datetime.now()).total_seconds()\n return True\n\n return False\n\n def _get_next_sleep_schedule(self):\n now = datetime.now() + self.SCHEDULING_MARGIN\n next_time = now.replace(hour=self.time.hour, minute=self.time.minute)\n\n next_time += timedelta(seconds=self._get_random_offset(self.time_random_offset))\n\n # If sleep time is passed add one day\n if next_time <= now:\n next_time += timedelta(days=1)\n\n return next_time\n\n def 
_get_next_duration(self):\n duration = self.duration + self._get_random_offset(self.duration_random_offset)\n return duration\n\n def _get_random_offset(self, max_offset):\n offset = uniform(-max_offset, max_offset)\n return int(offset)\n\n def _sleep(self):\n sleep_to_go = self._next_duration\n\n sleep_m, sleep_s = divmod(sleep_to_go, 60)\n sleep_h, sleep_m = divmod(sleep_m, 60)\n sleep_hms = '%02d:%02d:%02d' % (sleep_h, sleep_m, sleep_s)\n\n now = datetime.now()\n wake = str(now + timedelta(seconds=sleep_to_go))\n\n self.emit_event(\n 'bot_sleep',\n formatted=\"Sleeping for {time_hms}, wake at {wake}\",\n data={\n 'time_hms': sleep_hms,\n 'wake': wake\n }\n )\n while sleep_to_go > 0:\n if sleep_to_go < self.LOG_INTERVAL_SECONDS:\n sleep(sleep_to_go)\n sleep_to_go = 0\n else:\n sleep(self.LOG_INTERVAL_SECONDS)\n sleep_to_go -= self.LOG_INTERVAL_SECONDS\n", "path": "pokemongo_bot/cell_workers/sleep_schedule.py"}]} | 2,042 | 614 |
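The reference patch answers the feature request by letting `SleepSchedule` jump to an optional `wake_up_at_location` before logging back in after sleep. Its location parsing (a comma-separated `lat, long` or `lat, long, alt` string, with a random altitude when none is given) can be sketched in isolation like this; the `alt_min`/`alt_max` defaults stand in for the bot's configured altitude range:

```python
from random import uniform

def parse_wake_up_at_location(value, alt_min=5.0, alt_max=50.0):
    """Parse "lat, long" or "lat, long, alt"; an empty string means "do not move"."""
    if not value:
        return None
    parts = value.split(',', 2)
    try:
        lat, lng = float(parts[0]), float(parts[1])
        alt = float(parts[2]) if len(parts) == 3 else uniform(alt_min, alt_max)
    except ValueError:
        raise ValueError('SleepSchedule wake_up_at_location, parsing error in location')
    return [lat, lng, alt]

# parse_wake_up_at_location("51.5074, -0.1278") -> [51.5074, -0.1278, <random altitude>]
```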
gh_patches_debug_19850 | rasdani/github-patches | git_diff | PyGithub__PyGithub-1894 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in repr(PublicKey)
hi there.
there is a bug in "repo.get_public_key".
i found this bug by trying to use "repo.create_secret", which failed because of this issue
```
repo.get_public_key()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/Users/sschultchen/PycharmProjects/IAC_Main/venv/lib/python3.9/site-packages/github/PublicKey.py", line 55, in __repr__
return self.get__repr__({"key_id": self._key_id.value, "key": self._key.value})
File "/Users/sschultchen/PycharmProjects/IAC_Main/venv/lib/python3.9/site-packages/github/GithubObject.py", line 62, in value
raise GithubException.BadAttributeException(
github.GithubException.BadAttributeException: (1, <class 'str'>, None)
```
I guess the reason for this might be that the PyGithub implementation assumes that "key_id" is a string, but it is actually an integer.
At least, the GitHub API gives me an integer, not a string, for this attribute.
</issue>
<code>
[start of github/PublicKey.py]
1 ############################ Copyrights and license ############################
2 # #
3 # Copyright 2012 Vincent Jacques <[email protected]> #
4 # Copyright 2012 Zearin <[email protected]> #
5 # Copyright 2013 AKFish <[email protected]> #
6 # Copyright 2013 Vincent Jacques <[email protected]> #
7 # Copyright 2014 Vincent Jacques <[email protected]> #
8 # Copyright 2016 Jannis Gebauer <[email protected]> #
9 # Copyright 2016 Peter Buckley <[email protected]> #
10 # Copyright 2018 Wan Liuyang <[email protected]> #
11 # Copyright 2018 sfdye <[email protected]> #
12 # #
13 # This file is part of PyGithub. #
14 # http://pygithub.readthedocs.io/ #
15 # #
16 # PyGithub is free software: you can redistribute it and/or modify it under #
17 # the terms of the GNU Lesser General Public License as published by the Free #
18 # Software Foundation, either version 3 of the License, or (at your option) #
19 # any later version. #
20 # #
21 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
22 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
23 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
24 # details. #
25 # #
26 # You should have received a copy of the GNU Lesser General Public License #
27 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
28 # #
29 ################################################################################
30
31 # https://docs.github.com/en/rest/reference/actions#example-encrypting-a-secret-using-python
32 from base64 import b64encode
33
34 from nacl import encoding, public
35
36 import github.GithubObject
37
38
39 def encrypt(public_key: str, secret_value: str) -> str:
40 """Encrypt a Unicode string using the public key."""
41 public_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
42 sealed_box = public.SealedBox(public_key)
43 encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
44 return b64encode(encrypted).decode("utf-8")
45
46
47 class PublicKey(github.GithubObject.CompletableGithubObject):
48 """
49 This class represents either an organization public key or a repository public key.
50 The reference can be found here https://docs.github.com/en/rest/reference/actions#get-an-organization-public-key
51 or here https://docs.github.com/en/rest/reference/actions#get-a-repository-public-key
52 """
53
54 def __repr__(self):
55 return self.get__repr__({"key_id": self._key_id.value, "key": self._key.value})
56
57 @property
58 def key(self):
59 """
60 :type: string
61 """
62 self._completeIfNotSet(self._key)
63 return self._key.value
64
65 @property
66 def key_id(self):
67 """
68 :type: string
69 """
70 self._completeIfNotSet(self._key_id)
71 return self._key_id.value
72
73 def _initAttributes(self):
74 self._key = github.GithubObject.NotSet
75 self._key_id = github.GithubObject.NotSet
76
77 def _useAttributes(self, attributes):
78 if "key" in attributes: # pragma no branch
79 self._key = self._makeStringAttribute(attributes["key"])
80 if "key_id" in attributes: # pragma no branch
81 self._key_id = self._makeStringAttribute(attributes["key_id"])
82
83 def encrypt(self, unencrypted_value):
84 return encrypt(self._key.value, unencrypted_value)
85
[end of github/PublicKey.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/github/PublicKey.py b/github/PublicKey.py
--- a/github/PublicKey.py
+++ b/github/PublicKey.py
@@ -65,7 +65,7 @@
@property
def key_id(self):
"""
- :type: string
+ :type: string or int
"""
self._completeIfNotSet(self._key_id)
return self._key_id.value
@@ -78,7 +78,10 @@
if "key" in attributes: # pragma no branch
self._key = self._makeStringAttribute(attributes["key"])
if "key_id" in attributes: # pragma no branch
- self._key_id = self._makeStringAttribute(attributes["key_id"])
+ if type(attributes["key_id"]) == str:
+ self._key_id = self._makeStringAttribute(attributes["key_id"])
+ else:
+ self._key_id = self._makeIntAttribute(attributes["key_id"])
def encrypt(self, unencrypted_value):
return encrypt(self._key.value, unencrypted_value)
| {"golden_diff": "diff --git a/github/PublicKey.py b/github/PublicKey.py\n--- a/github/PublicKey.py\n+++ b/github/PublicKey.py\n@@ -65,7 +65,7 @@\n @property\n def key_id(self):\n \"\"\"\n- :type: string\n+ :type: string or int\n \"\"\"\n self._completeIfNotSet(self._key_id)\n return self._key_id.value\n@@ -78,7 +78,10 @@\n if \"key\" in attributes: # pragma no branch\n self._key = self._makeStringAttribute(attributes[\"key\"])\n if \"key_id\" in attributes: # pragma no branch\n- self._key_id = self._makeStringAttribute(attributes[\"key_id\"])\n+ if type(attributes[\"key_id\"]) == str:\n+ self._key_id = self._makeStringAttribute(attributes[\"key_id\"])\n+ else:\n+ self._key_id = self._makeIntAttribute(attributes[\"key_id\"])\n \n def encrypt(self, unencrypted_value):\n return encrypt(self._key.value, unencrypted_value)\n", "issue": "Bug in repr(PublicKey)\nhi there.\r\n\r\nthere is a bug in \"repo.get_public_key\".\r\n\r\ni found this bug by trying to use \"repo.create_secret\", which failed because of this issue\r\n\r\n```\r\nrepo.get_public_key()\r\nTraceback (most recent call last):\r\n File \"<input>\", line 1, in <module>\r\n File \"/Users/sschultchen/PycharmProjects/IAC_Main/venv/lib/python3.9/site-packages/github/PublicKey.py\", line 55, in __repr__\r\n return self.get__repr__({\"key_id\": self._key_id.value, \"key\": self._key.value})\r\n File \"/Users/sschultchen/PycharmProjects/IAC_Main/venv/lib/python3.9/site-packages/github/GithubObject.py\", line 62, in value\r\n raise GithubException.BadAttributeException(\r\ngithub.GithubException.BadAttributeException: (1, <class 'str'>, None)\r\n```\r\n\r\ni guess the reason for this might be that the pygithub implementation assumes that \"key_id\" is a string, but it actually is a integer.\r\n\r\nat least, the github api gives me an integer, and not a string for this attribute.\n", "before_files": [{"content": "############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\n# https://docs.github.com/en/rest/reference/actions#example-encrypting-a-secret-using-python\nfrom base64 import b64encode\n\nfrom nacl import encoding, public\n\nimport github.GithubObject\n\n\ndef encrypt(public_key: str, secret_value: str) -> str:\n \"\"\"Encrypt a Unicode string using the public key.\"\"\"\n public_key = public.PublicKey(public_key.encode(\"utf-8\"), encoding.Base64Encoder())\n sealed_box = public.SealedBox(public_key)\n encrypted = sealed_box.encrypt(secret_value.encode(\"utf-8\"))\n return b64encode(encrypted).decode(\"utf-8\")\n\n\nclass PublicKey(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents either an organization public key or a repository public key.\n The reference can be found here https://docs.github.com/en/rest/reference/actions#get-an-organization-public-key\n or here https://docs.github.com/en/rest/reference/actions#get-a-repository-public-key\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"key_id\": self._key_id.value, \"key\": self._key.value})\n\n @property\n def key(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._key)\n return self._key.value\n\n @property\n def key_id(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._key_id)\n return self._key_id.value\n\n def _initAttributes(self):\n self._key = github.GithubObject.NotSet\n self._key_id = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"key\" in attributes: # pragma no branch\n self._key = self._makeStringAttribute(attributes[\"key\"])\n if \"key_id\" in attributes: # pragma no branch\n self._key_id = self._makeStringAttribute(attributes[\"key_id\"])\n\n def encrypt(self, unencrypted_value):\n return encrypt(self._key.value, unencrypted_value)\n", "path": "github/PublicKey.py"}]} | 1,787 | 236 |
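The patch above makes `PublicKey._useAttributes` accept a `key_id` that may arrive as either a string or an integer (the issue reporter's repository key came back as an integer), so `repr()` and the secret-creation path stop raising `BadAttributeException`. A usage sketch follows; the token and repository name are placeholders, and the calls need network access plus an appropriately scoped token:

```python
from github import Github

gh = Github("<token>")                     # placeholder credentials
repo = gh.get_repo("octocat/Hello-World")  # placeholder repository

key = repo.get_public_key()
print(repr(key))           # previously raised BadAttributeException when key_id was an int
print(type(key.key_id))    # int or str, depending on what the API returned
repo.create_secret("MY_SECRET", "some value")  # the call that surfaced the bug originally
```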
gh_patches_debug_6748 | rasdani/github-patches | git_diff | internetarchive__openlibrary-7922 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update solr Docker tag to v8.11.2
[](https://renovatebot.com)
This PR contains the following updates:
| Package | Update | Change |
|---|---|---|
| [solr](https://togithub.com/apache/solr) | minor | `8.10.1` -> `8.11.2` |
---
### Configuration
📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined).
🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.
♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox.
🔕 **Ignore**: Close this PR and you won't be reminded about this update again.
---
- [ ] <!-- rebase-check -->If you want to rebase/retry this PR, check this box
---
This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/internetarchive/openlibrary).
<!--renovate-debug:eyJjcmVhdGVkSW5WZXIiOiIzMi4xNTQuMiIsInVwZGF0ZWRJblZlciI6IjM2LjQwLjMiLCJ0YXJnZXRCcmFuY2giOiJtYXN0ZXIifQ==-->
</issue>
<code>
[start of openlibrary/solr/types_generator.py]
1 #!/usr/bin/env python
2 import os
3
4 root = os.path.dirname(__file__)
5 OVERRIDES = {'type': "Literal['work', 'author', 'subject']"}
6
7
8 def generate():
9 """This function generates the types.py file."""
10 import xml.etree.ElementTree as ET
11
12 # read the managed-schema xml file
13 solr_schema = ET.parse(os.path.join(root, '../../conf/solr/conf/managed-schema'))
14 python_fields: list[str] = []
15 seen_names: set[str] = set()
16 for field in solr_schema.getroot().findall('field'):
17 name = field.get('name')
18 if name.startswith('_'):
19 continue
20
21 required = field.get('required') == 'true'
22 typ = field.get('type')
23 multivalued = field.get('multiValued') == 'true'
24 type_map = {
25 'pint': 'int',
26 'string': 'str',
27 'text_en_splitting': 'str',
28 'text_general': 'str',
29 'text_international': 'str',
30 'text_title_sort': 'str',
31 'boolean': 'bool',
32 'pfloat': 'float',
33 }
34
35 if name in OVERRIDES:
36 python_type = OVERRIDES[name]
37 elif typ in type_map:
38 python_type = type_map[typ]
39 elif (
40 field_type := solr_schema.find(f".//fieldType[@name='{typ}']")
41 ) is not None:
42 field_class = field_type.get('class')
43 if field_class == 'solr.EnumFieldType':
44 enumsConfigFile = field_type.get('enumsConfig')
45 enumsConfig = ET.parse(
46 os.path.join(root, '../../conf/solr/conf/', enumsConfigFile)
47 )
48 enum_values = [
49 el.text
50 for el in enumsConfig.findall(
51 f".//enum[@name='{field_type.get('enumName')}']/value"
52 )
53 ]
54 python_type = f"Literal[{', '.join(map(repr, enum_values))}]"
55 else:
56 raise Exception(f"Unknown field type class {field_class}")
57 else:
58 raise Exception(f"Unknown field type {typ}")
59
60 if name not in OVERRIDES:
61 if multivalued:
62 python_type = f"list[{python_type}]"
63 if not required:
64 python_type = f"Optional[{python_type}]"
65
66 seen_names.add(name)
67 python_fields.append(f" {name}: {python_type}")
68
69 for key in set(OVERRIDES) - seen_names:
70 python_fields.append(f" {key}: {OVERRIDES[key]}")
71
72 body = '\n'.join(python_fields)
73 python = f"""# This file is auto-generated by types_generator.py
74 # fmt: off
75 from typing import Literal, TypedDict, Optional
76
77
78 class SolrDocument(TypedDict):
79 {body}
80
81 # fmt: on"""
82
83 return python
84
85
86 if __name__ == '__main__':
87 print(generate())
88
[end of openlibrary/solr/types_generator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openlibrary/solr/types_generator.py b/openlibrary/solr/types_generator.py
--- a/openlibrary/solr/types_generator.py
+++ b/openlibrary/solr/types_generator.py
@@ -10,7 +10,9 @@
import xml.etree.ElementTree as ET
# read the managed-schema xml file
- solr_schema = ET.parse(os.path.join(root, '../../conf/solr/conf/managed-schema'))
+ solr_schema = ET.parse(
+ os.path.join(root, '../../conf/solr/conf/managed-schema.xml')
+ )
python_fields: list[str] = []
seen_names: set[str] = set()
for field in solr_schema.getroot().findall('field'):
| {"golden_diff": "diff --git a/openlibrary/solr/types_generator.py b/openlibrary/solr/types_generator.py\n--- a/openlibrary/solr/types_generator.py\n+++ b/openlibrary/solr/types_generator.py\n@@ -10,7 +10,9 @@\n import xml.etree.ElementTree as ET\n \n # read the managed-schema xml file\n- solr_schema = ET.parse(os.path.join(root, '../../conf/solr/conf/managed-schema'))\n+ solr_schema = ET.parse(\n+ os.path.join(root, '../../conf/solr/conf/managed-schema.xml')\n+ )\n python_fields: list[str] = []\n seen_names: set[str] = set()\n for field in solr_schema.getroot().findall('field'):\n", "issue": "Update solr Docker tag to v8.11.2\n[](https://renovatebot.com)\n\nThis PR contains the following updates:\n\n| Package | Update | Change |\n|---|---|---|\n| [solr](https://togithub.com/apache/solr) | minor | `8.10.1` -> `8.11.2` |\n\n---\n\n### Configuration\n\n\ud83d\udcc5 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined).\n\n\ud83d\udea6 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.\n\n\u267b **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox.\n\n\ud83d\udd15 **Ignore**: Close this PR and you won't be reminded about this update again.\n\n---\n\n - [ ] <!-- rebase-check -->If you want to rebase/retry this PR, check this box\n\n---\n\nThis PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/internetarchive/openlibrary).\n<!--renovate-debug:eyJjcmVhdGVkSW5WZXIiOiIzMi4xNTQuMiIsInVwZGF0ZWRJblZlciI6IjM2LjQwLjMiLCJ0YXJnZXRCcmFuY2giOiJtYXN0ZXIifQ==-->\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\n\nroot = os.path.dirname(__file__)\nOVERRIDES = {'type': \"Literal['work', 'author', 'subject']\"}\n\n\ndef generate():\n \"\"\"This function generates the types.py file.\"\"\"\n import xml.etree.ElementTree as ET\n\n # read the managed-schema xml file\n solr_schema = ET.parse(os.path.join(root, '../../conf/solr/conf/managed-schema'))\n python_fields: list[str] = []\n seen_names: set[str] = set()\n for field in solr_schema.getroot().findall('field'):\n name = field.get('name')\n if name.startswith('_'):\n continue\n\n required = field.get('required') == 'true'\n typ = field.get('type')\n multivalued = field.get('multiValued') == 'true'\n type_map = {\n 'pint': 'int',\n 'string': 'str',\n 'text_en_splitting': 'str',\n 'text_general': 'str',\n 'text_international': 'str',\n 'text_title_sort': 'str',\n 'boolean': 'bool',\n 'pfloat': 'float',\n }\n\n if name in OVERRIDES:\n python_type = OVERRIDES[name]\n elif typ in type_map:\n python_type = type_map[typ]\n elif (\n field_type := solr_schema.find(f\".//fieldType[@name='{typ}']\")\n ) is not None:\n field_class = field_type.get('class')\n if field_class == 'solr.EnumFieldType':\n enumsConfigFile = field_type.get('enumsConfig')\n enumsConfig = ET.parse(\n os.path.join(root, '../../conf/solr/conf/', enumsConfigFile)\n )\n enum_values = [\n el.text\n for el in enumsConfig.findall(\n f\".//enum[@name='{field_type.get('enumName')}']/value\"\n )\n ]\n python_type = f\"Literal[{', '.join(map(repr, enum_values))}]\"\n else:\n raise Exception(f\"Unknown field type class {field_class}\")\n else:\n raise Exception(f\"Unknown field type {typ}\")\n\n if name not in OVERRIDES:\n if multivalued:\n python_type = f\"list[{python_type}]\"\n if not required:\n python_type = f\"Optional[{python_type}]\"\n\n 
seen_names.add(name)\n python_fields.append(f\" {name}: {python_type}\")\n\n for key in set(OVERRIDES) - seen_names:\n python_fields.append(f\" {key}: {OVERRIDES[key]}\")\n\n body = '\\n'.join(python_fields)\n python = f\"\"\"# This file is auto-generated by types_generator.py\n# fmt: off\nfrom typing import Literal, TypedDict, Optional\n\n\nclass SolrDocument(TypedDict):\n{body}\n\n# fmt: on\"\"\"\n\n return python\n\n\nif __name__ == '__main__':\n print(generate())\n", "path": "openlibrary/solr/types_generator.py"}]} | 1,699 | 163 |
gh_patches_debug_9247 | rasdani/github-patches | git_diff | lnbits__lnbits-750 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switching to `FakeWallet` deletes pending outgoing payments from another backend.
Needs investigation.
What I observed: create outgoing pending payment from other wallet, stop LNbits, change to `FakeWallet`, start LNbits.
-> Payments get deleted because of a `failed` state that is returned in `lnbits.core.models:check_pending:148`.
</issue>
<code>
[start of lnbits/wallets/fake.py]
1 import asyncio
2 import hashlib
3 import random
4 from datetime import datetime
5 from os import getenv
6 from typing import AsyncGenerator, Dict, Optional
7
8 from environs import Env # type: ignore
9 from loguru import logger
10
11 from lnbits.helpers import urlsafe_short_hash
12
13 from ..bolt11 import decode, encode
14 from .base import (
15 InvoiceResponse,
16 PaymentResponse,
17 PaymentStatus,
18 StatusResponse,
19 Wallet,
20 )
21
22 env = Env()
23 env.read_env()
24
25
26 class FakeWallet(Wallet):
27 async def status(self) -> StatusResponse:
28 logger.info(
29 "FakeWallet funding source is for using LNbits as a centralised, stand-alone payment system with brrrrrr."
30 )
31 return StatusResponse(None, float("inf"))
32
33 async def create_invoice(
34 self,
35 amount: int,
36 memo: Optional[str] = None,
37 description_hash: Optional[bytes] = None,
38 ) -> InvoiceResponse:
39 # we set a default secret since FakeWallet is used for internal=True invoices
40 # and the user might not have configured a secret yet
41 secret = env.str("FAKE_WALLET_SECTRET", default="ToTheMoon1")
42 data: Dict = {
43 "out": False,
44 "amount": amount,
45 "currency": "bc",
46 "privkey": hashlib.pbkdf2_hmac(
47 "sha256",
48 secret.encode("utf-8"),
49 ("FakeWallet").encode("utf-8"),
50 2048,
51 32,
52 ).hex(),
53 "memo": None,
54 "description_hash": None,
55 "description": "",
56 "fallback": None,
57 "expires": None,
58 "route": None,
59 }
60 data["amount"] = amount * 1000
61 data["timestamp"] = datetime.now().timestamp()
62 if description_hash:
63 data["tags_set"] = ["h"]
64 data["description_hash"] = description_hash.hex()
65 else:
66 data["tags_set"] = ["d"]
67 data["memo"] = memo
68 data["description"] = memo
69 randomHash = (
70 data["privkey"][:6]
71 + hashlib.sha256(str(random.getrandbits(256)).encode("utf-8")).hexdigest()[
72 6:
73 ]
74 )
75 data["paymenthash"] = randomHash
76 payment_request = encode(data)
77 checking_id = randomHash
78
79 return InvoiceResponse(True, checking_id, payment_request)
80
81 async def pay_invoice(self, bolt11: str, fee_limit_msat: int) -> PaymentResponse:
82 invoice = decode(bolt11)
83 if (
84 hasattr(invoice, "checking_id")
85 and invoice.checking_id[6:] == data["privkey"][:6]
86 ):
87 return PaymentResponse(True, invoice.payment_hash, 0)
88 else:
89 return PaymentResponse(
90 ok=False, error_message="Only internal invoices can be used!"
91 )
92
93 async def get_invoice_status(self, checking_id: str) -> PaymentStatus:
94 return PaymentStatus(False)
95
96 async def get_payment_status(self, checking_id: str) -> PaymentStatus:
97 return PaymentStatus(False)
98
99 async def paid_invoices_stream(self) -> AsyncGenerator[str, None]:
100 self.queue = asyncio.Queue(0)
101 while True:
102 value = await self.queue.get()
103 yield value
104
[end of lnbits/wallets/fake.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lnbits/wallets/fake.py b/lnbits/wallets/fake.py
--- a/lnbits/wallets/fake.py
+++ b/lnbits/wallets/fake.py
@@ -91,10 +91,10 @@
)
async def get_invoice_status(self, checking_id: str) -> PaymentStatus:
- return PaymentStatus(False)
+ return PaymentStatus(None)
async def get_payment_status(self, checking_id: str) -> PaymentStatus:
- return PaymentStatus(False)
+ return PaymentStatus(None)
async def paid_invoices_stream(self) -> AsyncGenerator[str, None]:
self.queue = asyncio.Queue(0)
| {"golden_diff": "diff --git a/lnbits/wallets/fake.py b/lnbits/wallets/fake.py\n--- a/lnbits/wallets/fake.py\n+++ b/lnbits/wallets/fake.py\n@@ -91,10 +91,10 @@\n )\n \n async def get_invoice_status(self, checking_id: str) -> PaymentStatus:\n- return PaymentStatus(False)\n+ return PaymentStatus(None)\n \n async def get_payment_status(self, checking_id: str) -> PaymentStatus:\n- return PaymentStatus(False)\n+ return PaymentStatus(None)\n \n async def paid_invoices_stream(self) -> AsyncGenerator[str, None]:\n self.queue = asyncio.Queue(0)\n", "issue": "Switching to `FakeWallet` deletes pending outgoing payments from another backend.\nNeeds investigation.\r\n\r\nWhat I observed: create outgoing pending payment from other wallet, stop LNbits, change to `FakeWallet`, start LNbits. \r\n\r\n-> Payments get deleted because of a `failed` state that is returned in `lnbits.core.models:check_pending:148`.\n", "before_files": [{"content": "import asyncio\nimport hashlib\nimport random\nfrom datetime import datetime\nfrom os import getenv\nfrom typing import AsyncGenerator, Dict, Optional\n\nfrom environs import Env # type: ignore\nfrom loguru import logger\n\nfrom lnbits.helpers import urlsafe_short_hash\n\nfrom ..bolt11 import decode, encode\nfrom .base import (\n InvoiceResponse,\n PaymentResponse,\n PaymentStatus,\n StatusResponse,\n Wallet,\n)\n\nenv = Env()\nenv.read_env()\n\n\nclass FakeWallet(Wallet):\n async def status(self) -> StatusResponse:\n logger.info(\n \"FakeWallet funding source is for using LNbits as a centralised, stand-alone payment system with brrrrrr.\"\n )\n return StatusResponse(None, float(\"inf\"))\n\n async def create_invoice(\n self,\n amount: int,\n memo: Optional[str] = None,\n description_hash: Optional[bytes] = None,\n ) -> InvoiceResponse:\n # we set a default secret since FakeWallet is used for internal=True invoices\n # and the user might not have configured a secret yet\n secret = env.str(\"FAKE_WALLET_SECTRET\", default=\"ToTheMoon1\")\n data: Dict = {\n \"out\": False,\n \"amount\": amount,\n \"currency\": \"bc\",\n \"privkey\": hashlib.pbkdf2_hmac(\n \"sha256\",\n secret.encode(\"utf-8\"),\n (\"FakeWallet\").encode(\"utf-8\"),\n 2048,\n 32,\n ).hex(),\n \"memo\": None,\n \"description_hash\": None,\n \"description\": \"\",\n \"fallback\": None,\n \"expires\": None,\n \"route\": None,\n }\n data[\"amount\"] = amount * 1000\n data[\"timestamp\"] = datetime.now().timestamp()\n if description_hash:\n data[\"tags_set\"] = [\"h\"]\n data[\"description_hash\"] = description_hash.hex()\n else:\n data[\"tags_set\"] = [\"d\"]\n data[\"memo\"] = memo\n data[\"description\"] = memo\n randomHash = (\n data[\"privkey\"][:6]\n + hashlib.sha256(str(random.getrandbits(256)).encode(\"utf-8\")).hexdigest()[\n 6:\n ]\n )\n data[\"paymenthash\"] = randomHash\n payment_request = encode(data)\n checking_id = randomHash\n\n return InvoiceResponse(True, checking_id, payment_request)\n\n async def pay_invoice(self, bolt11: str, fee_limit_msat: int) -> PaymentResponse:\n invoice = decode(bolt11)\n if (\n hasattr(invoice, \"checking_id\")\n and invoice.checking_id[6:] == data[\"privkey\"][:6]\n ):\n return PaymentResponse(True, invoice.payment_hash, 0)\n else:\n return PaymentResponse(\n ok=False, error_message=\"Only internal invoices can be used!\"\n )\n\n async def get_invoice_status(self, checking_id: str) -> PaymentStatus:\n return PaymentStatus(False)\n\n async def get_payment_status(self, checking_id: str) -> PaymentStatus:\n return PaymentStatus(False)\n\n async def 
paid_invoices_stream(self) -> AsyncGenerator[str, None]:\n self.queue = asyncio.Queue(0)\n while True:\n value = await self.queue.get()\n yield value\n", "path": "lnbits/wallets/fake.py"}]} | 1,552 | 155 |
gh_patches_debug_15197 | rasdani/github-patches | git_diff | conan-io__conan-3185 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
USERPROFILE in conanbuildinfo.props Visual Studio files
PR https://github.com/conan-io/conan/pull/2936 was reverted because it caused failures.
cc/ @pawelkami
</issue>
<code>
[start of conans/client/generators/visualstudio.py]
1 from conans.model import Generator
2 from conans.paths import BUILD_INFO_VISUAL_STUDIO
3
4
5 class VisualStudioGenerator(Generator):
6
7 template = '''<?xml version="1.0" encoding="utf-8"?>
8 <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
9 <ImportGroup Label="PropertySheets" />
10 <PropertyGroup Label="UserMacros" />
11 <PropertyGroup Label="Conan-RootDirs">{item_properties}
12 </PropertyGroup>
13 <PropertyGroup Label="ConanVariables">
14 <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>
15 <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>
16 </PropertyGroup>
17 <PropertyGroup>
18 <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>
19 <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>
20 </PropertyGroup>
21 <ItemDefinitionGroup>
22 <ClCompile>
23 <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
24 <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>
25 <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>
26 </ClCompile>
27 <Link>
28 <AdditionalLibraryDirectories>{lib_dirs}%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
29 <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>
30 <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>
31 </Link>
32 <Midl>
33 <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
34 </Midl>
35 <ResourceCompile>
36 <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
37 <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>
38 <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>
39 </ResourceCompile>
40 </ItemDefinitionGroup>
41 <ItemGroup />
42 </Project>'''
43
44 item_template = '''
45 <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''
46
47 def _format_items(self):
48 sections = []
49 for dep_name, cpp_info in self.deps_build_info.dependencies:
50 fields = {
51 'root_dir': cpp_info.rootpath.replace("\\", "/"),
52 'name': dep_name.replace(".", "-")
53 }
54 section = self.item_template.format(**fields)
55 sections.append(section)
56 return "".join(sections)
57
58 @property
59 def filename(self):
60 return BUILD_INFO_VISUAL_STUDIO
61
62 @property
63 def content(self):
64 per_item_props = self._format_items()
65 fields = {
66 'item_properties': per_item_props,
67 'bin_dirs': "".join("%s;" % p for p in self._deps_build_info.bin_paths).replace("\\", "/"),
68 'res_dirs': "".join("%s;" % p for p in self._deps_build_info.res_paths).replace("\\", "/"),
69 'include_dirs': "".join("%s;" % p for p in self._deps_build_info.include_paths).replace("\\", "/"),
70 'lib_dirs': "".join("%s;" % p for p in self._deps_build_info.lib_paths).replace("\\", "/"),
71 'libs': "".join(['%s.lib;' % lib if not lib.endswith(".lib")
72 else '%s;' % lib for lib in self._deps_build_info.libs]),
73 'definitions': "".join("%s;" % d for d in self._deps_build_info.defines),
74 'compiler_flags': " ".join(self._deps_build_info.cppflags + self._deps_build_info.cflags),
75 'linker_flags': " ".join(self._deps_build_info.sharedlinkflags),
76 'exe_flags': " ".join(self._deps_build_info.exelinkflags)
77 }
78 formatted_template = self.template.format(**fields)
79 return formatted_template
80
[end of conans/client/generators/visualstudio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/client/generators/visualstudio.py b/conans/client/generators/visualstudio.py
--- a/conans/client/generators/visualstudio.py
+++ b/conans/client/generators/visualstudio.py
@@ -1,5 +1,8 @@
+import os
+
from conans.model import Generator
from conans.paths import BUILD_INFO_VISUAL_STUDIO
+import re
class VisualStudioGenerator(Generator):
@@ -76,4 +79,8 @@
'exe_flags': " ".join(self._deps_build_info.exelinkflags)
}
formatted_template = self.template.format(**fields)
+ userprofile = os.getenv("USERPROFILE")
+ if userprofile:
+ userprofile = userprofile.replace("\\", "/")
+ formatted_template = re.sub(userprofile, "$(USERPROFILE)", formatted_template, flags=re.I)
return formatted_template
| {"golden_diff": "diff --git a/conans/client/generators/visualstudio.py b/conans/client/generators/visualstudio.py\n--- a/conans/client/generators/visualstudio.py\n+++ b/conans/client/generators/visualstudio.py\n@@ -1,5 +1,8 @@\n+import os\n+\n from conans.model import Generator\n from conans.paths import BUILD_INFO_VISUAL_STUDIO\n+import re\n \n \n class VisualStudioGenerator(Generator):\n@@ -76,4 +79,8 @@\n 'exe_flags': \" \".join(self._deps_build_info.exelinkflags)\n }\n formatted_template = self.template.format(**fields)\n+ userprofile = os.getenv(\"USERPROFILE\")\n+ if userprofile:\n+ userprofile = userprofile.replace(\"\\\\\", \"/\")\n+ formatted_template = re.sub(userprofile, \"$(USERPROFILE)\", formatted_template, flags=re.I)\n return formatted_template\n", "issue": "USERPROFILE in conanbuildinfo.props Visual Studio files\nPR https://github.com/conan-io/conan/pull/2936 was reverted due to failing things.\r\n\r\ncc/ @pawelkami \n", "before_files": [{"content": "from conans.model import Generator\nfrom conans.paths import BUILD_INFO_VISUAL_STUDIO\n\n\nclass VisualStudioGenerator(Generator):\n\n template = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ImportGroup Label=\"PropertySheets\" />\n <PropertyGroup Label=\"UserMacros\" />\n <PropertyGroup Label=\"Conan-RootDirs\">{item_properties}\n </PropertyGroup>\n <PropertyGroup Label=\"ConanVariables\">\n <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>\n <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>\n </PropertyGroup>\n <PropertyGroup>\n <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>\n <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>\n </PropertyGroup>\n <ItemDefinitionGroup>\n <ClCompile>\n <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>\n </ClCompile>\n <Link>\n <AdditionalLibraryDirectories>{lib_dirs}%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>\n </Link>\n <Midl>\n <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n </Midl>\n <ResourceCompile>\n <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>\n </ResourceCompile>\n </ItemDefinitionGroup>\n <ItemGroup />\n</Project>'''\n\n item_template = '''\n <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''\n\n def _format_items(self):\n sections = []\n for dep_name, cpp_info in self.deps_build_info.dependencies:\n fields = {\n 'root_dir': cpp_info.rootpath.replace(\"\\\\\", \"/\"),\n 'name': dep_name.replace(\".\", \"-\")\n }\n section = self.item_template.format(**fields)\n sections.append(section)\n return \"\".join(sections)\n\n @property\n def filename(self):\n return BUILD_INFO_VISUAL_STUDIO\n\n @property\n def content(self):\n per_item_props = self._format_items()\n fields = {\n 'item_properties': per_item_props,\n 'bin_dirs': \"\".join(\"%s;\" % 
p for p in self._deps_build_info.bin_paths).replace(\"\\\\\", \"/\"),\n 'res_dirs': \"\".join(\"%s;\" % p for p in self._deps_build_info.res_paths).replace(\"\\\\\", \"/\"),\n 'include_dirs': \"\".join(\"%s;\" % p for p in self._deps_build_info.include_paths).replace(\"\\\\\", \"/\"),\n 'lib_dirs': \"\".join(\"%s;\" % p for p in self._deps_build_info.lib_paths).replace(\"\\\\\", \"/\"),\n 'libs': \"\".join(['%s.lib;' % lib if not lib.endswith(\".lib\")\n else '%s;' % lib for lib in self._deps_build_info.libs]),\n 'definitions': \"\".join(\"%s;\" % d for d in self._deps_build_info.defines),\n 'compiler_flags': \" \".join(self._deps_build_info.cppflags + self._deps_build_info.cflags),\n 'linker_flags': \" \".join(self._deps_build_info.sharedlinkflags),\n 'exe_flags': \" \".join(self._deps_build_info.exelinkflags)\n }\n formatted_template = self.template.format(**fields)\n return formatted_template\n", "path": "conans/client/generators/visualstudio.py"}]} | 1,574 | 196 |
gh_patches_debug_657 | rasdani/github-patches | git_diff | pex-tool__pex-1987 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.114
On the docket:
+ [ ] Only insert "" to head of sys.path if a venv PEX runs in interpreter mode #1984
+ [x] venv_dir calculation doesn't correctly handle PEX_PYTHON_PATH with symlinks. #1885
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.113"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.113"
+__version__ = "2.1.114"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.113\"\n+__version__ = \"2.1.114\"\n", "issue": "Release 2.1.114\nOn the docket:\r\n+ [ ] Only insert \"\" to head of sys.path if a venv PEX runs in interpreter mode #1984\r\n+ [x] venv_dir calculation doesn't correctly handle PEX_PYTHON_PATH with symlinks. #1885\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.113\"\n", "path": "pex/version.py"}]} | 654 | 99 |
gh_patches_debug_247 | rasdani/github-patches | git_diff | fail2ban__fail2ban-249 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
weak regex'es for apache
See email on fail2ban-users
</issue>
<code>
[start of common/version.py]
1 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
2 # vi: set ft=python sts=4 ts=4 sw=4 noet :
3
4 # This file is part of Fail2Ban.
5 #
6 # Fail2Ban is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # Fail2Ban is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with Fail2Ban; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19
20 # Author: Cyril Jaquier
21 #
22
23 __author__ = "Cyril Jaquier, Yaroslav Halchenko"
24 __copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2013 Yaroslav Halchenko"
25 __license__ = "GPL"
26
27 version = "0.8.9.dev"
28
[end of common/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/common/version.py b/common/version.py
--- a/common/version.py
+++ b/common/version.py
@@ -24,4 +24,4 @@
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2013 Yaroslav Halchenko"
__license__ = "GPL"
-version = "0.8.9.dev"
+version = "0.8.10"
| {"golden_diff": "diff --git a/common/version.py b/common/version.py\n--- a/common/version.py\n+++ b/common/version.py\n@@ -24,4 +24,4 @@\n __copyright__ = \"Copyright (c) 2004 Cyril Jaquier, 2011-2013 Yaroslav Halchenko\"\n __license__ = \"GPL\"\n \n-version = \"0.8.9.dev\"\n+version = \"0.8.10\"\n", "issue": "weak regex'es for apache\nSee email on fail2ban-users\n\n", "before_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n# Author: Cyril Jaquier\n#\n\n__author__ = \"Cyril Jaquier, Yaroslav Halchenko\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier, 2011-2013 Yaroslav Halchenko\"\n__license__ = \"GPL\"\n\nversion = \"0.8.9.dev\"\n", "path": "common/version.py"}]} | 899 | 102 |
gh_patches_debug_37565 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-456 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Integration tests to pull artifacts from S3Bucket
</issue>
<code>
[start of bundle-workflow/src/manifests/bundle_manifest.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 from manifests.manifest import Manifest
8
9
10 class BundleManifest(Manifest):
11 """
12 A BundleManifest is an immutable view of the outputs from a assemble step
13 The manifest contains information about the bundle that was built (in the `assemble` section),
14 and the components that made up the bundle in the `components` section.
15
16 The format for schema version 1.0 is:
17 schema-version: "1.0"
18 build:
19 name: string
20 version: string
21 architecture: x64 or arm64
22 location: /relative/path/to/tarball
23 components:
24 - name: string
25 repository: URL of git repository
26 ref: git ref that was built (sha, branch, or tag)
27 commit_id: The actual git commit ID that was built (i.e. the resolved "ref")
28 location: /relative/path/to/artifact
29 """
30
31 def __init__(self, data):
32 super().__init__(data)
33
34 self.build = self.Build(data["build"])
35 self.components = list(
36 map(lambda entry: self.Component(entry), data["components"])
37 )
38
39 def __to_dict__(self):
40 return {
41 "schema-version": "1.0",
42 "build": self.build.__to_dict__(),
43 "components": list(
44 map(lambda component: component.__to_dict__(), self.components)
45 ),
46 }
47
48 class Build:
49 def __init__(self, data):
50 self.name = data["name"]
51 self.version = data["version"]
52 self.architecture = data["architecture"]
53 self.location = data["location"]
54 self.id = data["id"]
55
56 def __to_dict__(self):
57 return {
58 "name": self.name,
59 "version": self.version,
60 "architecture": self.architecture,
61 "location": self.location,
62 "id": self.id,
63 }
64
65 class Component:
66 def __init__(self, data):
67 self.name = data["name"]
68 self.repository = data["repository"]
69 self.ref = data["ref"]
70 self.commit_id = data["commit_id"]
71 self.location = data["location"]
72
73 def __to_dict__(self):
74 return {
75 "name": self.name,
76 "repository": self.repository,
77 "ref": self.ref,
78 "commit_id": self.commit_id,
79 "location": self.location,
80 }
81
[end of bundle-workflow/src/manifests/bundle_manifest.py]
[start of bundle-workflow/src/manifests/build_manifest.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 from manifests.manifest import Manifest
8
9 """
10 A BuildManifest is an immutable view of the outputs from a build step
11 The manifest contains information about the product that was built (in the `build` section),
12 and the components that made up the build in the `components` section.
13
14 The format for schema version 1.0 is:
15 schema-version: "1.0"
16 build:
17 name: string
18 version: string
19 architecture: x64 or arm64
20 components:
21 - name: string
22 repository: URL of git repository
23 ref: git ref that was built (sha, branch, or tag)
24 commit_id: The actual git commit ID that was built (i.e. the resolved "ref")
25 artifacts:
26 maven:
27 - maven/relative/path/to/artifact
28 - ...
29 plugins:
30 - plugins/relative/path/to/artifact
31 - ...
32 libs:
33 - libs/relative/path/to/artifact
34 - ...
35 - ...
36 """
37
38
39 class BuildManifest(Manifest):
40 def __init__(self, data):
41 super().__init__(data)
42
43 self.build = self.Build(data["build"])
44 self.components = list(
45 map(lambda entry: self.Component(entry), data["components"])
46 )
47
48 def __to_dict__(self):
49 return {
50 "schema-version": "1.0",
51 "build": self.build.__to_dict__(),
52 "components": list(
53 map(lambda component: component.__to_dict__(), self.components)
54 ),
55 }
56
57 class Build:
58 def __init__(self, data):
59 self.name = data["name"]
60 self.version = data["version"]
61 self.architecture = data["architecture"]
62 self.id = data["id"]
63
64 def __to_dict__(self):
65 return {
66 "name": self.name,
67 "version": self.version,
68 "architecture": self.architecture,
69 "id": self.id,
70 }
71
72 class Component:
73 def __init__(self, data):
74 self.name = data["name"]
75 self.repository = data["repository"]
76 self.ref = data["ref"]
77 self.commit_id = data["commit_id"]
78 self.artifacts = data["artifacts"]
79 self.version = data["version"]
80
81 def __to_dict__(self):
82 return {
83 "name": self.name,
84 "repository": self.repository,
85 "ref": self.ref,
86 "commit_id": self.commit_id,
87 "artifacts": self.artifacts,
88 "version": self.version,
89 }
90
[end of bundle-workflow/src/manifests/build_manifest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bundle-workflow/src/manifests/build_manifest.py b/bundle-workflow/src/manifests/build_manifest.py
--- a/bundle-workflow/src/manifests/build_manifest.py
+++ b/bundle-workflow/src/manifests/build_manifest.py
@@ -4,6 +4,9 @@
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
+import os
+
+from aws.s3_bucket import S3Bucket
from manifests.manifest import Manifest
"""
@@ -54,6 +57,20 @@
),
}
+ @staticmethod
+ def get_build_manifest_relative_location(build_id, opensearch_version, architecture):
+ return f"builds/{opensearch_version}/{build_id}/{architecture}/manifest.yml"
+
+ @staticmethod
+ def from_s3(bucket_name, build_id, opensearch_version, architecture, work_dir=None):
+ work_dir = work_dir if not None else str(os.getcwd())
+ manifest_s3_path = BuildManifest.get_build_manifest_relative_location(build_id, opensearch_version, architecture)
+ S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)
+ with open('manifest.yml', 'r') as file:
+ build_manifest = BuildManifest.from_file(file)
+ os.remove(os.path.realpath(os.path.join(work_dir, 'manifest.yml')))
+ return build_manifest
+
class Build:
def __init__(self, data):
self.name = data["name"]
diff --git a/bundle-workflow/src/manifests/bundle_manifest.py b/bundle-workflow/src/manifests/bundle_manifest.py
--- a/bundle-workflow/src/manifests/bundle_manifest.py
+++ b/bundle-workflow/src/manifests/bundle_manifest.py
@@ -4,6 +4,9 @@
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
+import os
+
+from aws.s3_bucket import S3Bucket
from manifests.manifest import Manifest
@@ -45,6 +48,28 @@
),
}
+ @staticmethod
+ def from_s3(bucket_name, build_id, opensearch_version, architecture, work_dir=None):
+ work_dir = work_dir if not None else str(os.getcwd())
+ manifest_s3_path = BundleManifest.get_bundle_manifest_relative_location(build_id, opensearch_version, architecture)
+ S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)
+ with open('manifest.yml', 'r') as file:
+ bundle_manifest = BundleManifest.from_file(file)
+ os.remove(os.path.realpath(os.path.join(work_dir, 'manifest.yml')))
+ return bundle_manifest
+
+ @staticmethod
+ def get_tarball_relative_location(build_id, opensearch_version, architecture):
+ return f"bundles/{opensearch_version}/{build_id}/{architecture}/opensearch-{opensearch_version}-linux-{architecture}.tar.gz"
+
+ @staticmethod
+ def get_tarball_name(opensearch_version, architecture):
+ return f"opensearch-{opensearch_version}-linux-{architecture}.tar.gz"
+
+ @staticmethod
+ def get_bundle_manifest_relative_location(build_id, opensearch_version, architecture):
+ return f"bundles/{opensearch_version}/{build_id}/{architecture}/manifest.yml"
+
class Build:
def __init__(self, data):
self.name = data["name"]
| {"golden_diff": "diff --git a/bundle-workflow/src/manifests/build_manifest.py b/bundle-workflow/src/manifests/build_manifest.py\n--- a/bundle-workflow/src/manifests/build_manifest.py\n+++ b/bundle-workflow/src/manifests/build_manifest.py\n@@ -4,6 +4,9 @@\n # this file be licensed under the Apache-2.0 license or a\n # compatible open source license.\n \n+import os\n+\n+from aws.s3_bucket import S3Bucket\n from manifests.manifest import Manifest\n \n \"\"\"\n@@ -54,6 +57,20 @@\n ),\n }\n \n+ @staticmethod\n+ def get_build_manifest_relative_location(build_id, opensearch_version, architecture):\n+ return f\"builds/{opensearch_version}/{build_id}/{architecture}/manifest.yml\"\n+\n+ @staticmethod\n+ def from_s3(bucket_name, build_id, opensearch_version, architecture, work_dir=None):\n+ work_dir = work_dir if not None else str(os.getcwd())\n+ manifest_s3_path = BuildManifest.get_build_manifest_relative_location(build_id, opensearch_version, architecture)\n+ S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)\n+ with open('manifest.yml', 'r') as file:\n+ build_manifest = BuildManifest.from_file(file)\n+ os.remove(os.path.realpath(os.path.join(work_dir, 'manifest.yml')))\n+ return build_manifest\n+\n class Build:\n def __init__(self, data):\n self.name = data[\"name\"]\ndiff --git a/bundle-workflow/src/manifests/bundle_manifest.py b/bundle-workflow/src/manifests/bundle_manifest.py\n--- a/bundle-workflow/src/manifests/bundle_manifest.py\n+++ b/bundle-workflow/src/manifests/bundle_manifest.py\n@@ -4,6 +4,9 @@\n # this file be licensed under the Apache-2.0 license or a\n # compatible open source license.\n \n+import os\n+\n+from aws.s3_bucket import S3Bucket\n from manifests.manifest import Manifest\n \n \n@@ -45,6 +48,28 @@\n ),\n }\n \n+ @staticmethod\n+ def from_s3(bucket_name, build_id, opensearch_version, architecture, work_dir=None):\n+ work_dir = work_dir if not None else str(os.getcwd())\n+ manifest_s3_path = BundleManifest.get_bundle_manifest_relative_location(build_id, opensearch_version, architecture)\n+ S3Bucket(bucket_name).download_file(manifest_s3_path, work_dir)\n+ with open('manifest.yml', 'r') as file:\n+ bundle_manifest = BundleManifest.from_file(file)\n+ os.remove(os.path.realpath(os.path.join(work_dir, 'manifest.yml')))\n+ return bundle_manifest\n+\n+ @staticmethod\n+ def get_tarball_relative_location(build_id, opensearch_version, architecture):\n+ return f\"bundles/{opensearch_version}/{build_id}/{architecture}/opensearch-{opensearch_version}-linux-{architecture}.tar.gz\"\n+\n+ @staticmethod\n+ def get_tarball_name(opensearch_version, architecture):\n+ return f\"opensearch-{opensearch_version}-linux-{architecture}.tar.gz\"\n+\n+ @staticmethod\n+ def get_bundle_manifest_relative_location(build_id, opensearch_version, architecture):\n+ return f\"bundles/{opensearch_version}/{build_id}/{architecture}/manifest.yml\"\n+\n class Build:\n def __init__(self, data):\n self.name = data[\"name\"]\n", "issue": "Integration tests to pull artifacts from S3Bucket\n\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nfrom manifests.manifest import Manifest\n\n\nclass BundleManifest(Manifest):\n \"\"\"\n A BundleManifest is an immutable view of the outputs from a assemble step\n The manifest contains information about the bundle that was built (in the `assemble` section),\n and the components that made up 
the bundle in the `components` section.\n\n The format for schema version 1.0 is:\n schema-version: \"1.0\"\n build:\n name: string\n version: string\n architecture: x64 or arm64\n location: /relative/path/to/tarball\n components:\n - name: string\n repository: URL of git repository\n ref: git ref that was built (sha, branch, or tag)\n commit_id: The actual git commit ID that was built (i.e. the resolved \"ref\")\n location: /relative/path/to/artifact\n \"\"\"\n\n def __init__(self, data):\n super().__init__(data)\n\n self.build = self.Build(data[\"build\"])\n self.components = list(\n map(lambda entry: self.Component(entry), data[\"components\"])\n )\n\n def __to_dict__(self):\n return {\n \"schema-version\": \"1.0\",\n \"build\": self.build.__to_dict__(),\n \"components\": list(\n map(lambda component: component.__to_dict__(), self.components)\n ),\n }\n\n class Build:\n def __init__(self, data):\n self.name = data[\"name\"]\n self.version = data[\"version\"]\n self.architecture = data[\"architecture\"]\n self.location = data[\"location\"]\n self.id = data[\"id\"]\n\n def __to_dict__(self):\n return {\n \"name\": self.name,\n \"version\": self.version,\n \"architecture\": self.architecture,\n \"location\": self.location,\n \"id\": self.id,\n }\n\n class Component:\n def __init__(self, data):\n self.name = data[\"name\"]\n self.repository = data[\"repository\"]\n self.ref = data[\"ref\"]\n self.commit_id = data[\"commit_id\"]\n self.location = data[\"location\"]\n\n def __to_dict__(self):\n return {\n \"name\": self.name,\n \"repository\": self.repository,\n \"ref\": self.ref,\n \"commit_id\": self.commit_id,\n \"location\": self.location,\n }\n", "path": "bundle-workflow/src/manifests/bundle_manifest.py"}, {"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nfrom manifests.manifest import Manifest\n\n\"\"\"\nA BuildManifest is an immutable view of the outputs from a build step\nThe manifest contains information about the product that was built (in the `build` section),\nand the components that made up the build in the `components` section.\n\nThe format for schema version 1.0 is:\nschema-version: \"1.0\"\nbuild:\n name: string\n version: string\n architecture: x64 or arm64\ncomponents:\n - name: string\n repository: URL of git repository\n ref: git ref that was built (sha, branch, or tag)\n commit_id: The actual git commit ID that was built (i.e. 
the resolved \"ref\")\n artifacts:\n maven:\n - maven/relative/path/to/artifact\n - ...\n plugins:\n - plugins/relative/path/to/artifact\n - ...\n libs:\n - libs/relative/path/to/artifact\n - ...\n - ...\n\"\"\"\n\n\nclass BuildManifest(Manifest):\n def __init__(self, data):\n super().__init__(data)\n\n self.build = self.Build(data[\"build\"])\n self.components = list(\n map(lambda entry: self.Component(entry), data[\"components\"])\n )\n\n def __to_dict__(self):\n return {\n \"schema-version\": \"1.0\",\n \"build\": self.build.__to_dict__(),\n \"components\": list(\n map(lambda component: component.__to_dict__(), self.components)\n ),\n }\n\n class Build:\n def __init__(self, data):\n self.name = data[\"name\"]\n self.version = data[\"version\"]\n self.architecture = data[\"architecture\"]\n self.id = data[\"id\"]\n\n def __to_dict__(self):\n return {\n \"name\": self.name,\n \"version\": self.version,\n \"architecture\": self.architecture,\n \"id\": self.id,\n }\n\n class Component:\n def __init__(self, data):\n self.name = data[\"name\"]\n self.repository = data[\"repository\"]\n self.ref = data[\"ref\"]\n self.commit_id = data[\"commit_id\"]\n self.artifacts = data[\"artifacts\"]\n self.version = data[\"version\"]\n\n def __to_dict__(self):\n return {\n \"name\": self.name,\n \"repository\": self.repository,\n \"ref\": self.ref,\n \"commit_id\": self.commit_id,\n \"artifacts\": self.artifacts,\n \"version\": self.version,\n }\n", "path": "bundle-workflow/src/manifests/build_manifest.py"}]} | 2,047 | 766 |
gh_patches_debug_47932 | rasdani/github-patches | git_diff | liqd__a4-opin-612 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
too much space below video, not deletable in wagtail

</issue>
<code>
[start of home/wagtail_hooks.py]
1 from django.conf import settings
2 from django.utils.html import format_html
3 from wagtail.wagtailcore import hooks
4
5
6 @hooks.register('insert_editor_css')
7 def editor_css():
8 return format_html('<link rel="stylesheet" href="'
9 + settings.STATIC_URL
10 + 'scss/wagtail_admin/wagtail_admin.css">')
11
[end of home/wagtail_hooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/home/wagtail_hooks.py b/home/wagtail_hooks.py
--- a/home/wagtail_hooks.py
+++ b/home/wagtail_hooks.py
@@ -7,4 +7,4 @@
def editor_css():
return format_html('<link rel="stylesheet" href="'
+ settings.STATIC_URL
- + 'scss/wagtail_admin/wagtail_admin.css">')
+ + 'wagtail_admin.css">')
| {"golden_diff": "diff --git a/home/wagtail_hooks.py b/home/wagtail_hooks.py\n--- a/home/wagtail_hooks.py\n+++ b/home/wagtail_hooks.py\n@@ -7,4 +7,4 @@\n def editor_css():\n return format_html('<link rel=\"stylesheet\" href=\"'\n + settings.STATIC_URL\n- + 'scss/wagtail_admin/wagtail_admin.css\">')\n+ + 'wagtail_admin.css\">')\n", "issue": "too much space below video, not deletable in wagtail \n\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.utils.html import format_html\nfrom wagtail.wagtailcore import hooks\n\n\[email protected]('insert_editor_css')\ndef editor_css():\n return format_html('<link rel=\"stylesheet\" href=\"'\n + settings.STATIC_URL\n + 'scss/wagtail_admin/wagtail_admin.css\">')\n", "path": "home/wagtail_hooks.py"}]} | 706 | 98 |
gh_patches_debug_30027 | rasdani/github-patches | git_diff | fidals__shopelectro-992 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Resurrect script to process photos to the DB
We currently have a script that processes photos into the DB:
`shopelectro/management/commands/images.py`
Since we have no tests for it, the code may have gone stale. Resurrect it and move the products to prod.
</issue>
<code>
[start of shopelectro/logic/header.py]
1 import typing
2 from functools import lru_cache
3
4 from django.conf import settings
5 from django.db.models import Q
6
7 from pages import models as pages_models
8 from shopelectro import models
9
10
11 class Menu:
12 DICT_TYPE = typing.Dict[models.CategoryPage, typing.List[models.CategoryPage]]
13
14 @staticmethod
15 def roots() -> pages_models.PageQuerySet:
16 """
17 QuerySet with header menu items.
18
19 Contains root categories.
20 Result can be tuned HEADER_LINKS settings option.
21 """
22 return (
23 pages_models.Page.objects.active()
24 .filter(
25 Q(slug__in=settings.HEADER_LINKS['add'])
26 | (
27 # @todo #974:30m Optimize the header menu query.
28 # Fetch catalog page for the header menu at the same query.
29 # root category pages.
30 Q(parent=pages_models.CustomPage.objects.filter(slug='catalog'))
31 & Q(type='model')
32 & Q(related_model_name=models.Category._meta.db_table)
33 & ~Q(slug__in=settings.HEADER_LINKS['exclude'])
34 )
35 )
36 .order_by('position')
37 )
38
39 @lru_cache(maxsize=1)
40 def as_dict(self) -> DICT_TYPE:
41 return {
42 root: list(
43 root.get_children()
44 .filter(type='model')
45 .filter(related_model_name=models.Category._meta.db_table)
46 )
47 for root in self.roots().iterator()
48 }
49
[end of shopelectro/logic/header.py]
[start of shopelectro/management/commands/images.py]
1 """Create Image objects from folder with image files."""
2 import os
3
4 from django.conf import settings
5 from django.core.files.images import ImageFile
6 from django.core.management.base import BaseCommand
7
8 from images.models import Image
9 from pages.models import Page
10 from shopelectro.models import Product
11
12
13 IMAGES_ROOT_FOLDER_NAME = os.path.join(settings.MEDIA_ROOT, 'products')
14
15
16 def create_image_models():
17
18 def iter_dirs(path: str):
19 return (dir_ for dir_ in os.scandir(path) if dir_.is_dir())
20
21 def iter_files(path: str):
22 return (file_ for file_ in os.scandir(path) if file_.is_file())
23
24 def get_page(product_id: int) -> Page:
25 product_ = Product.objects.filter(id=product_id).first()
26 return product_.page if product_ else None
27
28 def create_image_model(file_, product_id: int, slug):
29 file_short_name, _ = os.path.splitext(file_.name)
30
31 # skip images, resized to small size
32 if file_short_name == 'small':
33 return
34
35 # create Image model object based on current image
36 page = get_page(product_id=product_id)
37 if not page:
38 return
39 # don't use bulk create, because save() isn't hooked with it
40 # http://bit.ly/django_bulk_create
41 Image.objects.create(
42 model=page,
43 # autoincrement file names: '1.jpg', '2.jpg' and so on
44 slug=slug,
45 image=ImageFile(open(file_.path, mode='rb')),
46 is_main=(file_short_name == 'main')
47 )
48
49 if not os.path.isdir(IMAGES_ROOT_FOLDER_NAME) or len(Image.objects.all()):
50 return
51
52 # run over every image in every folder
53 for dir_ in iter_dirs(IMAGES_ROOT_FOLDER_NAME):
54 for slug_index, file in enumerate(iter_files(dir_.path)):
55 create_image_model(
56 file_=file,
57 product_id=int(dir_.name),
58 slug=str(slug_index)
59 )
60 # old folder stays in fs as backup of old photos
61
62
63 class Command(BaseCommand):
64
65 def handle(self, *args, **kwargs):
66 create_image_models()
67
[end of shopelectro/management/commands/images.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/logic/header.py b/shopelectro/logic/header.py
--- a/shopelectro/logic/header.py
+++ b/shopelectro/logic/header.py
@@ -14,7 +14,7 @@
@staticmethod
def roots() -> pages_models.PageQuerySet:
"""
- QuerySet with header menu items.
+ Queryset with header menu items.
Contains root categories.
Result can be tuned HEADER_LINKS settings option.
diff --git a/shopelectro/management/commands/images.py b/shopelectro/management/commands/images.py
--- a/shopelectro/management/commands/images.py
+++ b/shopelectro/management/commands/images.py
@@ -9,7 +9,6 @@
from pages.models import Page
from shopelectro.models import Product
-
IMAGES_ROOT_FOLDER_NAME = os.path.join(settings.MEDIA_ROOT, 'products')
@@ -22,7 +21,7 @@
return (file_ for file_ in os.scandir(path) if file_.is_file())
def get_page(product_id: int) -> Page:
- product_ = Product.objects.filter(id=product_id).first()
+ product_ = Product.objects.filter(vendor_code=product_id).first()
return product_.page if product_ else None
def create_image_model(file_, product_id: int, slug):
@@ -42,6 +41,7 @@
model=page,
# autoincrement file names: '1.jpg', '2.jpg' and so on
slug=slug,
+ # copies file with to the new path on create
image=ImageFile(open(file_.path, mode='rb')),
is_main=(file_short_name == 'main')
)
| {"golden_diff": "diff --git a/shopelectro/logic/header.py b/shopelectro/logic/header.py\n--- a/shopelectro/logic/header.py\n+++ b/shopelectro/logic/header.py\n@@ -14,7 +14,7 @@\n @staticmethod\n def roots() -> pages_models.PageQuerySet:\n \"\"\"\n- QuerySet with header menu items.\n+ Queryset with header menu items.\n \n Contains root categories.\n Result can be tuned HEADER_LINKS settings option.\ndiff --git a/shopelectro/management/commands/images.py b/shopelectro/management/commands/images.py\n--- a/shopelectro/management/commands/images.py\n+++ b/shopelectro/management/commands/images.py\n@@ -9,7 +9,6 @@\n from pages.models import Page\n from shopelectro.models import Product\n \n-\n IMAGES_ROOT_FOLDER_NAME = os.path.join(settings.MEDIA_ROOT, 'products')\n \n \n@@ -22,7 +21,7 @@\n return (file_ for file_ in os.scandir(path) if file_.is_file())\n \n def get_page(product_id: int) -> Page:\n- product_ = Product.objects.filter(id=product_id).first()\n+ product_ = Product.objects.filter(vendor_code=product_id).first()\n return product_.page if product_ else None\n \n def create_image_model(file_, product_id: int, slug):\n@@ -42,6 +41,7 @@\n model=page,\n # autoincrement file names: '1.jpg', '2.jpg' and so on\n slug=slug,\n+ # copies file with to the new path on create\n image=ImageFile(open(file_.path, mode='rb')),\n is_main=(file_short_name == 'main')\n )\n", "issue": "Resurrect script to process photos to the DB\nNow we are having script processing photos to the DB\r\n`shopelectro/management/commands/images.py`\r\n\r\nSince we have no tests for it, code may become stale. Resurrect it and move products to the prod\n", "before_files": [{"content": "import typing\nfrom functools import lru_cache\n\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom pages import models as pages_models\nfrom shopelectro import models\n\n\nclass Menu:\n DICT_TYPE = typing.Dict[models.CategoryPage, typing.List[models.CategoryPage]]\n\n @staticmethod\n def roots() -> pages_models.PageQuerySet:\n \"\"\"\n QuerySet with header menu items.\n\n Contains root categories.\n Result can be tuned HEADER_LINKS settings option.\n \"\"\"\n return (\n pages_models.Page.objects.active()\n .filter(\n Q(slug__in=settings.HEADER_LINKS['add'])\n | (\n # @todo #974:30m Optimize the header menu query.\n # Fetch catalog page for the header menu at the same query.\n # root category pages.\n Q(parent=pages_models.CustomPage.objects.filter(slug='catalog'))\n & Q(type='model')\n & Q(related_model_name=models.Category._meta.db_table)\n & ~Q(slug__in=settings.HEADER_LINKS['exclude'])\n )\n )\n .order_by('position')\n )\n\n @lru_cache(maxsize=1)\n def as_dict(self) -> DICT_TYPE:\n return {\n root: list(\n root.get_children()\n .filter(type='model')\n .filter(related_model_name=models.Category._meta.db_table)\n )\n for root in self.roots().iterator()\n }\n", "path": "shopelectro/logic/header.py"}, {"content": "\"\"\"Create Image objects from folder with image files.\"\"\"\nimport os\n\nfrom django.conf import settings\nfrom django.core.files.images import ImageFile\nfrom django.core.management.base import BaseCommand\n\nfrom images.models import Image\nfrom pages.models import Page\nfrom shopelectro.models import Product\n\n\nIMAGES_ROOT_FOLDER_NAME = os.path.join(settings.MEDIA_ROOT, 'products')\n\n\ndef create_image_models():\n\n def iter_dirs(path: str):\n return (dir_ for dir_ in os.scandir(path) if dir_.is_dir())\n\n def iter_files(path: str):\n return (file_ for file_ in os.scandir(path) if file_.is_file())\n\n 
def get_page(product_id: int) -> Page:\n product_ = Product.objects.filter(id=product_id).first()\n return product_.page if product_ else None\n\n def create_image_model(file_, product_id: int, slug):\n file_short_name, _ = os.path.splitext(file_.name)\n\n # skip images, resized to small size\n if file_short_name == 'small':\n return\n\n # create Image model object based on current image\n page = get_page(product_id=product_id)\n if not page:\n return\n # don't use bulk create, because save() isn't hooked with it\n # http://bit.ly/django_bulk_create\n Image.objects.create(\n model=page,\n # autoincrement file names: '1.jpg', '2.jpg' and so on\n slug=slug,\n image=ImageFile(open(file_.path, mode='rb')),\n is_main=(file_short_name == 'main')\n )\n\n if not os.path.isdir(IMAGES_ROOT_FOLDER_NAME) or len(Image.objects.all()):\n return\n\n # run over every image in every folder\n for dir_ in iter_dirs(IMAGES_ROOT_FOLDER_NAME):\n for slug_index, file in enumerate(iter_files(dir_.path)):\n create_image_model(\n file_=file,\n product_id=int(dir_.name),\n slug=str(slug_index)\n )\n # old folder stays in fs as backup of old photos\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **kwargs):\n create_image_models()\n", "path": "shopelectro/management/commands/images.py"}]} | 1,632 | 387 |
gh_patches_debug_30136 | rasdani/github-patches | git_diff | ktbyers__netmiko-1648 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Raise exception if asa_login() fails to login successfully
</issue>
<code>
[start of netmiko/cisco/cisco_asa_ssh.py]
1 """Subclass specific to Cisco ASA."""
2 import re
3 import time
4 from netmiko.cisco_base_connection import CiscoSSHConnection, CiscoFileTransfer
5
6
7 class CiscoAsaSSH(CiscoSSHConnection):
8 """Subclass specific to Cisco ASA."""
9
10 def session_preparation(self):
11 """Prepare the session after the connection has been established."""
12 self._test_channel_read()
13 self.set_base_prompt()
14 if self.secret:
15 self.enable()
16 else:
17 self.asa_login()
18 self.disable_paging(command="terminal pager 0")
19 if self.allow_auto_change:
20 try:
21 self.send_config_set("terminal width 511")
22 except ValueError:
23 # Don't fail for the terminal width
24 pass
25
26 # Clear the read buffer
27 time.sleep(0.3 * self.global_delay_factor)
28 self.clear_buffer()
29
30 def send_command_timing(self, *args, **kwargs):
31 """
32 If the ASA is in multi-context mode, then the base_prompt needs to be
33 updated after each context change.
34 """
35 output = super().send_command_timing(*args, **kwargs)
36 if len(args) >= 1:
37 command_string = args[0]
38 else:
39 command_string = kwargs["command_string"]
40 if "changeto" in command_string:
41 self.set_base_prompt()
42 return output
43
44 def send_command(self, *args, **kwargs):
45 """
46 If the ASA is in multi-context mode, then the base_prompt needs to be
47 updated after each context change.
48 """
49 if len(args) >= 1:
50 command_string = args[0]
51 else:
52 command_string = kwargs["command_string"]
53
54 # If changeto in command, look for '#' to determine command is done
55 if "changeto" in command_string:
56 if len(args) <= 1:
57 expect_string = kwargs.get("expect_string", "#")
58 kwargs["expect_string"] = expect_string
59 output = super().send_command(*args, **kwargs)
60
61 if "changeto" in command_string:
62 self.set_base_prompt()
63
64 return output
65
66 def send_command_expect(self, *args, **kwargs):
67 """Backwards compaitibility."""
68 return self.send_command(*args, **kwargs)
69
70 def set_base_prompt(self, *args, **kwargs):
71 """
72 Cisco ASA in multi-context mode needs to have the base prompt updated
73 (if you switch contexts i.e. 'changeto')
74
75 This switch of ASA contexts can occur in configuration mode. If this
76 happens the trailing '(config*' needs stripped off.
77 """
78 cur_base_prompt = super().set_base_prompt(*args, **kwargs)
79 match = re.search(r"(.*)\(conf.*", cur_base_prompt)
80 if match:
81 # strip off (conf.* from base_prompt
82 self.base_prompt = match.group(1)
83 return self.base_prompt
84
85 def asa_login(self):
86 """
87 Handle ASA reaching privilege level 15 using login
88
89 twb-dc-fw1> login
90 Username: admin
91 Password: ************
92 """
93 delay_factor = self.select_delay_factor(0)
94
95 i = 1
96 max_attempts = 50
97 self.write_channel("login" + self.RETURN)
98 while i <= max_attempts:
99 time.sleep(0.5 * delay_factor)
100 output = self.read_channel()
101 if "sername" in output:
102 self.write_channel(self.username + self.RETURN)
103 elif "ssword" in output:
104 self.write_channel(self.password + self.RETURN)
105 elif "#" in output:
106 break
107 else:
108 self.write_channel("login" + self.RETURN)
109 i += 1
110
111 def save_config(self, cmd="write mem", confirm=False, confirm_response=""):
112 """Saves Config"""
113 return super().save_config(
114 cmd=cmd, confirm=confirm, confirm_response=confirm_response
115 )
116
117
118 class CiscoAsaFileTransfer(CiscoFileTransfer):
119 """Cisco ASA SCP File Transfer driver."""
120
121 pass
122
[end of netmiko/cisco/cisco_asa_ssh.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netmiko/cisco/cisco_asa_ssh.py b/netmiko/cisco/cisco_asa_ssh.py
--- a/netmiko/cisco/cisco_asa_ssh.py
+++ b/netmiko/cisco/cisco_asa_ssh.py
@@ -2,6 +2,7 @@
import re
import time
from netmiko.cisco_base_connection import CiscoSSHConnection, CiscoFileTransfer
+from netmiko.ssh_exception import NetmikoAuthenticationException
class CiscoAsaSSH(CiscoSSHConnection):
@@ -88,12 +89,14 @@
twb-dc-fw1> login
Username: admin
- Password: ************
+
+ Raises NetmikoAuthenticationException, if we do not reach privilege
+ level 15 after 3 attempts.
"""
delay_factor = self.select_delay_factor(0)
i = 1
- max_attempts = 50
+ max_attempts = 3
self.write_channel("login" + self.RETURN)
while i <= max_attempts:
time.sleep(0.5 * delay_factor)
@@ -103,11 +106,14 @@
elif "ssword" in output:
self.write_channel(self.password + self.RETURN)
elif "#" in output:
- break
+ return True
else:
self.write_channel("login" + self.RETURN)
i += 1
+ msg = "Unable to get to enable mode!"
+ raise NetmikoAuthenticationException(msg)
+
def save_config(self, cmd="write mem", confirm=False, confirm_response=""):
"""Saves Config"""
return super().save_config(
| {"golden_diff": "diff --git a/netmiko/cisco/cisco_asa_ssh.py b/netmiko/cisco/cisco_asa_ssh.py\n--- a/netmiko/cisco/cisco_asa_ssh.py\n+++ b/netmiko/cisco/cisco_asa_ssh.py\n@@ -2,6 +2,7 @@\n import re\n import time\n from netmiko.cisco_base_connection import CiscoSSHConnection, CiscoFileTransfer\n+from netmiko.ssh_exception import NetmikoAuthenticationException\n \n \n class CiscoAsaSSH(CiscoSSHConnection):\n@@ -88,12 +89,14 @@\n \n twb-dc-fw1> login\n Username: admin\n- Password: ************\n+\n+ Raises NetmikoAuthenticationException, if we do not reach privilege\n+ level 15 after 3 attempts.\n \"\"\"\n delay_factor = self.select_delay_factor(0)\n \n i = 1\n- max_attempts = 50\n+ max_attempts = 3\n self.write_channel(\"login\" + self.RETURN)\n while i <= max_attempts:\n time.sleep(0.5 * delay_factor)\n@@ -103,11 +106,14 @@\n elif \"ssword\" in output:\n self.write_channel(self.password + self.RETURN)\n elif \"#\" in output:\n- break\n+ return True\n else:\n self.write_channel(\"login\" + self.RETURN)\n i += 1\n \n+ msg = \"Unable to get to enable mode!\"\n+ raise NetmikoAuthenticationException(msg)\n+\n def save_config(self, cmd=\"write mem\", confirm=False, confirm_response=\"\"):\n \"\"\"Saves Config\"\"\"\n return super().save_config(\n", "issue": "Raise exception if asa_login() fails to login successfully\n\n", "before_files": [{"content": "\"\"\"Subclass specific to Cisco ASA.\"\"\"\nimport re\nimport time\nfrom netmiko.cisco_base_connection import CiscoSSHConnection, CiscoFileTransfer\n\n\nclass CiscoAsaSSH(CiscoSSHConnection):\n \"\"\"Subclass specific to Cisco ASA.\"\"\"\n\n def session_preparation(self):\n \"\"\"Prepare the session after the connection has been established.\"\"\"\n self._test_channel_read()\n self.set_base_prompt()\n if self.secret:\n self.enable()\n else:\n self.asa_login()\n self.disable_paging(command=\"terminal pager 0\")\n if self.allow_auto_change:\n try:\n self.send_config_set(\"terminal width 511\")\n except ValueError:\n # Don't fail for the terminal width\n pass\n\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()\n\n def send_command_timing(self, *args, **kwargs):\n \"\"\"\n If the ASA is in multi-context mode, then the base_prompt needs to be\n updated after each context change.\n \"\"\"\n output = super().send_command_timing(*args, **kwargs)\n if len(args) >= 1:\n command_string = args[0]\n else:\n command_string = kwargs[\"command_string\"]\n if \"changeto\" in command_string:\n self.set_base_prompt()\n return output\n\n def send_command(self, *args, **kwargs):\n \"\"\"\n If the ASA is in multi-context mode, then the base_prompt needs to be\n updated after each context change.\n \"\"\"\n if len(args) >= 1:\n command_string = args[0]\n else:\n command_string = kwargs[\"command_string\"]\n\n # If changeto in command, look for '#' to determine command is done\n if \"changeto\" in command_string:\n if len(args) <= 1:\n expect_string = kwargs.get(\"expect_string\", \"#\")\n kwargs[\"expect_string\"] = expect_string\n output = super().send_command(*args, **kwargs)\n\n if \"changeto\" in command_string:\n self.set_base_prompt()\n\n return output\n\n def send_command_expect(self, *args, **kwargs):\n \"\"\"Backwards compaitibility.\"\"\"\n return self.send_command(*args, **kwargs)\n\n def set_base_prompt(self, *args, **kwargs):\n \"\"\"\n Cisco ASA in multi-context mode needs to have the base prompt updated\n (if you switch contexts i.e. 'changeto')\n\n This switch of ASA contexts can occur in configuration mode. 
If this\n happens the trailing '(config*' needs stripped off.\n \"\"\"\n cur_base_prompt = super().set_base_prompt(*args, **kwargs)\n match = re.search(r\"(.*)\\(conf.*\", cur_base_prompt)\n if match:\n # strip off (conf.* from base_prompt\n self.base_prompt = match.group(1)\n return self.base_prompt\n\n def asa_login(self):\n \"\"\"\n Handle ASA reaching privilege level 15 using login\n\n twb-dc-fw1> login\n Username: admin\n Password: ************\n \"\"\"\n delay_factor = self.select_delay_factor(0)\n\n i = 1\n max_attempts = 50\n self.write_channel(\"login\" + self.RETURN)\n while i <= max_attempts:\n time.sleep(0.5 * delay_factor)\n output = self.read_channel()\n if \"sername\" in output:\n self.write_channel(self.username + self.RETURN)\n elif \"ssword\" in output:\n self.write_channel(self.password + self.RETURN)\n elif \"#\" in output:\n break\n else:\n self.write_channel(\"login\" + self.RETURN)\n i += 1\n\n def save_config(self, cmd=\"write mem\", confirm=False, confirm_response=\"\"):\n \"\"\"Saves Config\"\"\"\n return super().save_config(\n cmd=cmd, confirm=confirm, confirm_response=confirm_response\n )\n\n\nclass CiscoAsaFileTransfer(CiscoFileTransfer):\n \"\"\"Cisco ASA SCP File Transfer driver.\"\"\"\n\n pass\n", "path": "netmiko/cisco/cisco_asa_ssh.py"}]} | 1,698 | 373 |
gh_patches_debug_9341 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1986 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Haringey Waste Collection sensor returning 'Unknown'
### I Have A Problem With:
A specific source
### What's Your Problem
I am having an issue adding the Haringey council (UK) Waste Collection sensor into HA. The sensor value shows as 'Unknown'.
I have added this code to my configuration.yaml (replacing "My UPRN" with my actual UPRN):
```
sources:
- name: haringey_gov_uk
args:
uprn: "My UPRN"
sensor:
- platform: waste_collection_schedule
name: "Haringey Waste Collection"
details_format: upcoming
value_template: 'in {{value.daysTo}} days'
```

I have tested with other configs for other councils I found online and those work as expected. Is there something wrong with the Haringey data feed? The URL mentioned in the documentation is still correct, and I can see the correct information on the Haringey website.
### Source (if relevant)
_No response_
### Logs
_No response_
### Relevant Configuration
_No response_
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py]
1 from datetime import datetime
2
3 import requests
4 from bs4 import BeautifulSoup
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6
7 TITLE = "Haringey Council"
8 DESCRIPTION = "Source for haringey.gov.uk services for Haringey Council, UK."
9 URL = "https://www.haringey.gov.uk/"
10 TEST_CASES = {
11 "Test_001": {"uprn": "100021209182"},
12 "Test_002": {"uprn": "100021207181"},
13 "Test_003": {"uprn": "100021202738"},
14 "Test_004": {"uprn": 100021202131},
15 }
16 ICON_MAP = {
17 "General Waste": "mdi:trash-can",
18 "Collect Domestic Recycling": "mdi:recycle",
19 "Food Waste": "mdi:food-apple",
20 "Collect Paid Domestic Garden": "mdi:leaf",
21 }
22
23
24 class Source:
25 def __init__(self, uprn):
26 self._uprn = str(uprn).zfill(12)
27
28 def fetch(self):
29 api_url = f"https://wastecollections.haringey.gov.uk/property/{self._uprn}"
30 response = requests.get(api_url)
31
32 soup = BeautifulSoup(response.text, features="html.parser")
33 soup.prettify()
34
35 entries = []
36
37 service_elements = soup.select(".service-wrapper")
38
39 for service_element in service_elements:
40 service_name = service_element.select(".service-name")[0].text.strip()
41 next_service_date = service_element.select("td.next-service")[0]
42
43 next_service_date.span.extract()
44
45 entries.append(
46 Collection(
47 date=datetime.strptime(
48 next_service_date.text.strip(), "%d/%m/%Y"
49 ).date(),
50 t=service_name,
51 icon=ICON_MAP.get(service_name),
52 )
53 )
54
55 return entries
56
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py
@@ -38,7 +38,11 @@
for service_element in service_elements:
service_name = service_element.select(".service-name")[0].text.strip()
- next_service_date = service_element.select("td.next-service")[0]
+
+ next_service_dates = service_element.select("td.next-service")
+ if len(next_service_dates) == 0:
+ continue
+ next_service_date = next_service_dates[0]
next_service_date.span.extract()
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py\n@@ -38,7 +38,11 @@\n \n for service_element in service_elements:\n service_name = service_element.select(\".service-name\")[0].text.strip()\n- next_service_date = service_element.select(\"td.next-service\")[0]\n+\n+ next_service_dates = service_element.select(\"td.next-service\")\n+ if len(next_service_dates) == 0:\n+ continue\n+ next_service_date = next_service_dates[0]\n \n next_service_date.span.extract()\n", "issue": "[Bug]: Haringey Waste Collection sensor returning 'Unknown'\n### I Have A Problem With:\r\n\r\nA specific source\r\n\r\n### What's Your Problem\r\n\r\nI am having an issue adding the Haringey council (UK) Waste Collection sensor into HA. The sensor value shows as 'Unknown'.\r\n\r\nI have added this code to my configuration.yaml (replacing \"My UPRN\" with my actual UPRN):\r\n\r\n```\r\n sources:\r\n - name: haringey_gov_uk\r\n args:\r\n uprn: \"My UPRN\"\r\n\r\nsensor:\r\n - platform: waste_collection_schedule\r\n name: \"Haringey Waste Collection\"\r\n details_format: upcoming\r\n value_template: 'in {{value.daysTo}} days'\r\n```\r\n\r\n\r\n\r\nI have tested with other configs for other councils I found online and those work as expected. Is there something wrong with the Haringey data feed? The URL mentioned in the documentation is still correct, and I can see the correct information on the Haringey website.\r\n\r\n### Source (if relevant)\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Relevant Configuration\r\n\r\n_No response_\r\n\r\n### Checklist Source Error\r\n\r\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\r\n- [X] Checked that the website of your service provider is still working\r\n- [X] Tested my attributes on the service provider website (if possible)\r\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\r\n\r\n### Checklist Sensor Error\r\n\r\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\r\n\r\n### Required\r\n\r\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\r\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Haringey Council\"\nDESCRIPTION = \"Source for haringey.gov.uk services for Haringey Council, UK.\"\nURL = \"https://www.haringey.gov.uk/\"\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"100021209182\"},\n \"Test_002\": {\"uprn\": \"100021207181\"},\n \"Test_003\": {\"uprn\": \"100021202738\"},\n \"Test_004\": {\"uprn\": 100021202131},\n}\nICON_MAP = {\n \"General Waste\": \"mdi:trash-can\",\n \"Collect Domestic Recycling\": 
\"mdi:recycle\",\n \"Food Waste\": \"mdi:food-apple\",\n \"Collect Paid Domestic Garden\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = str(uprn).zfill(12)\n\n def fetch(self):\n api_url = f\"https://wastecollections.haringey.gov.uk/property/{self._uprn}\"\n response = requests.get(api_url)\n\n soup = BeautifulSoup(response.text, features=\"html.parser\")\n soup.prettify()\n\n entries = []\n\n service_elements = soup.select(\".service-wrapper\")\n\n for service_element in service_elements:\n service_name = service_element.select(\".service-name\")[0].text.strip()\n next_service_date = service_element.select(\"td.next-service\")[0]\n\n next_service_date.span.extract()\n\n entries.append(\n Collection(\n date=datetime.strptime(\n next_service_date.text.strip(), \"%d/%m/%Y\"\n ).date(),\n t=service_name,\n icon=ICON_MAP.get(service_name),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/haringey_gov_uk.py"}]} | 1,612 | 194 |
gh_patches_debug_39219 | rasdani/github-patches | git_diff | ethereum__consensus-specs-1202 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
is_genesis_trigger fails to verify deposit merkle branch
## Issue
Deposits are processed against empty state with undefined `eth1_data` while `genesis_eth1_data` should be in place.
</issue>
<code>
[start of deposit_contract/contracts/validator_registration.v.py]
1 MIN_DEPOSIT_AMOUNT: constant(uint256) = 1000000000 # Gwei
2 DEPOSIT_CONTRACT_TREE_DEPTH: constant(uint256) = 32
3 MAX_DEPOSIT_COUNT: constant(uint256) = 4294967295 # 2**DEPOSIT_CONTRACT_TREE_DEPTH - 1
4 PUBKEY_LENGTH: constant(uint256) = 48 # bytes
5 WITHDRAWAL_CREDENTIALS_LENGTH: constant(uint256) = 32 # bytes
6 AMOUNT_LENGTH: constant(uint256) = 8 # bytes
7 SIGNATURE_LENGTH: constant(uint256) = 96 # bytes
8
9 Deposit: event({
10 pubkey: bytes[48],
11 withdrawal_credentials: bytes[32],
12 amount: bytes[8],
13 signature: bytes[96],
14 index: bytes[8],
15 })
16
17 branch: bytes32[DEPOSIT_CONTRACT_TREE_DEPTH]
18 deposit_count: uint256
19
20 # Compute hashes in empty sparse Merkle tree
21 zero_hashes: bytes32[DEPOSIT_CONTRACT_TREE_DEPTH]
22 @public
23 def __init__():
24 for i in range(DEPOSIT_CONTRACT_TREE_DEPTH - 1):
25 self.zero_hashes[i + 1] = sha256(concat(self.zero_hashes[i], self.zero_hashes[i]))
26
27
28 @private
29 @constant
30 def to_little_endian_64(value: uint256) -> bytes[8]:
31 # Reversing bytes using bitwise uint256 manipulations
32 # Note: array accesses of bytes[] are not currently supported in Vyper
33 # Note: this function is only called when `value < 2**64`
34 y: uint256 = 0
35 x: uint256 = value
36 for _ in range(8):
37 y = shift(y, 8)
38 y = y + bitwise_and(x, 255)
39 x = shift(x, -8)
40 return slice(convert(y, bytes32), start=24, len=8)
41
42
43 @public
44 @constant
45 def get_deposit_root() -> bytes32:
46 node: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
47 size: uint256 = self.deposit_count
48 for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):
49 if bitwise_and(size, 1) == 1: # More gas efficient than `size % 2 == 1`
50 node = sha256(concat(self.branch[height], node))
51 else:
52 node = sha256(concat(node, self.zero_hashes[height]))
53 size /= 2
54 return node
55
56
57 @public
58 @constant
59 def get_deposit_count() -> bytes[8]:
60 return self.to_little_endian_64(self.deposit_count)
61
62
63 @payable
64 @public
65 def deposit(pubkey: bytes[PUBKEY_LENGTH],
66 withdrawal_credentials: bytes[WITHDRAWAL_CREDENTIALS_LENGTH],
67 signature: bytes[SIGNATURE_LENGTH]):
68 # Avoid overflowing the Merkle tree (and prevent edge case in computing `self.branch`)
69 assert self.deposit_count < MAX_DEPOSIT_COUNT
70
71 # Validate deposit data
72 deposit_amount: uint256 = msg.value / as_wei_value(1, "gwei")
73 assert deposit_amount >= MIN_DEPOSIT_AMOUNT
74 assert len(pubkey) == PUBKEY_LENGTH
75 assert len(withdrawal_credentials) == WITHDRAWAL_CREDENTIALS_LENGTH
76 assert len(signature) == SIGNATURE_LENGTH
77
78 # Emit `Deposit` log
79 amount: bytes[8] = self.to_little_endian_64(deposit_amount)
80 log.Deposit(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count))
81
82 # Compute `DepositData` root
83 zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
84 pubkey_root: bytes32 = sha256(concat(pubkey, slice(zero_bytes32, start=0, len=64 - PUBKEY_LENGTH)))
85 signature_root: bytes32 = sha256(concat(
86 sha256(slice(signature, start=0, len=64)),
87 sha256(concat(slice(signature, start=64, len=SIGNATURE_LENGTH - 64), zero_bytes32)),
88 ))
89 node: bytes32 = sha256(concat(
90 sha256(concat(pubkey_root, withdrawal_credentials)),
91 sha256(concat(amount, slice(zero_bytes32, start=0, len=32 - AMOUNT_LENGTH), signature_root)),
92 ))
93
94 # Add `DepositData` root to Merkle tree (update a single `branch` node)
95 self.deposit_count += 1
96 size: uint256 = self.deposit_count
97 for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):
98 if bitwise_and(size, 1) == 1: # More gas efficient than `size % 2 == 1`
99 self.branch[height] = node
100 break
101 node = sha256(concat(self.branch[height], node))
102 size /= 2
103
104
[end of deposit_contract/contracts/validator_registration.v.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deposit_contract/contracts/validator_registration.v.py b/deposit_contract/contracts/validator_registration.v.py
--- a/deposit_contract/contracts/validator_registration.v.py
+++ b/deposit_contract/contracts/validator_registration.v.py
@@ -6,7 +6,7 @@
AMOUNT_LENGTH: constant(uint256) = 8 # bytes
SIGNATURE_LENGTH: constant(uint256) = 96 # bytes
-Deposit: event({
+DepositEvent: event({
pubkey: bytes[48],
withdrawal_credentials: bytes[32],
amount: bytes[8],
@@ -42,8 +42,9 @@
@public
@constant
-def get_deposit_root() -> bytes32:
- node: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
+def get_hash_tree_root() -> bytes32:
+ zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
+ node: bytes32 = zero_bytes32
size: uint256 = self.deposit_count
for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):
if bitwise_and(size, 1) == 1: # More gas efficient than `size % 2 == 1`
@@ -51,7 +52,7 @@
else:
node = sha256(concat(node, self.zero_hashes[height]))
size /= 2
- return node
+ return sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24)))
@public
@@ -75,11 +76,11 @@
assert len(withdrawal_credentials) == WITHDRAWAL_CREDENTIALS_LENGTH
assert len(signature) == SIGNATURE_LENGTH
- # Emit `Deposit` log
+ # Emit `DepositEvent` log
amount: bytes[8] = self.to_little_endian_64(deposit_amount)
- log.Deposit(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count))
+ log.DepositEvent(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count))
- # Compute `DepositData` root
+ # Compute `DepositData` hash tree root
zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000
pubkey_root: bytes32 = sha256(concat(pubkey, slice(zero_bytes32, start=0, len=64 - PUBKEY_LENGTH)))
signature_root: bytes32 = sha256(concat(
@@ -91,7 +92,7 @@
sha256(concat(amount, slice(zero_bytes32, start=0, len=32 - AMOUNT_LENGTH), signature_root)),
))
- # Add `DepositData` root to Merkle tree (update a single `branch` node)
+ # Add `DepositData` hash tree root to Merkle tree (update a single `branch` node)
self.deposit_count += 1
size: uint256 = self.deposit_count
for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):
| {"golden_diff": "diff --git a/deposit_contract/contracts/validator_registration.v.py b/deposit_contract/contracts/validator_registration.v.py\n--- a/deposit_contract/contracts/validator_registration.v.py\n+++ b/deposit_contract/contracts/validator_registration.v.py\n@@ -6,7 +6,7 @@\n AMOUNT_LENGTH: constant(uint256) = 8 # bytes\n SIGNATURE_LENGTH: constant(uint256) = 96 # bytes\n \n-Deposit: event({\n+DepositEvent: event({\n pubkey: bytes[48],\n withdrawal_credentials: bytes[32],\n amount: bytes[8],\n@@ -42,8 +42,9 @@\n \n @public\n @constant\n-def get_deposit_root() -> bytes32:\n- node: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000\n+def get_hash_tree_root() -> bytes32:\n+ zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000\n+ node: bytes32 = zero_bytes32\n size: uint256 = self.deposit_count\n for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):\n if bitwise_and(size, 1) == 1: # More gas efficient than `size % 2 == 1`\n@@ -51,7 +52,7 @@\n else:\n node = sha256(concat(node, self.zero_hashes[height]))\n size /= 2\n- return node\n+ return sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24)))\n \n \n @public\n@@ -75,11 +76,11 @@\n assert len(withdrawal_credentials) == WITHDRAWAL_CREDENTIALS_LENGTH\n assert len(signature) == SIGNATURE_LENGTH\n \n- # Emit `Deposit` log\n+ # Emit `DepositEvent` log\n amount: bytes[8] = self.to_little_endian_64(deposit_amount)\n- log.Deposit(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count))\n+ log.DepositEvent(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count))\n \n- # Compute `DepositData` root\n+ # Compute `DepositData` hash tree root\n zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000\n pubkey_root: bytes32 = sha256(concat(pubkey, slice(zero_bytes32, start=0, len=64 - PUBKEY_LENGTH)))\n signature_root: bytes32 = sha256(concat(\n@@ -91,7 +92,7 @@\n sha256(concat(amount, slice(zero_bytes32, start=0, len=32 - AMOUNT_LENGTH), signature_root)),\n ))\n \n- # Add `DepositData` root to Merkle tree (update a single `branch` node)\n+ # Add `DepositData` hash tree root to Merkle tree (update a single `branch` node)\n self.deposit_count += 1\n size: uint256 = self.deposit_count\n for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):\n", "issue": "is_genesis_trigger fails to verify deposit merkle branch\n## Issue\r\nDeposits are processed against empty state with undefined `eth1_data` while `genesis_eth1_data` should be in place.\r\n\n", "before_files": [{"content": "MIN_DEPOSIT_AMOUNT: constant(uint256) = 1000000000 # Gwei\nDEPOSIT_CONTRACT_TREE_DEPTH: constant(uint256) = 32\nMAX_DEPOSIT_COUNT: constant(uint256) = 4294967295 # 2**DEPOSIT_CONTRACT_TREE_DEPTH - 1\nPUBKEY_LENGTH: constant(uint256) = 48 # bytes\nWITHDRAWAL_CREDENTIALS_LENGTH: constant(uint256) = 32 # bytes\nAMOUNT_LENGTH: constant(uint256) = 8 # bytes\nSIGNATURE_LENGTH: constant(uint256) = 96 # bytes\n\nDeposit: event({\n pubkey: bytes[48],\n withdrawal_credentials: bytes[32],\n amount: bytes[8],\n signature: bytes[96],\n index: bytes[8],\n})\n\nbranch: bytes32[DEPOSIT_CONTRACT_TREE_DEPTH]\ndeposit_count: uint256\n\n# Compute hashes in empty sparse Merkle tree\nzero_hashes: bytes32[DEPOSIT_CONTRACT_TREE_DEPTH]\n@public\ndef __init__():\n for i in range(DEPOSIT_CONTRACT_TREE_DEPTH - 1):\n self.zero_hashes[i + 1] = sha256(concat(self.zero_hashes[i], 
self.zero_hashes[i]))\n\n\n@private\n@constant\ndef to_little_endian_64(value: uint256) -> bytes[8]:\n # Reversing bytes using bitwise uint256 manipulations\n # Note: array accesses of bytes[] are not currently supported in Vyper\n # Note: this function is only called when `value < 2**64`\n y: uint256 = 0\n x: uint256 = value\n for _ in range(8):\n y = shift(y, 8)\n y = y + bitwise_and(x, 255)\n x = shift(x, -8)\n return slice(convert(y, bytes32), start=24, len=8)\n\n\n@public\n@constant\ndef get_deposit_root() -> bytes32:\n node: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000\n size: uint256 = self.deposit_count\n for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):\n if bitwise_and(size, 1) == 1: # More gas efficient than `size % 2 == 1`\n node = sha256(concat(self.branch[height], node))\n else:\n node = sha256(concat(node, self.zero_hashes[height]))\n size /= 2\n return node\n\n\n@public\n@constant\ndef get_deposit_count() -> bytes[8]:\n return self.to_little_endian_64(self.deposit_count)\n\n\n@payable\n@public\ndef deposit(pubkey: bytes[PUBKEY_LENGTH],\n withdrawal_credentials: bytes[WITHDRAWAL_CREDENTIALS_LENGTH],\n signature: bytes[SIGNATURE_LENGTH]):\n # Avoid overflowing the Merkle tree (and prevent edge case in computing `self.branch`)\n assert self.deposit_count < MAX_DEPOSIT_COUNT\n\n # Validate deposit data\n deposit_amount: uint256 = msg.value / as_wei_value(1, \"gwei\")\n assert deposit_amount >= MIN_DEPOSIT_AMOUNT\n assert len(pubkey) == PUBKEY_LENGTH\n assert len(withdrawal_credentials) == WITHDRAWAL_CREDENTIALS_LENGTH\n assert len(signature) == SIGNATURE_LENGTH\n\n # Emit `Deposit` log\n amount: bytes[8] = self.to_little_endian_64(deposit_amount)\n log.Deposit(pubkey, withdrawal_credentials, amount, signature, self.to_little_endian_64(self.deposit_count))\n\n # Compute `DepositData` root\n zero_bytes32: bytes32 = 0x0000000000000000000000000000000000000000000000000000000000000000\n pubkey_root: bytes32 = sha256(concat(pubkey, slice(zero_bytes32, start=0, len=64 - PUBKEY_LENGTH)))\n signature_root: bytes32 = sha256(concat(\n sha256(slice(signature, start=0, len=64)),\n sha256(concat(slice(signature, start=64, len=SIGNATURE_LENGTH - 64), zero_bytes32)),\n ))\n node: bytes32 = sha256(concat(\n sha256(concat(pubkey_root, withdrawal_credentials)),\n sha256(concat(amount, slice(zero_bytes32, start=0, len=32 - AMOUNT_LENGTH), signature_root)),\n ))\n\n # Add `DepositData` root to Merkle tree (update a single `branch` node)\n self.deposit_count += 1\n size: uint256 = self.deposit_count\n for height in range(DEPOSIT_CONTRACT_TREE_DEPTH):\n if bitwise_and(size, 1) == 1: # More gas efficient than `size % 2 == 1`\n self.branch[height] = node\n break\n node = sha256(concat(self.branch[height], node))\n size /= 2\n\n", "path": "deposit_contract/contracts/validator_registration.v.py"}]} | 2,043 | 894 |
gh_patches_debug_16538 | rasdani/github-patches | git_diff | sopel-irc__sopel-2063 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
isup responds twice
<!-- Before reporting a bug, please search both open *and closed* issues to
see if it has already been reported. If you can, try to reproduce the problem
on an unmodified copy of the `master` branch first, as sometimes bugs are found
and fixed without a report. If the problem is unreported and persists in
`master`, please help us fix it quickly by filling out as much of this
information as you can. Thanks! -->
### Description
when I run .isup or .isupinsecure, the bot responds twice. first is the error (if any) then it says that the site is up
### Reproduction steps
1. setup a sopel bot using the master branch.
2. in the irc channel run .isup with some url that is down
3. it responds twice
### Expected behavior
only responds with an error or else up, not up and error
### Environment
- Sopel `.version`: [e.g. 7.0.0 or d416e19] master branch
- Sopel installed via: [apt, pip, `setup.py install`, source, ?] source
- Python version: [e.g. 3.6.9] 3.7
- Operating system: [e.g. Debian 10] debian buster
- IRCd `/version`: [e.g. InspIRCd 3.0.1] freenode
- Relevant plugins: [adminchannel, weather, custom\_thing.py, ?] isup
### Notes
seems to be because in https://github.com/sopel-irc/sopel/blob/master/sopel/modules/isup.py#L89 none of the except statements return and so the bot.say for "website is up" is always executed.
</issue>
<code>
[start of sopel/modules/isup.py]
1 # coding=utf-8
2 """
3 isup.py - Sopel Website Status Check Plugin
4 Copyright 2011, Elsie Powell http://embolalia.com
5 Licensed under the Eiffel Forum License 2.
6
7 https://sopel.chat
8 """
9 from __future__ import absolute_import, division, print_function, unicode_literals
10
11 import requests
12
13 from sopel import plugin
14
15
16 PLUGIN_OUTPUT_PREFIX = '[isup] '
17
18
19 def get_site_url(site):
20 """Get a ``site`` URL
21
22 :param str site: the site to get URL for
23 :return: a valid site URL
24 :raise ValueError: when site is empty, or isn't well formatted
25
26 The ``site`` argument is checked: its scheme must be ``http`` or ``https``,
27 or a :exc:`ValueError` is raised.
28
29 If the ``site`` does not have a scheme, ``http`` is used. If it doesn't
30 have a TLD, a :exc:`ValueError` is raised.
31 """
32 site = site.strip() if site else ''
33 if not site:
34 raise ValueError('What site do you want to check?')
35
36 if not site.startswith(('http://', 'https://')):
37 if '://' in site:
38 protocol = site.split('://')[0] + '://'
39 raise ValueError('Try it again without the %s' % protocol)
40
41 site = 'http://' + site
42
43 domain = site.split('/')[2].split(':')[0]
44 if '.' not in domain:
45 raise ValueError('I need a fully qualified domain name (with a dot).')
46 if domain.endswith(('.local', '.example', '.test', '.invalid', '.localhost')):
47 raise ValueError("I can't check LAN-local or invalid domains.")
48
49 return site
50
51
52 def handle_isup(bot, trigger, secure=True):
53 """Handle the ``bot`` command from ``trigger``
54
55 :param bot: Sopel instance
56 :type bot: :class:`sopel.bot.SopelWrapper`
57 :param trigger: Command's trigger instance
58 :type trigger: :class:`sopel.trigger.Trigger`
59 :param bool secure: Check SSL error if ``True`` (the default)
60 """
61 try:
62 site = get_site_url(trigger.group(2))
63 response = requests.head(site, verify=secure, timeout=(10.0, 5.0))
64 response.raise_for_status()
65 except ValueError as error:
66 bot.reply(str(error))
67 except requests.exceptions.SSLError:
68 bot.say(
69 '{} looks down to me (SSL error). Try using `{}isupinsecure`.'
70 .format(site, bot.config.core.help_prefix))
71 except requests.HTTPError:
72 bot.say(
73 '{} looks down to me (HTTP {} "{}").'
74 .format(site, response.status_code, response.reason))
75 except requests.ConnectTimeout:
76 bot.say(
77 '{} looks down to me (timed out while connecting).'
78 .format(site))
79 except requests.ReadTimeout:
80 bot.say(
81 '{} looks down to me (timed out waiting for reply).'
82 .format(site))
83 except requests.ConnectionError:
84 bot.say(
85 '{} looks down to me (connection error).'
86 .format(site))
87
88 # If no exception happened, the request succeeded.
89 bot.say(site + ' looks fine to me.')
90
91
92 @plugin.command('isupinsecure')
93 @plugin.output_prefix(PLUGIN_OUTPUT_PREFIX)
94 def isup_insecure(bot, trigger):
95 """Check if a website is up (without verifying HTTPS)."""
96 handle_isup(bot, trigger, secure=False)
97
98
99 @plugin.command('isup')
100 @plugin.example('.isup google.com',
101 'http://google.com looks fine to me.',
102 online=True, vcr=True)
103 @plugin.output_prefix(PLUGIN_OUTPUT_PREFIX)
104 def isup(bot, trigger):
105 """Check if a website is up or not."""
106 handle_isup(bot, trigger, secure=True)
107
[end of sopel/modules/isup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/isup.py b/sopel/modules/isup.py
--- a/sopel/modules/isup.py
+++ b/sopel/modules/isup.py
@@ -84,9 +84,9 @@
bot.say(
'{} looks down to me (connection error).'
.format(site))
-
- # If no exception happened, the request succeeded.
- bot.say(site + ' looks fine to me.')
+ else:
+ # If no exception happened, the request must have succeeded.
+ bot.say(site + ' looks fine to me.')
@plugin.command('isupinsecure')
@@ -97,9 +97,7 @@
@plugin.command('isup')
[email protected]('.isup google.com',
- 'http://google.com looks fine to me.',
- online=True, vcr=True)
[email protected]('.isup google.com')
@plugin.output_prefix(PLUGIN_OUTPUT_PREFIX)
def isup(bot, trigger):
"""Check if a website is up or not."""
| {"golden_diff": "diff --git a/sopel/modules/isup.py b/sopel/modules/isup.py\n--- a/sopel/modules/isup.py\n+++ b/sopel/modules/isup.py\n@@ -84,9 +84,9 @@\n bot.say(\n '{} looks down to me (connection error).'\n .format(site))\n-\n- # If no exception happened, the request succeeded.\n- bot.say(site + ' looks fine to me.')\n+ else:\n+ # If no exception happened, the request must have succeeded.\n+ bot.say(site + ' looks fine to me.')\n \n \n @plugin.command('isupinsecure')\n@@ -97,9 +97,7 @@\n \n \n @plugin.command('isup')\[email protected]('.isup google.com',\n- 'http://google.com looks fine to me.',\n- online=True, vcr=True)\[email protected]('.isup google.com')\n @plugin.output_prefix(PLUGIN_OUTPUT_PREFIX)\n def isup(bot, trigger):\n \"\"\"Check if a website is up or not.\"\"\"\n", "issue": "isup responds twice\n<!-- Before reporting a bug, please search both open *and closed* issues to\r\nsee if it has already been reported. If you can, try to reproduce the problem\r\non an unmodified copy of the `master` branch first, as sometimes bugs are found\r\nand fixed without a report. If the problem is unreported and persists in\r\n`master`, please help us fix it quickly by filling out as much of this\r\ninformation as you can. Thanks! -->\r\n\r\n### Description\r\nwhen I run .isup or .isupinsecure, the bot responds twice. first is the error (if any) then it says that the site is up\r\n### Reproduction steps\r\n1. setup a sopel bot using the master branch.\r\n2. in the irc channel run .isup with some url that is down\r\n3. it responds twice\r\n\r\n### Expected behavior\r\nonly responds with an error or else up, not up and error\r\n\r\n### Environment\r\n- Sopel `.version`: [e.g. 7.0.0 or d416e19] master branch\r\n- Sopel installed via: [apt, pip, `setup.py install`, source, ?] source\r\n- Python version: [e.g. 3.6.9] 3.7\r\n- Operating system: [e.g. Debian 10] debian buster\r\n- IRCd `/version`: [e.g. InspIRCd 3.0.1] freenode\r\n- Relevant plugins: [adminchannel, weather, custom\\_thing.py, ?] isup\r\n\r\n### Notes\r\nseems to be because in https://github.com/sopel-irc/sopel/blob/master/sopel/modules/isup.py#L89 none of the except statements return and so the bot.say for \"website is up\" is always executed.\r\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nisup.py - Sopel Website Status Check Plugin\nCopyright 2011, Elsie Powell http://embolalia.com\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport requests\n\nfrom sopel import plugin\n\n\nPLUGIN_OUTPUT_PREFIX = '[isup] '\n\n\ndef get_site_url(site):\n \"\"\"Get a ``site`` URL\n\n :param str site: the site to get URL for\n :return: a valid site URL\n :raise ValueError: when site is empty, or isn't well formatted\n\n The ``site`` argument is checked: its scheme must be ``http`` or ``https``,\n or a :exc:`ValueError` is raised.\n\n If the ``site`` does not have a scheme, ``http`` is used. If it doesn't\n have a TLD, a :exc:`ValueError` is raised.\n \"\"\"\n site = site.strip() if site else ''\n if not site:\n raise ValueError('What site do you want to check?')\n\n if not site.startswith(('http://', 'https://')):\n if '://' in site:\n protocol = site.split('://')[0] + '://'\n raise ValueError('Try it again without the %s' % protocol)\n\n site = 'http://' + site\n\n domain = site.split('/')[2].split(':')[0]\n if '.' 
not in domain:\n raise ValueError('I need a fully qualified domain name (with a dot).')\n if domain.endswith(('.local', '.example', '.test', '.invalid', '.localhost')):\n raise ValueError(\"I can't check LAN-local or invalid domains.\")\n\n return site\n\n\ndef handle_isup(bot, trigger, secure=True):\n \"\"\"Handle the ``bot`` command from ``trigger``\n\n :param bot: Sopel instance\n :type bot: :class:`sopel.bot.SopelWrapper`\n :param trigger: Command's trigger instance\n :type trigger: :class:`sopel.trigger.Trigger`\n :param bool secure: Check SSL error if ``True`` (the default)\n \"\"\"\n try:\n site = get_site_url(trigger.group(2))\n response = requests.head(site, verify=secure, timeout=(10.0, 5.0))\n response.raise_for_status()\n except ValueError as error:\n bot.reply(str(error))\n except requests.exceptions.SSLError:\n bot.say(\n '{} looks down to me (SSL error). Try using `{}isupinsecure`.'\n .format(site, bot.config.core.help_prefix))\n except requests.HTTPError:\n bot.say(\n '{} looks down to me (HTTP {} \"{}\").'\n .format(site, response.status_code, response.reason))\n except requests.ConnectTimeout:\n bot.say(\n '{} looks down to me (timed out while connecting).'\n .format(site))\n except requests.ReadTimeout:\n bot.say(\n '{} looks down to me (timed out waiting for reply).'\n .format(site))\n except requests.ConnectionError:\n bot.say(\n '{} looks down to me (connection error).'\n .format(site))\n\n # If no exception happened, the request succeeded.\n bot.say(site + ' looks fine to me.')\n\n\[email protected]('isupinsecure')\[email protected]_prefix(PLUGIN_OUTPUT_PREFIX)\ndef isup_insecure(bot, trigger):\n \"\"\"Check if a website is up (without verifying HTTPS).\"\"\"\n handle_isup(bot, trigger, secure=False)\n\n\[email protected]('isup')\[email protected]('.isup google.com',\n 'http://google.com looks fine to me.',\n online=True, vcr=True)\[email protected]_prefix(PLUGIN_OUTPUT_PREFIX)\ndef isup(bot, trigger):\n \"\"\"Check if a website is up or not.\"\"\"\n handle_isup(bot, trigger, secure=True)\n", "path": "sopel/modules/isup.py"}]} | 1,996 | 226 |
gh_patches_debug_24694 | rasdani/github-patches | git_diff | streamlit__streamlit-7018 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Markdown support for radio buttons
### Problem
Colored text and other markdown elements work in the label of `st.radio` but not in the texts of the radio elements. This is a bit weird since we do support it in the texts of checkboxes (where the text next to the checkbox is the label).
### Solution
Allow markdown in the options of `st.radio`.
---
Community voting on feature requests enables the Streamlit team to understand which features are most important to our users.
**If you'd like the Streamlit team to prioritize this feature request, please use the 👍 (thumbs up emoji) reaction in response to the initial post.**
</issue>
<code>
[start of e2e/scripts/st_radio.py]
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import pandas as pd
16
17 import streamlit as st
18 from streamlit import runtime
19 from tests.streamlit import pyspark_mocks
20
21 options = ("female", "male")
22 i1 = st.radio("radio 1", options, 1)
23 st.write("value 1:", i1)
24
25 i2 = st.radio("radio 2", options, 0, format_func=lambda x: x.capitalize())
26 st.write("value 2:", i2)
27
28 i3 = st.radio("radio 3", [])
29 st.write("value 3:", i3)
30
31 i4 = st.radio("radio 4", options, disabled=True)
32 st.write("value 4:", i4)
33
34 i5 = st.radio("radio 5", options, horizontal=True)
35 st.write("value 5:", i5)
36
37 i6 = st.radio("radio 6", pd.DataFrame({"foo": list(options)}))
38 st.write("value 6:", i6)
39
40 i7 = st.radio("radio 7", options, label_visibility="hidden")
41 st.write("value 7:", i7)
42
43 i8 = st.radio("radio 8", options, label_visibility="collapsed")
44 st.write("value 8:", i8)
45
46
47 if runtime.exists():
48
49 def on_change():
50 st.session_state.radio_changed = True
51
52 st.radio("radio 9", options, 1, key="radio9", on_change=on_change)
53 st.write("value 9:", st.session_state.radio9)
54 st.write("radio changed:", "radio_changed" in st.session_state)
55
56 st.radio("PySpark radio", pyspark_mocks.DataFrame()) # type: ignore
57
[end of e2e/scripts/st_radio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/e2e/scripts/st_radio.py b/e2e/scripts/st_radio.py
--- a/e2e/scripts/st_radio.py
+++ b/e2e/scripts/st_radio.py
@@ -19,6 +19,16 @@
from tests.streamlit import pyspark_mocks
options = ("female", "male")
+markdown_options = (
+ "**bold text**",
+ "*italics text*",
+ "~strikethrough text~",
+ "shortcode: :blush:",
+ # link should not work in radio options
+ "[link text](www.example.com)",
+ "`code text`",
+ ":red[red] :blue[blue] :green[green] :violet[violet] :orange[orange]",
+)
i1 = st.radio("radio 1", options, 1)
st.write("value 1:", i1)
@@ -43,14 +53,16 @@
i8 = st.radio("radio 8", options, label_visibility="collapsed")
st.write("value 8:", i8)
+i9 = st.radio("radio 9", markdown_options)
+st.write("value 9:", i9)
if runtime.exists():
def on_change():
st.session_state.radio_changed = True
- st.radio("radio 9", options, 1, key="radio9", on_change=on_change)
- st.write("value 9:", st.session_state.radio9)
+ st.radio("radio 10", options, 1, key="radio10", on_change=on_change)
+ st.write("value 10:", st.session_state.radio10)
st.write("radio changed:", "radio_changed" in st.session_state)
st.radio("PySpark radio", pyspark_mocks.DataFrame()) # type: ignore
| {"golden_diff": "diff --git a/e2e/scripts/st_radio.py b/e2e/scripts/st_radio.py\n--- a/e2e/scripts/st_radio.py\n+++ b/e2e/scripts/st_radio.py\n@@ -19,6 +19,16 @@\n from tests.streamlit import pyspark_mocks\n \n options = (\"female\", \"male\")\n+markdown_options = (\n+ \"**bold text**\",\n+ \"*italics text*\",\n+ \"~strikethrough text~\",\n+ \"shortcode: :blush:\",\n+ # link should not work in radio options\n+ \"[link text](www.example.com)\",\n+ \"`code text`\",\n+ \":red[red] :blue[blue] :green[green] :violet[violet] :orange[orange]\",\n+)\n i1 = st.radio(\"radio 1\", options, 1)\n st.write(\"value 1:\", i1)\n \n@@ -43,14 +53,16 @@\n i8 = st.radio(\"radio 8\", options, label_visibility=\"collapsed\")\n st.write(\"value 8:\", i8)\n \n+i9 = st.radio(\"radio 9\", markdown_options)\n+st.write(\"value 9:\", i9)\n \n if runtime.exists():\n \n def on_change():\n st.session_state.radio_changed = True\n \n- st.radio(\"radio 9\", options, 1, key=\"radio9\", on_change=on_change)\n- st.write(\"value 9:\", st.session_state.radio9)\n+ st.radio(\"radio 10\", options, 1, key=\"radio10\", on_change=on_change)\n+ st.write(\"value 10:\", st.session_state.radio10)\n st.write(\"radio changed:\", \"radio_changed\" in st.session_state)\n \n st.radio(\"PySpark radio\", pyspark_mocks.DataFrame()) # type: ignore\n", "issue": "Markdown support for radio buttons\n### Problem\r\n\r\nColored text and other markdown elements work in the label of `st.radio` but not in the texts of the radio elements. This is a bit weird since we do support it in the texts of checkboxes (where the text next to the checkbox is the label). \r\n\r\n\r\n### Solution\r\n\r\nAllow markdown in the options of `st.radio`. \r\n\r\n\r\n---\r\n\r\nCommunity voting on feature requests enables the Streamlit team to understand which features are most important to our users.\r\n\r\n**If you'd like the Streamlit team to prioritize this feature request, please use the \ud83d\udc4d (thumbs up emoji) reaction in response to the initial post.**\r\n\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\n\nimport streamlit as st\nfrom streamlit import runtime\nfrom tests.streamlit import pyspark_mocks\n\noptions = (\"female\", \"male\")\ni1 = st.radio(\"radio 1\", options, 1)\nst.write(\"value 1:\", i1)\n\ni2 = st.radio(\"radio 2\", options, 0, format_func=lambda x: x.capitalize())\nst.write(\"value 2:\", i2)\n\ni3 = st.radio(\"radio 3\", [])\nst.write(\"value 3:\", i3)\n\ni4 = st.radio(\"radio 4\", options, disabled=True)\nst.write(\"value 4:\", i4)\n\ni5 = st.radio(\"radio 5\", options, horizontal=True)\nst.write(\"value 5:\", i5)\n\ni6 = st.radio(\"radio 6\", pd.DataFrame({\"foo\": list(options)}))\nst.write(\"value 6:\", i6)\n\ni7 = st.radio(\"radio 7\", options, label_visibility=\"hidden\")\nst.write(\"value 7:\", i7)\n\ni8 = st.radio(\"radio 8\", options, label_visibility=\"collapsed\")\nst.write(\"value 8:\", i8)\n\n\nif runtime.exists():\n\n def on_change():\n st.session_state.radio_changed = True\n\n st.radio(\"radio 9\", options, 1, key=\"radio9\", on_change=on_change)\n st.write(\"value 9:\", st.session_state.radio9)\n st.write(\"radio changed:\", \"radio_changed\" in st.session_state)\n\nst.radio(\"PySpark radio\", pyspark_mocks.DataFrame()) # type: ignore\n", "path": "e2e/scripts/st_radio.py"}]} | 1,286 | 401 |
gh_patches_debug_27 | rasdani/github-patches | git_diff | netket__netket-214 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
module 'netket' has no attribute 'MPI'
With the merge of #193 we have lost the MPI module.
</issue>
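For readers skimming the record, a one-line sketch (added to `netket/__init__.py`) of the kind of re-export that would make `netket.MPI` resolve again; it assumes the compiled `_C_netket` extension still exposes the `MPI` bindings, which the diff further down confirms.

```python
# Hypothetical sketch: re-expose the compiled MPI bindings at package level so that
# `import netket; netket.MPI` works again after the package reorganisation.
from ._C_netket import MPI  # noqa: F401
```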
<code>
[start of netket/__init__.py]
1 # Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 from . import (
17 _C_netket,
18 dynamics,
19 exact,
20 graph,
21 hilbert,
22 layer,
23 machine,
24 operator,
25 optimizer,
26 output,
27 sampler,
28 stats,
29 supervised,
30 unsupervised,
31 utils,
32 variational,
33 )
34
[end of netket/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netket/__init__.py b/netket/__init__.py
--- a/netket/__init__.py
+++ b/netket/__init__.py
@@ -31,3 +31,4 @@
utils,
variational,
)
+from ._C_netket import MPI, LookupReal, LookupComplex
| {"golden_diff": "diff --git a/netket/__init__.py b/netket/__init__.py\n--- a/netket/__init__.py\n+++ b/netket/__init__.py\n@@ -31,3 +31,4 @@\n utils,\n variational,\n )\n+from ._C_netket import MPI, LookupReal, LookupComplex\n", "issue": "module 'netket' has no attribute 'MPI'\nWith the merge #193 we have lost the MPI module \n", "before_files": [{"content": "# Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom . import (\n _C_netket,\n dynamics,\n exact,\n graph,\n hilbert,\n layer,\n machine,\n operator,\n optimizer,\n output,\n sampler,\n stats,\n supervised,\n unsupervised,\n utils,\n variational,\n)\n", "path": "netket/__init__.py"}]} | 830 | 72 |
gh_patches_debug_3491 | rasdani/github-patches | git_diff | ESMCI__cime-3863 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nuopc run complete message location
In commit b9d7b65fd, case_run.py was changed to look for the run-completed message in drv.log instead of med.log
for nuopc runs; however, system_tests_common.py was not changed to correspond. PR incoming.
@mvertens @uturuncoglu
</issue>
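A minimal sketch of the driver-dependent choice the issue describes, assuming a CIME `case` object with the same `get_value` accessor used in the code below: with the nuopc driver the run-completed message is now written to `drv.log`, while the non-nuopc path still uses `cpl.log`.

```python
def completion_logname(case):
    # Hypothetical helper: return the log file stem that carries the
    # "run completed" message. nuopc runs write it to drv.log; otherwise cpl.log.
    driver = case.get_value("COMP_INTERFACE")
    return "drv" if driver == "nuopc" else "cpl"
```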
<code>
[start of scripts/lib/CIME/SystemTests/nodefail.py]
1 """
2 CIME restart upon failed node test.
3 """
4 from CIME.XML.standard_module_setup import *
5 from CIME.SystemTests.ers import ERS
6 from CIME.utils import get_model
7
8 logger = logging.getLogger(__name__)
9
10 class NODEFAIL(ERS):
11
12 def __init__(self, case):
13 """
14 initialize an object interface to the ERS system test
15 """
16 ERS.__init__(self, case)
17
18 self._fail_sentinel = os.path.join(case.get_value("RUNDIR"), "FAIL_SENTINEL")
19 self._fail_str = case.get_value("NODE_FAIL_REGEX")
20
21 def _restart_fake_phase(self):
22 # Swap out model.exe for one that emits node failures
23 rundir = self._case.get_value("RUNDIR")
24 exeroot = self._case.get_value("EXEROOT")
25 driver = self._case.get_value("COMP_INTERFACE")
26 if driver == "nuopc":
27 logname = "med"
28 else:
29 logname = "cpl"
30 fake_exe = \
31 """#!/bin/bash
32
33 fail_sentinel={0}
34 cpl_log={1}/{4}.log.$LID
35 model_log={1}/{2}.log.$LID
36 touch $cpl_log
37 touch $fail_sentinel
38 declare -i num_fails=$(cat $fail_sentinel | wc -l)
39 declare -i times_to_fail=${{NODEFAIL_NUM_FAILS:-3}}
40
41 if ((num_fails < times_to_fail)); then
42 echo FAKE FAIL >> $cpl_log
43 echo FAIL >> $fail_sentinel
44 echo '{3}' >> $model_log
45 sleep 1
46 exit -1
47 else
48 echo Insta pass
49 echo SUCCESSFUL TERMINATION > $cpl_log
50 fi
51 """.format(self._fail_sentinel, rundir, get_model(), self._fail_str, logname)
52
53 fake_exe_file = os.path.join(exeroot, "fake.sh")
54 with open(fake_exe_file, "w") as fd:
55 fd.write(fake_exe)
56
57 os.chmod(fake_exe_file, 0o755)
58
59 prev_run_exe = self._case.get_value("run_exe")
60 env_mach_specific = self._case.get_env("mach_specific")
61 env_mach_specific.set_value("run_exe", fake_exe_file)
62 self._case.flush(flushall=True)
63
64 # This flag is needed by mpt to run a script under mpiexec
65 mpilib = self._case.get_value("MPILIB")
66 if mpilib == "mpt":
67 os.environ["MPI_SHEPHERD"] = "true"
68
69 self.run_indv(suffix=None)
70
71 if mpilib == "mpt":
72 del os.environ["MPI_SHEPHERD"]
73
74 env_mach_specific = self._case.get_env("mach_specific")
75 env_mach_specific.set_value("run_exe", prev_run_exe)
76 self._case.flush(flushall=True)
77
78 def run_phase(self):
79 self._ers_first_phase()
80 self._restart_fake_phase()
81 self._ers_second_phase()
82
[end of scripts/lib/CIME/SystemTests/nodefail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/lib/CIME/SystemTests/nodefail.py b/scripts/lib/CIME/SystemTests/nodefail.py
--- a/scripts/lib/CIME/SystemTests/nodefail.py
+++ b/scripts/lib/CIME/SystemTests/nodefail.py
@@ -24,7 +24,7 @@
exeroot = self._case.get_value("EXEROOT")
driver = self._case.get_value("COMP_INTERFACE")
if driver == "nuopc":
- logname = "med"
+ logname = "drv"
else:
logname = "cpl"
fake_exe = \
| {"golden_diff": "diff --git a/scripts/lib/CIME/SystemTests/nodefail.py b/scripts/lib/CIME/SystemTests/nodefail.py\n--- a/scripts/lib/CIME/SystemTests/nodefail.py\n+++ b/scripts/lib/CIME/SystemTests/nodefail.py\n@@ -24,7 +24,7 @@\n exeroot = self._case.get_value(\"EXEROOT\")\n driver = self._case.get_value(\"COMP_INTERFACE\")\n if driver == \"nuopc\":\n- logname = \"med\"\n+ logname = \"drv\"\n else:\n logname = \"cpl\"\n fake_exe = \\\n", "issue": "nuopc run complete message location \nIn commit b9d7b65fd case_run.py was changed to look for the run completed message in drv.log instead of in med.log\r\nfor nuopc runs, however the system_tests_common.py was not changed to correspond. PR incoming.\r\n@mvertens @uturuncoglu \n", "before_files": [{"content": "\"\"\"\nCIME restart upon failed node test.\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.SystemTests.ers import ERS\nfrom CIME.utils import get_model\n\nlogger = logging.getLogger(__name__)\n\nclass NODEFAIL(ERS):\n\n def __init__(self, case):\n \"\"\"\n initialize an object interface to the ERS system test\n \"\"\"\n ERS.__init__(self, case)\n\n self._fail_sentinel = os.path.join(case.get_value(\"RUNDIR\"), \"FAIL_SENTINEL\")\n self._fail_str = case.get_value(\"NODE_FAIL_REGEX\")\n\n def _restart_fake_phase(self):\n # Swap out model.exe for one that emits node failures\n rundir = self._case.get_value(\"RUNDIR\")\n exeroot = self._case.get_value(\"EXEROOT\")\n driver = self._case.get_value(\"COMP_INTERFACE\")\n if driver == \"nuopc\":\n logname = \"med\"\n else:\n logname = \"cpl\"\n fake_exe = \\\n\"\"\"#!/bin/bash\n\nfail_sentinel={0}\ncpl_log={1}/{4}.log.$LID\nmodel_log={1}/{2}.log.$LID\ntouch $cpl_log\ntouch $fail_sentinel\ndeclare -i num_fails=$(cat $fail_sentinel | wc -l)\ndeclare -i times_to_fail=${{NODEFAIL_NUM_FAILS:-3}}\n\nif ((num_fails < times_to_fail)); then\n echo FAKE FAIL >> $cpl_log\n echo FAIL >> $fail_sentinel\n echo '{3}' >> $model_log\n sleep 1\n exit -1\nelse\n echo Insta pass\n echo SUCCESSFUL TERMINATION > $cpl_log\nfi\n\"\"\".format(self._fail_sentinel, rundir, get_model(), self._fail_str, logname)\n\n fake_exe_file = os.path.join(exeroot, \"fake.sh\")\n with open(fake_exe_file, \"w\") as fd:\n fd.write(fake_exe)\n\n os.chmod(fake_exe_file, 0o755)\n\n prev_run_exe = self._case.get_value(\"run_exe\")\n env_mach_specific = self._case.get_env(\"mach_specific\")\n env_mach_specific.set_value(\"run_exe\", fake_exe_file)\n self._case.flush(flushall=True)\n\n # This flag is needed by mpt to run a script under mpiexec\n mpilib = self._case.get_value(\"MPILIB\")\n if mpilib == \"mpt\":\n os.environ[\"MPI_SHEPHERD\"] = \"true\"\n\n self.run_indv(suffix=None)\n\n if mpilib == \"mpt\":\n del os.environ[\"MPI_SHEPHERD\"]\n\n env_mach_specific = self._case.get_env(\"mach_specific\")\n env_mach_specific.set_value(\"run_exe\", prev_run_exe)\n self._case.flush(flushall=True)\n\n def run_phase(self):\n self._ers_first_phase()\n self._restart_fake_phase()\n self._ers_second_phase()\n", "path": "scripts/lib/CIME/SystemTests/nodefail.py"}]} | 1,431 | 128 |
gh_patches_debug_40801 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1767 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E2529 error with multiple `AWS::Logs::SubscriptionFilter` resources.
*cfn-lint version: 0.38.0*
*[`E2529`](https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/rules.md#E2529) error with multiple [`AWS::Logs::SubscriptionFilter`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html) resources.*
I have a CloudFormation template (using AWS SAM) that has multiple SubscriptionFilters. These SubscriptionFilters have the same `LogGroupName`, which causes an `E2529` error despite these filters having separate FilterPatterns. The template passes the [`aws cloudformation validate-template`](https://docs.aws.amazon.com/cli/latest/reference/cloudformation/validate-template.html) command.
[`src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py`](https://github.com/aws-cloudformation/cfn-python-lint/blob/master/src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py)
```
MainFunctionLogGroup:
Type: AWS::Logs::LogGroup
Properties:
RetentionInDays: 14
LogGroupName: !Join ["", [/aws/lambda/, !Ref MainFunction]]
MainFunctionLogFilter:
Type: AWS::Logs::SubscriptionFilter
Properties:
DestinationArn: !Ref LogIngestionARN
FilterPattern: "FilterPattern1"
LogGroupName: !Ref MainFunctionLogGroup
SecondaryLogFilter:
Type: AWS::Logs::SubscriptionFilter
Properties:
DestinationArn: !Ref LogIngestionARN
FilterPattern: "FilterPattern2"
LogGroupName: !Ref MainFunctionLogGroup
```
</issue>
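Because the template above is valid, the check needs to count subscription filters per log group rather than flag any repeated `LogGroupName`. Below is a sketch of that counting approach, assuming `resources` is the dict returned by `cfn.get_resources('AWS::Logs::SubscriptionFilter')` as in the rule code that follows, and using the two-filters-per-log-group limit that the accompanying fix adopts.

```python
import json
from collections import Counter


def log_group_filter_counts(resources):
    """Count AWS::Logs::SubscriptionFilter resources per LogGroupName.

    Hypothetical sketch: LogGroupName is frequently a Ref/intrinsic dict, so it
    is serialised with json.dumps to obtain a hashable key.
    """
    counts = Counter()
    for _, resource in resources.items():
        properties = resource.get("Properties") or {}
        counts[json.dumps(properties.get("LogGroupName"))] += 1
    return counts


def log_groups_over_limit(resources, limit=2):
    # Only log groups referenced by more than `limit` filters should be reported.
    return [group for group, count in log_group_filter_counts(resources).items()
            if count > limit]
```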
<code>
[start of src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.rules import CloudFormationLintRule
6 from cfnlint.rules import RuleMatch
7
8
9 class EventsLogGroupName(CloudFormationLintRule):
10 """Check if the settings of multiple subscriptions are included for one LogGroup"""
11 id = 'E2529'
12 shortdesc = 'Check for duplicate Lambda events'
13 description = 'Check if there are any duplicate log groups in the Lambda event trigger element.'
14 source_url = 'https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#user-content-cloudwatchlogs'
15 tags = ['resources', 'lambda']
16
17 def check_events_subscription_duplicated(self, cfn):
18 """Check if Lambda Events Subscription is duplicated"""
19 matches = []
20 message = 'You must specify the AWS::Serverless::Function event correctly. ' \
21 'LogGroups are duplicated. '
22
23 log_group_name_list = self.__get_log_group_name_list(cfn)
24
25 if self.__is_duplicated(log_group_name_list):
26 matches.append(
27 RuleMatch(
28 'path', message.format()
29 )
30 )
31
32 return matches
33
34 def __is_duplicated(self, duplicate_list):
35 unique_list = self.__remove(duplicate_list)
36 return len(unique_list) != len(duplicate_list)
37
38 def __remove(self, duplicate):
39 final_list = []
40 for ele in duplicate:
41 if ele not in final_list:
42 final_list.append(ele)
43 return final_list
44
45 def __get_log_group_name_list(self, cfn):
46 log_group_name_list = []
47 for value in cfn.get_resources('AWS::Logs::SubscriptionFilter').items():
48 prop = value[1].get('Properties')
49 log_group_name_list.append(prop.get('LogGroupName'))
50 return log_group_name_list
51
52 def match(self, cfn):
53 """Check if Lambda Events Subscription is duplicated"""
54 matches = []
55 matches.extend(
56 self.check_events_subscription_duplicated(cfn)
57 )
58 return matches
59
[end of src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py b/src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py
--- a/src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py
+++ b/src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py
@@ -2,6 +2,7 @@
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
+import json
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
@@ -9,45 +10,41 @@
class EventsLogGroupName(CloudFormationLintRule):
"""Check if the settings of multiple subscriptions are included for one LogGroup"""
id = 'E2529'
- shortdesc = 'Check for duplicate Lambda events'
- description = 'Check if there are any duplicate log groups in the Lambda event trigger element.'
+ shortdesc = 'Check for SubscriptionFilters have beyond 2 attachments to a CloudWatch Log Group'
+ description = 'The current limit for a CloudWatch Log Group is they can have 2 subscription filters. ' \
+ 'We will look for duplicate LogGroupNames inside Subscription Filters and make sure they are within 2. ' \
+ 'This doesn\'t account for any other subscription filters getting set.'
source_url = 'https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#user-content-cloudwatchlogs'
tags = ['resources', 'lambda']
+ limit = 2
def check_events_subscription_duplicated(self, cfn):
"""Check if Lambda Events Subscription is duplicated"""
matches = []
- message = 'You must specify the AWS::Serverless::Function event correctly. ' \
- 'LogGroups are duplicated. '
-
- log_group_name_list = self.__get_log_group_name_list(cfn)
-
- if self.__is_duplicated(log_group_name_list):
- matches.append(
- RuleMatch(
- 'path', message.format()
+ message = 'You can only have {} Subscription Filters per CloudWatch Log Group'.format(self.limit)
+
+ log_group_paths = self.__get_log_group_name_list(cfn)
+ for _, c in log_group_paths.items():
+ if len(c) > self.limit:
+ matches.append(
+ RuleMatch(
+ ['Resources', c[2]], message.format()
+ )
)
- )
return matches
- def __is_duplicated(self, duplicate_list):
- unique_list = self.__remove(duplicate_list)
- return len(unique_list) != len(duplicate_list)
-
- def __remove(self, duplicate):
- final_list = []
- for ele in duplicate:
- if ele not in final_list:
- final_list.append(ele)
- return final_list
-
def __get_log_group_name_list(self, cfn):
- log_group_name_list = []
+ log_group_paths = {}
for value in cfn.get_resources('AWS::Logs::SubscriptionFilter').items():
prop = value[1].get('Properties')
- log_group_name_list.append(prop.get('LogGroupName'))
- return log_group_name_list
+ log_group_name = json.dumps(prop.get('LogGroupName'))
+
+ if log_group_name not in log_group_paths:
+ log_group_paths[log_group_name] = []
+
+ log_group_paths[log_group_name].append(value[0])
+ return log_group_paths
def match(self, cfn):
"""Check if Lambda Events Subscription is duplicated"""
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py b/src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py\n--- a/src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py\n+++ b/src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py\n@@ -2,6 +2,7 @@\n Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n SPDX-License-Identifier: MIT-0\n \"\"\"\n+import json\n from cfnlint.rules import CloudFormationLintRule\n from cfnlint.rules import RuleMatch\n \n@@ -9,45 +10,41 @@\n class EventsLogGroupName(CloudFormationLintRule):\n \"\"\"Check if the settings of multiple subscriptions are included for one LogGroup\"\"\"\n id = 'E2529'\n- shortdesc = 'Check for duplicate Lambda events'\n- description = 'Check if there are any duplicate log groups in the Lambda event trigger element.'\n+ shortdesc = 'Check for SubscriptionFilters have beyond 2 attachments to a CloudWatch Log Group'\n+ description = 'The current limit for a CloudWatch Log Group is they can have 2 subscription filters. ' \\\n+ 'We will look for duplicate LogGroupNames inside Subscription Filters and make sure they are within 2. ' \\\n+ 'This doesn\\'t account for any other subscription filters getting set.'\n source_url = 'https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#user-content-cloudwatchlogs'\n tags = ['resources', 'lambda']\n+ limit = 2\n \n def check_events_subscription_duplicated(self, cfn):\n \"\"\"Check if Lambda Events Subscription is duplicated\"\"\"\n matches = []\n- message = 'You must specify the AWS::Serverless::Function event correctly. ' \\\n- 'LogGroups are duplicated. '\n-\n- log_group_name_list = self.__get_log_group_name_list(cfn)\n-\n- if self.__is_duplicated(log_group_name_list):\n- matches.append(\n- RuleMatch(\n- 'path', message.format()\n+ message = 'You can only have {} Subscription Filters per CloudWatch Log Group'.format(self.limit)\n+\n+ log_group_paths = self.__get_log_group_name_list(cfn)\n+ for _, c in log_group_paths.items():\n+ if len(c) > self.limit:\n+ matches.append(\n+ RuleMatch(\n+ ['Resources', c[2]], message.format()\n+ )\n )\n- )\n \n return matches\n \n- def __is_duplicated(self, duplicate_list):\n- unique_list = self.__remove(duplicate_list)\n- return len(unique_list) != len(duplicate_list)\n-\n- def __remove(self, duplicate):\n- final_list = []\n- for ele in duplicate:\n- if ele not in final_list:\n- final_list.append(ele)\n- return final_list\n-\n def __get_log_group_name_list(self, cfn):\n- log_group_name_list = []\n+ log_group_paths = {}\n for value in cfn.get_resources('AWS::Logs::SubscriptionFilter').items():\n prop = value[1].get('Properties')\n- log_group_name_list.append(prop.get('LogGroupName'))\n- return log_group_name_list\n+ log_group_name = json.dumps(prop.get('LogGroupName'))\n+\n+ if log_group_name not in log_group_paths:\n+ log_group_paths[log_group_name] = []\n+\n+ log_group_paths[log_group_name].append(value[0])\n+ return log_group_paths\n \n def match(self, cfn):\n \"\"\"Check if Lambda Events Subscription is duplicated\"\"\"\n", "issue": "E2529 error with multiple `AWS::Logs::SubscriptionFilter` resources.\n*cfn-lint version: 0.38.0*\r\n\r\n*[`E2529`](https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/rules.md#E2529) error with multiple [`AWS::Logs::SubscriptionFilter`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-subscriptionfilter.html) resources.*\r\n\r\nI have a CloudFormation template (using AWS SAM) that has multiple 
SubscriptionFilters. These SubscriptionFilters have the same `LogGroupName`, which causes an `E2529` error despite these filters having separate FilterPatterns. The template passes the [`aws cloudformation validate-template`](https://docs.aws.amazon.com/cli/latest/reference/cloudformation/validate-template.html) command.\r\n\r\n[`src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py`](https://github.com/aws-cloudformation/cfn-python-lint/blob/master/src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py)\r\n\r\n```\r\n MainFunctionLogGroup:\r\n Type: AWS::Logs::LogGroup\r\n Properties:\r\n RetentionInDays: 14\r\n LogGroupName: !Join [\"\", [/aws/lambda/, !Ref MainFunction]]\r\n\r\n MainFunctionLogFilter:\r\n Type: AWS::Logs::SubscriptionFilter\r\n Properties:\r\n DestinationArn: !Ref LogIngestionARN\r\n FilterPattern: \"FilterPattern1\"\r\n LogGroupName: !Ref MainFunctionLogGroup\r\n\r\n SecondaryLogFilter:\r\n Type: AWS::Logs::SubscriptionFilter\r\n Properties:\r\n DestinationArn: !Ref LogIngestionARN\r\n FilterPattern: \"FilterPattern2\"\r\n LogGroupName: !Ref MainFunctionLogGroup\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass EventsLogGroupName(CloudFormationLintRule):\n \"\"\"Check if the settings of multiple subscriptions are included for one LogGroup\"\"\"\n id = 'E2529'\n shortdesc = 'Check for duplicate Lambda events'\n description = 'Check if there are any duplicate log groups in the Lambda event trigger element.'\n source_url = 'https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#user-content-cloudwatchlogs'\n tags = ['resources', 'lambda']\n\n def check_events_subscription_duplicated(self, cfn):\n \"\"\"Check if Lambda Events Subscription is duplicated\"\"\"\n matches = []\n message = 'You must specify the AWS::Serverless::Function event correctly. ' \\\n 'LogGroups are duplicated. '\n\n log_group_name_list = self.__get_log_group_name_list(cfn)\n\n if self.__is_duplicated(log_group_name_list):\n matches.append(\n RuleMatch(\n 'path', message.format()\n )\n )\n\n return matches\n\n def __is_duplicated(self, duplicate_list):\n unique_list = self.__remove(duplicate_list)\n return len(unique_list) != len(duplicate_list)\n\n def __remove(self, duplicate):\n final_list = []\n for ele in duplicate:\n if ele not in final_list:\n final_list.append(ele)\n return final_list\n\n def __get_log_group_name_list(self, cfn):\n log_group_name_list = []\n for value in cfn.get_resources('AWS::Logs::SubscriptionFilter').items():\n prop = value[1].get('Properties')\n log_group_name_list.append(prop.get('LogGroupName'))\n return log_group_name_list\n\n def match(self, cfn):\n \"\"\"Check if Lambda Events Subscription is duplicated\"\"\"\n matches = []\n matches.extend(\n self.check_events_subscription_duplicated(cfn)\n )\n return matches\n", "path": "src/cfnlint/rules/resources/lmbd/EventsLogGroupName.py"}]} | 1,492 | 792 |
gh_patches_debug_2259 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-184 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Philips Remote DIM_DOWN typo?
https://github.com/dmulcahey/zha-device-handlers/blob/833ee24710496d317a03b0f0b9f61df31291d75b/zhaquirks/philips/rwl021.py#L137
It seems that it should be:
`ARGS: [1, 30, 9],`
</issue>
<code>
[start of zhaquirks/philips/rwl021.py]
1 """Phillips RWL021 device."""
2 from zigpy.profiles import zha, zll
3 from zigpy.quirks import CustomCluster, CustomDevice
4 import zigpy.types as t
5 from zigpy.zcl.clusters.general import (
6 Basic,
7 BinaryInput,
8 Groups,
9 Identify,
10 LevelControl,
11 OnOff,
12 Ota,
13 PowerConfiguration,
14 Scenes,
15 )
16
17 from ..const import (
18 ARGS,
19 CLUSTER_ID,
20 COMMAND,
21 COMMAND_OFF_WITH_EFFECT,
22 COMMAND_ON,
23 COMMAND_STEP,
24 DEVICE_TYPE,
25 DIM_DOWN,
26 DIM_UP,
27 ENDPOINT_ID,
28 ENDPOINTS,
29 INPUT_CLUSTERS,
30 LONG_PRESS,
31 OUTPUT_CLUSTERS,
32 PROFILE_ID,
33 SHORT_PRESS,
34 TURN_OFF,
35 TURN_ON,
36 )
37
38 DIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821
39
40
41 class BasicCluster(CustomCluster, Basic):
42 """Centralite acceleration cluster."""
43
44 def __init__(self, *args, **kwargs):
45 """Init."""
46 super().__init__(*args, **kwargs)
47 self.attributes = super().attributes.copy()
48 self.attributes.update({0x0031: ("phillips", t.bitmap16)})
49
50
51 class PhilipsRWL021(CustomDevice):
52 """Phillips RWL021 device."""
53
54 signature = {
55 # <SimpleDescriptor endpoint=1 profile=49246 device_type=2096
56 # device_version=2
57 # input_clusters=[0]
58 # output_clusters=[0, 3, 4, 6, 8, 5]>
59 ENDPOINTS: {
60 1: {
61 PROFILE_ID: zll.PROFILE_ID,
62 DEVICE_TYPE: zll.DeviceType.SCENE_CONTROLLER,
63 INPUT_CLUSTERS: [Basic.cluster_id],
64 OUTPUT_CLUSTERS: [
65 Basic.cluster_id,
66 Identify.cluster_id,
67 Groups.cluster_id,
68 OnOff.cluster_id,
69 LevelControl.cluster_id,
70 Scenes.cluster_id,
71 ],
72 },
73 # <SimpleDescriptor endpoint=2 profile=260 device_type=12
74 # device_version=0
75 # input_clusters=[0, 1, 3, 15, 64512]
76 # output_clusters=[25]>
77 2: {
78 PROFILE_ID: zha.PROFILE_ID,
79 DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR,
80 INPUT_CLUSTERS: [
81 Basic.cluster_id,
82 PowerConfiguration.cluster_id,
83 Identify.cluster_id,
84 BinaryInput.cluster_id,
85 64512,
86 ],
87 OUTPUT_CLUSTERS: [Ota.cluster_id],
88 },
89 }
90 }
91
92 replacement = {
93 ENDPOINTS: {
94 1: {
95 INPUT_CLUSTERS: [Basic.cluster_id],
96 OUTPUT_CLUSTERS: [
97 Basic.cluster_id,
98 Identify.cluster_id,
99 Groups.cluster_id,
100 OnOff.cluster_id,
101 LevelControl.cluster_id,
102 Scenes.cluster_id,
103 ],
104 },
105 2: {
106 INPUT_CLUSTERS: [
107 BasicCluster,
108 PowerConfiguration.cluster_id,
109 Identify.cluster_id,
110 BinaryInput.cluster_id,
111 64512,
112 ],
113 OUTPUT_CLUSTERS: [Ota.cluster_id],
114 },
115 }
116 }
117
118 device_automation_triggers = {
119 (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON},
120 (LONG_PRESS, TURN_OFF): {COMMAND: COMMAND_OFF_WITH_EFFECT},
121 (SHORT_PRESS, DIM_UP): {
122 COMMAND: COMMAND_STEP,
123 CLUSTER_ID: 8,
124 ENDPOINT_ID: 1,
125 ARGS: [0, 30, 9],
126 },
127 (LONG_PRESS, DIM_UP): {
128 COMMAND: COMMAND_STEP,
129 CLUSTER_ID: 8,
130 ENDPOINT_ID: 1,
131 ARGS: [0, 56, 9],
132 },
133 (SHORT_PRESS, DIM_DOWN): {
134 COMMAND: COMMAND_STEP,
135 CLUSTER_ID: 8,
136 ENDPOINT_ID: 1,
137 ARGS: [1, 56, 9],
138 },
139 (LONG_PRESS, DIM_DOWN): {
140 COMMAND: COMMAND_STEP,
141 CLUSTER_ID: 8,
142 ENDPOINT_ID: 1,
143 ARGS: [1, 56, 9],
144 },
145 }
146
[end of zhaquirks/philips/rwl021.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zhaquirks/philips/rwl021.py b/zhaquirks/philips/rwl021.py
--- a/zhaquirks/philips/rwl021.py
+++ b/zhaquirks/philips/rwl021.py
@@ -134,7 +134,7 @@
COMMAND: COMMAND_STEP,
CLUSTER_ID: 8,
ENDPOINT_ID: 1,
- ARGS: [1, 56, 9],
+ ARGS: [1, 30, 9],
},
(LONG_PRESS, DIM_DOWN): {
COMMAND: COMMAND_STEP,
| {"golden_diff": "diff --git a/zhaquirks/philips/rwl021.py b/zhaquirks/philips/rwl021.py\n--- a/zhaquirks/philips/rwl021.py\n+++ b/zhaquirks/philips/rwl021.py\n@@ -134,7 +134,7 @@\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n- ARGS: [1, 56, 9],\n+ ARGS: [1, 30, 9],\n },\n (LONG_PRESS, DIM_DOWN): {\n COMMAND: COMMAND_STEP,\n", "issue": "Philips Remote DIM_DOWN typo?\nhttps://github.com/dmulcahey/zha-device-handlers/blob/833ee24710496d317a03b0f0b9f61df31291d75b/zhaquirks/philips/rwl021.py#L137\r\n\r\nIt seems that it should be:\r\n`ARGS: [1, 30, 9],`\n", "before_files": [{"content": "\"\"\"Phillips RWL021 device.\"\"\"\nfrom zigpy.profiles import zha, zll\nfrom zigpy.quirks import CustomCluster, CustomDevice\nimport zigpy.types as t\nfrom zigpy.zcl.clusters.general import (\n Basic,\n BinaryInput,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PowerConfiguration,\n Scenes,\n)\n\nfrom ..const import (\n ARGS,\n CLUSTER_ID,\n COMMAND,\n COMMAND_OFF_WITH_EFFECT,\n COMMAND_ON,\n COMMAND_STEP,\n DEVICE_TYPE,\n DIM_DOWN,\n DIM_UP,\n ENDPOINT_ID,\n ENDPOINTS,\n INPUT_CLUSTERS,\n LONG_PRESS,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SHORT_PRESS,\n TURN_OFF,\n TURN_ON,\n)\n\nDIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821\n\n\nclass BasicCluster(CustomCluster, Basic):\n \"\"\"Centralite acceleration cluster.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Init.\"\"\"\n super().__init__(*args, **kwargs)\n self.attributes = super().attributes.copy()\n self.attributes.update({0x0031: (\"phillips\", t.bitmap16)})\n\n\nclass PhilipsRWL021(CustomDevice):\n \"\"\"Phillips RWL021 device.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=49246 device_type=2096\n # device_version=2\n # input_clusters=[0]\n # output_clusters=[0, 3, 4, 6, 8, 5]>\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zll.PROFILE_ID,\n DEVICE_TYPE: zll.DeviceType.SCENE_CONTROLLER,\n INPUT_CLUSTERS: [Basic.cluster_id],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n ],\n },\n # <SimpleDescriptor endpoint=2 profile=260 device_type=12\n # device_version=0\n # input_clusters=[0, 1, 3, 15, 64512]\n # output_clusters=[25]>\n 2: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n BinaryInput.cluster_id,\n 64512,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n }\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n INPUT_CLUSTERS: [Basic.cluster_id],\n OUTPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Scenes.cluster_id,\n ],\n },\n 2: {\n INPUT_CLUSTERS: [\n BasicCluster,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n BinaryInput.cluster_id,\n 64512,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n }\n }\n\n device_automation_triggers = {\n (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON},\n (LONG_PRESS, TURN_OFF): {COMMAND: COMMAND_OFF_WITH_EFFECT},\n (SHORT_PRESS, DIM_UP): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 30, 9],\n },\n (LONG_PRESS, DIM_UP): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 56, 9],\n },\n (SHORT_PRESS, DIM_DOWN): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [1, 56, 9],\n },\n (LONG_PRESS, DIM_DOWN): {\n COMMAND: COMMAND_STEP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [1, 56, 9],\n },\n }\n", "path": 
"zhaquirks/philips/rwl021.py"}]} | 1,934 | 146 |
gh_patches_debug_17277 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create packages, automate releases
Create new `opentelemetry-` packages for the API and SDK, and consider doing the same for all OC contrib packages.
Configure CI to build and release these packages when we tag a new release.
See #6 for CI bootstrapping.
</issue>
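One possible shape for the tag-triggered release step is sketched below. The helper itself, the environment variable name, and the use of twine are assumptions for illustration, not the project's actual CI configuration; only the two package directory names come from the repository layout.

```python
"""Hypothetical release helper: build and publish each package on a tagged CI run."""
import glob
import os
import subprocess

PACKAGES = ["opentelemetry-api", "opentelemetry-sdk"]


def build_and_publish(package):
    # Build an sdist and a wheel inside the package directory, then upload both.
    subprocess.run(["python", "setup.py", "sdist", "bdist_wheel"], cwd=package, check=True)
    subprocess.run(["twine", "upload", *glob.glob(os.path.join(package, "dist", "*"))], check=True)


if __name__ == "__main__":
    # Assumed convention: CI exposes the release tag in an environment variable.
    if os.environ.get("CI_RELEASE_TAG"):
        for name in PACKAGES:
            build_and_publish(name)
```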
<code>
[start of opentelemetry-api/setup.py]
1 # Copyright 2019, OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 import setuptools
18
19 BASE_DIR = os.path.dirname(__file__)
20 VERSION_FILENAME = os.path.join(
21 BASE_DIR, "src", "opentelemetry", "util", "version.py"
22 )
23 PACKAGE_INFO = {}
24 with open(VERSION_FILENAME) as f:
25 exec(f.read(), PACKAGE_INFO)
26
27 setuptools.setup(
28 name="opentelemetry-api",
29 version=PACKAGE_INFO["__version__"],
30 author="OpenTelemetry Authors",
31 author_email="[email protected]",
32 classifiers=[
33 "Development Status :: 3 - Alpha",
34 "Intended Audience :: Developers",
35 "License :: OSI Approved :: Apache Software License",
36 "Programming Language :: Python",
37 "Programming Language :: Python :: 3",
38 "Programming Language :: Python :: 3.4",
39 "Programming Language :: Python :: 3.5",
40 "Programming Language :: Python :: 3.6",
41 "Programming Language :: Python :: 3.7",
42 ],
43 description="OpenTelemetry Python API",
44 include_package_data=True,
45 long_description=open("README.rst").read(),
46 install_requires=["typing; python_version<'3.5'"],
47 extras_require={},
48 license="Apache-2.0",
49 package_dir={"": "src"},
50 packages=setuptools.find_namespace_packages(
51 where="src", include="opentelemetry.*"
52 ),
53 url=(
54 "https://github.com/open-telemetry/opentelemetry-python"
55 "/tree/master/opentelemetry-api"
56 ),
57 zip_safe=False,
58 )
59
[end of opentelemetry-api/setup.py]
[start of opentelemetry-sdk/setup.py]
1 # Copyright 2019, OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 import setuptools
18
19 BASE_DIR = os.path.dirname(__file__)
20 VERSION_FILENAME = os.path.join(
21 BASE_DIR, "src", "opentelemetry", "sdk", "version.py"
22 )
23 PACKAGE_INFO = {}
24 with open(VERSION_FILENAME) as f:
25 exec(f.read(), PACKAGE_INFO)
26
27 setuptools.setup(
28 name="opentelemetry-sdk",
29 version=PACKAGE_INFO["__version__"],
30 author="OpenTelemetry Authors",
31 author_email="[email protected]",
32 classifiers=[
33 "Development Status :: 3 - Alpha",
34 "Intended Audience :: Developers",
35 "License :: OSI Approved :: Apache Software License",
36 "Programming Language :: Python",
37 "Programming Language :: Python :: 3",
38 "Programming Language :: Python :: 3.4",
39 "Programming Language :: Python :: 3.5",
40 "Programming Language :: Python :: 3.6",
41 "Programming Language :: Python :: 3.7",
42 ],
43 description="OpenTelemetry Python SDK",
44 include_package_data=True,
45 long_description=open("README.rst").read(),
46 install_requires=["opentelemetry-api==0.1.dev0"],
47 extras_require={},
48 license="Apache-2.0",
49 package_dir={"": "src"},
50 packages=setuptools.find_namespace_packages(
51 where="src", include="opentelemetry.sdk.*"
52 ),
53 url=(
54 "https://github.com/open-telemetry/opentelemetry-python"
55 "/tree/master/opentelemetry-sdk"
56 ),
57 zip_safe=False,
58 )
59
[end of opentelemetry-sdk/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-api/setup.py b/opentelemetry-api/setup.py
--- a/opentelemetry-api/setup.py
+++ b/opentelemetry-api/setup.py
@@ -43,6 +43,7 @@
description="OpenTelemetry Python API",
include_package_data=True,
long_description=open("README.rst").read(),
+ long_description_content_type="text/x-rst",
install_requires=["typing; python_version<'3.5'"],
extras_require={},
license="Apache-2.0",
diff --git a/opentelemetry-sdk/setup.py b/opentelemetry-sdk/setup.py
--- a/opentelemetry-sdk/setup.py
+++ b/opentelemetry-sdk/setup.py
@@ -43,6 +43,7 @@
description="OpenTelemetry Python SDK",
include_package_data=True,
long_description=open("README.rst").read(),
+ long_description_content_type="text/x-rst",
install_requires=["opentelemetry-api==0.1.dev0"],
extras_require={},
license="Apache-2.0",
| {"golden_diff": "diff --git a/opentelemetry-api/setup.py b/opentelemetry-api/setup.py\n--- a/opentelemetry-api/setup.py\n+++ b/opentelemetry-api/setup.py\n@@ -43,6 +43,7 @@\n description=\"OpenTelemetry Python API\",\n include_package_data=True,\n long_description=open(\"README.rst\").read(),\n+ long_description_content_type=\"text/x-rst\",\n install_requires=[\"typing; python_version<'3.5'\"],\n extras_require={},\n license=\"Apache-2.0\",\ndiff --git a/opentelemetry-sdk/setup.py b/opentelemetry-sdk/setup.py\n--- a/opentelemetry-sdk/setup.py\n+++ b/opentelemetry-sdk/setup.py\n@@ -43,6 +43,7 @@\n description=\"OpenTelemetry Python SDK\",\n include_package_data=True,\n long_description=open(\"README.rst\").read(),\n+ long_description_content_type=\"text/x-rst\",\n install_requires=[\"opentelemetry-api==0.1.dev0\"],\n extras_require={},\n license=\"Apache-2.0\",\n", "issue": "Create packages, automate releases\nCreate new `opentelemetry-` packages for the API and SDK, and consider doing the same for all OC contrib packages.\r\n\r\nConfigure CI to build and release these packages when we tag a new release.\r\n\r\nSee #6 for CI bootstrapping.\n", "before_files": [{"content": "# Copyright 2019, OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport setuptools\n\nBASE_DIR = os.path.dirname(__file__)\nVERSION_FILENAME = os.path.join(\n BASE_DIR, \"src\", \"opentelemetry\", \"util\", \"version.py\"\n)\nPACKAGE_INFO = {}\nwith open(VERSION_FILENAME) as f:\n exec(f.read(), PACKAGE_INFO)\n\nsetuptools.setup(\n name=\"opentelemetry-api\",\n version=PACKAGE_INFO[\"__version__\"],\n author=\"OpenTelemetry Authors\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n description=\"OpenTelemetry Python API\",\n include_package_data=True,\n long_description=open(\"README.rst\").read(),\n install_requires=[\"typing; python_version<'3.5'\"],\n extras_require={},\n license=\"Apache-2.0\",\n package_dir={\"\": \"src\"},\n packages=setuptools.find_namespace_packages(\n where=\"src\", include=\"opentelemetry.*\"\n ),\n url=(\n \"https://github.com/open-telemetry/opentelemetry-python\"\n \"/tree/master/opentelemetry-api\"\n ),\n zip_safe=False,\n)\n", "path": "opentelemetry-api/setup.py"}, {"content": "# Copyright 2019, OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License 
is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport setuptools\n\nBASE_DIR = os.path.dirname(__file__)\nVERSION_FILENAME = os.path.join(\n BASE_DIR, \"src\", \"opentelemetry\", \"sdk\", \"version.py\"\n)\nPACKAGE_INFO = {}\nwith open(VERSION_FILENAME) as f:\n exec(f.read(), PACKAGE_INFO)\n\nsetuptools.setup(\n name=\"opentelemetry-sdk\",\n version=PACKAGE_INFO[\"__version__\"],\n author=\"OpenTelemetry Authors\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n description=\"OpenTelemetry Python SDK\",\n include_package_data=True,\n long_description=open(\"README.rst\").read(),\n install_requires=[\"opentelemetry-api==0.1.dev0\"],\n extras_require={},\n license=\"Apache-2.0\",\n package_dir={\"\": \"src\"},\n packages=setuptools.find_namespace_packages(\n where=\"src\", include=\"opentelemetry.sdk.*\"\n ),\n url=(\n \"https://github.com/open-telemetry/opentelemetry-python\"\n \"/tree/master/opentelemetry-sdk\"\n ),\n zip_safe=False,\n)\n", "path": "opentelemetry-sdk/setup.py"}]} | 1,763 | 230 |
gh_patches_debug_7031 | rasdani/github-patches | git_diff | facebookresearch__hydra-1961 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CI] hydra_nevergrad_sweeper tests are failing on main branch.
example failure https://app.circleci.com/pipelines/github/facebookresearch/hydra/11235/workflows/273a5296-bebf-4808-8e68-14b9889b63a5/jobs/102829
</issue>
<code>
[start of plugins/hydra_nevergrad_sweeper/setup.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 # type: ignore
3 from pathlib import Path
4
5 from read_version import read_version
6 from setuptools import find_namespace_packages, setup
7
8 setup(
9 name="hydra-nevergrad-sweeper",
10 version=read_version("hydra_plugins/hydra_nevergrad_sweeper", "__init__.py"),
11 author="Jeremy Rapin, Omry Yadan, Jieru Hu",
12 author_email="[email protected], [email protected], [email protected]",
13 description="Hydra Nevergrad Sweeper plugin",
14 long_description=(Path(__file__).parent / "README.md").read_text(),
15 long_description_content_type="text/markdown",
16 url="https://github.com/facebookresearch/hydra/",
17 packages=find_namespace_packages(include=["hydra_plugins.*"]),
18 classifiers=[
19 "License :: OSI Approved :: MIT License",
20 "Programming Language :: Python :: 3.6",
21 "Programming Language :: Python :: 3.7",
22 "Programming Language :: Python :: 3.8",
23 "Programming Language :: Python :: 3.9",
24 "Operating System :: OS Independent",
25 "Development Status :: 4 - Beta",
26 ],
27 install_requires=[
28 "hydra-core>=1.1.0.dev7",
29 "nevergrad>=0.4.3.post2,<0.4.3.post7", # https://github.com/facebookresearch/hydra/issues/1768
30 "cma==3.0.3", # https://github.com/facebookresearch/hydra/issues/1684
31 "numpy<1.20.0", # remove once nevergrad is upgraded to support numpy 1.20
32 ],
33 include_package_data=True,
34 )
35
[end of plugins/hydra_nevergrad_sweeper/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/hydra_nevergrad_sweeper/setup.py b/plugins/hydra_nevergrad_sweeper/setup.py
--- a/plugins/hydra_nevergrad_sweeper/setup.py
+++ b/plugins/hydra_nevergrad_sweeper/setup.py
@@ -26,7 +26,7 @@
],
install_requires=[
"hydra-core>=1.1.0.dev7",
- "nevergrad>=0.4.3.post2,<0.4.3.post7", # https://github.com/facebookresearch/hydra/issues/1768
+ "nevergrad>=0.4.3.post9",
"cma==3.0.3", # https://github.com/facebookresearch/hydra/issues/1684
"numpy<1.20.0", # remove once nevergrad is upgraded to support numpy 1.20
],
| {"golden_diff": "diff --git a/plugins/hydra_nevergrad_sweeper/setup.py b/plugins/hydra_nevergrad_sweeper/setup.py\n--- a/plugins/hydra_nevergrad_sweeper/setup.py\n+++ b/plugins/hydra_nevergrad_sweeper/setup.py\n@@ -26,7 +26,7 @@\n ],\n install_requires=[\n \"hydra-core>=1.1.0.dev7\",\n- \"nevergrad>=0.4.3.post2,<0.4.3.post7\", # https://github.com/facebookresearch/hydra/issues/1768\n+ \"nevergrad>=0.4.3.post9\",\n \"cma==3.0.3\", # https://github.com/facebookresearch/hydra/issues/1684\n \"numpy<1.20.0\", # remove once nevergrad is upgraded to support numpy 1.20\n ],\n", "issue": "[CI] hydra_nevergrad_sweeper tests are failing on main branch.\nexample failure https://app.circleci.com/pipelines/github/facebookresearch/hydra/11235/workflows/273a5296-bebf-4808-8e68-14b9889b63a5/jobs/102829\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom pathlib import Path\n\nfrom read_version import read_version\nfrom setuptools import find_namespace_packages, setup\n\nsetup(\n name=\"hydra-nevergrad-sweeper\",\n version=read_version(\"hydra_plugins/hydra_nevergrad_sweeper\", \"__init__.py\"),\n author=\"Jeremy Rapin, Omry Yadan, Jieru Hu\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Hydra Nevergrad Sweeper plugin\",\n long_description=(Path(__file__).parent / \"README.md\").read_text(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\n \"hydra-core>=1.1.0.dev7\",\n \"nevergrad>=0.4.3.post2,<0.4.3.post7\", # https://github.com/facebookresearch/hydra/issues/1768\n \"cma==3.0.3\", # https://github.com/facebookresearch/hydra/issues/1684\n \"numpy<1.20.0\", # remove once nevergrad is upgraded to support numpy 1.20\n ],\n include_package_data=True,\n)\n", "path": "plugins/hydra_nevergrad_sweeper/setup.py"}]} | 1,087 | 206 |
gh_patches_debug_11560 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-4282 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Azure - Docs missing for VM Images
</issue>
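The missing documentation amounts to a resource docstring with a small example policy. A sketch of that policy in Python-dict form, mirroring the YAML example that the accompanying diff adds (the policy name and image name are illustrative only):

```python
# Hypothetical example policy for the azure.image resource: match an image by name.
example_policy = {
    "policies": [
        {
            "name": "get-vm-image",
            "resource": "azure.image",
            "filters": [
                {"type": "value", "key": "name", "op": "eq", "value": "my-test-vm-image"}
            ],
        }
    ]
}
```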
<code>
[start of tools/c7n_azure/c7n_azure/resources/image.py]
1 # Copyright 2018 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from c7n_azure.provider import resources
16 from c7n_azure.resources.arm import ArmResourceManager
17
18
19 @resources.register('image')
20 class Image(ArmResourceManager):
21 class resource_type(ArmResourceManager.resource_type):
22 service = 'azure.mgmt.compute'
23 client = 'ComputeManagementClient'
24 enum_spec = ('images', 'list', None)
25 default_report_fields = (
26 'name',
27 'location',
28 'resourceGroup',
29 )
30 resource_type = 'Microsoft.Compute/images'
31
[end of tools/c7n_azure/c7n_azure/resources/image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/c7n_azure/c7n_azure/resources/image.py b/tools/c7n_azure/c7n_azure/resources/image.py
--- a/tools/c7n_azure/c7n_azure/resources/image.py
+++ b/tools/c7n_azure/c7n_azure/resources/image.py
@@ -18,6 +18,23 @@
@resources.register('image')
class Image(ArmResourceManager):
+ """Virtual Machine Image
+
+ :example:
+ Returns all virtual machine images named my-test-vm-image
+
+ .. code-block:: yaml
+
+ policies:
+ - name: get-vm-image
+ resource: azure.image
+ filters:
+ - type: value
+ key: name
+ op: eq
+ value: my-test-vm-image
+
+ """
class resource_type(ArmResourceManager.resource_type):
service = 'azure.mgmt.compute'
client = 'ComputeManagementClient'
| {"golden_diff": "diff --git a/tools/c7n_azure/c7n_azure/resources/image.py b/tools/c7n_azure/c7n_azure/resources/image.py\n--- a/tools/c7n_azure/c7n_azure/resources/image.py\n+++ b/tools/c7n_azure/c7n_azure/resources/image.py\n@@ -18,6 +18,23 @@\n \n @resources.register('image')\n class Image(ArmResourceManager):\n+ \"\"\"Virtual Machine Image\n+\n+ :example:\n+ Returns all virtual machine images named my-test-vm-image\n+\n+ .. code-block:: yaml\n+\n+ policies:\n+ - name: get-vm-image\n+ resource: azure.image\n+ filters:\n+ - type: value\n+ key: name\n+ op: eq\n+ value: my-test-vm-image\n+\n+ \"\"\"\n class resource_type(ArmResourceManager.resource_type):\n service = 'azure.mgmt.compute'\n client = 'ComputeManagementClient'\n", "issue": "Azure - Docs missing for VM Images\n\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom c7n_azure.provider import resources\nfrom c7n_azure.resources.arm import ArmResourceManager\n\n\[email protected]('image')\nclass Image(ArmResourceManager):\n class resource_type(ArmResourceManager.resource_type):\n service = 'azure.mgmt.compute'\n client = 'ComputeManagementClient'\n enum_spec = ('images', 'list', None)\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup',\n )\n resource_type = 'Microsoft.Compute/images'\n", "path": "tools/c7n_azure/c7n_azure/resources/image.py"}]} | 853 | 217 |
gh_patches_debug_14715 | rasdani/github-patches | git_diff | openai__gym-1950 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing attrs in LazyFrames: dtype, shape
This is my own fault.. I managed to push the wrong branch in #1906
</issue>
<code>
[start of gym/wrappers/frame_stack.py]
1 from collections import deque
2 import numpy as np
3
4 from gym.spaces import Box
5 from gym import Wrapper
6
7
8 class LazyFrames(object):
9 r"""Ensures common frames are only stored once to optimize memory use.
10
11 To further reduce the memory use, it is optionally to turn on lz4 to
12 compress the observations.
13
14 .. note::
15
16 This object should only be converted to numpy array just before forward pass.
17
18 Args:
19 lz4_compress (bool): use lz4 to compress the frames internally
20
21 """
22 __slots__ = ('frame_shape', 'dtype', 'shape', 'lz4_compress', '_frames')
23
24 def __init__(self, frames, lz4_compress=False):
25 if lz4_compress:
26 from lz4.block import compress
27 self.frame_shape = tuple(frames[0].shape)
28 self.dtype = frames[0].dtype
29 self.shape = (len(frames),) + self.frame_shape
30 frames = [compress(frame) for frame in frames]
31 self._frames = frames
32 self.lz4_compress = lz4_compress
33
34 def __array__(self, dtype=None):
35 arr = self[:]
36 if dtype is not None:
37 return arr.astype(dtype)
38 return arr
39
40 def __len__(self):
41 return self.shape[0]
42
43 def __getitem__(self, int_or_slice):
44 if isinstance(int_or_slice, int):
45 return self._check_decompress(self._frames[int_or_slice]) # single frame
46 return np.stack([self._check_decompress(f) for f in self._frames[int_or_slice]], axis=0)
47
48 def __eq__(self, other):
49 return self.__array__() == other
50
51 def _check_decompress(self, frame):
52 if self.lz4_compress:
53 from lz4.block import decompress
54 return np.frombuffer(decompress(frame), dtype=self.dtype).reshape(self.frame_shape)
55 return frame
56
57
58 class FrameStack(Wrapper):
59 r"""Observation wrapper that stacks the observations in a rolling manner.
60
61 For example, if the number of stacks is 4, then the returned observation contains
62 the most recent 4 observations. For environment 'Pendulum-v0', the original observation
63 is an array with shape [3], so if we stack 4 observations, the processed observation
64 has shape [4, 3].
65
66 .. note::
67
68 To be memory efficient, the stacked observations are wrapped by :class:`LazyFrame`.
69
70 .. note::
71
72 The observation space must be `Box` type. If one uses `Dict`
73 as observation space, it should apply `FlattenDictWrapper` at first.
74
75 Example::
76
77 >>> import gym
78 >>> env = gym.make('PongNoFrameskip-v0')
79 >>> env = FrameStack(env, 4)
80 >>> env.observation_space
81 Box(4, 210, 160, 3)
82
83 Args:
84 env (Env): environment object
85 num_stack (int): number of stacks
86 lz4_compress (bool): use lz4 to compress the frames internally
87
88 """
89 def __init__(self, env, num_stack, lz4_compress=False):
90 super(FrameStack, self).__init__(env)
91 self.num_stack = num_stack
92 self.lz4_compress = lz4_compress
93
94 self.frames = deque(maxlen=num_stack)
95
96 low = np.repeat(self.observation_space.low[np.newaxis, ...], num_stack, axis=0)
97 high = np.repeat(self.observation_space.high[np.newaxis, ...], num_stack, axis=0)
98 self.observation_space = Box(low=low, high=high, dtype=self.observation_space.dtype)
99
100 def _get_observation(self):
101 assert len(self.frames) == self.num_stack, (len(self.frames), self.num_stack)
102 return LazyFrames(list(self.frames), self.lz4_compress)
103
104 def step(self, action):
105 observation, reward, done, info = self.env.step(action)
106 self.frames.append(observation)
107 return self._get_observation(), reward, done, info
108
109 def reset(self, **kwargs):
110 observation = self.env.reset(**kwargs)
111 [self.frames.append(observation) for _ in range(self.num_stack)]
112 return self._get_observation()
113
[end of gym/wrappers/frame_stack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/wrappers/frame_stack.py b/gym/wrappers/frame_stack.py
--- a/gym/wrappers/frame_stack.py
+++ b/gym/wrappers/frame_stack.py
@@ -22,11 +22,11 @@
__slots__ = ('frame_shape', 'dtype', 'shape', 'lz4_compress', '_frames')
def __init__(self, frames, lz4_compress=False):
+ self.frame_shape = tuple(frames[0].shape)
+ self.shape = (len(frames),) + self.frame_shape
+ self.dtype = frames[0].dtype
if lz4_compress:
from lz4.block import compress
- self.frame_shape = tuple(frames[0].shape)
- self.dtype = frames[0].dtype
- self.shape = (len(frames),) + self.frame_shape
frames = [compress(frame) for frame in frames]
self._frames = frames
self.lz4_compress = lz4_compress
| {"golden_diff": "diff --git a/gym/wrappers/frame_stack.py b/gym/wrappers/frame_stack.py\n--- a/gym/wrappers/frame_stack.py\n+++ b/gym/wrappers/frame_stack.py\n@@ -22,11 +22,11 @@\n __slots__ = ('frame_shape', 'dtype', 'shape', 'lz4_compress', '_frames')\n \n def __init__(self, frames, lz4_compress=False):\n+ self.frame_shape = tuple(frames[0].shape)\n+ self.shape = (len(frames),) + self.frame_shape\n+ self.dtype = frames[0].dtype\n if lz4_compress:\n from lz4.block import compress\n- self.frame_shape = tuple(frames[0].shape)\n- self.dtype = frames[0].dtype\n- self.shape = (len(frames),) + self.frame_shape\n frames = [compress(frame) for frame in frames]\n self._frames = frames\n self.lz4_compress = lz4_compress\n", "issue": "Missing attrs in LazyFrames: dtype, shape\nThis is my own fault.. I managed to push the wrong branch in #1906 \n", "before_files": [{"content": "from collections import deque\nimport numpy as np\n\nfrom gym.spaces import Box\nfrom gym import Wrapper\n\n\nclass LazyFrames(object):\n r\"\"\"Ensures common frames are only stored once to optimize memory use.\n\n To further reduce the memory use, it is optionally to turn on lz4 to\n compress the observations.\n\n .. note::\n\n This object should only be converted to numpy array just before forward pass.\n\n Args:\n lz4_compress (bool): use lz4 to compress the frames internally\n\n \"\"\"\n __slots__ = ('frame_shape', 'dtype', 'shape', 'lz4_compress', '_frames')\n\n def __init__(self, frames, lz4_compress=False):\n if lz4_compress:\n from lz4.block import compress\n self.frame_shape = tuple(frames[0].shape)\n self.dtype = frames[0].dtype\n self.shape = (len(frames),) + self.frame_shape\n frames = [compress(frame) for frame in frames]\n self._frames = frames\n self.lz4_compress = lz4_compress\n\n def __array__(self, dtype=None):\n arr = self[:]\n if dtype is not None:\n return arr.astype(dtype)\n return arr\n\n def __len__(self):\n return self.shape[0]\n\n def __getitem__(self, int_or_slice):\n if isinstance(int_or_slice, int):\n return self._check_decompress(self._frames[int_or_slice]) # single frame\n return np.stack([self._check_decompress(f) for f in self._frames[int_or_slice]], axis=0)\n\n def __eq__(self, other):\n return self.__array__() == other\n\n def _check_decompress(self, frame):\n if self.lz4_compress:\n from lz4.block import decompress\n return np.frombuffer(decompress(frame), dtype=self.dtype).reshape(self.frame_shape)\n return frame\n\n\nclass FrameStack(Wrapper):\n r\"\"\"Observation wrapper that stacks the observations in a rolling manner.\n\n For example, if the number of stacks is 4, then the returned observation contains\n the most recent 4 observations. For environment 'Pendulum-v0', the original observation\n is an array with shape [3], so if we stack 4 observations, the processed observation\n has shape [4, 3].\n\n .. note::\n\n To be memory efficient, the stacked observations are wrapped by :class:`LazyFrame`.\n\n .. note::\n\n The observation space must be `Box` type. 
If one uses `Dict`\n as observation space, it should apply `FlattenDictWrapper` at first.\n\n Example::\n\n >>> import gym\n >>> env = gym.make('PongNoFrameskip-v0')\n >>> env = FrameStack(env, 4)\n >>> env.observation_space\n Box(4, 210, 160, 3)\n\n Args:\n env (Env): environment object\n num_stack (int): number of stacks\n lz4_compress (bool): use lz4 to compress the frames internally\n\n \"\"\"\n def __init__(self, env, num_stack, lz4_compress=False):\n super(FrameStack, self).__init__(env)\n self.num_stack = num_stack\n self.lz4_compress = lz4_compress\n\n self.frames = deque(maxlen=num_stack)\n\n low = np.repeat(self.observation_space.low[np.newaxis, ...], num_stack, axis=0)\n high = np.repeat(self.observation_space.high[np.newaxis, ...], num_stack, axis=0)\n self.observation_space = Box(low=low, high=high, dtype=self.observation_space.dtype)\n\n def _get_observation(self):\n assert len(self.frames) == self.num_stack, (len(self.frames), self.num_stack)\n return LazyFrames(list(self.frames), self.lz4_compress)\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n self.frames.append(observation)\n return self._get_observation(), reward, done, info\n\n def reset(self, **kwargs):\n observation = self.env.reset(**kwargs)\n [self.frames.append(observation) for _ in range(self.num_stack)]\n return self._get_observation()\n", "path": "gym/wrappers/frame_stack.py"}]} | 1,729 | 219 |
gh_patches_debug_18600 | rasdani/github-patches | git_diff | ivy-llc__ivy-22517 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kaiser_bessel_derived_window
- [ ] #1559
</issue>
<code>
[start of ivy/functional/frontends/tensorflow/signal.py]
1 import ivy
2 from ivy.functional.frontends.tensorflow.func_wrapper import (
3 to_ivy_arrays_and_back,
4 handle_tf_dtype,
5 )
6 from ivy.func_wrapper import with_supported_dtypes
7
8
9 # dct
10 @to_ivy_arrays_and_back
11 def dct(input, type=2, n=None, axis=-1, norm=None, name=None):
12 return ivy.dct(input, type=type, n=n, axis=axis, norm=norm)
13
14
15 # idct
16 @to_ivy_arrays_and_back
17 def idct(input, type=2, n=None, axis=-1, norm=None, name=None):
18 inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]
19 return ivy.dct(input, type=inverse_type, n=n, axis=axis, norm=norm)
20
21
22 @with_supported_dtypes(
23 {"2.13.0 and below": ("float32", "float64", "float16", "bfloat16")},
24 "tensorflow",
25 )
26 @handle_tf_dtype
27 @to_ivy_arrays_and_back
28 def kaiser_window(window_length, beta=12.0, dtype=ivy.float32, name=None):
29 return ivy.kaiser_window(window_length, periodic=False, beta=beta, dtype=dtype)
30
31
32 @with_supported_dtypes(
33 {"2.13.0 and below": ("float16", "float32", "float64", "bfloat16")},
34 "tensorflow",
35 )
36 @to_ivy_arrays_and_back
37 def vorbis_window(window_length, dtype=ivy.float32, name=None):
38 return ivy.vorbis_window(window_length, dtype=dtype, out=None)
39
[end of ivy/functional/frontends/tensorflow/signal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/tensorflow/signal.py b/ivy/functional/frontends/tensorflow/signal.py
--- a/ivy/functional/frontends/tensorflow/signal.py
+++ b/ivy/functional/frontends/tensorflow/signal.py
@@ -19,6 +19,15 @@
return ivy.dct(input, type=inverse_type, n=n, axis=axis, norm=norm)
+# kaiser_bessel_derived_window
+@handle_tf_dtype
+@to_ivy_arrays_and_back
+def kaiser_bessel_derived_window(
+ window_length, beta=12.0, dtype=ivy.float32, name=None
+):
+ return ivy.kaiser_bessel_derived_window(window_length, beta=beta, dtype=dtype)
+
+
@with_supported_dtypes(
{"2.13.0 and below": ("float32", "float64", "float16", "bfloat16")},
"tensorflow",
@@ -36,3 +45,11 @@
@to_ivy_arrays_and_back
def vorbis_window(window_length, dtype=ivy.float32, name=None):
return ivy.vorbis_window(window_length, dtype=dtype, out=None)
+
+
+kaiser_bessel_derived_window.supported_dtypes = (
+ "float32",
+ "float64",
+ "float16",
+ "bfloat16",
+)
| {"golden_diff": "diff --git a/ivy/functional/frontends/tensorflow/signal.py b/ivy/functional/frontends/tensorflow/signal.py\n--- a/ivy/functional/frontends/tensorflow/signal.py\n+++ b/ivy/functional/frontends/tensorflow/signal.py\n@@ -19,6 +19,15 @@\n return ivy.dct(input, type=inverse_type, n=n, axis=axis, norm=norm)\n \n \n+# kaiser_bessel_derived_window\n+@handle_tf_dtype\n+@to_ivy_arrays_and_back\n+def kaiser_bessel_derived_window(\n+ window_length, beta=12.0, dtype=ivy.float32, name=None\n+):\n+ return ivy.kaiser_bessel_derived_window(window_length, beta=beta, dtype=dtype)\n+\n+\n @with_supported_dtypes(\n {\"2.13.0 and below\": (\"float32\", \"float64\", \"float16\", \"bfloat16\")},\n \"tensorflow\",\n@@ -36,3 +45,11 @@\n @to_ivy_arrays_and_back\n def vorbis_window(window_length, dtype=ivy.float32, name=None):\n return ivy.vorbis_window(window_length, dtype=dtype, out=None)\n+\n+\n+kaiser_bessel_derived_window.supported_dtypes = (\n+ \"float32\",\n+ \"float64\",\n+ \"float16\",\n+ \"bfloat16\",\n+)\n", "issue": "kaiser_bessel_derived_window\n- [ ] #1559\n", "before_files": [{"content": "import ivy\nfrom ivy.functional.frontends.tensorflow.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_tf_dtype,\n)\nfrom ivy.func_wrapper import with_supported_dtypes\n\n\n# dct\n@to_ivy_arrays_and_back\ndef dct(input, type=2, n=None, axis=-1, norm=None, name=None):\n return ivy.dct(input, type=type, n=n, axis=axis, norm=norm)\n\n\n# idct\n@to_ivy_arrays_and_back\ndef idct(input, type=2, n=None, axis=-1, norm=None, name=None):\n inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]\n return ivy.dct(input, type=inverse_type, n=n, axis=axis, norm=norm)\n\n\n@with_supported_dtypes(\n {\"2.13.0 and below\": (\"float32\", \"float64\", \"float16\", \"bfloat16\")},\n \"tensorflow\",\n)\n@handle_tf_dtype\n@to_ivy_arrays_and_back\ndef kaiser_window(window_length, beta=12.0, dtype=ivy.float32, name=None):\n return ivy.kaiser_window(window_length, periodic=False, beta=beta, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.13.0 and below\": (\"float16\", \"float32\", \"float64\", \"bfloat16\")},\n \"tensorflow\",\n)\n@to_ivy_arrays_and_back\ndef vorbis_window(window_length, dtype=ivy.float32, name=None):\n return ivy.vorbis_window(window_length, dtype=dtype, out=None)\n", "path": "ivy/functional/frontends/tensorflow/signal.py"}]} | 1,015 | 326 |
gh_patches_debug_60744 | rasdani/github-patches | git_diff | gpodder__mygpo-546 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when trying to run manage.py update-toplist
```
Traceback (most recent call last):
File "manage.py", line 8, in <module>
execute_from_command_line(sys.argv)
File "/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File "/home/mike/mygpo/mygpo/directory/management/commands/update-toplist.py", line 27, in handle
total = podcasts.count_fast()
AttributeError: 'PodcastQuerySet' object has no attribute 'count_fast'
```
</issue>
<code>
[start of mygpo/directory/management/commands/update-toplist.py]
1 from optparse import make_option
2
3 from django.core.management.base import BaseCommand
4
5 from mygpo.podcasts.models import Podcast
6 from mygpo.utils import progress
7 from mygpo.directory.tasks import update_podcast_subscribers
8
9
10 class Command(BaseCommand):
11 """ For each podcast a task is scheduled to update its subscriber count """
12
13 def add_arguments(self, parser):
14 parser.add_argument(
15 "--silent",
16 action="store_true",
17 dest="silent",
18 default=False,
19 help="Don't show any output",
20 ),
21
22 def handle(self, *args, **options):
23
24 silent = options.get("silent")
25
26 podcasts = Podcast.objects.all()
27 total = podcasts.count_fast()
28
29 for n, podcast in enumerate(podcasts):
30 update_podcast_subscribers.delay(podcast.get_id())
31
32 if not silent:
33 progress(n, total)
34
[end of mygpo/directory/management/commands/update-toplist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mygpo/directory/management/commands/update-toplist.py b/mygpo/directory/management/commands/update-toplist.py
--- a/mygpo/directory/management/commands/update-toplist.py
+++ b/mygpo/directory/management/commands/update-toplist.py
@@ -24,7 +24,7 @@
silent = options.get("silent")
podcasts = Podcast.objects.all()
- total = podcasts.count_fast()
+ total = podcasts.count()
for n, podcast in enumerate(podcasts):
update_podcast_subscribers.delay(podcast.get_id())
| {"golden_diff": "diff --git a/mygpo/directory/management/commands/update-toplist.py b/mygpo/directory/management/commands/update-toplist.py\n--- a/mygpo/directory/management/commands/update-toplist.py\n+++ b/mygpo/directory/management/commands/update-toplist.py\n@@ -24,7 +24,7 @@\n silent = options.get(\"silent\")\n \n podcasts = Podcast.objects.all()\n- total = podcasts.count_fast()\n+ total = podcasts.count()\n \n for n, podcast in enumerate(podcasts):\n update_podcast_subscribers.delay(podcast.get_id())\n", "issue": "Error when trying to run manage.py update-toplist\n```\r\nTraceback (most recent call last):\r\n File \"manage.py\", line 8, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/__init__.py\", line 381, in execute_from_command_line\r\n utility.execute()\r\n File \"/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/__init__.py\", line 375, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/base.py\", line 316, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/home/mike/mygpo/venv/lib/python3.6/site-packages/django/core/management/base.py\", line 353, in execute\r\n output = self.handle(*args, **options)\r\n File \"/home/mike/mygpo/mygpo/directory/management/commands/update-toplist.py\", line 27, in handle\r\n total = podcasts.count_fast()\r\nAttributeError: 'PodcastQuerySet' object has no attribute 'count_fast'\r\n```\n", "before_files": [{"content": "from optparse import make_option\n\nfrom django.core.management.base import BaseCommand\n\nfrom mygpo.podcasts.models import Podcast\nfrom mygpo.utils import progress\nfrom mygpo.directory.tasks import update_podcast_subscribers\n\n\nclass Command(BaseCommand):\n \"\"\" For each podcast a task is scheduled to update its subscriber count \"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--silent\",\n action=\"store_true\",\n dest=\"silent\",\n default=False,\n help=\"Don't show any output\",\n ),\n\n def handle(self, *args, **options):\n\n silent = options.get(\"silent\")\n\n podcasts = Podcast.objects.all()\n total = podcasts.count_fast()\n\n for n, podcast in enumerate(podcasts):\n update_podcast_subscribers.delay(podcast.get_id())\n\n if not silent:\n progress(n, total)\n", "path": "mygpo/directory/management/commands/update-toplist.py"}]} | 1,090 | 132 |
gh_patches_debug_11 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1082 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update version number
Sprint 26 will be 0.3.3
</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
1 hdx_version='v0.3.2'
[end of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version='v0.3.2'
\ No newline at end of file
+hdx_version='v0.3.3'
\ No newline at end of file
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version='v0.3.2'\n\\ No newline at end of file\n+hdx_version='v0.3.3'\n\\ No newline at end of file\n", "issue": "Update version number\nSprint 26 will be 0.3.3\n\n", "before_files": [{"content": "hdx_version='v0.3.2'", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 585 | 121 |
gh_patches_debug_19246 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1896 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2.2.0 CLI reports version 2.1.2dev0
* Cookiecutter version: 2.2.0 (or 2.1.2dev0, depending on who you ask)
* Template project url: n/a
* Python version: 3.11
* Operating System: linux
### Description:
Get the accurate version of cookiecutter from the CLI
### What I've run:
```bash
cookiecutter --version
Cookiecutter 2.1.2.dev0 from $PREFIX/lib/python3.11/site-packages (Python 3.11.4 | packaged by conda-forge | (main, Jun 10 2023, 18:08:17) [GCC 12.2.0])
```
Would be a one-line fix, but ideally would be always be sourced from exactly one place:
- `setup.py` and `importlib_metadata`
- `__init__.py`
- a `VERSION` file
</issue>
<code>
[start of cookiecutter/__init__.py]
1 """Main package for Cookiecutter."""
2 __version__ = "2.1.2.dev0"
3
[end of cookiecutter/__init__.py]
[start of setup.py]
1 """cookiecutter distutils configuration."""
2 from setuptools import setup
3
4 version = "2.2.3.dev0"
5
6 with open('README.md', encoding='utf-8') as readme_file:
7 readme = readme_file.read()
8
9 requirements = [
10 'binaryornot>=0.4.4',
11 'Jinja2>=2.7,<4.0.0',
12 'click>=7.0,<9.0.0',
13 'pyyaml>=5.3.1',
14 'python-slugify>=4.0.0',
15 'requests>=2.23.0',
16 'arrow',
17 ]
18
19 setup(
20 name='cookiecutter',
21 version=version,
22 description=(
23 'A command-line utility that creates projects from project '
24 'templates, e.g. creating a Python package project from a '
25 'Python package project template.'
26 ),
27 long_description=readme,
28 long_description_content_type='text/markdown',
29 author='Audrey Feldroy',
30 author_email='[email protected]',
31 url='https://github.com/cookiecutter/cookiecutter',
32 project_urls={
33 "Documentation": "https://cookiecutter.readthedocs.io",
34 "Issues": "https://github.com/cookiecutter/cookiecutter/issues",
35 "Discord": "https://discord.gg/9BrxzPKuEW",
36 },
37 packages=['cookiecutter'],
38 package_dir={'cookiecutter': 'cookiecutter'},
39 entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
40 include_package_data=True,
41 python_requires='>=3.7',
42 install_requires=requirements,
43 license='BSD',
44 zip_safe=False,
45 classifiers=[
46 "Development Status :: 5 - Production/Stable",
47 "Environment :: Console",
48 "Intended Audience :: Developers",
49 "Natural Language :: English",
50 "License :: OSI Approved :: BSD License",
51 "Programming Language :: Python :: 3 :: Only",
52 "Programming Language :: Python :: 3",
53 "Programming Language :: Python :: 3.7",
54 "Programming Language :: Python :: 3.8",
55 "Programming Language :: Python :: 3.9",
56 "Programming Language :: Python :: 3.10",
57 "Programming Language :: Python :: 3.11",
58 "Programming Language :: Python :: Implementation :: CPython",
59 "Programming Language :: Python :: Implementation :: PyPy",
60 "Programming Language :: Python",
61 "Topic :: Software Development",
62 ],
63 keywords=[
64 "cookiecutter",
65 "Python",
66 "projects",
67 "project templates",
68 "Jinja2",
69 "skeleton",
70 "scaffolding",
71 "project directory",
72 "package",
73 "packaging",
74 ],
75 )
76
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/__init__.py b/cookiecutter/__init__.py
--- a/cookiecutter/__init__.py
+++ b/cookiecutter/__init__.py
@@ -1,2 +1,12 @@
"""Main package for Cookiecutter."""
-__version__ = "2.1.2.dev0"
+from pathlib import Path
+
+
+def _get_version() -> str:
+ """Read VERSION.txt and return its contents."""
+ path = Path(__file__).parent.resolve()
+ version_file = path / "VERSION.txt"
+ return version_file.read_text().strip()
+
+
+__version__ = _get_version()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,11 +1,22 @@
"""cookiecutter distutils configuration."""
+from pathlib import Path
from setuptools import setup
-version = "2.2.3.dev0"
+
+def _get_version() -> str:
+ """Read cookiecutter/VERSION.txt and return its contents."""
+ path = Path("cookiecutter").resolve()
+ version_file = path / "VERSION.txt"
+ return version_file.read_text().strip()
+
+
+version = _get_version()
+
with open('README.md', encoding='utf-8') as readme_file:
readme = readme_file.read()
+
requirements = [
'binaryornot>=0.4.4',
'Jinja2>=2.7,<4.0.0',
| {"golden_diff": "diff --git a/cookiecutter/__init__.py b/cookiecutter/__init__.py\n--- a/cookiecutter/__init__.py\n+++ b/cookiecutter/__init__.py\n@@ -1,2 +1,12 @@\n \"\"\"Main package for Cookiecutter.\"\"\"\n-__version__ = \"2.1.2.dev0\"\n+from pathlib import Path\n+\n+\n+def _get_version() -> str:\n+ \"\"\"Read VERSION.txt and return its contents.\"\"\"\n+ path = Path(__file__).parent.resolve()\n+ version_file = path / \"VERSION.txt\"\n+ return version_file.read_text().strip()\n+\n+\n+__version__ = _get_version()\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,11 +1,22 @@\n \"\"\"cookiecutter distutils configuration.\"\"\"\n+from pathlib import Path\n from setuptools import setup\n \n-version = \"2.2.3.dev0\"\n+\n+def _get_version() -> str:\n+ \"\"\"Read cookiecutter/VERSION.txt and return its contents.\"\"\"\n+ path = Path(\"cookiecutter\").resolve()\n+ version_file = path / \"VERSION.txt\"\n+ return version_file.read_text().strip()\n+\n+\n+version = _get_version()\n+\n \n with open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n \n+\n requirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n", "issue": "2.2.0 CLI reports version 2.1.2dev0 \n* Cookiecutter version: 2.2.0 (or 2.1.2dev0, depending on who you ask)\r\n* Template project url: n/a\r\n* Python version: 3.11\r\n* Operating System: linux\r\n\r\n### Description:\r\n\r\nGet the accurate version of cookiecutter from the CLI\r\n\r\n### What I've run:\r\n\r\n```bash\r\ncookiecutter --version\r\nCookiecutter 2.1.2.dev0 from $PREFIX/lib/python3.11/site-packages (Python 3.11.4 | packaged by conda-forge | (main, Jun 10 2023, 18:08:17) [GCC 12.2.0])\r\n```\r\n\r\nWould be a one-line fix, but ideally would be always be sourced from exactly one place:\r\n- `setup.py` and `importlib_metadata`\r\n- `__init__.py`\r\n- a `VERSION` file\n", "before_files": [{"content": "\"\"\"Main package for Cookiecutter.\"\"\"\n__version__ = \"2.1.2.dev0\"\n", "path": "cookiecutter/__init__.py"}, {"content": "\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.2.3.dev0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<9.0.0',\n 'pyyaml>=5.3.1',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'arrow',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n project_urls={\n \"Documentation\": \"https://cookiecutter.readthedocs.io\",\n \"Issues\": \"https://github.com/cookiecutter/cookiecutter/issues\",\n \"Discord\": \"https://discord.gg/9BrxzPKuEW\",\n },\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.7',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}]} | 1,530 | 336 |
gh_patches_debug_4969 | rasdani/github-patches | git_diff | spacetelescope__jwql-601 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build tests for Bad Pixel Monitor
</issue>
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.23.0'
6
7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/[email protected]',
13 'git+https://github.com/spacetelescope/jwst_reffiles'
14 ]
15 REQUIRES = [
16 'asdf>=2.3.3',
17 'astropy>=3.2.1',
18 'astroquery>=0.3.9',
19 'authlib',
20 'bokeh>=1.0,<1.4',
21 'codecov',
22 'crds',
23 'django>=2.0,<3.0',
24 'flake8',
25 'inflection',
26 'ipython',
27 'jinja2',
28 'jsonschema',
29 'jwedb>=0.0.3',
30 'matplotlib',
31 'nodejs',
32 'numpy',
33 'numpydoc',
34 'pandas',
35 'psycopg2',
36 'pysiaf',
37 'pytest',
38 'pytest-cov',
39 'scipy',
40 'sphinx',
41 'sqlalchemy',
42 'stsci_rtd_theme',
43 'twine'
44 ]
45
46 setup(
47 name='jwql',
48 version=VERSION,
49 description=DESCRIPTION,
50 url='https://github.com/spacetelescope/jwql.git',
51 author=AUTHORS,
52 author_email='[email protected]',
53 license='BSD',
54 keywords=['astronomy', 'python'],
55 classifiers=['Programming Language :: Python'],
56 packages=find_packages(),
57 install_requires=REQUIRES,
58 dependency_links=DEPENDENCY_LINKS,
59 include_package_data=True,
60 include_dirs=[np.get_include()],
61 )
62
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/[email protected]',
- 'git+https://github.com/spacetelescope/jwst_reffiles'
+ 'git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles'
]
REQUIRES = [
'asdf>=2.3.3',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n \n DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/[email protected]',\n- 'git+https://github.com/spacetelescope/jwst_reffiles'\n+ 'git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles'\n ]\n REQUIRES = [\n 'asdf>=2.3.3',\n", "issue": "Build tests for Bad Pixel Monitor\n\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.23.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/[email protected]',\n 'git+https://github.com/spacetelescope/jwst_reffiles'\n ]\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'django>=2.0,<3.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 1,113 | 138 |
gh_patches_debug_14263 | rasdani/github-patches | git_diff | OCA__stock-logistics-warehouse-1247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[12.0] stock_secondary_unit "secondary qty" value in picking not affecting "initial demand"
AFFECTED VERSIONS
12.0 (it works ok on 13.0)
STEPS TO REPRODUCE
Activate Units of Measure in general settings > inventory
Inventory > Master Data > Products > Set a secondary unit in a product

Create a new transfer > add product > select secondary UoM > input secondary qty

CURRENT BEHAVIOR
Value "Initial demand" is not affected by secondary qty input
In the same way, if "Initial demand" is set, "secondary qty" does not change - basically there is no relation between the two fields

REQUIRED BEHAVIOR
When "secondary qty" is updated, also "initial demand" should update - and viceversa, as it happens in SO with secondary unit modules
VIDEO
https://recordit.co/zcuDUx6xco
</issue>
<code>
[start of stock_secondary_unit/models/stock_move.py]
1 # Copyright 2018 Tecnativa - Sergio Teruel
2 # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
3 from odoo import api, fields, models
4 from odoo.addons import decimal_precision as dp
5 from odoo.tools.float_utils import float_compare, float_round
6
7
8 class StockSecondaryUnitMixin(models.AbstractModel):
9 _name = 'stock.secondary.unit.mixin'
10 _description = 'Stock Secondary Unit Mixin'
11
12 secondary_uom_id = fields.Many2one(
13 comodel_name='product.secondary.unit',
14 string='Second unit',
15 )
16 secondary_uom_qty = fields.Float(
17 string='Secondary Qty',
18 digits=dp.get_precision('Product Unit of Measure'),
19 )
20
21
22 class StockMove(models.Model):
23 _inherit = ['stock.move', 'stock.secondary.unit.mixin']
24 _name = 'stock.move'
25
26 def _merge_moves_fields(self):
27 res = super(StockMove, self)._merge_moves_fields()
28 res['secondary_uom_qty'] = self[-1:].secondary_uom_qty
29 return res
30
31 @api.onchange('secondary_uom_id', 'secondary_uom_qty')
32 def onchange_secondary_uom(self):
33 if not self.secondary_uom_id:
34 return
35 factor = self.secondary_uom_id.factor * self.product_uom.factor
36
37 qty = float_round(
38 self.secondary_uom_qty * factor,
39 precision_rounding=self.product_uom.rounding
40 )
41 if float_compare(
42 self.product_uom_qty, qty, precision_rounding=self.product_uom.rounding
43 ) != 0:
44 self.product_uom_qty = qty
45
46 @api.onchange('product_uom_qty')
47 def onchange_secondary_unit_product_uom_qty(self):
48 if not self.secondary_uom_id:
49 return
50 factor = self.secondary_uom_id.factor * self.product_uom.factor
51
52 qty = float_round(
53 self.product_uom_qty / (factor or 1.0),
54 precision_rounding=self.secondary_uom_id.uom_id.rounding
55 )
56 if float_compare(
57 self.secondary_uom_qty,
58 qty,
59 precision_rounding=self.secondary_uom_id.uom_id.rounding
60 ) != 0:
61 self.secondary_uom_qty = qty
62
63 @api.onchange('product_uom')
64 def onchange_product_uom_for_secondary(self):
65 if not self.secondary_uom_id:
66 return
67 factor = self.product_uom.factor * self.secondary_uom_id.factor
68 qty = float_round(
69 self.product_uom_qty / (factor or 1.0),
70 precision_rounding=self.product_uom.rounding
71 )
72 if float_compare(
73 self.secondary_uom_qty, qty, precision_rounding=self.product_uom.rounding
74 ) != 0:
75 self.secondary_uom_qty = qty
76
77
78 class StockMoveLine(models.Model):
79 _inherit = ['stock.move.line', 'stock.secondary.unit.mixin']
80 _name = 'stock.move.line'
81
82 @api.model
83 def create(self, vals):
84 move = self.env['stock.move'].browse(vals.get('move_id', False))
85 if move.secondary_uom_id:
86 uom = self.env['uom.uom'].browse(vals['product_uom_id'])
87 factor = move.secondary_uom_id.factor * uom.factor
88 move_line_qty = vals.get(
89 'product_uom_qty', vals.get('qty_done', 0.0))
90 qty = float_round(
91 move_line_qty / (factor or 1.0),
92 precision_rounding=move.secondary_uom_id.uom_id.rounding
93 )
94 vals.update({
95 'secondary_uom_qty': qty,
96 'secondary_uom_id': move.secondary_uom_id.id,
97 })
98 return super().create(vals)
99
[end of stock_secondary_unit/models/stock_move.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/stock_secondary_unit/models/stock_move.py b/stock_secondary_unit/models/stock_move.py
--- a/stock_secondary_unit/models/stock_move.py
+++ b/stock_secondary_unit/models/stock_move.py
@@ -96,3 +96,21 @@
'secondary_uom_id': move.secondary_uom_id.id,
})
return super().create(vals)
+
+ @api.multi
+ def write(self, vals):
+ for rec in self:
+ move = rec.move_id
+ if move.secondary_uom_id:
+ uom = rec.product_id.uom_id
+ factor = move.secondary_uom_id.factor * uom.factor
+ move_line_qty = vals.get('product_uom_qty', rec.product_uom_qty)
+ qty = float_round(
+ move_line_qty / (factor or 1.0),
+ precision_rounding=move.secondary_uom_id.uom_id.rounding
+ )
+ vals.update({
+ 'secondary_uom_qty': qty,
+ 'secondary_uom_id': move.secondary_uom_id.id,
+ })
+ return super().write(vals)
| {"golden_diff": "diff --git a/stock_secondary_unit/models/stock_move.py b/stock_secondary_unit/models/stock_move.py\n--- a/stock_secondary_unit/models/stock_move.py\n+++ b/stock_secondary_unit/models/stock_move.py\n@@ -96,3 +96,21 @@\n 'secondary_uom_id': move.secondary_uom_id.id,\n })\n return super().create(vals)\n+\n+ @api.multi\n+ def write(self, vals):\n+ for rec in self:\n+ move = rec.move_id\n+ if move.secondary_uom_id:\n+ uom = rec.product_id.uom_id\n+ factor = move.secondary_uom_id.factor * uom.factor\n+ move_line_qty = vals.get('product_uom_qty', rec.product_uom_qty)\n+ qty = float_round(\n+ move_line_qty / (factor or 1.0),\n+ precision_rounding=move.secondary_uom_id.uom_id.rounding\n+ )\n+ vals.update({\n+ 'secondary_uom_qty': qty,\n+ 'secondary_uom_id': move.secondary_uom_id.id,\n+ })\n+ return super().write(vals)\n", "issue": "[12.0] stock_secondary_unit \"secondary qty\" value in picking not affecting \"initial demand\"\nAFFECTED VERSIONS\r\n\r\n12.0 (it works ok on 13.0)\r\n\r\nSTEPS TO REPRODUCE\r\n\r\nActivate Units of Measure in general settings > inventory\r\n\r\nInventory > Master Data > Products > Set a secondary unit in a product\r\n\r\n\r\n\r\nCreate a new transfer > add product > select secondary UoM > input secondary qty\r\n\r\n\r\n\r\nCURRENT BEHAVIOR\r\n\r\nValue \"Initial demand\" is not affected by secondary qty input\r\nIn the same way, if \"Initial demand\" is set, \"secondary qty\" does not change - basically there is no relation between the two fields\r\n\r\n\r\n\r\nREQUIRED BEHAVIOR \r\n\r\nWhen \"secondary qty\" is updated, also \"initial demand\" should update - and viceversa, as it happens in SO with secondary unit modules\r\n\r\nVIDEO\r\n\r\nhttps://recordit.co/zcuDUx6xco\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Tecnativa - Sergio Teruel\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\nfrom odoo import api, fields, models\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.tools.float_utils import float_compare, float_round\n\n\nclass StockSecondaryUnitMixin(models.AbstractModel):\n _name = 'stock.secondary.unit.mixin'\n _description = 'Stock Secondary Unit Mixin'\n\n secondary_uom_id = fields.Many2one(\n comodel_name='product.secondary.unit',\n string='Second unit',\n )\n secondary_uom_qty = fields.Float(\n string='Secondary Qty',\n digits=dp.get_precision('Product Unit of Measure'),\n )\n\n\nclass StockMove(models.Model):\n _inherit = ['stock.move', 'stock.secondary.unit.mixin']\n _name = 'stock.move'\n\n def _merge_moves_fields(self):\n res = super(StockMove, self)._merge_moves_fields()\n res['secondary_uom_qty'] = self[-1:].secondary_uom_qty\n return res\n\n @api.onchange('secondary_uom_id', 'secondary_uom_qty')\n def onchange_secondary_uom(self):\n if not self.secondary_uom_id:\n return\n factor = self.secondary_uom_id.factor * self.product_uom.factor\n\n qty = float_round(\n self.secondary_uom_qty * factor,\n precision_rounding=self.product_uom.rounding\n )\n if float_compare(\n self.product_uom_qty, qty, precision_rounding=self.product_uom.rounding\n ) != 0:\n self.product_uom_qty = qty\n\n @api.onchange('product_uom_qty')\n def onchange_secondary_unit_product_uom_qty(self):\n if not self.secondary_uom_id:\n return\n factor = self.secondary_uom_id.factor * self.product_uom.factor\n\n qty = float_round(\n self.product_uom_qty / (factor or 1.0),\n precision_rounding=self.secondary_uom_id.uom_id.rounding\n )\n if float_compare(\n self.secondary_uom_qty,\n qty,\n 
precision_rounding=self.secondary_uom_id.uom_id.rounding\n ) != 0:\n self.secondary_uom_qty = qty\n\n @api.onchange('product_uom')\n def onchange_product_uom_for_secondary(self):\n if not self.secondary_uom_id:\n return\n factor = self.product_uom.factor * self.secondary_uom_id.factor\n qty = float_round(\n self.product_uom_qty / (factor or 1.0),\n precision_rounding=self.product_uom.rounding\n )\n if float_compare(\n self.secondary_uom_qty, qty, precision_rounding=self.product_uom.rounding\n ) != 0:\n self.secondary_uom_qty = qty\n\n\nclass StockMoveLine(models.Model):\n _inherit = ['stock.move.line', 'stock.secondary.unit.mixin']\n _name = 'stock.move.line'\n\n @api.model\n def create(self, vals):\n move = self.env['stock.move'].browse(vals.get('move_id', False))\n if move.secondary_uom_id:\n uom = self.env['uom.uom'].browse(vals['product_uom_id'])\n factor = move.secondary_uom_id.factor * uom.factor\n move_line_qty = vals.get(\n 'product_uom_qty', vals.get('qty_done', 0.0))\n qty = float_round(\n move_line_qty / (factor or 1.0),\n precision_rounding=move.secondary_uom_id.uom_id.rounding\n )\n vals.update({\n 'secondary_uom_qty': qty,\n 'secondary_uom_id': move.secondary_uom_id.id,\n })\n return super().create(vals)\n", "path": "stock_secondary_unit/models/stock_move.py"}]} | 1,931 | 251 |