problem_id (string, length 18–22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, length 13–58) | prompt (string, length 1.71k–9.01k) | golden_diff (string, length 151–4.94k) | verification_info (string, length 465–11.3k) | num_tokens_prompt (int64, 557–2.05k) | num_tokens_diff (int64, 48–1.02k)
---|---|---|---|---|---|---|---|---|
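
For working with these rows programmatically, a minimal loading sketch is shown below. It assumes the dataset is published on the Hugging Face Hub under `rasdani/github-patches` (the value of the `source` column) and that the `datasets` library is installed; the split name `"train"` is likewise an assumption rather than something stated on this page.

```python
import json

from datasets import load_dataset

# Assumption: the rows rendered below are available under this Hub id,
# and "train" is the split that contains them.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["num_tokens_prompt"], row["num_tokens_diff"])

# verification_info is a JSON string with keys "golden_diff", "issue",
# and "before_files" (a list of {"content": ..., "path": ...} records).
info = json.loads(row["verification_info"])
for f in info["before_files"]:
    print(f["path"], len(f["content"]), "chars")
```
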
gh_patches_debug_2814 | rasdani/github-patches | git_diff | dotkom__onlineweb4-496 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make offline archive look more like event archive
Same as #481. This is mainly about the filtering section.
</issue>
<code>
[start of apps/api/v0/article.py]
1 #-*- coding: utf-8 -*-
2 from copy import copy
3
4 from django.conf import settings
5 from django.template.defaultfilters import slugify
6 from django.utils import timezone
7
8 from filebrowser.base import FileObject
9 from filebrowser.settings import VERSIONS
10 from tastypie import fields
11 from tastypie.resources import ModelResource
12
13 from apps.api.v0.authentication import UserResource
14 from apps.article.models import Article, ArticleTag, Tag
15
16
17
18
19 class ArticleResource(ModelResource):
20 author = fields.ToOneField(UserResource, 'created_by')
21
22 def alter_list_data_to_serialize(self, request, data):
23 # Renames list data 'object' to 'articles'.
24 if isinstance(data, dict):
25 data['articles'] = copy(data['objects'])
26 del(data['objects'])
27 return data
28
29 # Making multiple images for the article
30 def dehydrate(self, bundle):
31
32 # Setting slug-field
33 bundle.data['slug'] = slugify(bundle.data['heading'])
34
35 # If image is set
36 if bundle.data['image']:
37 # Parse to FileObject used by Filebrowser
38 temp_image = FileObject(bundle.data['image'])
39
40 # Itterate the different versions (by key)
41 for ver in VERSIONS.keys():
42 # Check if the key start with article_ (if it does, we want to crop to that size)
43 if ver.startswith('article_'):
44 # Adding the new image to the object
45 bundle.data['image_'+ver] = temp_image.version_generate(ver).url
46
47 # Unset the image-field
48 del(bundle.data['image'])
49
50 # Returning washed object
51 return bundle
52
53 def get_object_list(self, request):
54 # Getting the GET-params
55 if 'tag' in request.GET:
56 request_tag = request.GET['tag']
57 else:
58 request_tag = None
59
60 if 'year' in request.GET:
61 request_year = request.GET['year']
62 else:
63 request_year = None
64
65 if 'month' in request.GET:
66 request_month = request.GET['month']
67 else:
68 request_month = None
69
70 # Check filtering here
71 if (request_year is not None):
72 if (request_month is not None):
73 # Filtering on both year and month
74 queryset = Article.objects.filter(published_date__year=request_year, published_date__month=request_month, published_date__lte=timezone.now()).order_by('-published_date')
75 else:
76 # Filtering on only year
77 queryset = Article.objects.filter(published_date__year=request_year, published_date__lte=timezone.now()).order_by('-published_date')
78 else:
79 # Not filtering on year, check if filtering on slug (tag) or return default query
80 if (request_tag is not None):
81 # Filtering on slug
82 slug_query = Tag.objects.filter(slug = request_tag)
83 slug_connect = ArticleTag.objects.filter(tag = slug_query).values('article_id')
84 queryset = Article.objects.filter(id__in = slug_connect, published_date__lte=timezone.now()).order_by('-published_date')
85 else:
86 # No filtering at all, return default query
87 queryset = Article.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
88 return queryset
89
90 class Meta:
91 API_LIMIT_PER_PAGE = 9
92 queryset = Article.objects.filter(published_date__lte=timezone.now())
93 resource_name = 'article/all'
94 ordering = ['-published_date']
95 include_absolute_url = True
96 filtering = {
97 'featured' : ('exact',),
98 'published_date' : ('gte',),
99 }
100
101 class ArticleLatestResource(ModelResource):
102 author = fields.ToOneField(UserResource, 'created_by')
103
104 class Meta:
105 queryset = Article.objects.filter(published_date__lte=timezone.now())
106
107 resource_name = 'article/latest'
108 filtering = {
109 'featured': ('exact',)
110 }
111 ordering = ['-published_date']
112 max_limit = 25
113 def alter_list_data_to_serialize(self, request, data):
114 # Renames list data 'object' to 'articles'.
115 if isinstance(data, dict):
116 data['articles'] = copy(data['objects'])
117 del(data['objects'])
118 return data
119 def dehydrate(self, bundle):
120 bundle.data['slug'] = slugify(bundle.data['heading'])
121 return bundle
122
[end of apps/api/v0/article.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/api/v0/article.py b/apps/api/v0/article.py
--- a/apps/api/v0/article.py
+++ b/apps/api/v0/article.py
@@ -17,7 +17,7 @@
class ArticleResource(ModelResource):
- author = fields.ToOneField(UserResource, 'created_by')
+ author = fields.ToOneField(UserResource, 'created_by', full=True)
def alter_list_data_to_serialize(self, request, data):
# Renames list data 'object' to 'articles'.
| {"golden_diff": "diff --git a/apps/api/v0/article.py b/apps/api/v0/article.py\n--- a/apps/api/v0/article.py\n+++ b/apps/api/v0/article.py\n@@ -17,7 +17,7 @@\n \n \n class ArticleResource(ModelResource):\n- author = fields.ToOneField(UserResource, 'created_by')\n+ author = fields.ToOneField(UserResource, 'created_by', full=True)\n \n def alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n", "issue": "Make offline archive look more like event archive\nSame as #481. This is mainly about the filtering section.\n\n", "before_files": [{"content": "#-*- coding: utf-8 -*-\nfrom copy import copy\n\nfrom django.conf import settings\nfrom django.template.defaultfilters import slugify\nfrom django.utils import timezone\n\nfrom filebrowser.base import FileObject\nfrom filebrowser.settings import VERSIONS\nfrom tastypie import fields\nfrom tastypie.resources import ModelResource\n\nfrom apps.api.v0.authentication import UserResource\nfrom apps.article.models import Article, ArticleTag, Tag\n\n\n\n\nclass ArticleResource(ModelResource):\n author = fields.ToOneField(UserResource, 'created_by')\n \n def alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n if isinstance(data, dict):\n data['articles'] = copy(data['objects'])\n del(data['objects'])\n return data\n \n # Making multiple images for the article\n def dehydrate(self, bundle):\n \n # Setting slug-field\n bundle.data['slug'] = slugify(bundle.data['heading'])\n \n # If image is set\n if bundle.data['image']:\n # Parse to FileObject used by Filebrowser\n temp_image = FileObject(bundle.data['image'])\n \n # Itterate the different versions (by key)\n for ver in VERSIONS.keys():\n # Check if the key start with article_ (if it does, we want to crop to that size)\n if ver.startswith('article_'):\n # Adding the new image to the object\n bundle.data['image_'+ver] = temp_image.version_generate(ver).url\n \n # Unset the image-field\n del(bundle.data['image'])\n \n # Returning washed object\n return bundle\n \n def get_object_list(self, request):\n # Getting the GET-params\n if 'tag' in request.GET:\n request_tag = request.GET['tag']\n else:\n request_tag = None\n \n if 'year' in request.GET:\n request_year = request.GET['year']\n else:\n request_year = None\n \n if 'month' in request.GET:\n request_month = request.GET['month']\n else:\n request_month = None\n \n # Check filtering here\n if (request_year is not None):\n if (request_month is not None):\n # Filtering on both year and month\n queryset = Article.objects.filter(published_date__year=request_year, published_date__month=request_month, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # Filtering on only year\n queryset = Article.objects.filter(published_date__year=request_year, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # Not filtering on year, check if filtering on slug (tag) or return default query\n if (request_tag is not None):\n # Filtering on slug\n slug_query = Tag.objects.filter(slug = request_tag)\n slug_connect = ArticleTag.objects.filter(tag = slug_query).values('article_id')\n queryset = Article.objects.filter(id__in = slug_connect, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # No filtering at all, return default query\n queryset = Article.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n return queryset\n \n class Meta: \n API_LIMIT_PER_PAGE = 9\n queryset = 
Article.objects.filter(published_date__lte=timezone.now())\n resource_name = 'article/all'\n ordering = ['-published_date']\n include_absolute_url = True\n filtering = {\n 'featured' : ('exact',),\n 'published_date' : ('gte',),\n }\n\nclass ArticleLatestResource(ModelResource):\n author = fields.ToOneField(UserResource, 'created_by')\n \n class Meta:\n queryset = Article.objects.filter(published_date__lte=timezone.now())\n \n resource_name = 'article/latest'\n filtering = {\n 'featured': ('exact',)\n }\n ordering = ['-published_date']\n max_limit = 25\n def alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n if isinstance(data, dict): \n data['articles'] = copy(data['objects'])\n del(data['objects'])\n return data\n def dehydrate(self, bundle):\n bundle.data['slug'] = slugify(bundle.data['heading'])\n return bundle\n", "path": "apps/api/v0/article.py"}]} | 1,751 | 116 |
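
The first row above shows the full record shape. Each record is self-checking: `before_files` holds the pre-patch content of every touched file, so the `golden_diff` can be validated with `git apply`, exactly as the prompt instructs. A sketch of that check follows; the helper name, the temporary directory layout, and the use of `--check` are illustrative choices, not part of the dataset.

```python
import json
import pathlib
import subprocess
import tempfile

def golden_diff_applies(verification_info: str) -> bool:
    """Hypothetical helper: rebuild the pre-patch tree and dry-run `git apply`."""
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as tmp:
        root = pathlib.Path(tmp)
        # Recreate every file exactly as it was before the fix.
        for f in info["before_files"]:
            target = root / f["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"], encoding="utf-8")
        patch = root / "golden.patch"
        patch.write_text(info["golden_diff"], encoding="utf-8")
        # --check verifies the patch would apply cleanly without modifying files;
        # plain `git apply` works on an ordinary directory, no repo needed.
        result = subprocess.run(
            ["git", "apply", "--check", patch.name],
            cwd=root,
            capture_output=True,
            text=True,
        )
        return result.returncode == 0
```
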
gh_patches_debug_14108 | rasdani/github-patches | git_diff | wright-group__WrightTools-726 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Group is not defined in collection
https://github.com/wright-group/WrightTools/blob/ca056aa600f341501a99d2ea4d11f7d74047bc26/WrightTools/_open.py#L48
Statement will cause an attribute error. Not tested currently
</issue>
<code>
[start of WrightTools/_open.py]
1 """Generic open method for wt5 files."""
2
3
4 # --- import -------------------------------------------------------------------------------------
5
6
7 import posixpath
8
9 import h5py
10
11 from . import collection as wt_collection
12 from . import data as wt_data
13
14
15 # --- define -------------------------------------------------------------------------------------
16
17
18 __all__ = ["open"]
19
20
21 # --- functions ----------------------------------------------------------------------------------
22
23
24 def open(filepath, edit_local=False):
25 """Open any wt5 file, returning the top-level object (data or collection).
26
27 Parameters
28 ----------
29 filepath : string
30 Path to file.
31 edit_local : boolean (optional)
32 If True, the file itself will be opened for editing. Otherwise, a
33 copy will be created. Default is False.
34
35 Returns
36 -------
37 WrightTools Collection or Data
38 Root-level object in file.
39 """
40 f = h5py.File(filepath)
41 class_name = f[posixpath.sep].attrs["class"]
42 name = f[posixpath.sep].attrs["name"]
43 if class_name == "Data":
44 return wt_data.Data(filepath=filepath, name=name, edit_local=edit_local)
45 elif class_name == "Collection":
46 return wt_collection.Collection(filepath=filepath, name=name, edit_local=edit_local)
47 else:
48 return wt_collection.Group(filepath=filepath, name=name, edit_local=edit_local)
49
[end of WrightTools/_open.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/WrightTools/_open.py b/WrightTools/_open.py
--- a/WrightTools/_open.py
+++ b/WrightTools/_open.py
@@ -10,6 +10,7 @@
from . import collection as wt_collection
from . import data as wt_data
+from . import _group as wt_group
# --- define -------------------------------------------------------------------------------------
@@ -45,4 +46,4 @@
elif class_name == "Collection":
return wt_collection.Collection(filepath=filepath, name=name, edit_local=edit_local)
else:
- return wt_collection.Group(filepath=filepath, name=name, edit_local=edit_local)
+ return wt_group.Group(filepath=filepath, name=name, edit_local=edit_local)
| {"golden_diff": "diff --git a/WrightTools/_open.py b/WrightTools/_open.py\n--- a/WrightTools/_open.py\n+++ b/WrightTools/_open.py\n@@ -10,6 +10,7 @@\n \n from . import collection as wt_collection\n from . import data as wt_data\n+from . import _group as wt_group\n \n \n # --- define -------------------------------------------------------------------------------------\n@@ -45,4 +46,4 @@\n elif class_name == \"Collection\":\n return wt_collection.Collection(filepath=filepath, name=name, edit_local=edit_local)\n else:\n- return wt_collection.Group(filepath=filepath, name=name, edit_local=edit_local)\n+ return wt_group.Group(filepath=filepath, name=name, edit_local=edit_local)\n", "issue": "Group is not defined in collection\nhttps://github.com/wright-group/WrightTools/blob/ca056aa600f341501a99d2ea4d11f7d74047bc26/WrightTools/_open.py#L48\r\n\r\nStatement will cause an attribute error. Not tested currently\n", "before_files": [{"content": "\"\"\"Generic open method for wt5 files.\"\"\"\n\n\n# --- import -------------------------------------------------------------------------------------\n\n\nimport posixpath\n\nimport h5py\n\nfrom . import collection as wt_collection\nfrom . import data as wt_data\n\n\n# --- define -------------------------------------------------------------------------------------\n\n\n__all__ = [\"open\"]\n\n\n# --- functions ----------------------------------------------------------------------------------\n\n\ndef open(filepath, edit_local=False):\n \"\"\"Open any wt5 file, returning the top-level object (data or collection).\n\n Parameters\n ----------\n filepath : string\n Path to file.\n edit_local : boolean (optional)\n If True, the file itself will be opened for editing. Otherwise, a\n copy will be created. Default is False.\n\n Returns\n -------\n WrightTools Collection or Data\n Root-level object in file.\n \"\"\"\n f = h5py.File(filepath)\n class_name = f[posixpath.sep].attrs[\"class\"]\n name = f[posixpath.sep].attrs[\"name\"]\n if class_name == \"Data\":\n return wt_data.Data(filepath=filepath, name=name, edit_local=edit_local)\n elif class_name == \"Collection\":\n return wt_collection.Collection(filepath=filepath, name=name, edit_local=edit_local)\n else:\n return wt_collection.Group(filepath=filepath, name=name, edit_local=edit_local)\n", "path": "WrightTools/_open.py"}]} | 979 | 161 |
gh_patches_debug_5168 | rasdani/github-patches | git_diff | ivy-llc__ivy-13695 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
poisson
</issue>
<code>
[start of ivy/functional/frontends/jax/random.py]
1 # local
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes
4 from ivy.functional.frontends.jax.func_wrapper import (
5 to_ivy_arrays_and_back,
6 handle_jax_dtype,
7 )
8
9
10 @to_ivy_arrays_and_back
11 def PRNGKey(seed):
12 return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)
13
14
15 @handle_jax_dtype
16 @to_ivy_arrays_and_back
17 def uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):
18 return ivy.random_uniform(
19 low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])
20 )
21
22
23 @handle_jax_dtype
24 @to_ivy_arrays_and_back
25 def normal(key, shape=(), dtype=None):
26 return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))
27
28
29 def _get_seed(key):
30 key1, key2 = int(key[0]), int(key[1])
31 return ivy.to_scalar(int("".join(map(str, [key1, key2]))))
32
33
34 @handle_jax_dtype
35 @to_ivy_arrays_and_back
36 @with_unsupported_dtypes(
37 {
38 "0.3.14 and below": (
39 "float16",
40 "bfloat16",
41 )
42 },
43 "jax",
44 )
45 def beta(key, a, b, shape=None, dtype=None):
46 seed = _get_seed(key)
47 return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)
48
49
50 @handle_jax_dtype
51 @to_ivy_arrays_and_back
52 @with_unsupported_dtypes(
53 {
54 "0.3.14 and below": (
55 "float16",
56 "bfloat16",
57 )
58 },
59 "jax",
60 )
61 def dirichlet(key, alpha, shape=None, dtype="float32"):
62 seed = _get_seed(key)
63 alpha = ivy.astype(alpha, dtype)
64 return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)
65
[end of ivy/functional/frontends/jax/random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py
--- a/ivy/functional/frontends/jax/random.py
+++ b/ivy/functional/frontends/jax/random.py
@@ -62,3 +62,14 @@
seed = _get_seed(key)
alpha = ivy.astype(alpha, dtype)
return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)
+
+
+@handle_jax_dtype
+@to_ivy_arrays_and_back
+@with_unsupported_dtypes(
+ {"0.3.14 and below": ("unsigned", "int8", "int16")},
+ "jax",
+)
+def poisson(key, lam, shape=None, dtype=None):
+ seed = _get_seed(key)
+ return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)
| {"golden_diff": "diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py\n--- a/ivy/functional/frontends/jax/random.py\n+++ b/ivy/functional/frontends/jax/random.py\n@@ -62,3 +62,14 @@\n seed = _get_seed(key)\n alpha = ivy.astype(alpha, dtype)\n return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)\n+\n+\n+@handle_jax_dtype\n+@to_ivy_arrays_and_back\n+@with_unsupported_dtypes(\n+ {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n+ \"jax\",\n+)\n+def poisson(key, lam, shape=None, dtype=None):\n+ seed = _get_seed(key)\n+ return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)\n", "issue": "poisson\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n\n\ndef _get_seed(key):\n key1, key2 = int(key[0]), int(key[1])\n return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef beta(key, a, b, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef dirichlet(key, alpha, shape=None, dtype=\"float32\"):\n seed = _get_seed(key)\n alpha = ivy.astype(alpha, dtype)\n return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)\n", "path": "ivy/functional/frontends/jax/random.py"}]} | 1,163 | 208 |
gh_patches_debug_41621 | rasdani/github-patches | git_diff | watchdogpolska__feder-328 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CSV export of EmailLog
We have introduced message delivery statistics in ```feder.letters.logs```. We should add an export of all EmailLog data for a given monitoring, so that statistics or similar can be produced.
</issue>
<code>
[start of feder/letters/logs/views.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from braces.views import SelectRelatedMixin, PrefetchRelatedMixin
5 from cached_property import cached_property
6 from django.shortcuts import get_object_or_404
7 from django.views.generic import DetailView, ListView
8
9 from feder.cases.models import Case
10 from feder.letters.logs.models import EmailLog
11 from feder.main.mixins import AttrPermissionRequiredMixin
12 from feder.monitorings.models import Monitoring
13
14
15 class ListMonitoringMixin(AttrPermissionRequiredMixin, SelectRelatedMixin):
16 select_related = ['case']
17 paginate_by = 100
18 model = EmailLog
19 permission_attribute = 'case__monitoring'
20 permission_required = 'monitorings.view_log'
21
22 def get_permission_object(self):
23 return self.monitoring
24
25 def get_queryset(self):
26 return super(ListMonitoringMixin, self).get_queryset().filter(case__monitoring=self.monitoring).with_logrecord_count()
27
28 def get_context_data(self, **kwargs):
29 kwargs['monitoring'] = self.monitoring
30 return super(ListMonitoringMixin, self).get_context_data(**kwargs)
31
32
33 class EmailLogMonitoringListView(ListMonitoringMixin, ListView):
34 template_name_suffix = '_list_for_monitoring'
35 permission_required = 'monitorings.view_log'
36
37 @cached_property
38 def monitoring(self):
39 return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])
40
41
42 class EmailLogCaseListView(ListMonitoringMixin, ListView):
43 template_name_suffix = '_list_for_case'
44
45 @cached_property
46 def case(self):
47 return get_object_or_404(Case.objects.select_related('monitoring'),
48 pk=self.kwargs['case_pk'])
49
50 @cached_property
51 def monitoring(self):
52 return self.case.monitoring
53
54 def get_context_data(self, **kwargs):
55 kwargs['case'] = self.case
56 return super(EmailLogCaseListView, self).get_context_data(**kwargs)
57
58 def get_queryset(self):
59 return super(ListMonitoringMixin, self).get_queryset().filter(case=self.case)
60
61
62 class EmailLogDetailView(AttrPermissionRequiredMixin, PrefetchRelatedMixin,
63 SelectRelatedMixin, DetailView):
64 model = EmailLog
65 select_related = ['case__monitoring']
66 prefetch_related = ['logrecord_set']
67 permission_attribute = 'case__monitoring'
68 permission_required = 'monitorings.view_log'
69
[end of feder/letters/logs/views.py]
[start of feder/letters/logs/urls.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.conf.urls import url
5 from django.utils.translation import ugettext_lazy as _
6
7 from . import views
8
9 urlpatterns = [
10 url(_(r'^case-(?P<case_pk>[\d-]+)$'), views.EmailLogCaseListView.as_view(),
11 name="list"),
12 url(_(r'^monitoring-(?P<monitoring_pk>[\d-]+)$'), views.EmailLogMonitoringListView.as_view(),
13 name="list"),
14 url(_(r'^log-(?P<pk>[\d-]+)$'), views.EmailLogDetailView.as_view(),
15 name="detail"),
16 ]
17
[end of feder/letters/logs/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/feder/letters/logs/urls.py b/feder/letters/logs/urls.py
--- a/feder/letters/logs/urls.py
+++ b/feder/letters/logs/urls.py
@@ -11,6 +11,8 @@
name="list"),
url(_(r'^monitoring-(?P<monitoring_pk>[\d-]+)$'), views.EmailLogMonitoringListView.as_view(),
name="list"),
+ url(_(r'^monitoring-(?P<monitoring_pk>[\d-]+)/export$'), views.EmailLogMonitoringCsvView.as_view(),
+ name="export"),
url(_(r'^log-(?P<pk>[\d-]+)$'), views.EmailLogDetailView.as_view(),
name="detail"),
]
diff --git a/feder/letters/logs/views.py b/feder/letters/logs/views.py
--- a/feder/letters/logs/views.py
+++ b/feder/letters/logs/views.py
@@ -1,8 +1,12 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
+from django.utils import timezone
+import unicodecsv as csv
+
from braces.views import SelectRelatedMixin, PrefetchRelatedMixin
from cached_property import cached_property
+from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.views.generic import DetailView, ListView
@@ -10,7 +14,7 @@
from feder.letters.logs.models import EmailLog
from feder.main.mixins import AttrPermissionRequiredMixin
from feder.monitorings.models import Monitoring
-
+from django.views.generic.list import ListView
class ListMonitoringMixin(AttrPermissionRequiredMixin, SelectRelatedMixin):
select_related = ['case']
@@ -39,6 +43,61 @@
return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])
+class EmailLogMonitoringCsvView(ListMonitoringMixin, ListView):
+ permission_required = 'monitorings.view_log'
+
+ select_related = ['case', 'case__institution']
+
+ @cached_property
+ def monitoring(self):
+ return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])
+
+ def get(self, *args, **kwargs):
+ response = self._get_csv_response()
+ self._write_rows(response, self.get_queryset())
+ return response
+
+ @staticmethod
+ def _get_base_model_field_names(queryset):
+ opts = queryset.model._meta
+ return [field.name for field in opts.fields if field.related_model is None]
+
+ def _get_csv_response(self):
+ csv_response = HttpResponse(content_type='text/csv')
+ current_time = timezone.now()
+ filename = 'email_log_{0}-{1}-{2}.csv'.format(self.monitoring.id,
+ current_time.strftime('%Y_%m_%d-%H_%M_%S'),
+ current_time.tzname()
+ )
+ csv_response['Content-Disposition'] = "attachment;filename={0}".format(filename)
+ return csv_response
+
+ def _write_rows(self, response, queryset):
+ writer = csv.writer(response)
+
+ # automatically add all fields from base table/model
+ base_field_names = self._get_base_model_field_names(queryset)
+
+ # print header row
+ writer.writerow(base_field_names +
+ [
+ 'case id',
+ 'case email',
+ 'institution',
+ 'institution id',
+ 'monitoring id']
+ )
+
+ for obj in queryset:
+ writer.writerow(
+ [getattr(obj, field) for field in base_field_names] + [
+ obj.case.id,
+ obj.case.email,
+ obj.case.institution.name,
+ obj.case.institution_id,
+ obj.case.monitoring_id,
+ ])
+
class EmailLogCaseListView(ListMonitoringMixin, ListView):
template_name_suffix = '_list_for_case'
| {"golden_diff": "diff --git a/feder/letters/logs/urls.py b/feder/letters/logs/urls.py\n--- a/feder/letters/logs/urls.py\n+++ b/feder/letters/logs/urls.py\n@@ -11,6 +11,8 @@\n name=\"list\"),\n url(_(r'^monitoring-(?P<monitoring_pk>[\\d-]+)$'), views.EmailLogMonitoringListView.as_view(),\n name=\"list\"),\n+ url(_(r'^monitoring-(?P<monitoring_pk>[\\d-]+)/export$'), views.EmailLogMonitoringCsvView.as_view(),\n+ name=\"export\"),\n url(_(r'^log-(?P<pk>[\\d-]+)$'), views.EmailLogDetailView.as_view(),\n name=\"detail\"),\n ]\ndiff --git a/feder/letters/logs/views.py b/feder/letters/logs/views.py\n--- a/feder/letters/logs/views.py\n+++ b/feder/letters/logs/views.py\n@@ -1,8 +1,12 @@\n # -*- coding: utf-8 -*-\n from __future__ import unicode_literals\n \n+from django.utils import timezone\n+import unicodecsv as csv\n+\n from braces.views import SelectRelatedMixin, PrefetchRelatedMixin\n from cached_property import cached_property\n+from django.http import HttpResponse\n from django.shortcuts import get_object_or_404\n from django.views.generic import DetailView, ListView\n \n@@ -10,7 +14,7 @@\n from feder.letters.logs.models import EmailLog\n from feder.main.mixins import AttrPermissionRequiredMixin\n from feder.monitorings.models import Monitoring\n-\n+from django.views.generic.list import ListView\n \n class ListMonitoringMixin(AttrPermissionRequiredMixin, SelectRelatedMixin):\n select_related = ['case']\n@@ -39,6 +43,61 @@\n return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])\n \n \n+class EmailLogMonitoringCsvView(ListMonitoringMixin, ListView):\n+ permission_required = 'monitorings.view_log'\n+\n+ select_related = ['case', 'case__institution']\n+\n+ @cached_property\n+ def monitoring(self):\n+ return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])\n+\n+ def get(self, *args, **kwargs):\n+ response = self._get_csv_response()\n+ self._write_rows(response, self.get_queryset())\n+ return response\n+\n+ @staticmethod\n+ def _get_base_model_field_names(queryset):\n+ opts = queryset.model._meta\n+ return [field.name for field in opts.fields if field.related_model is None]\n+\n+ def _get_csv_response(self):\n+ csv_response = HttpResponse(content_type='text/csv')\n+ current_time = timezone.now()\n+ filename = 'email_log_{0}-{1}-{2}.csv'.format(self.monitoring.id,\n+ current_time.strftime('%Y_%m_%d-%H_%M_%S'),\n+ current_time.tzname()\n+ )\n+ csv_response['Content-Disposition'] = \"attachment;filename={0}\".format(filename)\n+ return csv_response\n+\n+ def _write_rows(self, response, queryset):\n+ writer = csv.writer(response)\n+\n+ # automatically add all fields from base table/model\n+ base_field_names = self._get_base_model_field_names(queryset)\n+\n+ # print header row\n+ writer.writerow(base_field_names +\n+ [\n+ 'case id',\n+ 'case email',\n+ 'institution',\n+ 'institution id',\n+ 'monitoring id']\n+ )\n+\n+ for obj in queryset:\n+ writer.writerow(\n+ [getattr(obj, field) for field in base_field_names] + [\n+ obj.case.id,\n+ obj.case.email,\n+ obj.case.institution.name,\n+ obj.case.institution_id,\n+ obj.case.monitoring_id,\n+ ])\n+\n class EmailLogCaseListView(ListMonitoringMixin, ListView):\n template_name_suffix = '_list_for_case'\n", "issue": "Eksport w CSV EmailLog \nWprowadzili\u015bmy w ```feder.letters.logs``` statystyki dostarczania wiadomo\u015bci. 
Nale\u017cy wprowadzi\u0107 zestawienie wszystkich danych z EmailLog dla danego monitoringu, aby mo\u017cna by\u0142o zrobi\u0107 statystyk\u0119 czy co\u015b.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom braces.views import SelectRelatedMixin, PrefetchRelatedMixin\nfrom cached_property import cached_property\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import DetailView, ListView\n\nfrom feder.cases.models import Case\nfrom feder.letters.logs.models import EmailLog\nfrom feder.main.mixins import AttrPermissionRequiredMixin\nfrom feder.monitorings.models import Monitoring\n\n\nclass ListMonitoringMixin(AttrPermissionRequiredMixin, SelectRelatedMixin):\n select_related = ['case']\n paginate_by = 100\n model = EmailLog\n permission_attribute = 'case__monitoring'\n permission_required = 'monitorings.view_log'\n\n def get_permission_object(self):\n return self.monitoring\n\n def get_queryset(self):\n return super(ListMonitoringMixin, self).get_queryset().filter(case__monitoring=self.monitoring).with_logrecord_count()\n\n def get_context_data(self, **kwargs):\n kwargs['monitoring'] = self.monitoring\n return super(ListMonitoringMixin, self).get_context_data(**kwargs)\n\n\nclass EmailLogMonitoringListView(ListMonitoringMixin, ListView):\n template_name_suffix = '_list_for_monitoring'\n permission_required = 'monitorings.view_log'\n\n @cached_property\n def monitoring(self):\n return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])\n\n\nclass EmailLogCaseListView(ListMonitoringMixin, ListView):\n template_name_suffix = '_list_for_case'\n\n @cached_property\n def case(self):\n return get_object_or_404(Case.objects.select_related('monitoring'),\n pk=self.kwargs['case_pk'])\n\n @cached_property\n def monitoring(self):\n return self.case.monitoring\n\n def get_context_data(self, **kwargs):\n kwargs['case'] = self.case\n return super(EmailLogCaseListView, self).get_context_data(**kwargs)\n\n def get_queryset(self):\n return super(ListMonitoringMixin, self).get_queryset().filter(case=self.case)\n\n\nclass EmailLogDetailView(AttrPermissionRequiredMixin, PrefetchRelatedMixin,\n SelectRelatedMixin, DetailView):\n model = EmailLog\n select_related = ['case__monitoring']\n prefetch_related = ['logrecord_set']\n permission_attribute = 'case__monitoring'\n permission_required = 'monitorings.view_log'\n", "path": "feder/letters/logs/views.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import url\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom . import views\n\nurlpatterns = [\n url(_(r'^case-(?P<case_pk>[\\d-]+)$'), views.EmailLogCaseListView.as_view(),\n name=\"list\"),\n url(_(r'^monitoring-(?P<monitoring_pk>[\\d-]+)$'), views.EmailLogMonitoringListView.as_view(),\n name=\"list\"),\n url(_(r'^log-(?P<pk>[\\d-]+)$'), views.EmailLogDetailView.as_view(),\n name=\"detail\"),\n]\n", "path": "feder/letters/logs/urls.py"}]} | 1,435 | 861 |
gh_patches_debug_14332 | rasdani/github-patches | git_diff | scikit-hep__pyhf-638 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Automate deployment to PyPI
# Description
According to @lukasheinrich, the current workflow for deploying to PyPI is:
```
git checkout master
git pull
bumpversion patch
git commit
git push origin master --tags
```
This is a bit annoyingly manual and ideally should be done automatically.
Luckily, there is an [official PyPA GitHub action](https://discuss.python.org/t/official-github-action-for-publishing-to-pypi/1061) to do this:
https://github.com/pypa/gh-action-pypi-publish
However, we need GitHub actions for pyhf, so we have to wait.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from os import path
3 import sys
4
5 this_directory = path.abspath(path.dirname(__file__))
6 if sys.version_info.major < 3:
7 from io import open
8 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
9 long_description = readme_md.read()
10
11 extras_require = {
12 'tensorflow': ['tensorflow~=1.15', 'tensorflow-probability~=0.8', 'numpy~=1.16',],
13 'torch': ['torch~=1.2'],
14 'xmlio': ['uproot'],
15 'minuit': ['iminuit'],
16 'develop': [
17 'pyflakes',
18 'pytest~=3.5',
19 'pytest-cov>=2.5.1',
20 'pytest-mock',
21 'pytest-benchmark[histogram]',
22 'pytest-console-scripts',
23 'pydocstyle',
24 'coverage>=4.0', # coveralls
25 'matplotlib',
26 'jupyter',
27 'nbdime',
28 'uproot~=3.3',
29 'papermill~=1.0',
30 'nteract-scrapbook~=0.2',
31 'graphviz',
32 'bumpversion',
33 'sphinx',
34 'sphinxcontrib-bibtex',
35 'sphinxcontrib-napoleon',
36 'sphinx_rtd_theme',
37 'nbsphinx',
38 'sphinx-issues',
39 'm2r',
40 'jsonpatch',
41 'ipython',
42 'pre-commit',
43 'black;python_version>="3.6"', # Black is Python3 only
44 'twine',
45 'check-manifest',
46 ],
47 }
48 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
49
50
51 def _is_test_pypi():
52 """
53 Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and
54 set to true (c.f. .travis.yml)
55
56 The use_scm_version kwarg accepts a callable for the local_scheme
57 configuration parameter with argument "version". This can be replaced
58 with a lambda as the desired version structure is {next_version}.dev{distance}
59 c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy
60
61 As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version
62 controlled through bumpversion is used.
63 """
64 from os import getenv
65
66 return (
67 {'local_scheme': lambda version: ''}
68 if getenv('TESTPYPI_UPLOAD') == 'true'
69 else False
70 )
71
72
73 setup(
74 name='pyhf',
75 version='0.2.0',
76 description='(partial) pure python histfactory implementation',
77 long_description=long_description,
78 long_description_content_type='text/markdown',
79 url='https://github.com/diana-hep/pyhf',
80 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
81 author_email='[email protected], [email protected], [email protected]',
82 license='Apache',
83 keywords='physics fitting numpy scipy tensorflow pytorch',
84 classifiers=[
85 "Programming Language :: Python :: 2",
86 "Programming Language :: Python :: 2.7",
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.6",
89 "Programming Language :: Python :: 3.7",
90 ],
91 package_dir={'': 'src'},
92 packages=find_packages(where='src'),
93 include_package_data=True,
94 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
95 install_requires=[
96 'scipy', # requires numpy, which is required by pyhf and tensorflow
97 'click>=6.0', # for console scripts,
98 'tqdm', # for readxml
99 'six', # for modifiers
100 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
101 'jsonpatch',
102 'pyyaml', # for parsing CLI equal-delimited options
103 ],
104 extras_require=extras_require,
105 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
106 dependency_links=[],
107 use_scm_version=_is_test_pypi(),
108 )
109
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,8 +50,8 @@
def _is_test_pypi():
"""
- Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and
- set to true (c.f. .travis.yml)
+ Determine if the CI environment has IS_TESTPYPI defined and
+ set to true (c.f. .github/workflows/publish-package.yml)
The use_scm_version kwarg accepts a callable for the local_scheme
configuration parameter with argument "version". This can be replaced
@@ -65,7 +65,7 @@
return (
{'local_scheme': lambda version: ''}
- if getenv('TESTPYPI_UPLOAD') == 'true'
+ if getenv('IS_TESTPYPI') == 'true'
else False
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -50,8 +50,8 @@\n \n def _is_test_pypi():\n \"\"\"\n- Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n- set to true (c.f. .travis.yml)\n+ Determine if the CI environment has IS_TESTPYPI defined and\n+ set to true (c.f. .github/workflows/publish-package.yml)\n \n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n@@ -65,7 +65,7 @@\n \n return (\n {'local_scheme': lambda version: ''}\n- if getenv('TESTPYPI_UPLOAD') == 'true'\n+ if getenv('IS_TESTPYPI') == 'true'\n else False\n )\n", "issue": "Automate deployment to PyPI\n# Description\r\n\r\nAccording to @lukasheinrich, the current workflow for deploying to PyPI is:\r\n\r\n```\r\ngit checkout master\r\ngit pull\r\nbumpversion patch\r\ngit commit\r\ngit push origin master --tags\r\n```\r\n\r\nThis is a bit annoyingly manual and ideally should be done automatically.\r\n\r\nLuckily, there is an [official PyPA GitHub action](https://discuss.python.org/t/official-github-action-for-publishing-to-pypi/1061) to do this:\r\n\r\nhttps://github.com/pypa/gh-action-pypi-publish\r\n\r\nHowever, we need GitHub actions for pyhf, so we have to wait.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=1.15', 'tensorflow-probability~=0.8', 'numpy~=1.16',],\n 'torch': ['torch~=1.2'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython',\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n 'check-manifest',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n set to true (c.f. .travis.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. 
https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('TESTPYPI_UPLOAD') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.2.0',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py"}]} | 1,847 | 195 |
gh_patches_debug_59597 | rasdani/github-patches | git_diff | googleapis__python-bigquery-587 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
loosen opentelemetry dependencies
See Spanner PR: https://github.com/googleapis/python-spanner/pull/298
</issue>
<code>
[start of setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-api-core[grpc] >= 1.23.0, < 2.0.0dev",
33 "proto-plus >= 1.10.0",
34 "google-cloud-core >= 1.4.1, < 2.0dev",
35 "google-resumable-media >= 0.6.0, < 2.0dev",
36 "packaging >= 14.3",
37 "protobuf >= 3.12.0",
38 "requests >= 2.18.0, < 3.0.0dev",
39 ]
40 extras = {
41 "bqstorage": [
42 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
43 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
44 # installed, even though `google-cloud-bigquery-storage` specifies it
45 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
46 # See: https://github.com/googleapis/python-bigquery/issues/83 The
47 # grpc.Channel.close() method isn't added until 1.32.0.
48 # https://github.com/grpc/grpc/pull/15254
49 "grpcio >= 1.32.0, < 2.0dev",
50 "pyarrow >= 1.0.0, < 4.0dev",
51 ],
52 "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"],
53 "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"],
54 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
55 "opentelemetry": [
56 "opentelemetry-api==0.11b0",
57 "opentelemetry-sdk==0.11b0",
58 "opentelemetry-instrumentation==0.11b0",
59 ],
60 }
61
62 all_extras = []
63
64 for extra in extras:
65 # Exclude this extra from all to avoid overly strict dependencies on core
66 # libraries such as pyarrow.
67 # https://github.com/googleapis/python-bigquery/issues/563
68 if extra in {"bignumeric_type"}:
69 continue
70 all_extras.extend(extras[extra])
71
72 extras["all"] = all_extras
73
74 # Setup boilerplate below this line.
75
76 package_root = os.path.abspath(os.path.dirname(__file__))
77
78 readme_filename = os.path.join(package_root, "README.rst")
79 with io.open(readme_filename, encoding="utf-8") as readme_file:
80 readme = readme_file.read()
81
82 version = {}
83 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
84 exec(fp.read(), version)
85 version = version["__version__"]
86
87 # Only include packages under the 'google' namespace. Do not include tests,
88 # benchmarks, etc.
89 packages = [
90 package
91 for package in setuptools.PEP420PackageFinder.find()
92 if package.startswith("google")
93 ]
94
95 # Determine which namespaces are needed.
96 namespaces = ["google"]
97 if "google.cloud" in packages:
98 namespaces.append("google.cloud")
99
100
101 setuptools.setup(
102 name=name,
103 version=version,
104 description=description,
105 long_description=readme,
106 author="Google LLC",
107 author_email="[email protected]",
108 license="Apache 2.0",
109 url="https://github.com/googleapis/python-bigquery",
110 classifiers=[
111 release_status,
112 "Intended Audience :: Developers",
113 "License :: OSI Approved :: Apache Software License",
114 "Programming Language :: Python",
115 "Programming Language :: Python :: 3",
116 "Programming Language :: Python :: 3.6",
117 "Programming Language :: Python :: 3.7",
118 "Programming Language :: Python :: 3.8",
119 "Programming Language :: Python :: 3.9",
120 "Operating System :: OS Independent",
121 "Topic :: Internet",
122 ],
123 platforms="Posix; MacOS X; Windows",
124 packages=packages,
125 namespace_packages=namespaces,
126 install_requires=dependencies,
127 extras_require=extras,
128 python_requires=">=3.6, <3.10",
129 include_package_data=True,
130 zip_safe=False,
131 )
132
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -53,9 +53,9 @@
"bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"],
"tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
"opentelemetry": [
- "opentelemetry-api==0.11b0",
- "opentelemetry-sdk==0.11b0",
- "opentelemetry-instrumentation==0.11b0",
+ "opentelemetry-api >= 0.11b0",
+ "opentelemetry-sdk >= 0.11b0",
+ "opentelemetry-instrumentation >= 0.11b0",
],
}
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,9 +53,9 @@\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n- \"opentelemetry-api==0.11b0\",\n- \"opentelemetry-sdk==0.11b0\",\n- \"opentelemetry-instrumentation==0.11b0\",\n+ \"opentelemetry-api >= 0.11b0\",\n+ \"opentelemetry-sdk >= 0.11b0\",\n+ \"opentelemetry-instrumentation >= 0.11b0\",\n ],\n }\n", "issue": "loosen opentelemetry dependencies\nSee Spanner PR: https://github.com/googleapis/python-spanner/pull/298\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 2,014 | 191 |
gh_patches_debug_14536 | rasdani/github-patches | git_diff | mozmeao__snippets-service-864 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Filter by release channel on ASRSnippets raises an error
</issue>
<code>
[start of snippets/base/admin/filters.py]
1 from datetime import datetime, timedelta
2
3 from django.contrib import admin
4 from django.utils.encoding import force_text
5
6
7 class ModifiedFilter(admin.SimpleListFilter):
8 title = 'Last modified'
9 parameter_name = 'last_modified'
10
11 def lookups(self, request, model_admin):
12 return (
13 ('24', '24 hours'),
14 ('168', '7 days'),
15 ('336', '14 days'),
16 ('720', '30 days'),
17 ('all', 'All'),
18 )
19
20 def queryset(self, request, queryset):
21 value = self.value()
22 if not value or value == 'all':
23 return queryset
24
25 when = datetime.utcnow() - timedelta(hours=int(value))
26 return queryset.exclude(modified__lt=when)
27
28 def choices(self, cl):
29 for lookup, title in self.lookup_choices:
30 yield {
31 'selected': self.value() == force_text(lookup),
32 'query_string': cl.get_query_string({
33 self.parameter_name: lookup,
34 }, []),
35 'display': title,
36 }
37
38
39 class ChannelFilter(admin.SimpleListFilter):
40 title = 'Channel'
41 parameter_name = 'channel'
42
43 def lookups(self, request, model_admin):
44 return (
45 ('on_release', 'Release'),
46 ('on_esr', 'ESR'),
47 ('on_beta', 'Beta'),
48 ('on_aurora', 'Dev (Aurora)'),
49 ('on_nightly', 'Nightly'),
50 )
51
52 def queryset(self, request, queryset):
53 if self.value() is None:
54 return queryset
55
56 return queryset.filter(**{self.value(): True})
57
58
59 class ActivityStreamFilter(admin.SimpleListFilter):
60 title = 'Activity Stream'
61 parameter_name = 'is_activity_stream'
62
63 def lookups(self, request, model_admin):
64 return (
65 ('yes', 'Yes'),
66 ('no', 'No'),
67 )
68
69 def queryset(self, request, queryset):
70 if self.value() is None:
71 return queryset
72 elif self.value() == 'yes':
73 return queryset.filter(on_startpage_5=True)
74 elif self.value() == 'no':
75 return queryset.exclude(on_startpage_5=True)
76
[end of snippets/base/admin/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/snippets/base/admin/filters.py b/snippets/base/admin/filters.py
--- a/snippets/base/admin/filters.py
+++ b/snippets/base/admin/filters.py
@@ -3,6 +3,8 @@
from django.contrib import admin
from django.utils.encoding import force_text
+from snippets.base.managers import SnippetQuerySet
+
class ModifiedFilter(admin.SimpleListFilter):
title = 'Last modified'
@@ -53,7 +55,9 @@
if self.value() is None:
return queryset
- return queryset.filter(**{self.value(): True})
+ if isinstance(queryset, SnippetQuerySet):
+ return queryset.filter(**{self.value(): True})
+ return queryset.filter(**{f'target__{self.value()}': True})
class ActivityStreamFilter(admin.SimpleListFilter):
| {"golden_diff": "diff --git a/snippets/base/admin/filters.py b/snippets/base/admin/filters.py\n--- a/snippets/base/admin/filters.py\n+++ b/snippets/base/admin/filters.py\n@@ -3,6 +3,8 @@\n from django.contrib import admin\n from django.utils.encoding import force_text\n \n+from snippets.base.managers import SnippetQuerySet\n+\n \n class ModifiedFilter(admin.SimpleListFilter):\n title = 'Last modified'\n@@ -53,7 +55,9 @@\n if self.value() is None:\n return queryset\n \n- return queryset.filter(**{self.value(): True})\n+ if isinstance(queryset, SnippetQuerySet):\n+ return queryset.filter(**{self.value(): True})\n+ return queryset.filter(**{f'target__{self.value()}': True})\n \n \n class ActivityStreamFilter(admin.SimpleListFilter):\n", "issue": "Filter by release channel on ASRSnippets raises an error\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.contrib import admin\nfrom django.utils.encoding import force_text\n\n\nclass ModifiedFilter(admin.SimpleListFilter):\n title = 'Last modified'\n parameter_name = 'last_modified'\n\n def lookups(self, request, model_admin):\n return (\n ('24', '24 hours'),\n ('168', '7 days'),\n ('336', '14 days'),\n ('720', '30 days'),\n ('all', 'All'),\n )\n\n def queryset(self, request, queryset):\n value = self.value()\n if not value or value == 'all':\n return queryset\n\n when = datetime.utcnow() - timedelta(hours=int(value))\n return queryset.exclude(modified__lt=when)\n\n def choices(self, cl):\n for lookup, title in self.lookup_choices:\n yield {\n 'selected': self.value() == force_text(lookup),\n 'query_string': cl.get_query_string({\n self.parameter_name: lookup,\n }, []),\n 'display': title,\n }\n\n\nclass ChannelFilter(admin.SimpleListFilter):\n title = 'Channel'\n parameter_name = 'channel'\n\n def lookups(self, request, model_admin):\n return (\n ('on_release', 'Release'),\n ('on_esr', 'ESR'),\n ('on_beta', 'Beta'),\n ('on_aurora', 'Dev (Aurora)'),\n ('on_nightly', 'Nightly'),\n )\n\n def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n\n return queryset.filter(**{self.value(): True})\n\n\nclass ActivityStreamFilter(admin.SimpleListFilter):\n title = 'Activity Stream'\n parameter_name = 'is_activity_stream'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', 'Yes'),\n ('no', 'No'),\n )\n\n def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n elif self.value() == 'yes':\n return queryset.filter(on_startpage_5=True)\n elif self.value() == 'no':\n return queryset.exclude(on_startpage_5=True)\n", "path": "snippets/base/admin/filters.py"}]} | 1,167 | 182 |
gh_patches_debug_21120 | rasdani/github-patches | git_diff | chainer__chainer-242 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add type check to NonparameterizedLinear function
Related to #123
</issue>
<code>
[start of chainer/functions/nonparameterized_linear.py]
1 from chainer import cuda
2 from chainer import function
3 from chainer.functions import linear as linear_module
4
5
6 class NonparameterizedLinear(function.Function):
7
8 """Nonparameterized linear class.
9
10 .. seealso:: :class:`Linear`
11
12 """
13
14 def forward(self, x):
15 W = x[1]
16 b = None
17 if len(x) == 3:
18 b = x[2]
19 out_size, in_size = W.shape
20 func = linear_module.Linear(
21 in_size, out_size, initialW=W, initial_bias=b)
22 self.func = func
23 if any(isinstance(i, cuda.GPUArray) for i in x):
24 func.to_gpu()
25 return func.forward(x[:1])
26
27 def backward(self, x, gy):
28 func = self.func
29 func.zero_grads()
30 gx = func.backward(x[:1], gy)
31 if func.gb is None:
32 return (gx[0], func.gW)
33 return (gx[0], func.gW, func.gb)
34
35
36 def linear(x, W, b=None, stride=1, pad=0, use_cudnn=True):
37 """Nonparameterized linear function.
38
39 Args:
40 x (~chainer.Variable): Input variable.
41 W (~chainer.Variable): Weight variable.
42 b (~chainer.Variable): Bias variable.
43
44 Returns:
45 ~chainer.Variable: Output variable.
46
47 .. seealso:: :class:`Linear`
48
49 """
50
51 return NonparameterizedLinear()(x, W, b)
52
[end of chainer/functions/nonparameterized_linear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/nonparameterized_linear.py b/chainer/functions/nonparameterized_linear.py
--- a/chainer/functions/nonparameterized_linear.py
+++ b/chainer/functions/nonparameterized_linear.py
@@ -1,6 +1,9 @@
+import numpy
+
from chainer import cuda
from chainer import function
from chainer.functions import linear as linear_module
+from chainer.utils import type_check
class NonparameterizedLinear(function.Function):
@@ -11,6 +14,29 @@
"""
+ def check_type_forward(self, in_types):
+ type_check.expect(
+ 2 <= in_types.size(),
+ in_types.size() <= 3,
+ )
+ x_type = in_types[0]
+ w_type = in_types[1]
+
+ prod = type_check.Variable(numpy.prod, 'prod')
+ type_check.expect(
+ x_type.dtype == numpy.float32,
+ w_type.dtype == numpy.float32,
+ x_type.ndim >= 2,
+ w_type.ndim == 2,
+ prod(x_type.shape[1:]) == w_type.shape[1],
+ )
+ if in_types.size().eval() == 3:
+ b_type = in_types[2]
+ type_check.expect(
+ b_type.ndim == 1,
+ b_type.shape[0] == w_type.shape[0],
+ )
+
def forward(self, x):
W = x[1]
b = None
| {"golden_diff": "diff --git a/chainer/functions/nonparameterized_linear.py b/chainer/functions/nonparameterized_linear.py\n--- a/chainer/functions/nonparameterized_linear.py\n+++ b/chainer/functions/nonparameterized_linear.py\n@@ -1,6 +1,9 @@\n+import numpy\n+\n from chainer import cuda\n from chainer import function\n from chainer.functions import linear as linear_module\n+from chainer.utils import type_check\n \n \n class NonparameterizedLinear(function.Function):\n@@ -11,6 +14,29 @@\n \n \"\"\"\n \n+ def check_type_forward(self, in_types):\n+ type_check.expect(\n+ 2 <= in_types.size(),\n+ in_types.size() <= 3,\n+ )\n+ x_type = in_types[0]\n+ w_type = in_types[1]\n+\n+ prod = type_check.Variable(numpy.prod, 'prod')\n+ type_check.expect(\n+ x_type.dtype == numpy.float32,\n+ w_type.dtype == numpy.float32,\n+ x_type.ndim >= 2,\n+ w_type.ndim == 2,\n+ prod(x_type.shape[1:]) == w_type.shape[1],\n+ )\n+ if in_types.size().eval() == 3:\n+ b_type = in_types[2]\n+ type_check.expect(\n+ b_type.ndim == 1,\n+ b_type.shape[0] == w_type.shape[0],\n+ )\n+\n def forward(self, x):\n W = x[1]\n b = None\n", "issue": "Add type check to NonparameterizedLinear function\nRelated to #123\n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer import function\nfrom chainer.functions import linear as linear_module\n\n\nclass NonparameterizedLinear(function.Function):\n\n \"\"\"Nonparameterized linear class.\n\n .. seealso:: :class:`Linear`\n\n \"\"\"\n\n def forward(self, x):\n W = x[1]\n b = None\n if len(x) == 3:\n b = x[2]\n out_size, in_size = W.shape\n func = linear_module.Linear(\n in_size, out_size, initialW=W, initial_bias=b)\n self.func = func\n if any(isinstance(i, cuda.GPUArray) for i in x):\n func.to_gpu()\n return func.forward(x[:1])\n\n def backward(self, x, gy):\n func = self.func\n func.zero_grads()\n gx = func.backward(x[:1], gy)\n if func.gb is None:\n return (gx[0], func.gW)\n return (gx[0], func.gW, func.gb)\n\n\ndef linear(x, W, b=None, stride=1, pad=0, use_cudnn=True):\n \"\"\"Nonparameterized linear function.\n\n Args:\n x (~chainer.Variable): Input variable.\n W (~chainer.Variable): Weight variable.\n b (~chainer.Variable): Bias variable.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. seealso:: :class:`Linear`\n\n \"\"\"\n\n return NonparameterizedLinear()(x, W, b)\n", "path": "chainer/functions/nonparameterized_linear.py"}]} | 985 | 331 |
gh_patches_debug_63158 | rasdani/github-patches | git_diff | dotkom__onlineweb4-2101 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Users should be able to edit expired 'careeropportunity' from Dashboard
## What kind of an issue is this?
- Feature request
## What is the expected behaviour?
You should be able to click to edit from the list of expired careeropportunities in the Dashboard.
## Other information
This was requested by one of our users on email.
</issue>
<code>
[start of apps/careeropportunity/dashboard/views.py]
1 # -*- encoding: utf-8 -*-
2 import logging
3
4 from django.contrib import messages
5 from django.contrib.auth.decorators import login_required
6 from django.core.exceptions import PermissionDenied
7 from django.shortcuts import get_object_or_404, redirect, render
8 from django.utils import timezone
9 from guardian.decorators import permission_required
10
11 from apps.careeropportunity.forms import AddCareerOpportunityForm
12 from apps.careeropportunity.models import CareerOpportunity
13 from apps.dashboard.tools import get_base_context, has_access
14
15
16 @login_required
17 @permission_required('careeropportunity.view_careeropportunity', return_403=True)
18 def index(request):
19
20 if not has_access(request):
21 raise PermissionDenied
22
23 context = get_base_context(request)
24
25 # "cops" is short for "careeropportunities" which is a fucking long word
26 # "cop" is short for "careeropportunity" which also is a fucking long word
27 cops = CareerOpportunity.objects.all()
28 context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')
29 context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')
30
31 return render(request, 'careeropportunity/dashboard/index.html', context)
32
33
34 @login_required
35 @permission_required('careeropportunity.change_careeropportunity', return_403=True)
36 def detail(request, opportunity_id=None):
37 logger = logging.getLogger(__name__)
38 logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))
39
40 if not has_access(request):
41 raise PermissionDenied
42
43 context = get_base_context(request)
44 cop = None
45 if opportunity_id:
46 cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)
47 context['cop'] = cop
48 context['form'] = AddCareerOpportunityForm(instance=cop)
49 else:
50 context['form'] = AddCareerOpportunityForm()
51
52 if request.method == 'POST':
53 if cop:
54 form = AddCareerOpportunityForm(data=request.POST, instance=cop)
55 else:
56 form = AddCareerOpportunityForm(data=request.POST)
57
58 if form.is_valid():
59 form.save()
60 messages.success(request, 'La til ny karrieremulighet')
61 return redirect(index)
62 else:
63 context['form'] = form
64 messages.error(request,
65 'Skjemaet ble ikke korrekt utfylt. Se etter markerte felter for å se hva som gikk galt.')
66
67 return render(request, 'careeropportunity/dashboard/detail.html', context)
68
69
70 @login_required
71 @permission_required('careeropportunity.change_careeropportunity', return_403=True)
72 def delete(request, opportunity_id=None):
73 logger = logging.getLogger(__name__)
74 logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))
75 if not has_access(request):
76 raise PermissionDenied
77
78 cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)
79 cop.delete()
80 messages.success(request, 'Slettet karrieremuligheten')
81 return redirect(index)
82
[end of apps/careeropportunity/dashboard/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/careeropportunity/dashboard/views.py b/apps/careeropportunity/dashboard/views.py
--- a/apps/careeropportunity/dashboard/views.py
+++ b/apps/careeropportunity/dashboard/views.py
@@ -27,7 +27,7 @@
cops = CareerOpportunity.objects.all()
context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')
context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')
-
+ context['all'] = cops
return render(request, 'careeropportunity/dashboard/index.html', context)
| {"golden_diff": "diff --git a/apps/careeropportunity/dashboard/views.py b/apps/careeropportunity/dashboard/views.py\n--- a/apps/careeropportunity/dashboard/views.py\n+++ b/apps/careeropportunity/dashboard/views.py\n@@ -27,7 +27,7 @@\n cops = CareerOpportunity.objects.all()\n context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')\n context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')\n-\n+ context['all'] = cops\n return render(request, 'careeropportunity/dashboard/index.html', context)\n", "issue": "Users should be able to edit expired 'careeropportunity' from Dashboard\n## What kind of an issue is this?\r\n- Feature request\r\n\r\n## What is the expected behaviour?\r\n\r\nYou should be able to click to edit from the list of expired careeropportunities in the Dashboard.\r\n\r\n## Other information\r\n\r\nThis was requested by one of our users on email.\r\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import timezone\nfrom guardian.decorators import permission_required\n\nfrom apps.careeropportunity.forms import AddCareerOpportunityForm\nfrom apps.careeropportunity.models import CareerOpportunity\nfrom apps.dashboard.tools import get_base_context, has_access\n\n\n@login_required\n@permission_required('careeropportunity.view_careeropportunity', return_403=True)\ndef index(request):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n # \"cops\" is short for \"careeropportunities\" which is a fucking long word\n # \"cop\" is short for \"careeropportunity\" which also is a fucking long word\n cops = CareerOpportunity.objects.all()\n context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')\n context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')\n\n return render(request, 'careeropportunity/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef detail(request, opportunity_id=None):\n logger = logging.getLogger(__name__)\n logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n cop = None\n if opportunity_id:\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n context['cop'] = cop\n context['form'] = AddCareerOpportunityForm(instance=cop)\n else:\n context['form'] = AddCareerOpportunityForm()\n\n if request.method == 'POST':\n if cop:\n form = AddCareerOpportunityForm(data=request.POST, instance=cop)\n else:\n form = AddCareerOpportunityForm(data=request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'La til ny karrieremulighet')\n return redirect(index)\n else:\n context['form'] = form\n messages.error(request,\n 'Skjemaet ble ikke korrekt utfylt. 
Se etter markerte felter for \u00e5 se hva som gikk galt.')\n\n return render(request, 'careeropportunity/dashboard/detail.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef delete(request, opportunity_id=None):\n logger = logging.getLogger(__name__)\n logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))\n if not has_access(request):\n raise PermissionDenied\n\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n cop.delete()\n messages.success(request, 'Slettet karrieremuligheten')\n return redirect(index)\n", "path": "apps/careeropportunity/dashboard/views.py"}]} | 1,455 | 136 |
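
The fix itself is one line — exposing the unfiltered queryset as `context['all']` so the template can render edit links for expired opportunities too. A plain-Python restatement of the resulting context split (the real view does this with ORM filters on `CareerOpportunity.objects`):

```python
from datetime import datetime

def build_context(opportunities, now=None):
    """`opportunities`: any iterable of objects with `.end` (datetime) and `.id`."""
    now = now or datetime.utcnow()
    return {
        "cops": sorted((c for c in opportunities if c.end >= now),
                       key=lambda c: c.end),
        "archive": sorted((c for c in opportunities if c.end <= now),
                          key=lambda c: c.id, reverse=True),
        "all": list(opportunities),  # the addition: every item, expired or not
    }
```
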
gh_patches_debug_12142 | rasdani/github-patches | git_diff | safe-global__safe-config-service-90 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use different namespace and endpoint name for `/safe-apps`
The endpoint `/api/v1/safe-apps` is currently under the `v1` namespace and `safe-apps` endpoint name.
To align it better with the future endpoints the following should be changed:
- the namespace changes from `v1` to `safe-apps`
- the endpoint name changes from `safe-apps` to `list`
This results in a reverse url resolution with `safe-apps:list` instead of `v1:safe-apps`
</issue>
<code>
[start of src/config/urls.py]
1 from django.contrib import admin
2 from django.http import HttpResponse
3 from django.urls import include, path, re_path
4 from drf_yasg.views import get_schema_view
5 from rest_framework import permissions
6
7 schema_view = get_schema_view(
8 validators=["flex", "ssv"],
9 public=True,
10 permission_classes=(permissions.AllowAny,),
11 )
12
13 urlpatterns = [
14 path("api/v1/", include("safe_apps.urls", namespace="v1")),
15 path("api/v1/", include("chains.urls", namespace="chains")),
16 path("admin/", admin.site.urls),
17 path("check/", lambda request: HttpResponse("Ok"), name="check"),
18 re_path(
19 r"^swagger(?P<format>\.json|\.yaml)$",
20 schema_view.without_ui(cache_timeout=0),
21 name="schema-json",
22 ),
23 re_path(
24 r"^$",
25 schema_view.with_ui("swagger", cache_timeout=0),
26 name="schema-swagger-ui",
27 ),
28 ]
29
[end of src/config/urls.py]
[start of src/safe_apps/urls.py]
1 from django.urls import path
2
3 from .views import SafeAppsListView
4
5 app_name = "apps"
6
7 urlpatterns = [
8 path("safe-apps/", SafeAppsListView.as_view(), name="safe-apps"),
9 ]
10
[end of src/safe_apps/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/config/urls.py b/src/config/urls.py
--- a/src/config/urls.py
+++ b/src/config/urls.py
@@ -11,7 +11,7 @@
)
urlpatterns = [
- path("api/v1/", include("safe_apps.urls", namespace="v1")),
+ path("api/v1/", include("safe_apps.urls", namespace="safe-apps")),
path("api/v1/", include("chains.urls", namespace="chains")),
path("admin/", admin.site.urls),
path("check/", lambda request: HttpResponse("Ok"), name="check"),
diff --git a/src/safe_apps/urls.py b/src/safe_apps/urls.py
--- a/src/safe_apps/urls.py
+++ b/src/safe_apps/urls.py
@@ -5,5 +5,5 @@
app_name = "apps"
urlpatterns = [
- path("safe-apps/", SafeAppsListView.as_view(), name="safe-apps"),
+ path("safe-apps/", SafeAppsListView.as_view(), name="list"),
]
| {"golden_diff": "diff --git a/src/config/urls.py b/src/config/urls.py\n--- a/src/config/urls.py\n+++ b/src/config/urls.py\n@@ -11,7 +11,7 @@\n )\n \n urlpatterns = [\n- path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n+ path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"safe-apps\")),\n path(\"api/v1/\", include(\"chains.urls\", namespace=\"chains\")),\n path(\"admin/\", admin.site.urls),\n path(\"check/\", lambda request: HttpResponse(\"Ok\"), name=\"check\"),\ndiff --git a/src/safe_apps/urls.py b/src/safe_apps/urls.py\n--- a/src/safe_apps/urls.py\n+++ b/src/safe_apps/urls.py\n@@ -5,5 +5,5 @@\n app_name = \"apps\"\n \n urlpatterns = [\n- path(\"safe-apps/\", SafeAppsListView.as_view(), name=\"safe-apps\"),\n+ path(\"safe-apps/\", SafeAppsListView.as_view(), name=\"list\"),\n ]\n", "issue": "Use different namespace and endpoint name for `/safe-apps`\nThe endpoint `/api/v1/safe-apps` is currently under the `v1` namespace and `safe-apps` endpoint name.\r\n\r\nTo align it better with the future endpoints the following should be changed:\r\n\r\n- the namespace changes from `v1` to `safe-apps`\r\n- the endpoint name changes from `safe-apps` to `list`\r\n\r\nThis results in a reverse url resolution with `safe-apps:list` instead of `v1:safe-apps`\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.http import HttpResponse\nfrom django.urls import include, path, re_path\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\n\nschema_view = get_schema_view(\n validators=[\"flex\", \"ssv\"],\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n path(\"api/v1/\", include(\"chains.urls\", namespace=\"chains\")),\n path(\"admin/\", admin.site.urls),\n path(\"check/\", lambda request: HttpResponse(\"Ok\"), name=\"check\"),\n re_path(\n r\"^swagger(?P<format>\\.json|\\.yaml)$\",\n schema_view.without_ui(cache_timeout=0),\n name=\"schema-json\",\n ),\n re_path(\n r\"^$\",\n schema_view.with_ui(\"swagger\", cache_timeout=0),\n name=\"schema-swagger-ui\",\n ),\n]\n", "path": "src/config/urls.py"}, {"content": "from django.urls import path\n\nfrom .views import SafeAppsListView\n\napp_name = \"apps\"\n\nurlpatterns = [\n path(\"safe-apps/\", SafeAppsListView.as_view(), name=\"safe-apps\"),\n]\n", "path": "src/safe_apps/urls.py"}]} | 979 | 229 |
gh_patches_debug_41061 | rasdani/github-patches | git_diff | streamlink__streamlink-3019 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] BTV plugin needs updating
## Bug Report
- [x] This is a bug report and I have read the contribution guidelines.
### Description
The location of the BTV livestream has moved to https://btvplus.bg/live/
**Edit**: Livestreaming no longer requires a user to login, so that can be removed from the plugin info page.
### Expected / Actual behavior
Streamlink should be able to handle the link.
### Reproduction steps / Explicit stream URLs to test
1. streamlink https://btvplus.bg/live/ best
2. error: No plugin can handle URL: https://btvplus.bg/live/
</issue>
<code>
[start of src/streamlink/plugins/btv.py]
1 from __future__ import print_function
2 import re
3
4 from streamlink import PluginError
5 from streamlink.plugin import Plugin
6 from streamlink.plugin.api import validate
7 from streamlink.stream import HLSStream
8 from streamlink.utils import parse_json
9 from streamlink.plugin import PluginArgument, PluginArguments
10
11
12 class BTV(Plugin):
13 arguments = PluginArguments(
14 PluginArgument(
15 "username",
16 metavar="USERNAME",
17 requires=["password"],
18 help="""
19 A BTV username required to access any stream.
20 """
21 ),
22 PluginArgument(
23 "password",
24 sensitive=True,
25 metavar="PASSWORD",
26 help="""
27 A BTV account password to use with --btv-username.
28 """
29 )
30 )
31 url_re = re.compile(r"https?://(?:www\.)?btv\.bg/live/?")
32
33 api_url = "http://www.btv.bg/lbin/global/player_config.php"
34 check_login_url = "http://www.btv.bg/lbin/userRegistration/check_user_login.php"
35 login_url = "https://www.btv.bg/bin/registration2/login.php?action=login&settings=0"
36
37 media_id_re = re.compile(r"media_id=(\d+)")
38 src_re = re.compile(r"src: \"(http.*?)\"")
39 api_schema = validate.Schema(
40 validate.all(
41 {"status": "ok", "config": validate.text},
42 validate.get("config"),
43 validate.all(
44 validate.transform(src_re.search),
45 validate.any(
46 None,
47 validate.get(1),
48 validate.url()
49 )
50 )
51 )
52 )
53
54 @classmethod
55 def can_handle_url(cls, url):
56 return cls.url_re.match(url) is not None
57
58 def login(self, username, password):
59 res = self.session.http.post(self.login_url, data={"username": username, "password": password})
60 if "success_logged_in" in res.text:
61 return True
62 else:
63 return False
64
65 def get_hls_url(self, media_id):
66 res = self.session.http.get(self.api_url, params=dict(media_id=media_id))
67 try:
68 return parse_json(res.text, schema=self.api_schema)
69 except PluginError:
70 return
71
72 def _get_streams(self):
73 if not self.options.get("username") or not self.options.get("password"):
74 self.logger.error("BTV requires registration, set the username and password"
75 " with --btv-username and --btv-password")
76 elif self.login(self.options.get("username"), self.options.get("password")):
77 res = self.session.http.get(self.url)
78 media_match = self.media_id_re.search(res.text)
79 media_id = media_match and media_match.group(1)
80 if media_id:
81 self.logger.debug("Found media id: {0}", media_id)
82 stream_url = self.get_hls_url(media_id)
83 if stream_url:
84 return HLSStream.parse_variant_playlist(self.session, stream_url)
85 else:
86 self.logger.error("Login failed, a valid username and password is required")
87
88
89 __plugin__ = BTV
90
[end of src/streamlink/plugins/btv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py
--- a/src/streamlink/plugins/btv.py
+++ b/src/streamlink/plugins/btv.py
@@ -1,38 +1,30 @@
-from __future__ import print_function
+import argparse
+import logging
import re
-from streamlink import PluginError
-from streamlink.plugin import Plugin
+from streamlink.plugin import Plugin, PluginArguments, PluginArgument
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
from streamlink.utils import parse_json
-from streamlink.plugin import PluginArgument, PluginArguments
+
+log = logging.getLogger(__name__)
class BTV(Plugin):
arguments = PluginArguments(
PluginArgument(
"username",
- metavar="USERNAME",
- requires=["password"],
- help="""
- A BTV username required to access any stream.
- """
+ help=argparse.SUPPRESS
),
PluginArgument(
"password",
sensitive=True,
- metavar="PASSWORD",
- help="""
- A BTV account password to use with --btv-username.
- """
+ help=argparse.SUPPRESS
)
)
- url_re = re.compile(r"https?://(?:www\.)?btv\.bg/live/?")
- api_url = "http://www.btv.bg/lbin/global/player_config.php"
- check_login_url = "http://www.btv.bg/lbin/userRegistration/check_user_login.php"
- login_url = "https://www.btv.bg/bin/registration2/login.php?action=login&settings=0"
+ url_re = re.compile(r"https?://(?:www\.)?btvplus\.bg/live/?")
+ api_url = "https://btvplus.bg/lbin/v3/btvplus/player_config.php"
media_id_re = re.compile(r"media_id=(\d+)")
src_re = re.compile(r"src: \"(http.*?)\"")
@@ -55,35 +47,19 @@
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
- def login(self, username, password):
- res = self.session.http.post(self.login_url, data={"username": username, "password": password})
- if "success_logged_in" in res.text:
- return True
- else:
- return False
-
def get_hls_url(self, media_id):
res = self.session.http.get(self.api_url, params=dict(media_id=media_id))
- try:
- return parse_json(res.text, schema=self.api_schema)
- except PluginError:
- return
+ return parse_json(res.text, schema=self.api_schema)
def _get_streams(self):
- if not self.options.get("username") or not self.options.get("password"):
- self.logger.error("BTV requires registration, set the username and password"
- " with --btv-username and --btv-password")
- elif self.login(self.options.get("username"), self.options.get("password")):
- res = self.session.http.get(self.url)
- media_match = self.media_id_re.search(res.text)
- media_id = media_match and media_match.group(1)
- if media_id:
- self.logger.debug("Found media id: {0}", media_id)
- stream_url = self.get_hls_url(media_id)
- if stream_url:
- return HLSStream.parse_variant_playlist(self.session, stream_url)
- else:
- self.logger.error("Login failed, a valid username and password is required")
+ res = self.session.http.get(self.url)
+ media_match = self.media_id_re.search(res.text)
+ media_id = media_match and media_match.group(1)
+ if media_id:
+ log.debug("Found media id: {0}", media_id)
+ stream_url = self.get_hls_url(media_id)
+ if stream_url:
+ return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = BTV
| {"golden_diff": "diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py\n--- a/src/streamlink/plugins/btv.py\n+++ b/src/streamlink/plugins/btv.py\n@@ -1,38 +1,30 @@\n-from __future__ import print_function\n+import argparse\n+import logging\n import re\n \n-from streamlink import PluginError\n-from streamlink.plugin import Plugin\n+from streamlink.plugin import Plugin, PluginArguments, PluginArgument\n from streamlink.plugin.api import validate\n from streamlink.stream import HLSStream\n from streamlink.utils import parse_json\n-from streamlink.plugin import PluginArgument, PluginArguments\n+\n+log = logging.getLogger(__name__)\n \n \n class BTV(Plugin):\n arguments = PluginArguments(\n PluginArgument(\n \"username\",\n- metavar=\"USERNAME\",\n- requires=[\"password\"],\n- help=\"\"\"\n- A BTV username required to access any stream.\n- \"\"\"\n+ help=argparse.SUPPRESS\n ),\n PluginArgument(\n \"password\",\n sensitive=True,\n- metavar=\"PASSWORD\",\n- help=\"\"\"\n- A BTV account password to use with --btv-username.\n- \"\"\"\n+ help=argparse.SUPPRESS\n )\n )\n- url_re = re.compile(r\"https?://(?:www\\.)?btv\\.bg/live/?\")\n \n- api_url = \"http://www.btv.bg/lbin/global/player_config.php\"\n- check_login_url = \"http://www.btv.bg/lbin/userRegistration/check_user_login.php\"\n- login_url = \"https://www.btv.bg/bin/registration2/login.php?action=login&settings=0\"\n+ url_re = re.compile(r\"https?://(?:www\\.)?btvplus\\.bg/live/?\")\n+ api_url = \"https://btvplus.bg/lbin/v3/btvplus/player_config.php\"\n \n media_id_re = re.compile(r\"media_id=(\\d+)\")\n src_re = re.compile(r\"src: \\\"(http.*?)\\\"\")\n@@ -55,35 +47,19 @@\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n \n- def login(self, username, password):\n- res = self.session.http.post(self.login_url, data={\"username\": username, \"password\": password})\n- if \"success_logged_in\" in res.text:\n- return True\n- else:\n- return False\n-\n def get_hls_url(self, media_id):\n res = self.session.http.get(self.api_url, params=dict(media_id=media_id))\n- try:\n- return parse_json(res.text, schema=self.api_schema)\n- except PluginError:\n- return\n+ return parse_json(res.text, schema=self.api_schema)\n \n def _get_streams(self):\n- if not self.options.get(\"username\") or not self.options.get(\"password\"):\n- self.logger.error(\"BTV requires registration, set the username and password\"\n- \" with --btv-username and --btv-password\")\n- elif self.login(self.options.get(\"username\"), self.options.get(\"password\")):\n- res = self.session.http.get(self.url)\n- media_match = self.media_id_re.search(res.text)\n- media_id = media_match and media_match.group(1)\n- if media_id:\n- self.logger.debug(\"Found media id: {0}\", media_id)\n- stream_url = self.get_hls_url(media_id)\n- if stream_url:\n- return HLSStream.parse_variant_playlist(self.session, stream_url)\n- else:\n- self.logger.error(\"Login failed, a valid username and password is required\")\n+ res = self.session.http.get(self.url)\n+ media_match = self.media_id_re.search(res.text)\n+ media_id = media_match and media_match.group(1)\n+ if media_id:\n+ log.debug(\"Found media id: {0}\", media_id)\n+ stream_url = self.get_hls_url(media_id)\n+ if stream_url:\n+ return HLSStream.parse_variant_playlist(self.session, stream_url)\n \n \n __plugin__ = BTV\n", "issue": "[bug] BTV plugin needs updating\n## Bug Report\r\n- [x] This is a bug report and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\nThe location of the BTV 
livestream has moved to https://btvplus.bg/live/\r\n**Edit**: Livestreaming no longer requires a user to login, so that can be removed from the plugin info page.\r\n\r\n\r\n### Expected / Actual behavior\r\nStreamlink should be able to handle the link.\r\n\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n1. streamlink https://btvplus.bg/live/ best \r\n2. error: No plugin can handle URL: https://btvplus.bg/live/\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink import PluginError\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import parse_json\nfrom streamlink.plugin import PluginArgument, PluginArguments\n\n\nclass BTV(Plugin):\n arguments = PluginArguments(\n PluginArgument(\n \"username\",\n metavar=\"USERNAME\",\n requires=[\"password\"],\n help=\"\"\"\n A BTV username required to access any stream.\n \"\"\"\n ),\n PluginArgument(\n \"password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"\"\"\n A BTV account password to use with --btv-username.\n \"\"\"\n )\n )\n url_re = re.compile(r\"https?://(?:www\\.)?btv\\.bg/live/?\")\n\n api_url = \"http://www.btv.bg/lbin/global/player_config.php\"\n check_login_url = \"http://www.btv.bg/lbin/userRegistration/check_user_login.php\"\n login_url = \"https://www.btv.bg/bin/registration2/login.php?action=login&settings=0\"\n\n media_id_re = re.compile(r\"media_id=(\\d+)\")\n src_re = re.compile(r\"src: \\\"(http.*?)\\\"\")\n api_schema = validate.Schema(\n validate.all(\n {\"status\": \"ok\", \"config\": validate.text},\n validate.get(\"config\"),\n validate.all(\n validate.transform(src_re.search),\n validate.any(\n None,\n validate.get(1),\n validate.url()\n )\n )\n )\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def login(self, username, password):\n res = self.session.http.post(self.login_url, data={\"username\": username, \"password\": password})\n if \"success_logged_in\" in res.text:\n return True\n else:\n return False\n\n def get_hls_url(self, media_id):\n res = self.session.http.get(self.api_url, params=dict(media_id=media_id))\n try:\n return parse_json(res.text, schema=self.api_schema)\n except PluginError:\n return\n\n def _get_streams(self):\n if not self.options.get(\"username\") or not self.options.get(\"password\"):\n self.logger.error(\"BTV requires registration, set the username and password\"\n \" with --btv-username and --btv-password\")\n elif self.login(self.options.get(\"username\"), self.options.get(\"password\")):\n res = self.session.http.get(self.url)\n media_match = self.media_id_re.search(res.text)\n media_id = media_match and media_match.group(1)\n if media_id:\n self.logger.debug(\"Found media id: {0}\", media_id)\n stream_url = self.get_hls_url(media_id)\n if stream_url:\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n else:\n self.logger.error(\"Login failed, a valid username and password is required\")\n\n\n__plugin__ = BTV\n", "path": "src/streamlink/plugins/btv.py"}]} | 1,510 | 891 |
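
A quick, runnable check that the updated `url_re` from this patch accepts the new host and rejects the old one — the pattern is copied verbatim from the diff:

```python
import re

url_re = re.compile(r"https?://(?:www\.)?btvplus\.bg/live/?")

assert url_re.match("https://btvplus.bg/live/")
assert url_re.match("http://www.btvplus.bg/live")
assert not url_re.match("https://btv.bg/live/")  # old location no longer matches
```
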
gh_patches_debug_17070 | rasdani/github-patches | git_diff | xonsh__xonsh-341 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
xonsh dies if the prompt raises an exception
If a function in the prompt raises an exception, it kills xonsh. I would expect the error to be displayed, but not kill the shell.
</issue>
<code>
[start of xonsh/base_shell.py]
1 """The base class for xonsh shell"""
2 import os
3 import sys
4 import builtins
5 import traceback
6
7 from xonsh.execer import Execer
8 from xonsh.tools import XonshError, escape_windows_title_string
9 from xonsh.tools import ON_WINDOWS
10 from xonsh.completer import Completer
11 from xonsh.environ import multiline_prompt, format_prompt
12
13
14 class BaseShell(object):
15 """The xonsh shell."""
16
17 def __init__(self, execer, ctx, **kwargs):
18 super().__init__(**kwargs)
19 self.execer = execer
20 self.ctx = ctx
21 self.completer = Completer()
22 self.buffer = []
23 self.need_more_lines = False
24 self.mlprompt = None
25
26 def emptyline(self):
27 """Called when an empty line has been entered."""
28 self.need_more_lines = False
29 self.default('')
30
31 def precmd(self, line):
32 """Called just before execution of line."""
33 return line if self.need_more_lines else line.lstrip()
34
35 def default(self, line):
36 """Implements code execution."""
37 line = line if line.endswith('\n') else line + '\n'
38 code = self.push(line)
39 if code is None:
40 return
41 try:
42 self.execer.exec(code, mode='single', glbs=self.ctx) # no locals
43 except XonshError as e:
44 print(e.args[0], file=sys.stderr)
45 except:
46 _print_exception()
47 if builtins.__xonsh_exit__:
48 return True
49
50 def push(self, line):
51 """Pushes a line onto the buffer and compiles the code in a way that
52 enables multiline input.
53 """
54 code = None
55 self.buffer.append(line)
56 if self.need_more_lines:
57 return code
58 src = ''.join(self.buffer)
59 try:
60 code = self.execer.compile(src,
61 mode='single',
62 glbs=None,
63 locs=self.ctx)
64 self.reset_buffer()
65 except SyntaxError:
66 if line == '\n':
67 self.reset_buffer()
68 _print_exception()
69 return None
70 self.need_more_lines = True
71 except:
72 self.reset_buffer()
73 _print_exception()
74 return None
75 return code
76
77 def reset_buffer(self):
78 """Resets the line buffer."""
79 self.buffer.clear()
80 self.need_more_lines = False
81 self.mlprompt = None
82
83 def settitle(self):
84 """Sets terminal title."""
85 env = builtins.__xonsh_env__
86 term = env.get('TERM', None)
87 if term is None or term == 'linux':
88 return
89 if 'TITLE' in env:
90 t = env['TITLE']
91 else:
92 return
93 t = format_prompt(t)
94 if ON_WINDOWS and 'ANSICON' not in env:
95 t = escape_windows_title_string(t)
96 os.system('title {}'.format(t))
97 else:
98 sys.stdout.write("\x1b]2;{0}\x07".format(t))
99
100 @property
101 def prompt(self):
102 """Obtains the current prompt string."""
103 if self.need_more_lines:
104 if self.mlprompt is None:
105 self.mlprompt = multiline_prompt()
106 return self.mlprompt
107 env = builtins.__xonsh_env__
108 if 'PROMPT' in env:
109 p = env['PROMPT']
110 p = format_prompt(p)
111 else:
112 p = "set '$PROMPT = ...' $ "
113 self.settitle()
114 return p
115
116 def _print_exception():
117 """Print exceptions with/without traceback."""
118 if not 'XONSH_SHOW_TRACEBACK' in builtins.__xonsh_env__:
119 sys.stderr.write('xonsh: For full traceback set: '
120 '$XONSH_SHOW_TRACEBACK=True\n')
121 if builtins.__xonsh_env__.get('XONSH_SHOW_TRACEBACK', False):
122 traceback.print_exc()
123 else:
124 exc_type, exc_value, exc_traceback = sys.exc_info()
125 exception_only = traceback.format_exception_only(exc_type, exc_value)
126 sys.stderr.write(''.join(exception_only))
127
[end of xonsh/base_shell.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xonsh/base_shell.py b/xonsh/base_shell.py
--- a/xonsh/base_shell.py
+++ b/xonsh/base_shell.py
@@ -102,12 +102,19 @@
"""Obtains the current prompt string."""
if self.need_more_lines:
if self.mlprompt is None:
- self.mlprompt = multiline_prompt()
+ try:
+ self.mlprompt = multiline_prompt()
+ except Exception:
+ _print_exception()
+ self.mlprompt = '<multiline prompt error> '
return self.mlprompt
env = builtins.__xonsh_env__
if 'PROMPT' in env:
p = env['PROMPT']
- p = format_prompt(p)
+ try:
+ p = format_prompt(p)
+ except Exception:
+ _print_exception()
else:
p = "set '$PROMPT = ...' $ "
self.settitle()
| {"golden_diff": "diff --git a/xonsh/base_shell.py b/xonsh/base_shell.py\n--- a/xonsh/base_shell.py\n+++ b/xonsh/base_shell.py\n@@ -102,12 +102,19 @@\n \"\"\"Obtains the current prompt string.\"\"\"\n if self.need_more_lines:\n if self.mlprompt is None:\n- self.mlprompt = multiline_prompt()\n+ try:\n+ self.mlprompt = multiline_prompt()\n+ except Exception:\n+ _print_exception()\n+ self.mlprompt = '<multiline prompt error> '\n return self.mlprompt\n env = builtins.__xonsh_env__\n if 'PROMPT' in env:\n p = env['PROMPT']\n- p = format_prompt(p)\n+ try:\n+ p = format_prompt(p)\n+ except Exception:\n+ _print_exception()\n else:\n p = \"set '$PROMPT = ...' $ \"\n self.settitle()\n", "issue": "xonsh dies if the prompt raises an exception\nIf a function in the prompt raises an exception, it kills xonsh. I would expect the error to be displayed, but not kill the shell. \n\n", "before_files": [{"content": "\"\"\"The base class for xonsh shell\"\"\"\nimport os\nimport sys\nimport builtins\nimport traceback\n\nfrom xonsh.execer import Execer\nfrom xonsh.tools import XonshError, escape_windows_title_string\nfrom xonsh.tools import ON_WINDOWS\nfrom xonsh.completer import Completer\nfrom xonsh.environ import multiline_prompt, format_prompt\n\n\nclass BaseShell(object):\n \"\"\"The xonsh shell.\"\"\"\n\n def __init__(self, execer, ctx, **kwargs):\n super().__init__(**kwargs)\n self.execer = execer\n self.ctx = ctx\n self.completer = Completer()\n self.buffer = []\n self.need_more_lines = False\n self.mlprompt = None\n\n def emptyline(self):\n \"\"\"Called when an empty line has been entered.\"\"\"\n self.need_more_lines = False\n self.default('')\n\n def precmd(self, line):\n \"\"\"Called just before execution of line.\"\"\"\n return line if self.need_more_lines else line.lstrip()\n\n def default(self, line):\n \"\"\"Implements code execution.\"\"\"\n line = line if line.endswith('\\n') else line + '\\n'\n code = self.push(line)\n if code is None:\n return\n try:\n self.execer.exec(code, mode='single', glbs=self.ctx) # no locals\n except XonshError as e:\n print(e.args[0], file=sys.stderr)\n except:\n _print_exception()\n if builtins.__xonsh_exit__:\n return True\n\n def push(self, line):\n \"\"\"Pushes a line onto the buffer and compiles the code in a way that\n enables multiline input.\n \"\"\"\n code = None\n self.buffer.append(line)\n if self.need_more_lines:\n return code\n src = ''.join(self.buffer)\n try:\n code = self.execer.compile(src,\n mode='single',\n glbs=None,\n locs=self.ctx)\n self.reset_buffer()\n except SyntaxError:\n if line == '\\n':\n self.reset_buffer()\n _print_exception()\n return None\n self.need_more_lines = True\n except:\n self.reset_buffer()\n _print_exception()\n return None\n return code\n\n def reset_buffer(self):\n \"\"\"Resets the line buffer.\"\"\"\n self.buffer.clear()\n self.need_more_lines = False\n self.mlprompt = None\n\n def settitle(self):\n \"\"\"Sets terminal title.\"\"\"\n env = builtins.__xonsh_env__\n term = env.get('TERM', None)\n if term is None or term == 'linux':\n return\n if 'TITLE' in env:\n t = env['TITLE']\n else:\n return\n t = format_prompt(t)\n if ON_WINDOWS and 'ANSICON' not in env:\n t = escape_windows_title_string(t)\n os.system('title {}'.format(t))\n else:\n sys.stdout.write(\"\\x1b]2;{0}\\x07\".format(t))\n\n @property\n def prompt(self):\n \"\"\"Obtains the current prompt string.\"\"\"\n if self.need_more_lines:\n if self.mlprompt is None:\n self.mlprompt = multiline_prompt()\n return self.mlprompt\n env = builtins.__xonsh_env__\n if 
'PROMPT' in env:\n p = env['PROMPT']\n p = format_prompt(p)\n else:\n p = \"set '$PROMPT = ...' $ \"\n self.settitle()\n return p\n \ndef _print_exception():\n \"\"\"Print exceptions with/without traceback.\"\"\"\n if not 'XONSH_SHOW_TRACEBACK' in builtins.__xonsh_env__:\n sys.stderr.write('xonsh: For full traceback set: '\n '$XONSH_SHOW_TRACEBACK=True\\n')\n if builtins.__xonsh_env__.get('XONSH_SHOW_TRACEBACK', False):\n traceback.print_exc()\n else:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n exception_only = traceback.format_exception_only(exc_type, exc_value)\n sys.stderr.write(''.join(exception_only))\n", "path": "xonsh/base_shell.py"}]} | 1,759 | 218 |
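
The essence of this fix is wrapping prompt construction so a user-supplied prompt function cannot take the whole shell down. A standalone sketch of that guard — `format_prompt` here is a stand-in callable, and the fallback string mirrors the patch:

```python
import traceback

def safe_prompt(format_prompt, template, fallback):
    try:
        return format_prompt(template)
    except Exception:
        traceback.print_exc()  # report the error...
        return fallback        # ...but keep the shell alive

broken = lambda template: 1 / 0  # stand-in for a prompt function that raises
print(safe_prompt(broken, "{user} $ ", "<prompt error> $ "))
# prints a ZeroDivisionError traceback, then the fallback prompt
```

Catching `Exception` rather than using a bare `except:` is the same choice the patch makes, so `KeyboardInterrupt` and `SystemExit` still propagate.
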
gh_patches_debug_36944 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-903 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Password reset after docker container restarted
*Copy from old repository*: https://github.com/jonaswinkler/paperless-ng/issues/1511
**Describe the bug**
I deployed Paperless-NG in TrueNAS via the TrueCharts integration. TrueCharts uses the official docker container and passes environment variables to configure the superuser.
I changed the admin password in the Django admin interface. However, after redeploying the application (for example due to an update) the password gets overridden by the initial password passed via environment variable.
**To Reproduce**
Steps to reproduce the behavior:
1. Deploy Paperless with credentials admin//secret
2. Open Paperless
3. Navigate to admin interface
4. Change password to "mysupersecretpassword"
5. Restart/update the docker container
6. Navigate to Paperless and try to login with admin/mysupersecretpassword
7. You can't login.
**Expected behavior**
The admin password should not be overridden by the initial password.
**Relevant information**
- Version
- Installation method: **docker**
- Any configuration changes you made in `docker-compose.yml`, `docker-compose.env` or `paperless.conf`. -
I think this is related to the admin user password reset when the docker container is started:
docker-entrypoint.sh calls docker-prepare.sh, which in turn runs the manage_superuser management command, and there the password is updated:
https://github.com/jonaswinkler/paperless-ng/blob/master/src/documents/management/commands/manage_superuser.py#L29
Am I missing something?
</issue>
<code>
[start of src/documents/management/commands/manage_superuser.py]
1 import logging
2 import os
3
4 from django.contrib.auth.models import User
5 from django.core.management.base import BaseCommand
6
7
8 logger = logging.getLogger("paperless.management.superuser")
9
10
11 class Command(BaseCommand):
12
13 help = """
14 Creates a Django superuser based on env variables.
15 """.replace(
16 " ",
17 "",
18 )
19
20 def handle(self, *args, **options):
21
22 username = os.getenv("PAPERLESS_ADMIN_USER")
23 if not username:
24 return
25
26 mail = os.getenv("PAPERLESS_ADMIN_MAIL", "root@localhost")
27 password = os.getenv("PAPERLESS_ADMIN_PASSWORD")
28
29 # Check if user exists already, leave as is if it does
30 if User.objects.filter(username=username).exists():
31 user: User = User.objects.get_by_natural_key(username)
32 user.set_password(password)
33 user.save()
34 self.stdout.write(f"Changed password of user {username}.")
35 elif password:
36 # Create superuser based on env variables
37 User.objects.create_superuser(username, mail, password)
38 self.stdout.write(f'Created superuser "{username}" with provided password.')
39 else:
40 self.stdout.write(f'Did not create superuser "{username}".')
41 self.stdout.write(
42 'Make sure you specified "PAPERLESS_ADMIN_PASSWORD" in your '
43 '"docker-compose.env" file.',
44 )
45
[end of src/documents/management/commands/manage_superuser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/documents/management/commands/manage_superuser.py b/src/documents/management/commands/manage_superuser.py
--- a/src/documents/management/commands/manage_superuser.py
+++ b/src/documents/management/commands/manage_superuser.py
@@ -11,7 +11,14 @@
class Command(BaseCommand):
help = """
- Creates a Django superuser based on env variables.
+ Creates a Django superuser:
+ User named: admin
+ Email: root@localhost
+ with password based on env variable.
+ No superuser will be created, when:
+ - The username is taken already exists
+ - A superuser already exists
+ - PAPERLESS_ADMIN_PASSWORD is not set
""".replace(
" ",
"",
@@ -19,26 +26,41 @@
def handle(self, *args, **options):
- username = os.getenv("PAPERLESS_ADMIN_USER")
- if not username:
- return
-
+ username = os.getenv("PAPERLESS_ADMIN_USER", "admin")
mail = os.getenv("PAPERLESS_ADMIN_MAIL", "root@localhost")
password = os.getenv("PAPERLESS_ADMIN_PASSWORD")
- # Check if user exists already, leave as is if it does
+ # Check if there's already a user called admin
if User.objects.filter(username=username).exists():
- user: User = User.objects.get_by_natural_key(username)
- user.set_password(password)
- user.save()
- self.stdout.write(f"Changed password of user {username}.")
- elif password:
- # Create superuser based on env variables
- User.objects.create_superuser(username, mail, password)
- self.stdout.write(f'Created superuser "{username}" with provided password.')
+ self.stdout.write(
+ self.style.NOTICE(
+ f"Did not create superuser, a user {username} already exists",
+ ),
+ )
+ return
+
+ # Check if any superuseruser
+ # exists already, leave as is if it does
+ if User.objects.filter(is_superuser=True).count() > 0:
+ self.stdout.write(
+ self.style.NOTICE(
+ "Did not create superuser, the DB already contains superusers",
+ ),
+ )
+ return
+
+ if password is None:
+ self.stdout.write(
+ self.style.ERROR(
+ "Please check if PAPERLESS_ADMIN_PASSWORD has been"
+ " set in the environment",
+ ),
+ )
else:
- self.stdout.write(f'Did not create superuser "{username}".')
+ # Create superuser with password based on env variable
+ User.objects.create_superuser(username, mail, password)
self.stdout.write(
- 'Make sure you specified "PAPERLESS_ADMIN_PASSWORD" in your '
- '"docker-compose.env" file.',
+ self.style.SUCCESS(
+ f'Created superuser "{username}" with provided password.',
+ ),
)
| {"golden_diff": "diff --git a/src/documents/management/commands/manage_superuser.py b/src/documents/management/commands/manage_superuser.py\n--- a/src/documents/management/commands/manage_superuser.py\n+++ b/src/documents/management/commands/manage_superuser.py\n@@ -11,7 +11,14 @@\n class Command(BaseCommand):\n \n help = \"\"\"\n- Creates a Django superuser based on env variables.\n+ Creates a Django superuser:\n+ User named: admin\n+ Email: root@localhost\n+ with password based on env variable.\n+ No superuser will be created, when:\n+ - The username is taken already exists\n+ - A superuser already exists\n+ - PAPERLESS_ADMIN_PASSWORD is not set\n \"\"\".replace(\n \" \",\n \"\",\n@@ -19,26 +26,41 @@\n \n def handle(self, *args, **options):\n \n- username = os.getenv(\"PAPERLESS_ADMIN_USER\")\n- if not username:\n- return\n-\n+ username = os.getenv(\"PAPERLESS_ADMIN_USER\", \"admin\")\n mail = os.getenv(\"PAPERLESS_ADMIN_MAIL\", \"root@localhost\")\n password = os.getenv(\"PAPERLESS_ADMIN_PASSWORD\")\n \n- # Check if user exists already, leave as is if it does\n+ # Check if there's already a user called admin\n if User.objects.filter(username=username).exists():\n- user: User = User.objects.get_by_natural_key(username)\n- user.set_password(password)\n- user.save()\n- self.stdout.write(f\"Changed password of user {username}.\")\n- elif password:\n- # Create superuser based on env variables\n- User.objects.create_superuser(username, mail, password)\n- self.stdout.write(f'Created superuser \"{username}\" with provided password.')\n+ self.stdout.write(\n+ self.style.NOTICE(\n+ f\"Did not create superuser, a user {username} already exists\",\n+ ),\n+ )\n+ return\n+\n+ # Check if any superuseruser\n+ # exists already, leave as is if it does\n+ if User.objects.filter(is_superuser=True).count() > 0:\n+ self.stdout.write(\n+ self.style.NOTICE(\n+ \"Did not create superuser, the DB already contains superusers\",\n+ ),\n+ )\n+ return\n+\n+ if password is None:\n+ self.stdout.write(\n+ self.style.ERROR(\n+ \"Please check if PAPERLESS_ADMIN_PASSWORD has been\"\n+ \" set in the environment\",\n+ ),\n+ )\n else:\n- self.stdout.write(f'Did not create superuser \"{username}\".')\n+ # Create superuser with password based on env variable\n+ User.objects.create_superuser(username, mail, password)\n self.stdout.write(\n- 'Make sure you specified \"PAPERLESS_ADMIN_PASSWORD\" in your '\n- '\"docker-compose.env\" file.',\n+ self.style.SUCCESS(\n+ f'Created superuser \"{username}\" with provided password.',\n+ ),\n )\n", "issue": "[BUG] Password reset after docker container restarted\n*Copy from old repository*: https://github.com/jonaswinkler/paperless-ng/issues/1511\r\n\r\n**Describe the bug**\r\nI deployed Paperless-NG in TrueNAS via the TrueCharts integration. TrueCharts uses the official docker container and passes environment variables to configure the superuser.\r\n\r\nI changed the admin password in the Django admin interface. However, after redeploying the application (for example due to an update) the password gets overridden by the initial password passed via environment variable.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Deploy Paperless with credentials admin//secret\r\n2. Open Paperless\r\n3. Navigate to admin interface\r\n4. Change password to \"mysupersecretpassword\"\r\n5. Restart/update the docker container\r\n6. Navigate to Paperless and try to login with admin/mysupersecretpassword\r\n7. 
You can't login.\r\n\r\n**Expected behavior**\r\nThe admin password should not be overridden by the initial password.\r\n\r\n**Relevant information**\r\n - Version \r\n - Installation method: **docker**\r\n - Any configuration changes you made in `docker-compose.yml`, `docker-compose.env` or `paperless.conf`. -\r\n\r\nI think this is related to the admin user password reset when the docker container is started:\r\ndocker-entrypoint.sh calls docker-prepare.sh calls the manage_superuser mgmt command and there the password is updated:\r\nhttps://github.com/jonaswinkler/paperless-ng/blob/master/src/documents/management/commands/manage_superuser.py#L29\r\n\r\nAm I missing something?\n", "before_files": [{"content": "import logging\nimport os\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\n\n\nlogger = logging.getLogger(\"paperless.management.superuser\")\n\n\nclass Command(BaseCommand):\n\n help = \"\"\"\n Creates a Django superuser based on env variables.\n \"\"\".replace(\n \" \",\n \"\",\n )\n\n def handle(self, *args, **options):\n\n username = os.getenv(\"PAPERLESS_ADMIN_USER\")\n if not username:\n return\n\n mail = os.getenv(\"PAPERLESS_ADMIN_MAIL\", \"root@localhost\")\n password = os.getenv(\"PAPERLESS_ADMIN_PASSWORD\")\n\n # Check if user exists already, leave as is if it does\n if User.objects.filter(username=username).exists():\n user: User = User.objects.get_by_natural_key(username)\n user.set_password(password)\n user.save()\n self.stdout.write(f\"Changed password of user {username}.\")\n elif password:\n # Create superuser based on env variables\n User.objects.create_superuser(username, mail, password)\n self.stdout.write(f'Created superuser \"{username}\" with provided password.')\n else:\n self.stdout.write(f'Did not create superuser \"{username}\".')\n self.stdout.write(\n 'Make sure you specified \"PAPERLESS_ADMIN_PASSWORD\" in your '\n '\"docker-compose.env\" file.',\n )\n", "path": "src/documents/management/commands/manage_superuser.py"}]} | 1,241 | 659 |
gh_patches_debug_9140 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1155 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
notation typo in Cosine Similarity docs
## 📚 Documentation
There is a typo in the notation for the [pairwise_cosine_similarity](https://torchmetrics.readthedocs.io/en/stable/pairwise/cosine_similarity.html) docs: the second factor in the denominator repeats `\sqrt{\sum_{d=1}^D x_i^2}` where it should be `\sqrt{\sum_{d=1}^D y_i^2}`.

</issue>
<code>
[start of src/torchmetrics/functional/pairwise/cosine.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional
15
16 import torch
17 from torch import Tensor
18 from typing_extensions import Literal
19
20 from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
21 from torchmetrics.utilities.compute import _safe_matmul
22
23
24 def _pairwise_cosine_similarity_update(
25 x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
26 ) -> Tensor:
27 """Calculates the pairwise cosine similarity matrix.
28
29 Args:
30 x: tensor of shape ``[N,d]``
31 y: tensor of shape ``[M,d]``
32 zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
33 """
34 x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
35
36 norm = torch.norm(x, p=2, dim=1)
37 x /= norm.unsqueeze(1)
38 norm = torch.norm(y, p=2, dim=1)
39 y /= norm.unsqueeze(1)
40
41 distance = _safe_matmul(x, y)
42 if zero_diagonal:
43 distance.fill_diagonal_(0)
44 return distance
45
46
47 def pairwise_cosine_similarity(
48 x: Tensor,
49 y: Optional[Tensor] = None,
50 reduction: Literal["mean", "sum", "none", None] = None,
51 zero_diagonal: Optional[bool] = None,
52 ) -> Tensor:
53 r"""Calculates pairwise cosine similarity:
54
55 .. math::
56 s_{cos}(x,y) = \frac{<x,y>}{||x|| \cdot ||y||}
57 = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_i^2} \cdot \sqrt{\sum_{d=1}^D x_i^2}}
58
59 If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise
60 between the rows of :math:`x` and :math:`y`.
61 If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.
62
63 Args:
64 x: Tensor with shape ``[N, d]``
65 y: Tensor with shape ``[M, d]``, optional
66 reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
67 (applied along column dimension) or `'none'`, `None` for no reduction
68 zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given
69 this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``
70
71 Returns:
72 A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
73
74 Example:
75 >>> import torch
76 >>> from torchmetrics.functional import pairwise_cosine_similarity
77 >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
78 >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
79 >>> pairwise_cosine_similarity(x, y)
80 tensor([[0.5547, 0.8682],
81 [0.5145, 0.8437],
82 [0.5300, 0.8533]])
83 >>> pairwise_cosine_similarity(x)
84 tensor([[0.0000, 0.9989, 0.9996],
85 [0.9989, 0.0000, 0.9998],
86 [0.9996, 0.9998, 0.0000]])
87
88 """
89 distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)
90 return _reduce_distance_matrix(distance, reduction)
91
[end of src/torchmetrics/functional/pairwise/cosine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/torchmetrics/functional/pairwise/cosine.py b/src/torchmetrics/functional/pairwise/cosine.py
--- a/src/torchmetrics/functional/pairwise/cosine.py
+++ b/src/torchmetrics/functional/pairwise/cosine.py
@@ -54,7 +54,7 @@
.. math::
s_{cos}(x,y) = \frac{<x,y>}{||x|| \cdot ||y||}
- = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_i^2} \cdot \sqrt{\sum_{d=1}^D x_i^2}}
+ = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_i^2} \cdot \sqrt{\sum_{d=1}^D y_i^2}}
If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise
between the rows of :math:`x` and :math:`y`.
| {"golden_diff": "diff --git a/src/torchmetrics/functional/pairwise/cosine.py b/src/torchmetrics/functional/pairwise/cosine.py\n--- a/src/torchmetrics/functional/pairwise/cosine.py\n+++ b/src/torchmetrics/functional/pairwise/cosine.py\n@@ -54,7 +54,7 @@\n \n .. math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n- = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D x_i^2}}\n+ = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D y_i^2}}\n \n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n", "issue": "notation typo in Cosine Similarity docs \n## \ud83d\udcda Documentation\r\n\r\nThere is a typo in the notation for the [pairwise_cosine_similarity](https://torchmetrics.readthedocs.io/en/stable/pairwise/cosine_similarity.html)\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\nfrom torchmetrics.utilities.compute import _safe_matmul\n\n\ndef _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise cosine similarity matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n\n norm = torch.norm(x, p=2, dim=1)\n x /= norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n\n distance = _safe_matmul(x, y)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n\n\ndef pairwise_cosine_similarity(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise cosine similarity:\n\n .. math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D x_i^2}}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. 
If only :math:`x` is given\n this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_cosine_similarity\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_cosine_similarity(x, y)\n tensor([[0.5547, 0.8682],\n [0.5145, 0.8437],\n [0.5300, 0.8533]])\n >>> pairwise_cosine_similarity(x)\n tensor([[0.0000, 0.9989, 0.9996],\n [0.9989, 0.0000, 0.9998],\n [0.9996, 0.9998, 0.0000]])\n\n \"\"\"\n distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "src/torchmetrics/functional/pairwise/cosine.py"}]} | 1,849 | 260 |
gh_patches_debug_27323 | rasdani/github-patches | git_diff | mindsdb__lightwood-168 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Construct comprehensive test suite to evaluate predictions with missing columns
We should have a test suite to evaluate prediction accuracy with missing columns.
This should take the form of:
Given `N` columns and a Lightwood model trained with them to predict `y`, the accuracy for `y` when predicting with `M` columns (where `M` is a subset of `N`) should be about equal to or greater than that of a Gradient Boosting Regressor or Classifier trained with just the columns `M` to predict `y`.
The reason we are using a Gradient Booster to determine the benchmark accuracy is that it's safe to assume they are fairly generic (i.e. they should get about the same accuracy as a well-trained neural network) and fast and easy to train.
We can do this testing in two phases:
First, we can add this as a check to the generate-data tests in lightwood, which should be fairly easy.
Second, we can add these tests to mindsdb_examples; the helpers that are already present in there can help.
I'll be handling this but @torrmal feel free to review the methodology
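
One way the phase-one benchmark could look is sketched below. It is an illustrative helper, not existing Lightwood code: the kept columns are one-hot encoded and a plain scikit-learn booster is trained on them to produce the reference accuracy.

~~~python
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score


def benchmark_accuracy(train_df, test_df, kept_columns, target="y"):
    """Accuracy of a gradient booster trained on only `kept_columns`."""
    # One-hot encode the categorical inputs so the booster can consume them.
    x_train = pd.get_dummies(train_df[kept_columns])
    x_test = pd.get_dummies(test_df[kept_columns]).reindex(
        columns=x_train.columns, fill_value=0
    )
    gbm = GradientBoostingClassifier()
    gbm.fit(x_train, train_df[target])
    return accuracy_score(test_df[target], gbm.predict(x_test))
~~~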
</issue>
<code>
[start of docs/examples/learn_to_classify.py]
1 import lightwood
2 import random
3 import pandas as pd
4 import numpy as np
5 from collections import Counter
6
7
8 random.seed(66)
9 n = 100
10 m = 500
11 train = True
12 nr_inputs = 10
13
14 #options = ['a','b','c','d','e','f','g','h','n','m']
15 options = ['a','b','c']
16
17 data_train = {}
18 data_test = {}
19
20 for data, nr_ele in [(data_train,n), (data_test,m)]:
21 for i in range(nr_inputs):
22 data[f'x_{i}'] = [random.choice(options) for _ in range(nr_ele)]
23
24 data['y'] = [Counter([data[f'x_{i}'][n] for i in range(nr_inputs)]).most_common(1)[0][0] for n in range(nr_ele)]
25
26 data_train = pd.DataFrame(data_train)
27 data_test = pd.DataFrame(data_test)
28
29 def iter_function(epoch, training_error, test_error, test_error_gradient, test_accuracy):
30 print(f'Epoch: {epoch}, Train Error: {training_error}, Test Error: {test_error}, Test Error Gradient: {test_error_gradient}, Test Accuracy: {test_accuracy}')
31
32 if train:
33 predictor = lightwood.Predictor(output=['y'])
34 predictor.learn(from_data=data_train, callback_on_iter=iter_function, eval_every_x_epochs=200)
35 predictor.save('/tmp/ltcrl.pkl')
36
37 predictor = lightwood.Predictor(load_from_path='/tmp/ltcrl.pkl')
38 print('Train accuracy: ', predictor.train_accuracy['y']['value'])
39 print('Test accuracy: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])
40
41 predictions = predictor.predict(when_data=data_test)
42 print(f'Confidence mean for all columns present ', np.mean(predictions['y']['selfaware_confidences']))
43
44 for i_drop in range(nr_inputs):
45 predictions = predictor.predict(when_data=data_test.drop(columns=[f'x_{i_drop}']))
46 print(f'Accuracy for x_{i_drop} missing: ', predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))['y']['value'])
47 print(f'Confidence mean for x_{i_drop} missing: ', np.mean(predictions['y']['selfaware_confidences']))
48
[end of docs/examples/learn_to_classify.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/learn_to_classify.py b/docs/examples/learn_to_classify.py
--- a/docs/examples/learn_to_classify.py
+++ b/docs/examples/learn_to_classify.py
@@ -34,14 +34,18 @@
predictor.learn(from_data=data_train, callback_on_iter=iter_function, eval_every_x_epochs=200)
predictor.save('/tmp/ltcrl.pkl')
+
predictor = lightwood.Predictor(load_from_path='/tmp/ltcrl.pkl')
print('Train accuracy: ', predictor.train_accuracy['y']['value'])
print('Test accuracy: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])
-predictions = predictor.predict(when_data=data_test)
+print(f'Accuracy for all columns present: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])
+
+predictions = predictor.calculate_accuracy(from_data=data_test)
print(f'Confidence mean for all columns present ', np.mean(predictions['y']['selfaware_confidences']))
for i_drop in range(nr_inputs):
- predictions = predictor.predict(when_data=data_test.drop(columns=[f'x_{i_drop}']))
print(f'Accuracy for x_{i_drop} missing: ', predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))['y']['value'])
+
+ predictions = predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))
print(f'Confidence mean for x_{i_drop} missing: ', np.mean(predictions['y']['selfaware_confidences']))
| {"golden_diff": "diff --git a/docs/examples/learn_to_classify.py b/docs/examples/learn_to_classify.py\n--- a/docs/examples/learn_to_classify.py\n+++ b/docs/examples/learn_to_classify.py\n@@ -34,14 +34,18 @@\n predictor.learn(from_data=data_train, callback_on_iter=iter_function, eval_every_x_epochs=200)\n predictor.save('/tmp/ltcrl.pkl')\n \n+\n predictor = lightwood.Predictor(load_from_path='/tmp/ltcrl.pkl')\n print('Train accuracy: ', predictor.train_accuracy['y']['value'])\n print('Test accuracy: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])\n \n-predictions = predictor.predict(when_data=data_test)\n+print(f'Accuracy for all columns present: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])\n+\n+predictions = predictor.calculate_accuracy(from_data=data_test)\n print(f'Confidence mean for all columns present ', np.mean(predictions['y']['selfaware_confidences']))\n \n for i_drop in range(nr_inputs):\n- predictions = predictor.predict(when_data=data_test.drop(columns=[f'x_{i_drop}']))\n print(f'Accuracy for x_{i_drop} missing: ', predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))['y']['value'])\n+\n+ predictions = predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))\n print(f'Confidence mean for x_{i_drop} missing: ', np.mean(predictions['y']['selfaware_confidences']))\n", "issue": "Construct comperhensive test suite to evaluate predictions with missing column\nWe should have a test suite to evaluate prediction accuracy with missing column.\r\n\r\nThis should take the form of:\r\n\r\nGiven `M` columns and a Lightwood model trained with them to predict `y`, the accuracy for `y` when predicting with `M` columns (where `M` is a subset of `N`), should be about equal to or greater than that of a Gradient Boosting Regressor or Classifier trained with just the columns `M` to predict `y`.\r\n\r\nThe reason we are using a Gradient Booster to determine the benchmark accuracy is that it's safe to assume they are fairly generic (i.e. 
should get about the same accuracy as a well trained neural network) and fast&easy to train.\r\n\r\nWe can do this testing in two phases:\r\n\r\nFirst, we can add this as a check to the generate-data tests in lightwood, which should be fairly easy.\r\n\r\nSecond, we can add these tests to mindsdb_examples, the helpers that are already present in there can help.\r\n\r\nI'll be handling this but @torrmal feel free to review the methodology\n", "before_files": [{"content": "import lightwood\nimport random\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\n\n\nrandom.seed(66)\nn = 100\nm = 500\ntrain = True\nnr_inputs = 10\n\n#options = ['a','b','c','d','e','f','g','h','n','m']\noptions = ['a','b','c']\n\ndata_train = {}\ndata_test = {}\n\nfor data, nr_ele in [(data_train,n), (data_test,m)]:\n for i in range(nr_inputs):\n data[f'x_{i}'] = [random.choice(options) for _ in range(nr_ele)]\n\n data['y'] = [Counter([data[f'x_{i}'][n] for i in range(nr_inputs)]).most_common(1)[0][0] for n in range(nr_ele)]\n\ndata_train = pd.DataFrame(data_train)\ndata_test = pd.DataFrame(data_test)\n\ndef iter_function(epoch, training_error, test_error, test_error_gradient, test_accuracy):\n print(f'Epoch: {epoch}, Train Error: {training_error}, Test Error: {test_error}, Test Error Gradient: {test_error_gradient}, Test Accuracy: {test_accuracy}')\n\nif train:\n predictor = lightwood.Predictor(output=['y'])\n predictor.learn(from_data=data_train, callback_on_iter=iter_function, eval_every_x_epochs=200)\n predictor.save('/tmp/ltcrl.pkl')\n\npredictor = lightwood.Predictor(load_from_path='/tmp/ltcrl.pkl')\nprint('Train accuracy: ', predictor.train_accuracy['y']['value'])\nprint('Test accuracy: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])\n\npredictions = predictor.predict(when_data=data_test)\nprint(f'Confidence mean for all columns present ', np.mean(predictions['y']['selfaware_confidences']))\n\nfor i_drop in range(nr_inputs):\n predictions = predictor.predict(when_data=data_test.drop(columns=[f'x_{i_drop}']))\n print(f'Accuracy for x_{i_drop} missing: ', predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))['y']['value'])\n print(f'Confidence mean for x_{i_drop} missing: ', np.mean(predictions['y']['selfaware_confidences']))\n", "path": "docs/examples/learn_to_classify.py"}]} | 1,356 | 334 |
gh_patches_debug_35750 | rasdani/github-patches | git_diff | chainer__chainer-1663 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test N-dimensional convolution link for dtypes of FP16 and FP64
Follows #1279 and #1556.
Since #1295 is now merged to master, we can add tests for the FP16 and FP64 dtypes to the N-dimensional convolution **LINK**.
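
A minimal sketch of the kind of parametrized test this asks for is below. The cast of the link's parameters and the `chainer.testing` usage are assumptions for illustration, not code from the actual test suite.

~~~python
import unittest

import numpy as np

from chainer import links
from chainer import testing


@testing.parameterize(*testing.product({
    'dtype': [np.float16, np.float32, np.float64],
}))
class TestConvolutionNDDtype(unittest.TestCase):

    def test_forward_dtype(self):
        # 3-dimensional convolution: 4 input channels, 2 output channels.
        link = links.ConvolutionND(3, 4, 2, ksize=3)
        link.W.data = link.W.data.astype(self.dtype)
        if link.b is not None:
            link.b.data = link.b.data.astype(self.dtype)
        x = np.random.uniform(-1, 1, (2, 4, 6, 6, 6)).astype(self.dtype)
        y = link(x)
        self.assertEqual(y.data.dtype, self.dtype)
~~~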
</issue>
<code>
[start of chainer/links/connection/convolution_nd.py]
1 from chainer.functions.connection import convolution_nd
2 from chainer import initializers
3 from chainer import link
4 from chainer.utils import conv_nd
5
6
7 class ConvolutionND(link.Link):
8 """N-dimensional convolution layer.
9
10 This link wraps the :func:`~chainer.functions.convolution_nd` function and
11 holds the filter weight and bias vector as parameters.
12
13 Args:
14 ndim (int): Number of spatial dimensions.
15 in_channels (int): Number of channels of input arrays.
16 out_channels (int): Number of channels of output arrays.
17 ksize (int or tuple of ints): Size of filters (a.k.a. kernels).
18 ``ksize=k`` and ``ksize=(k, k, ..., k)`` are equivalent.
19 stride (int or tuple of ints): Stride of filter application.
20 ``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent.
21 pad (int or tuple of ints): Spatial padding width for input arrays.
22 ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
23 initialW: Value used to initialize the filter weight. May be an
24 initializer instance or another value that
25 :func:`~chainer.init_weight` helper function can take. This link
26 uses :func:`~chainer.init_weight` to initialize the filter weight
27 and passes the value of ``initialW`` to it as it is.
28 initial_bias: Value used to initialize the bias vector. May be an
29 initializer instance or another value except ``None`` that
30 :func:`~chainer.init_weight` helper function can take. If ``None``
31 is given, this link does not use the bias vector. This link uses
32 :func:`~chainer.init_weight` to initialize the bias vector and
33 passes the value of ``initial_bias`` other than ``None`` to it as
34 it is.
35 use_cudnn (bool): If ``True``, then this link uses cuDNN if available.
36 See :func:`~chainer.functions.convolution_nd` for exact conditions
37 of cuDNN availability.
38 cover_all (bool): If ``True``, all spatial locations are convoluted
39 into some output pixels. It may make the output size larger.
40 ``cover_all`` needs to be ``False`` if you want to use cuDNN.
41
42 .. seealso::
43 See :func:`~chainer.functions.convolution_nd` for the definition of
44 N-dimensional convolution. See
45 :func:`~chainer.functions.convolution_2d` for the definition of
46 two-dimensional convolution.
47
48 Attributes:
49 W (~chainer.Variable): Weight parameter.
50 b (~chainer.Variable): Bias parameter. If ``initial_bias`` is ``None``,
51 set to ``None``.
52
53 """
54
55 def __init__(self, ndim, in_channels, out_channels, ksize, stride=1, pad=0,
56 initialW=None, initial_bias=None, use_cudnn=True,
57 cover_all=False):
58 ksize = conv_nd.as_tuple(ksize, ndim)
59 self.stride = stride
60 self.pad = pad
61 self.use_cudnn = use_cudnn
62 self.cover_all = cover_all
63
64 W_shape = (out_channels, in_channels) + ksize
65 super(ConvolutionND, self).__init__(W=W_shape)
66 initializers.init_weight(self.W.data, initialW)
67
68 if initial_bias is None:
69 self.b = None
70 else:
71 self.add_param('b', out_channels)
72 initializers.init_weight(self.b.data, initial_bias)
73
74 def __call__(self, x):
75 """Applies N-dimensional convolution layer.
76
77 Args:
78 x (~chainer.Variable): Input image.
79
80 Returns:
81 ~chainer.Variable: Output of convolution.
82
83 """
84 return convolution_nd.convolution_nd(
85 x, self.W, self.b, self.stride, self.pad,
86 use_cudnn=self.use_cudnn, cover_all=self.cover_all)
87
[end of chainer/links/connection/convolution_nd.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/links/connection/convolution_nd.py b/chainer/links/connection/convolution_nd.py
--- a/chainer/links/connection/convolution_nd.py
+++ b/chainer/links/connection/convolution_nd.py
@@ -22,16 +22,11 @@
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
initialW: Value used to initialize the filter weight. May be an
initializer instance or another value that
- :func:`~chainer.init_weight` helper function can take. This link
- uses :func:`~chainer.init_weight` to initialize the filter weight
- and passes the value of ``initialW`` to it as it is.
+ :func:`~chainer.init_weight` helper function can take.
initial_bias: Value used to initialize the bias vector. May be an
initializer instance or another value except ``None`` that
:func:`~chainer.init_weight` helper function can take. If ``None``
- is given, this link does not use the bias vector. This link uses
- :func:`~chainer.init_weight` to initialize the bias vector and
- passes the value of ``initial_bias`` other than ``None`` to it as
- it is.
+ is given, this link does not use the bias vector.
use_cudnn (bool): If ``True``, then this link uses cuDNN if available.
See :func:`~chainer.functions.convolution_nd` for exact conditions
of cuDNN availability.
@@ -61,15 +56,17 @@
self.use_cudnn = use_cudnn
self.cover_all = cover_all
+ super(ConvolutionND, self).__init__()
+
W_shape = (out_channels, in_channels) + ksize
- super(ConvolutionND, self).__init__(W=W_shape)
- initializers.init_weight(self.W.data, initialW)
+ initialW = initializers._get_initializer(initialW)
+ self.add_param('W', W_shape, initializer=initialW)
if initial_bias is None:
self.b = None
else:
- self.add_param('b', out_channels)
- initializers.init_weight(self.b.data, initial_bias)
+ initial_bias = initializers._get_initializer(initial_bias)
+ self.add_param('b', out_channels, initializer=initial_bias)
def __call__(self, x):
"""Applies N-dimensional convolution layer.
| {"golden_diff": "diff --git a/chainer/links/connection/convolution_nd.py b/chainer/links/connection/convolution_nd.py\n--- a/chainer/links/connection/convolution_nd.py\n+++ b/chainer/links/connection/convolution_nd.py\n@@ -22,16 +22,11 @@\n ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.\n initialW: Value used to initialize the filter weight. May be an\n initializer instance or another value that\n- :func:`~chainer.init_weight` helper function can take. This link\n- uses :func:`~chainer.init_weight` to initialize the filter weight\n- and passes the value of ``initialW`` to it as it is.\n+ :func:`~chainer.init_weight` helper function can take.\n initial_bias: Value used to initialize the bias vector. May be an\n initializer instance or another value except ``None`` that\n :func:`~chainer.init_weight` helper function can take. If ``None``\n- is given, this link does not use the bias vector. This link uses\n- :func:`~chainer.init_weight` to initialize the bias vector and\n- passes the value of ``initial_bias`` other than ``None`` to it as\n- it is.\n+ is given, this link does not use the bias vector.\n use_cudnn (bool): If ``True``, then this link uses cuDNN if available.\n See :func:`~chainer.functions.convolution_nd` for exact conditions\n of cuDNN availability.\n@@ -61,15 +56,17 @@\n self.use_cudnn = use_cudnn\n self.cover_all = cover_all\n \n+ super(ConvolutionND, self).__init__()\n+\n W_shape = (out_channels, in_channels) + ksize\n- super(ConvolutionND, self).__init__(W=W_shape)\n- initializers.init_weight(self.W.data, initialW)\n+ initialW = initializers._get_initializer(initialW)\n+ self.add_param('W', W_shape, initializer=initialW)\n \n if initial_bias is None:\n self.b = None\n else:\n- self.add_param('b', out_channels)\n- initializers.init_weight(self.b.data, initial_bias)\n+ initial_bias = initializers._get_initializer(initial_bias)\n+ self.add_param('b', out_channels, initializer=initial_bias)\n \n def __call__(self, x):\n \"\"\"Applies N-dimensional convolution layer.\n", "issue": "Test N-dimensional convolution link for dtypes of FP16 and FP64\nFollows #1279 and #1556.\n\nSince #1295 is now merged to master, we can add test for dtypes of FP16 and FP64 to N-dimensional convolution **LINK**.\n\n", "before_files": [{"content": "from chainer.functions.connection import convolution_nd\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer.utils import conv_nd\n\n\nclass ConvolutionND(link.Link):\n \"\"\"N-dimensional convolution layer.\n\n This link wraps the :func:`~chainer.functions.convolution_nd` function and\n holds the filter weight and bias vector as parameters.\n\n Args:\n ndim (int): Number of spatial dimensions.\n in_channels (int): Number of channels of input arrays.\n out_channels (int): Number of channels of output arrays.\n ksize (int or tuple of ints): Size of filters (a.k.a. kernels).\n ``ksize=k`` and ``ksize=(k, k, ..., k)`` are equivalent.\n stride (int or tuple of ints): Stride of filter application.\n ``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent.\n pad (int or tuple of ints): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.\n initialW: Value used to initialize the filter weight. May be an\n initializer instance or another value that\n :func:`~chainer.init_weight` helper function can take. This link\n uses :func:`~chainer.init_weight` to initialize the filter weight\n and passes the value of ``initialW`` to it as it is.\n initial_bias: Value used to initialize the bias vector. 
May be an\n initializer instance or another value except ``None`` that\n :func:`~chainer.init_weight` helper function can take. If ``None``\n is given, this link does not use the bias vector. This link uses\n :func:`~chainer.init_weight` to initialize the bias vector and\n passes the value of ``initial_bias`` other than ``None`` to it as\n it is.\n use_cudnn (bool): If ``True``, then this link uses cuDNN if available.\n See :func:`~chainer.functions.convolution_nd` for exact conditions\n of cuDNN availability.\n cover_all (bool): If ``True``, all spatial locations are convoluted\n into some output pixels. It may make the output size larger.\n ``cover_all`` needs to be ``False`` if you want to use cuDNN.\n\n .. seealso::\n See :func:`~chainer.functions.convolution_nd` for the definition of\n N-dimensional convolution. See\n :func:`~chainer.functions.convolution_2d` for the definition of\n two-dimensional convolution.\n\n Attributes:\n W (~chainer.Variable): Weight parameter.\n b (~chainer.Variable): Bias parameter. If ``initial_bias`` is ``None``,\n set to ``None``.\n\n \"\"\"\n\n def __init__(self, ndim, in_channels, out_channels, ksize, stride=1, pad=0,\n initialW=None, initial_bias=None, use_cudnn=True,\n cover_all=False):\n ksize = conv_nd.as_tuple(ksize, ndim)\n self.stride = stride\n self.pad = pad\n self.use_cudnn = use_cudnn\n self.cover_all = cover_all\n\n W_shape = (out_channels, in_channels) + ksize\n super(ConvolutionND, self).__init__(W=W_shape)\n initializers.init_weight(self.W.data, initialW)\n\n if initial_bias is None:\n self.b = None\n else:\n self.add_param('b', out_channels)\n initializers.init_weight(self.b.data, initial_bias)\n\n def __call__(self, x):\n \"\"\"Applies N-dimensional convolution layer.\n\n Args:\n x (~chainer.Variable): Input image.\n\n Returns:\n ~chainer.Variable: Output of convolution.\n\n \"\"\"\n return convolution_nd.convolution_nd(\n x, self.W, self.b, self.stride, self.pad,\n use_cudnn=self.use_cudnn, cover_all=self.cover_all)\n", "path": "chainer/links/connection/convolution_nd.py"}]} | 1,640 | 549 |
gh_patches_debug_36694 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1443 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🪲stats for distinct incorrect
### Thank you for taking the time to report a problem with Opteryx.
_To help us to respond to your request we ask that you try to provide the below detail about the bug._
**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._
**Expected behaviour** _A clear and concise description of what you expected to happen._
**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._
~~~sql
~~~
**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._
</issue>
<code>
[start of opteryx/operators/distinct_node.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 """
14 Distinct Node
15
16 This is a SQL Query Execution Plan Node.
17
18 This Node eliminates duplicate records.
19 """
20 import time
21 from typing import Generator
22
23 import pyarrow
24 import pyarrow.compute
25
26 from opteryx.models import QueryProperties
27 from opteryx.operators import BasePlanNode
28
29
30 class DistinctNode(BasePlanNode):
31 def __init__(self, properties: QueryProperties, **config):
32 super().__init__(properties=properties)
33 self._distinct_on = config.get("on")
34 if self._distinct_on:
35 self._distinct_on = [col.schema_column.identity for col in self._distinct_on]
36
37 @property
38 def config(self): # pragma: no cover
39 return ""
40
41 @property
42 def greedy(self): # pragma: no cover
43 return True
44
45 @property
46 def name(self): # pragma: no cover
47 return "Distinction"
48
49 def execute(self) -> Generator[pyarrow.Table, None, None]:
50
51 from opteryx.compiled.functions import HashSet
52 from opteryx.compiled.functions import distinct
53
54 # We create a HashSet outside the distinct call, this allows us to pass
55 # the hash to each run of the distinct which means we don't need to concat
56 # all of the tables together to return a result.
57 # The Cython distinct is about 8x faster on a 10 million row dataset with
58 # approx 85k distinct entries (4.8sec vs 0.8sec) and faster on a 177 record
59 # dataset with 7 distinct entries.
60 # Being able to run morsel-by-morsel means if we have a LIMIT clause, we can
61 # limit processing
62 hash_set = HashSet()
63
64 morsels = self._producers[0] # type:ignore
65
66 start = time.monotonic_ns()
67 for morsel in morsels.execute():
68 deduped, hash_set = distinct(
69 morsel, columns=self._distinct_on, seen_hashes=hash_set, return_seen_hashes=True
70 )
71 if deduped.num_rows > 0:
72 self.statistics.time_distincting += time.monotonic_ns() - start
73 yield deduped
74 start = time.monotonic_ns()
75
[end of opteryx/operators/distinct_node.py]
[start of opteryx/__version__.py]
1 __build__ = 296
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Store the version here so:
17 1) we don't load dependencies by storing it in __init__.py
18 2) we can import it in setup.py for the same reason
19 """
20 from enum import Enum # isort: skip
21
22
23 class VersionStatus(Enum):
24 ALPHA = "alpha"
25 BETA = "beta"
26 RELEASE = "release"
27
28
29 _major = 0
30 _minor = 14
31 _revision = 0
32 _status = VersionStatus.ALPHA
33
34 __author__ = "@joocer"
35 __version__ = f"{_major}.{_minor}.{_revision}" + (
36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
37 )
38
[end of opteryx/__version__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opteryx/__version__.py b/opteryx/__version__.py
--- a/opteryx/__version__.py
+++ b/opteryx/__version__.py
@@ -1,4 +1,4 @@
-__build__ = 296
+__build__ = 298
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/opteryx/operators/distinct_node.py b/opteryx/operators/distinct_node.py
--- a/opteryx/operators/distinct_node.py
+++ b/opteryx/operators/distinct_node.py
@@ -29,10 +29,13 @@
class DistinctNode(BasePlanNode):
def __init__(self, properties: QueryProperties, **config):
+ from opteryx.compiled.functions import HashSet
+
super().__init__(properties=properties)
self._distinct_on = config.get("on")
if self._distinct_on:
self._distinct_on = [col.schema_column.identity for col in self._distinct_on]
+ self.hash_set = HashSet()
@property
def config(self): # pragma: no cover
@@ -48,7 +51,6 @@
def execute(self) -> Generator[pyarrow.Table, None, None]:
- from opteryx.compiled.functions import HashSet
from opteryx.compiled.functions import distinct
# We create a HashSet outside the distinct call, this allows us to pass
@@ -59,16 +61,17 @@
# dataset with 7 distinct entries.
# Being able to run morsel-by-morsel means if we have a LIMIT clause, we can
# limit processing
- hash_set = HashSet()
morsels = self._producers[0] # type:ignore
- start = time.monotonic_ns()
for morsel in morsels.execute():
- deduped, hash_set = distinct(
- morsel, columns=self._distinct_on, seen_hashes=hash_set, return_seen_hashes=True
+ start = time.monotonic_ns()
+ deduped, self.hash_set = distinct(
+ morsel,
+ columns=self._distinct_on,
+ seen_hashes=self.hash_set,
+ return_seen_hashes=True,
)
+ self.statistics.time_distincting += time.monotonic_ns() - start
if deduped.num_rows > 0:
- self.statistics.time_distincting += time.monotonic_ns() - start
yield deduped
- start = time.monotonic_ns()
| {"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 296\n+__build__ = 298\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/operators/distinct_node.py b/opteryx/operators/distinct_node.py\n--- a/opteryx/operators/distinct_node.py\n+++ b/opteryx/operators/distinct_node.py\n@@ -29,10 +29,13 @@\n \n class DistinctNode(BasePlanNode):\n def __init__(self, properties: QueryProperties, **config):\n+ from opteryx.compiled.functions import HashSet\n+\n super().__init__(properties=properties)\n self._distinct_on = config.get(\"on\")\n if self._distinct_on:\n self._distinct_on = [col.schema_column.identity for col in self._distinct_on]\n+ self.hash_set = HashSet()\n \n @property\n def config(self): # pragma: no cover\n@@ -48,7 +51,6 @@\n \n def execute(self) -> Generator[pyarrow.Table, None, None]:\n \n- from opteryx.compiled.functions import HashSet\n from opteryx.compiled.functions import distinct\n \n # We create a HashSet outside the distinct call, this allows us to pass\n@@ -59,16 +61,17 @@\n # dataset with 7 distinct entries.\n # Being able to run morsel-by-morsel means if we have a LIMIT clause, we can\n # limit processing\n- hash_set = HashSet()\n \n morsels = self._producers[0] # type:ignore\n \n- start = time.monotonic_ns()\n for morsel in morsels.execute():\n- deduped, hash_set = distinct(\n- morsel, columns=self._distinct_on, seen_hashes=hash_set, return_seen_hashes=True\n+ start = time.monotonic_ns()\n+ deduped, self.hash_set = distinct(\n+ morsel,\n+ columns=self._distinct_on,\n+ seen_hashes=self.hash_set,\n+ return_seen_hashes=True,\n )\n+ self.statistics.time_distincting += time.monotonic_ns() - start\n if deduped.num_rows > 0:\n- self.statistics.time_distincting += time.monotonic_ns() - start\n yield deduped\n- start = time.monotonic_ns()\n", "issue": "\ud83e\udeb2stats for distinct incorrect\n### Thank you for taking the time to report a problem with Opteryx.\r\n_To help us to respond to your request we ask that you try to provide the below detail about the bug._\r\n\r\n**Describe the bug** _A clear and specific description of what the bug is. 
What the error, incorrect or unexpected behaviour was._\r\n\r\n\r\n**Expected behaviour** _A clear and concise description of what you expected to happen._\r\n\r\n\r\n**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._\r\n\r\n~~~sql\r\n\r\n~~~\r\n\r\n**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nDistinct Node\n\nThis is a SQL Query Execution Plan Node.\n\nThis Node eliminates duplicate records.\n\"\"\"\nimport time\nfrom typing import Generator\n\nimport pyarrow\nimport pyarrow.compute\n\nfrom opteryx.models import QueryProperties\nfrom opteryx.operators import BasePlanNode\n\n\nclass DistinctNode(BasePlanNode):\n def __init__(self, properties: QueryProperties, **config):\n super().__init__(properties=properties)\n self._distinct_on = config.get(\"on\")\n if self._distinct_on:\n self._distinct_on = [col.schema_column.identity for col in self._distinct_on]\n\n @property\n def config(self): # pragma: no cover\n return \"\"\n\n @property\n def greedy(self): # pragma: no cover\n return True\n\n @property\n def name(self): # pragma: no cover\n return \"Distinction\"\n\n def execute(self) -> Generator[pyarrow.Table, None, None]:\n\n from opteryx.compiled.functions import HashSet\n from opteryx.compiled.functions import distinct\n\n # We create a HashSet outside the distinct call, this allows us to pass\n # the hash to each run of the distinct which means we don't need to concat\n # all of the tables together to return a result.\n # The Cython distinct is about 8x faster on a 10 million row dataset with\n # approx 85k distinct entries (4.8sec vs 0.8sec) and faster on a 177 record\n # dataset with 7 distinct entries.\n # Being able to run morsel-by-morsel means if we have a LIMIT clause, we can\n # limit processing\n hash_set = HashSet()\n\n morsels = self._producers[0] # type:ignore\n\n start = time.monotonic_ns()\n for morsel in morsels.execute():\n deduped, hash_set = distinct(\n morsel, columns=self._distinct_on, seen_hashes=hash_set, return_seen_hashes=True\n )\n if deduped.num_rows > 0:\n self.statistics.time_distincting += time.monotonic_ns() - start\n yield deduped\n start = time.monotonic_ns()\n", "path": "opteryx/operators/distinct_node.py"}, {"content": "__build__ = 296\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations 
under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 14\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]} | 1,838 | 587 |
gh_patches_debug_15665 | rasdani/github-patches | git_diff | meltano__meltano-6562 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: No way to dismiss image scan alerts
### Meltano Version
NA
### Python Version
NA
### Bug scope
Other
### Operating System
NA
### Description
Currently we use `.github/actions/docker-build-scan-push/check_sarif.py` to analyze the SARIF report created from running `grype` to scan our Docker images. It parses the SARIF JSON file itself to check if there are any issues detected with a severity above some threshold in the range [0.0, 10.0].
Before running this check, we upload the SARIF results to GitHub, which stores them for our repository using the "code scanning" feature. From there, we can review them, dismiss them, and create issues to address them. [An example can be found here](https://github.com/meltano/meltano/security/code-scanning?query=ref%3Arefs%2Fpull%2F6410%2Fmerge+tool%3AGrype).
Our `check_sarif.py` script does not consider whether we've dismissed the issue via GitHub's "code scanning" feature, so we have no way to deem a detected issue acceptable, and have the Docker publish workflow pass. To fix this we should replace `check_sarif.py` with some steps that use [the GitHub code scanning API](https://docs.github.com/en/rest/code-scanning#list-code-scanning-alerts-for-a-repository) to check if there are any issues above some set severity level *that haven't been dismissed*.
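
As a starting point, a sketch of such a check is below. It assumes a `GITHUB_TOKEN` with the security-events scope and ignores pagination beyond the first 100 alerts; the endpoint and the `rule.security_severity_level` field come from the linked API docs, but treat the details as unverified.

~~~python
import os

import requests

SEVERITY_RANK = {"low": 1, "medium": 2, "high": 3, "critical": 4}


def open_alerts_at_or_above(owner, repo, ref, cutoff="high"):
    """Return open (non-dismissed) Grype alerts at or above `cutoff`."""
    response = requests.get(
        f"https://api.github.com/repos/{owner}/{repo}/code-scanning/alerts",
        params={"state": "open", "tool_name": "Grype", "ref": ref, "per_page": 100},
        headers={
            "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
            "Accept": "application/vnd.github+json",
        },
        timeout=30,
    )
    response.raise_for_status()
    return [
        alert
        for alert in response.json()
        if SEVERITY_RANK.get(alert["rule"].get("security_severity_level"), 0)
        >= SEVERITY_RANK[cutoff]
    ]
~~~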
### Code
_No response_
</issue>
<code>
[start of .github/actions/docker-build-scan-push/check_sarif.py]
1 """Check if the provided SARIF file has any violations at or above some severity level."""
2
3 from __future__ import annotations
4
5 import argparse
6 import json
7
8 DEFAULT_SEVERITY_CUTOFF = 4.0
9
10 parser = argparse.ArgumentParser()
11 parser.add_argument(
12 "sarif_path",
13 help="The path to the SARIF file to be checked.",
14 )
15 parser.add_argument(
16 "--severity-cutoff",
17 help="Violations with a severity >= this value result in an exit code of 1"
18 + " - must be a number in the range [0.0, 10.0].",
19 type=float,
20 default=DEFAULT_SEVERITY_CUTOFF,
21 )
22 args = parser.parse_args()
23
24 with open(args.sarif_path) as sarif_file:
25 sarif_data = json.load(sarif_file)
26
27 first_run = sarif_data["runs"][0]
28 triggered_rules = first_run["tool"]["driver"]["rules"]
29
30 exit( # noqa: WPS421
31 any(
32 float(rule["properties"]["security-severity"]) >= args.severity_cutoff
33 for rule in triggered_rules
34 )
35 )
36
[end of .github/actions/docker-build-scan-push/check_sarif.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/.github/actions/docker-build-scan-push/check_sarif.py b/.github/actions/docker-build-scan-push/check_sarif.py
deleted file mode 100644
--- a/.github/actions/docker-build-scan-push/check_sarif.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Check if the provided SARIF file has any violations at or above some severity level."""
-
-from __future__ import annotations
-
-import argparse
-import json
-
-DEFAULT_SEVERITY_CUTOFF = 4.0
-
-parser = argparse.ArgumentParser()
-parser.add_argument(
- "sarif_path",
- help="The path to the SARIF file to be checked.",
-)
-parser.add_argument(
- "--severity-cutoff",
- help="Violations with a severity >= this value result in an exit code of 1"
- + " - must be a number in the range [0.0, 10.0].",
- type=float,
- default=DEFAULT_SEVERITY_CUTOFF,
-)
-args = parser.parse_args()
-
-with open(args.sarif_path) as sarif_file:
- sarif_data = json.load(sarif_file)
-
-first_run = sarif_data["runs"][0]
-triggered_rules = first_run["tool"]["driver"]["rules"]
-
-exit( # noqa: WPS421
- any(
- float(rule["properties"]["security-severity"]) >= args.severity_cutoff
- for rule in triggered_rules
- )
-)
| {"golden_diff": "diff --git a/.github/actions/docker-build-scan-push/check_sarif.py b/.github/actions/docker-build-scan-push/check_sarif.py\ndeleted file mode 100644\n--- a/.github/actions/docker-build-scan-push/check_sarif.py\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-\"\"\"Check if the provided SARIF file has any violations at or above some severity level.\"\"\"\n-\n-from __future__ import annotations\n-\n-import argparse\n-import json\n-\n-DEFAULT_SEVERITY_CUTOFF = 4.0\n-\n-parser = argparse.ArgumentParser()\n-parser.add_argument(\n- \"sarif_path\",\n- help=\"The path to the SARIF file to be checked.\",\n-)\n-parser.add_argument(\n- \"--severity-cutoff\",\n- help=\"Violations with a severity >= this value result in an exit code of 1\"\n- + \" - must be a number in the range [0.0, 10.0].\",\n- type=float,\n- default=DEFAULT_SEVERITY_CUTOFF,\n-)\n-args = parser.parse_args()\n-\n-with open(args.sarif_path) as sarif_file:\n- sarif_data = json.load(sarif_file)\n-\n-first_run = sarif_data[\"runs\"][0]\n-triggered_rules = first_run[\"tool\"][\"driver\"][\"rules\"]\n-\n-exit( # noqa: WPS421\n- any(\n- float(rule[\"properties\"][\"security-severity\"]) >= args.severity_cutoff\n- for rule in triggered_rules\n- )\n-)\n", "issue": "bug: No way to dismiss image scan alerts\n### Meltano Version\n\nNA\n\n### Python Version\n\nNA\n\n### Bug scope\n\nOther\n\n### Operating System\n\nNA\n\n### Description\n\nCurrently we use `.github/actions/docker-build-scan-push/check_sarif.py` to analyze the SARIF report created from running `grype` to scan our Docker images. It parses the SARIF JSON file itself to check if there are any issues detected with a severity above some threshold in the range [0.0, 10.0].\r\n\r\nBefore running this check, we upload the SARIF results to GitHub, which stores them for our repository using the \"code scanning\" feature. From there, we can review them, dismiss them, and create issues to address them. [An example can be found here](https://github.com/meltano/meltano/security/code-scanning?query=ref%3Arefs%2Fpull%2F6410%2Fmerge+tool%3AGrype).\r\n\r\nOur `check_sarif.py` script does not consider whether we've dismissed the issue via GitHub's \"code scanning\" feature, so we have no way to deem a detected issue acceptable, and have the Docker publish workflow pass. 
To fix this we should replace `check_sarif.py` with some steps that use [the GitHub code scanning API](https://docs.github.com/en/rest/code-scanning#list-code-scanning-alerts-for-a-repository) to check if there are any issues above some set severity level *that haven't been dismissed*.\n\n### Code\n\n_No response_\n", "before_files": [{"content": "\"\"\"Check if the provided SARIF file has any violations at or above some severity level.\"\"\"\n\nfrom __future__ import annotations\n\nimport argparse\nimport json\n\nDEFAULT_SEVERITY_CUTOFF = 4.0\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"sarif_path\",\n help=\"The path to the SARIF file to be checked.\",\n)\nparser.add_argument(\n \"--severity-cutoff\",\n help=\"Violations with a severity >= this value result in an exit code of 1\"\n + \" - must be a number in the range [0.0, 10.0].\",\n type=float,\n default=DEFAULT_SEVERITY_CUTOFF,\n)\nargs = parser.parse_args()\n\nwith open(args.sarif_path) as sarif_file:\n sarif_data = json.load(sarif_file)\n\nfirst_run = sarif_data[\"runs\"][0]\ntriggered_rules = first_run[\"tool\"][\"driver\"][\"rules\"]\n\nexit( # noqa: WPS421\n any(\n float(rule[\"properties\"][\"security-severity\"]) >= args.severity_cutoff\n for rule in triggered_rules\n )\n)\n", "path": ".github/actions/docker-build-scan-push/check_sarif.py"}]} | 1,192 | 341 |
gh_patches_debug_4122 | rasdani/github-patches | git_diff | mozilla__bugbug-3897 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Restrict the training set of the StepsToReproduce model only to defects
Given that steps to reproduce (STRs) don't apply to enhancements or tasks.
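In code, the requested change is a one-line guard at the top of the labeling loop; this sketch mirrors the patch shown further down in this entry:

```python
from bugbug import bugzilla

classes = {}
for bug_data in bugzilla.get_bugs():
    # STRs only make sense for defects, so skip enhancements and tasks.
    if bug_data["type"] != "defect":
        continue
    # ... existing cf_has_str / "stepswanted" labeling logic ...
```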
</issue>
<code>
[start of bugbug/models/stepstoreproduce.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import logging
7
8 import xgboost
9 from imblearn.pipeline import Pipeline as ImblearnPipeline
10 from imblearn.under_sampling import RandomUnderSampler
11 from sklearn.compose import ColumnTransformer
12 from sklearn.feature_extraction import DictVectorizer
13 from sklearn.pipeline import Pipeline
14
15 from bugbug import bug_features, bugzilla, feature_cleanup, utils
16 from bugbug.model import BugModel
17
18 logging.basicConfig(level=logging.INFO)
19 logger = logging.getLogger(__name__)
20
21
22 class StepsToReproduceModel(BugModel):
23 def __init__(self, lemmatization=False):
24 BugModel.__init__(self, lemmatization)
25
26 feature_extractors = [
27 bug_features.HasRegressionRange(),
28 bug_features.Severity(),
29 bug_features.Keywords({"stepswanted"}),
30 bug_features.IsCoverityIssue(),
31 bug_features.HasCrashSignature(),
32 bug_features.HasURL(),
33 bug_features.HasW3CURL(),
34 bug_features.HasGithubURL(),
35 bug_features.Whiteboard(),
36 bug_features.Patches(),
37 bug_features.Landings(),
38 ]
39
40 cleanup_functions = [
41 feature_cleanup.fileref(),
42 feature_cleanup.url(),
43 feature_cleanup.synonyms(),
44 ]
45
46 self.extraction_pipeline = Pipeline(
47 [
48 (
49 "bug_extractor",
50 bug_features.BugExtractor(feature_extractors, cleanup_functions),
51 ),
52 ]
53 )
54
55 self.clf = ImblearnPipeline(
56 [
57 (
58 "union",
59 ColumnTransformer(
60 [
61 ("data", DictVectorizer(), "data"),
62 ("title", self.text_vectorizer(), "title"),
63 ("comments", self.text_vectorizer(), "comments"),
64 ]
65 ),
66 ),
67 ("sampler", RandomUnderSampler(random_state=0)),
68 (
69 "estimator",
70 xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count()),
71 ),
72 ]
73 )
74
75 def get_labels(self):
76 classes = {}
77
78 for bug_data in bugzilla.get_bugs():
79 if "cf_has_str" in bug_data:
80 if bug_data["cf_has_str"] == "no":
81 classes[int(bug_data["id"])] = 0
82 elif bug_data["cf_has_str"] == "yes":
83 classes[int(bug_data["id"])] = 1
84 elif "stepswanted" in bug_data["keywords"]:
85 classes[int(bug_data["id"])] = 0
86 else:
87 for entry in bug_data["history"]:
88 for change in entry["changes"]:
89 if change["removed"].startswith("stepswanted"):
90 classes[int(bug_data["id"])] = 1
91
92 logger.info(
93 "%d bugs have no steps to reproduce",
94 sum(label == 0 for label in classes.values()),
95 )
96 logger.info(
97 "%d bugs have steps to reproduce",
98 sum(label == 1 for label in classes.values()),
99 )
100
101 return classes, [0, 1]
102
103 def overwrite_classes(self, bugs, classes, probabilities):
104 for i, bug in enumerate(bugs):
105 if "cf_has_str" in bug and bug["cf_has_str"] == "no":
106 classes[i] = 0 if not probabilities else [1.0, 0.0]
107 elif "cf_has_str" in bug and bug["cf_has_str"] == "yes":
108 classes[i] = 1 if not probabilities else [0.0, 1.0]
109 elif "stepswanted" in bug["keywords"]:
110 classes[i] = 0 if not probabilities else [1.0, 0.0]
111
112 return classes
113
114 def get_feature_names(self):
115 return self.clf.named_steps["union"].get_feature_names_out()
116
[end of bugbug/models/stepstoreproduce.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bugbug/models/stepstoreproduce.py b/bugbug/models/stepstoreproduce.py
--- a/bugbug/models/stepstoreproduce.py
+++ b/bugbug/models/stepstoreproduce.py
@@ -76,6 +76,8 @@
classes = {}
for bug_data in bugzilla.get_bugs():
+ if bug_data["type"] != "defect":
+ continue
if "cf_has_str" in bug_data:
if bug_data["cf_has_str"] == "no":
classes[int(bug_data["id"])] = 0
| {"golden_diff": "diff --git a/bugbug/models/stepstoreproduce.py b/bugbug/models/stepstoreproduce.py\n--- a/bugbug/models/stepstoreproduce.py\n+++ b/bugbug/models/stepstoreproduce.py\n@@ -76,6 +76,8 @@\n classes = {}\n \n for bug_data in bugzilla.get_bugs():\n+ if bug_data[\"type\"] != \"defect\":\n+ continue\n if \"cf_has_str\" in bug_data:\n if bug_data[\"cf_has_str\"] == \"no\":\n classes[int(bug_data[\"id\"])] = 0\n", "issue": "Restrict the training set of the StepsToReproduce model only to defects\nGiven that STRs don't apply to enhancement or task.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\n\nimport xgboost\nfrom imblearn.pipeline import Pipeline as ImblearnPipeline\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass StepsToReproduceModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n feature_extractors = [\n bug_features.HasRegressionRange(),\n bug_features.Severity(),\n bug_features.Keywords({\"stepswanted\"}),\n bug_features.IsCoverityIssue(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n bug_features.Whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(feature_extractors, cleanup_functions),\n ),\n ]\n )\n\n self.clf = ImblearnPipeline(\n [\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(), \"title\"),\n (\"comments\", self.text_vectorizer(), \"comments\"),\n ]\n ),\n ),\n (\"sampler\", RandomUnderSampler(random_state=0)),\n (\n \"estimator\",\n xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count()),\n ),\n ]\n )\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n if \"cf_has_str\" in bug_data:\n if bug_data[\"cf_has_str\"] == \"no\":\n classes[int(bug_data[\"id\"])] = 0\n elif bug_data[\"cf_has_str\"] == \"yes\":\n classes[int(bug_data[\"id\"])] = 1\n elif \"stepswanted\" in bug_data[\"keywords\"]:\n classes[int(bug_data[\"id\"])] = 0\n else:\n for entry in bug_data[\"history\"]:\n for change in entry[\"changes\"]:\n if change[\"removed\"].startswith(\"stepswanted\"):\n classes[int(bug_data[\"id\"])] = 1\n\n logger.info(\n \"%d bugs have no steps to reproduce\",\n sum(label == 0 for label in classes.values()),\n )\n logger.info(\n \"%d bugs have steps to reproduce\",\n sum(label == 1 for label in classes.values()),\n )\n\n return classes, [0, 1]\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for i, bug in enumerate(bugs):\n if \"cf_has_str\" in bug and bug[\"cf_has_str\"] == \"no\":\n classes[i] = 0 if not probabilities else [1.0, 0.0]\n elif \"cf_has_str\" in bug and bug[\"cf_has_str\"] == \"yes\":\n classes[i] = 1 if not probabilities 
else [0.0, 1.0]\n elif \"stepswanted\" in bug[\"keywords\"]:\n classes[i] = 0 if not probabilities else [1.0, 0.0]\n\n return classes\n\n def get_feature_names(self):\n return self.clf.named_steps[\"union\"].get_feature_names_out()\n", "path": "bugbug/models/stepstoreproduce.py"}]} | 1,655 | 132 |
gh_patches_debug_58 | rasdani/github-patches | git_diff | Anselmoo__spectrafit-701 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: ASCII Char in creating branch
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
It is crashing.
### Expected Behavior
It should record the change in the changelog.
### Steps To Reproduce
_No response_
### ⚙️ Environment
```markdown
- OS:
- Python:
- spectrafit:
```
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
<code>
[start of spectrafit/__init__.py]
1 """SpectraFit, fast command line tool for fitting data."""
2 __version__ = "1.0.0b1"
3
[end of spectrafit/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py
--- a/spectrafit/__init__.py
+++ b/spectrafit/__init__.py
@@ -1,2 +1,2 @@
"""SpectraFit, fast command line tool for fitting data."""
-__version__ = "1.0.0b1"
+__version__ = "1.0.0b2"
| {"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"1.0.0b1\"\n+__version__ = \"1.0.0b2\"\n", "issue": "[Bug]: ASCII Char in creating branch\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Behavior\n\nIs crashing\n\n### Expected Behavior\n\nIs realising a change in changeling\n\n### Steps To Reproduce\n\n_No response_\n\n### \u2699\ufe0f Environment\n\n```markdown\n- OS:\r\n- Python:\r\n- spectrafit:\n```\n\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0b1\"\n", "path": "spectrafit/__init__.py"}]} | 675 | 97 |
gh_patches_debug_35958 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2644 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider advanceautoparts is broken
During the global build at 2021-05-21-20-28-08, spider **advanceautoparts** failed with **0 features** and **405 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/logs/advanceautoparts.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/advanceautoparts.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/advanceautoparts.geojson))
</issue>
<code>
[start of locations/spiders/advanceautoparts.py]
1 import json
2 import re
3
4 import scrapy
5
6 from locations.hours import OpeningHours
7 from locations.items import GeojsonPointItem
8
9
10 class AdvanceautopartsSpider(scrapy.Spider):
11
12 name = "advanceautoparts"
13 item_attributes = {"brand": "Advance Auto Parts", "brand_wikidata": "Q4686051"}
14 allowed_domains = ["stores.advanceautoparts.com"]
15 start_urls = ("https://stores.advanceautoparts.com/sitemap.xml",)
16
17 def parse(self, response):
18 response.selector.remove_namespaces()
19 urls = response.xpath("//loc/text()").getall()
20 storeRe = re.compile(r"^https://stores.advanceautoparts.com/[^/]+/[^/]+/[^/]+$")
21 for url in urls:
22 if storeRe.fullmatch(url):
23 yield scrapy.Request(url, callback=self.parse_store)
24
25 def parse_hours(self, store_hours):
26 opening_hours = OpeningHours()
27
28 for weekday in store_hours:
29 day = weekday.get("day").title()
30 for interval in weekday.get("intervals", []):
31 open_time = str(interval.get("start"))
32 close_time = str(interval.get("end"))
33 opening_hours.add_range(
34 day=day[:2],
35 open_time=open_time,
36 close_time=close_time,
37 time_format="%H%M",
38 )
39
40 return opening_hours.as_opening_hours()
41
42 def parse_store(self, response):
43 name = response.xpath('//h1[@itemprop="name"]/text()').extract_first()
44
45 js = json.loads(response.xpath('//script[@class="js-map-config"]/text()').get())
46 ref = js["entities"][0]["profile"]["meta"]["id"]
47
48 hours = response.xpath(
49 '//div[@class="c-hours-details-wrapper js-hours-table"]/@data-days'
50 ).extract_first()
51 try:
52 opening_hours = self.parse_hours(json.loads(hours))
53 except ValueError:
54 opening_hours = None
55
56 properties = {
57 "addr_full": response.xpath(
58 'normalize-space(//meta[@itemprop="streetAddress"]/@content)'
59 ).extract_first(),
60 "phone": response.xpath(
61 'normalize-space(//div[@itemprop="telephone"]/text())'
62 ).extract_first(),
63 "city": response.xpath(
64 'normalize-space(//meta[@itemprop="addressLocality"]/@content)'
65 ).extract_first(),
66 "state": response.xpath(
67 'normalize-space(//abbr[@itemprop="addressRegion"]/text())'
68 ).extract_first(),
69 "postcode": response.xpath(
70 'normalize-space(//span[@itemprop="postalCode"]/text())'
71 ).extract_first(),
72 "ref": ref,
73 "website": response.url,
74 "lat": response.xpath(
75 'normalize-space(//meta[@itemprop="latitude"]/@content)'
76 ).extract_first(),
77 "lon": response.xpath(
78 'normalize-space(//meta[@itemprop="longitude"]/@content)'
79 ).extract_first(),
80 "name": name,
81 "opening_hours": opening_hours,
82 "extras": {"shop": "car_parts"},
83 }
84 yield GeojsonPointItem(**properties)
85
[end of locations/spiders/advanceautoparts.py]
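For reference, a rough sketch of how `parse_hours` consumes the `data-days` payload; the sample intervals below are made-up values, not real store data:

```python
from locations.hours import OpeningHours

sample_days = [  # assumed shape of the data-days JSON attribute
    {"day": "MONDAY", "intervals": [{"start": 730, "end": 2100}]},
    {"day": "SUNDAY", "intervals": [{"start": 900, "end": 2000}]},
]

opening_hours = OpeningHours()
for weekday in sample_days:
    day = weekday.get("day").title()
    for interval in weekday.get("intervals", []):
        opening_hours.add_range(
            day=day[:2],
            open_time=str(interval.get("start")),
            close_time=str(interval.get("end")),
            time_format="%H%M",
        )

print(opening_hours.as_opening_hours())  # e.g. "Mo 07:30-21:00; Su 09:00-20:00"
```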
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/advanceautoparts.py b/locations/spiders/advanceautoparts.py
--- a/locations/spiders/advanceautoparts.py
+++ b/locations/spiders/advanceautoparts.py
@@ -45,38 +45,22 @@
js = json.loads(response.xpath('//script[@class="js-map-config"]/text()').get())
ref = js["entities"][0]["profile"]["meta"]["id"]
- hours = response.xpath(
- '//div[@class="c-hours-details-wrapper js-hours-table"]/@data-days'
- ).extract_first()
+ hours = response.xpath('//div[@class="c-hours-details-wrapper js-hours-table"]/@data-days').extract_first()
try:
opening_hours = self.parse_hours(json.loads(hours))
except ValueError:
opening_hours = None
properties = {
- "addr_full": response.xpath(
- 'normalize-space(//meta[@itemprop="streetAddress"]/@content)'
- ).extract_first(),
- "phone": response.xpath(
- 'normalize-space(//div[@itemprop="telephone"]/text())'
- ).extract_first(),
- "city": response.xpath(
- 'normalize-space(//meta[@itemprop="addressLocality"]/@content)'
- ).extract_first(),
- "state": response.xpath(
- 'normalize-space(//abbr[@itemprop="addressRegion"]/text())'
- ).extract_first(),
- "postcode": response.xpath(
- 'normalize-space(//span[@itemprop="postalCode"]/text())'
- ).extract_first(),
+ "addr_full": response.xpath('normalize-space(//meta[@itemprop="streetAddress"]/@content)').extract_first(),
+ "phone": response.xpath('normalize-space(//div[@itemprop="telephone"]/text())').extract_first(),
+ "city": response.xpath('normalize-space(//meta[@itemprop="addressLocality"]/@content)').extract_first(),
+ "state": response.xpath('normalize-space(//abbr[@itemprop="addressRegion"]/text())').extract_first(),
+ "postcode": response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(),
"ref": ref,
"website": response.url,
- "lat": response.xpath(
- 'normalize-space(//meta[@itemprop="latitude"]/@content)'
- ).extract_first(),
- "lon": response.xpath(
- 'normalize-space(//meta[@itemprop="longitude"]/@content)'
- ).extract_first(),
+ "lat": response.xpath('normalize-space(//meta[@itemprop="latitude"]/@content)').extract_first(),
+ "lon": response.xpath('normalize-space(//meta[@itemprop="longitude"]/@content)').extract_first(),
"name": name,
"opening_hours": opening_hours,
"extras": {"shop": "car_parts"},
| {"golden_diff": "diff --git a/locations/spiders/advanceautoparts.py b/locations/spiders/advanceautoparts.py\n--- a/locations/spiders/advanceautoparts.py\n+++ b/locations/spiders/advanceautoparts.py\n@@ -45,38 +45,22 @@\n js = json.loads(response.xpath('//script[@class=\"js-map-config\"]/text()').get())\n ref = js[\"entities\"][0][\"profile\"][\"meta\"][\"id\"]\n \n- hours = response.xpath(\n- '//div[@class=\"c-hours-details-wrapper js-hours-table\"]/@data-days'\n- ).extract_first()\n+ hours = response.xpath('//div[@class=\"c-hours-details-wrapper js-hours-table\"]/@data-days').extract_first()\n try:\n opening_hours = self.parse_hours(json.loads(hours))\n except ValueError:\n opening_hours = None\n \n properties = {\n- \"addr_full\": response.xpath(\n- 'normalize-space(//meta[@itemprop=\"streetAddress\"]/@content)'\n- ).extract_first(),\n- \"phone\": response.xpath(\n- 'normalize-space(//div[@itemprop=\"telephone\"]/text())'\n- ).extract_first(),\n- \"city\": response.xpath(\n- 'normalize-space(//meta[@itemprop=\"addressLocality\"]/@content)'\n- ).extract_first(),\n- \"state\": response.xpath(\n- 'normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())'\n- ).extract_first(),\n- \"postcode\": response.xpath(\n- 'normalize-space(//span[@itemprop=\"postalCode\"]/text())'\n- ).extract_first(),\n+ \"addr_full\": response.xpath('normalize-space(//meta[@itemprop=\"streetAddress\"]/@content)').extract_first(),\n+ \"phone\": response.xpath('normalize-space(//div[@itemprop=\"telephone\"]/text())').extract_first(),\n+ \"city\": response.xpath('normalize-space(//meta[@itemprop=\"addressLocality\"]/@content)').extract_first(),\n+ \"state\": response.xpath('normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())').extract_first(),\n+ \"postcode\": response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n \"ref\": ref,\n \"website\": response.url,\n- \"lat\": response.xpath(\n- 'normalize-space(//meta[@itemprop=\"latitude\"]/@content)'\n- ).extract_first(),\n- \"lon\": response.xpath(\n- 'normalize-space(//meta[@itemprop=\"longitude\"]/@content)'\n- ).extract_first(),\n+ \"lat\": response.xpath('normalize-space(//meta[@itemprop=\"latitude\"]/@content)').extract_first(),\n+ \"lon\": response.xpath('normalize-space(//meta[@itemprop=\"longitude\"]/@content)').extract_first(),\n \"name\": name,\n \"opening_hours\": opening_hours,\n \"extras\": {\"shop\": \"car_parts\"},\n", "issue": "Spider advanceautoparts is broken\nDuring the global build at 2021-05-21-20-28-08, spider **advanceautoparts** failed with **0 features** and **405 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/logs/advanceautoparts.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/advanceautoparts.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/advanceautoparts.geojson))\n", "before_files": [{"content": "import json\nimport re\n\nimport scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass AdvanceautopartsSpider(scrapy.Spider):\n\n name = \"advanceautoparts\"\n item_attributes = {\"brand\": \"Advance Auto Parts\", \"brand_wikidata\": \"Q4686051\"}\n allowed_domains = [\"stores.advanceautoparts.com\"]\n start_urls = (\"https://stores.advanceautoparts.com/sitemap.xml\",)\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath(\"//loc/text()\").getall()\n storeRe 
= re.compile(r\"^https://stores.advanceautoparts.com/[^/]+/[^/]+/[^/]+$\")\n for url in urls:\n if storeRe.fullmatch(url):\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n\n for weekday in store_hours:\n day = weekday.get(\"day\").title()\n for interval in weekday.get(\"intervals\", []):\n open_time = str(interval.get(\"start\"))\n close_time = str(interval.get(\"end\"))\n opening_hours.add_range(\n day=day[:2],\n open_time=open_time,\n close_time=close_time,\n time_format=\"%H%M\",\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n name = response.xpath('//h1[@itemprop=\"name\"]/text()').extract_first()\n\n js = json.loads(response.xpath('//script[@class=\"js-map-config\"]/text()').get())\n ref = js[\"entities\"][0][\"profile\"][\"meta\"][\"id\"]\n\n hours = response.xpath(\n '//div[@class=\"c-hours-details-wrapper js-hours-table\"]/@data-days'\n ).extract_first()\n try:\n opening_hours = self.parse_hours(json.loads(hours))\n except ValueError:\n opening_hours = None\n\n properties = {\n \"addr_full\": response.xpath(\n 'normalize-space(//meta[@itemprop=\"streetAddress\"]/@content)'\n ).extract_first(),\n \"phone\": response.xpath(\n 'normalize-space(//div[@itemprop=\"telephone\"]/text())'\n ).extract_first(),\n \"city\": response.xpath(\n 'normalize-space(//meta[@itemprop=\"addressLocality\"]/@content)'\n ).extract_first(),\n \"state\": response.xpath(\n 'normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())'\n ).extract_first(),\n \"postcode\": response.xpath(\n 'normalize-space(//span[@itemprop=\"postalCode\"]/text())'\n ).extract_first(),\n \"ref\": ref,\n \"website\": response.url,\n \"lat\": response.xpath(\n 'normalize-space(//meta[@itemprop=\"latitude\"]/@content)'\n ).extract_first(),\n \"lon\": response.xpath(\n 'normalize-space(//meta[@itemprop=\"longitude\"]/@content)'\n ).extract_first(),\n \"name\": name,\n \"opening_hours\": opening_hours,\n \"extras\": {\"shop\": \"car_parts\"},\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/advanceautoparts.py"}]} | 1,566 | 628 |
gh_patches_debug_24842 | rasdani/github-patches | git_diff | Kinto__kinto-667 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Relax record id validation
Do we really need to ensure that posted record ids match a uuid regex?
We can generate a uuid when a record without id is posted, and leave the usage of uuid in our official clients.
But is there any reason to use a different regex than the one used for collection and bucket names?
edit: The use case is the WebExtensions `chrome.storage.sync` storage: since any key is accepted, it takes the MD5 of the key to "generate" UUIDs. Instead we could let the client push any key as the record id.
- Related #140
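One way to decouple generation from validation, keeping UUID4 generation while accepting any string a client pushes, is a relaxed generator; this mirrors the patch at the end of this entry:

```python
from kinto.core.storage import generators


class RelaxedUUID(generators.UUID4):
    """A generator that generates UUIDs but accepts any string."""

    # Validate ids with the permissive base-class regexp instead of
    # the strict UUID pattern.
    regexp = generators.Generator.regexp
```

The record resource then opts in with `self.model.id_generator = RelaxedUUID()` in `Record.__init__`.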
</issue>
<code>
[start of kinto/views/records.py]
1 import copy
2
3 import jsonschema
4 from kinto.core import resource
5 from kinto.core.errors import raise_invalid
6 from jsonschema import exceptions as jsonschema_exceptions
7 from pyramid.security import Authenticated
8 from pyramid.settings import asbool
9
10 from kinto.views import object_exists_or_404
11
12
13 class RecordSchema(resource.ResourceSchema):
14 class Options:
15 preserve_unknown = True
16
17
18 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
19
20
21 @resource.register(name='record',
22 collection_path=_parent_path + '/records',
23 record_path=_parent_path + '/records/{{id}}')
24 class Record(resource.ShareableResource):
25
26 mapping = RecordSchema()
27 schema_field = 'schema'
28
29 def __init__(self, *args, **kwargs):
30 super(Record, self).__init__(*args, **kwargs)
31
32 # Check if already fetched before (in batch).
33 collections = self.request.bound_data.setdefault('collections', {})
34 collection_uri = self.get_parent_id(self.request)
35 if collection_uri not in collections:
36 # Unknown yet, fetch from storage.
37 collection_parent_id = '/buckets/%s' % self.bucket_id
38 collection = object_exists_or_404(self.request,
39 collection_id='collection',
40 parent_id=collection_parent_id,
41 object_id=self.collection_id)
42 collections[collection_uri] = collection
43
44 self._collection = collections[collection_uri]
45
46 def get_parent_id(self, request):
47 self.bucket_id = request.matchdict['bucket_id']
48 self.collection_id = request.matchdict['collection_id']
49 return '/buckets/%s/collections/%s' % (self.bucket_id,
50 self.collection_id)
51
52 def is_known_field(self, field_name):
53 """Without schema, any field is considered as known."""
54 return True
55
56 def process_record(self, new, old=None):
57 """Validate records against collection schema, if any."""
58 new = super(Record, self).process_record(new, old)
59
60 schema = self._collection.get('schema')
61 settings = self.request.registry.settings
62 schema_validation = 'experimental_collection_schema_validation'
63 if not schema or not asbool(settings.get(schema_validation)):
64 return new
65
66 collection_timestamp = self._collection[self.model.modified_field]
67
68 try:
69 stripped = copy.deepcopy(new)
70 stripped.pop(self.model.id_field, None)
71 stripped.pop(self.model.modified_field, None)
72 stripped.pop(self.model.permissions_field, None)
73 stripped.pop(self.schema_field, None)
74 jsonschema.validate(stripped, schema)
75 except jsonschema_exceptions.ValidationError as e:
76 try:
77 field = e.path.pop() if e.path else e.validator_value.pop()
78 except AttributeError:
79 field = None
80 raise_invalid(self.request, name=field, description=e.message)
81
82 new[self.schema_field] = collection_timestamp
83 return new
84
85 def collection_get(self):
86 result = super(Record, self).collection_get()
87 self._handle_cache_expires(self.request.response)
88 return result
89
90 def get(self):
91 result = super(Record, self).get()
92 self._handle_cache_expires(self.request.response)
93 return result
94
95 def _handle_cache_expires(self, response):
96 """If the parent collection defines a ``cache_expires`` attribute,
97 then cache-control response headers are sent.
98
99 .. note::
100
101 Those headers are also sent if the
102 ``kinto.record_cache_expires_seconds`` setting is defined.
103 """
104 is_anonymous = Authenticated not in self.request.effective_principals
105 if not is_anonymous:
106 return
107
108 cache_expires = self._collection.get('cache_expires')
109 if cache_expires is None:
110 by_bucket = '%s_record_cache_expires_seconds' % (self.bucket_id)
111 by_collection = '%s_%s_record_cache_expires_seconds' % (
112 self.bucket_id, self.collection_id)
113 settings = self.request.registry.settings
114 cache_expires = settings.get(by_collection,
115 settings.get(by_bucket))
116
117 if cache_expires is not None:
118 response.cache_expires(seconds=int(cache_expires))
119
[end of kinto/views/records.py]
[start of kinto/views/__init__.py]
1 import random
2 import string
3
4 from kinto.core.storage import generators, exceptions
5 from pyramid import httpexceptions
6
7
8 class NameGenerator(generators.Generator):
9 def __call__(self):
10 ascii_letters = ('abcdefghijklmopqrstuvwxyz'
11 'ABCDEFGHIJKLMOPQRSTUVWXYZ')
12 alphabet = ascii_letters + string.digits + '-_'
13 letters = [random.choice(ascii_letters + string.digits)]
14 letters += [random.choice(alphabet) for x in range(7)]
15 return ''.join(letters)
16
17
18 def object_exists_or_404(request, collection_id, object_id, parent_id=''):
19 storage = request.registry.storage
20 try:
21 return storage.get(collection_id=collection_id,
22 parent_id=parent_id,
23 object_id=object_id)
24 except exceptions.RecordNotFoundError:
25 # XXX: We gave up putting details about parent id here (See #53).
26 raise httpexceptions.HTTPNotFound()
27
[end of kinto/views/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/views/__init__.py b/kinto/views/__init__.py
--- a/kinto/views/__init__.py
+++ b/kinto/views/__init__.py
@@ -15,6 +15,12 @@
return ''.join(letters)
+class RelaxedUUID(generators.UUID4):
+ """A generator that generates UUIDs but accepts any string.
+ """
+ regexp = generators.Generator.regexp
+
+
def object_exists_or_404(request, collection_id, object_id, parent_id=''):
storage = request.registry.storage
try:
diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -7,7 +7,7 @@
from pyramid.security import Authenticated
from pyramid.settings import asbool
-from kinto.views import object_exists_or_404
+from kinto.views import RelaxedUUID, object_exists_or_404
class RecordSchema(resource.ResourceSchema):
@@ -29,6 +29,8 @@
def __init__(self, *args, **kwargs):
super(Record, self).__init__(*args, **kwargs)
+ self.model.id_generator = RelaxedUUID()
+
# Check if already fetched before (in batch).
collections = self.request.bound_data.setdefault('collections', {})
collection_uri = self.get_parent_id(self.request)
| {"golden_diff": "diff --git a/kinto/views/__init__.py b/kinto/views/__init__.py\n--- a/kinto/views/__init__.py\n+++ b/kinto/views/__init__.py\n@@ -15,6 +15,12 @@\n return ''.join(letters)\n \n \n+class RelaxedUUID(generators.UUID4):\n+ \"\"\"A generator that generates UUIDs but accepts any string.\n+ \"\"\"\n+ regexp = generators.Generator.regexp\n+\n+\n def object_exists_or_404(request, collection_id, object_id, parent_id=''):\n storage = request.registry.storage\n try:\ndiff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -7,7 +7,7 @@\n from pyramid.security import Authenticated\n from pyramid.settings import asbool\n \n-from kinto.views import object_exists_or_404\n+from kinto.views import RelaxedUUID, object_exists_or_404\n \n \n class RecordSchema(resource.ResourceSchema):\n@@ -29,6 +29,8 @@\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n \n+ self.model.id_generator = RelaxedUUID()\n+\n # Check if already fetched before (in batch).\n collections = self.request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(self.request)\n", "issue": "Relax record id validation\nDo we really need to ensure that posted record ids match a uuid regex?\n\nWe can generate a uuid when a record without id is posted, and leave the usage of uuid in our official clients.\nBut is there any reason to use a different regex that collection and bucket names?\n\nedit: The usecase is the Web sync extension chrome.storage.sync: since any key is accepted, it takes the md5 of the key to \"generate\" UUIDs. Instead we could let the client push any key as record id.\n- Related #140 \n\n", "before_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import object_exists_or_404\n\n\nclass RecordSchema(resource.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n # Check if already fetched before (in batch).\n collections = self.request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(self.request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (self.bucket_id,\n self.collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = 
super(Record, self).process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n try:\n field = e.path.pop() if e.path else e.validator_value.pop()\n except AttributeError:\n field = None\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super(Record, self).collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super(Record, self).get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '%s_record_cache_expires_seconds' % (self.bucket_id)\n by_collection = '%s_%s_record_cache_expires_seconds' % (\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}, {"content": "import random\nimport string\n\nfrom kinto.core.storage import generators, exceptions\nfrom pyramid import httpexceptions\n\n\nclass NameGenerator(generators.Generator):\n def __call__(self):\n ascii_letters = ('abcdefghijklmopqrstuvwxyz'\n 'ABCDEFGHIJKLMOPQRSTUVWXYZ')\n alphabet = ascii_letters + string.digits + '-_'\n letters = [random.choice(ascii_letters + string.digits)]\n letters += [random.choice(alphabet) for x in range(7)]\n return ''.join(letters)\n\n\ndef object_exists_or_404(request, collection_id, object_id, parent_id=''):\n storage = request.registry.storage\n try:\n return storage.get(collection_id=collection_id,\n parent_id=parent_id,\n object_id=object_id)\n except exceptions.RecordNotFoundError:\n # XXX: We gave up putting details about parent id here (See #53).\n raise httpexceptions.HTTPNotFound()\n", "path": "kinto/views/__init__.py"}]} | 2,045 | 317 |
gh_patches_debug_43674 | rasdani/github-patches | git_diff | dotkom__onlineweb4-293 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Menu is missing link to admin page when user is logged in
It should only be visible for privileged users with access to the admin panel.
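A sketch of the template-side check (the menu markup, template location, and the exact privilege test are assumptions):

```django
{% if user.is_authenticated and user.is_staff %}
  <li><a href="{% url 'admin:index' %}">Admin</a></li>
{% endif %}
```

`is_staff` is the stock Django flag for admin-site access; a group- or permission-based check could be substituted if the project uses one.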
</issue>
<code>
[start of apps/authentication/forms.py]
1 # -*- coding: utf-8 -*-
2
3 import datetime
4 import re
5
6 from django import forms
7 from django.contrib import auth
8 from django.utils.translation import ugettext as _
9
10 from apps.authentication.models import OnlineUser as User
11
12 class LoginForm(forms.Form):
13 username = forms.CharField(widget=forms.TextInput(), label=_("Brukernavn"), max_length=50)
14 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Passord"))
15 user = None
16
17 def clean(self):
18 if self._errors:
19 return
20
21 user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])
22
23 if user:
24 if user.is_active:
25 self.user = user
26 else:
27 self._errors['username'] = self.error_class([_("Din konto er ikke aktiv. Forsøk gjenoppretning av passord.")])
28 else:
29 self._errors['username'] = self.error_class([_("Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.")])
30 return self.cleaned_data
31
32 def login(self, request):
33 try:
34 User.objects.get(username=request.POST['username'])
35 except:
36 return False
37 if self.is_valid():
38 auth.login(request, self.user)
39 request.session.set_expiry(0)
40 return True
41 return False
42
43 class RegisterForm(forms.Form):
44 username = forms.CharField(label=_("brukernavn"), max_length=20)
45 first_name = forms.CharField(label=_("fornavn"), max_length=50)
46 last_name = forms.CharField(label=_("etternavn"), max_length=50)
47 email = forms.EmailField(label=_("epost"), max_length=50)
48 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("passord"))
49 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("gjenta passord"))
50 address = forms.CharField(label=_("adresse"), max_length=50)
51 zip_code = forms.CharField(label=_("postnummer"), max_length=4)
52 phone = forms.CharField(label=_("telefon"), max_length=20)
53
54 def clean(self):
55 super(RegisterForm, self).clean()
56 if self.is_valid():
57 cleaned_data = self.cleaned_data
58
59 # Check passwords
60 if cleaned_data['password'] != cleaned_data['repeat_password']:
61 self._errors['repeat_password'] = self.error_class([_("Passordene er ikke like.")])
62
63 # Check username
64 username = cleaned_data['username']
65 if User.objects.filter(username=username).count() > 0:
66 self._errors['username'] = self.error_class([_("Brukernavnet er allerede registrert.")])
67 if not re.match("^[a-zA-Z0-9_-]+$", username):
68 self._errors['username'] = self.error_class([_("Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _")])
69
70 # Check email
71 email = cleaned_data['email']
72 if User.objects.filter(email=email).count() > 0:
73 self._errors['email'] = self.error_class([_("Det fins allerede en bruker med denne epostadressen.")])
74
75 # ZIP code digits only
76 zip_code = cleaned_data['zip_code']
77 if len(zip_code) != 4 or not zip_code.isdigit():
78 self._errors['zip_code'] = self.error_class([_("Postnummer må bestå av fire siffer.")])
79
80 return cleaned_data
81
82 class RecoveryForm(forms.Form):
83 email = forms.EmailField(label="Email", max_length=50)
84
85 class ChangePasswordForm(forms.Form):
86 new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("nytt passord"))
87 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("gjenta passord"))
88
89 def clean(self):
90 super(ChangePasswordForm, self).clean()
91 if self.is_valid():
92 cleaned_data = self.cleaned_data
93
94 # Check passwords
95 if cleaned_data['new_password'] != cleaned_data['repeat_password']:
96 self._errors['repeat_password'] = self.error_class([_("Passordene er ikke like.")])
97
98 return cleaned_data
99
100
101 class NewEmailForm(forms.Form):
102 new_email = forms.EmailField(_(u"ny epostadresse"))
103
[end of apps/authentication/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py
--- a/apps/authentication/forms.py
+++ b/apps/authentication/forms.py
@@ -11,7 +11,7 @@
class LoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput(), label=_("Brukernavn"), max_length=50)
- password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Passord"))
+ password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Passord"))
user = None
def clean(self):
@@ -24,9 +24,9 @@
if user.is_active:
self.user = user
else:
- self._errors['username'] = self.error_class([_("Din konto er ikke aktiv. Forsøk gjenoppretning av passord.")])
+ self._errors['username'] = self.error_class([_(u"Din konto er ikke aktiv. Forsøk gjenoppretning av passord.")])
else:
- self._errors['username'] = self.error_class([_("Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.")])
+ self._errors['username'] = self.error_class([_(u"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.")])
return self.cleaned_data
def login(self, request):
@@ -58,24 +58,24 @@
# Check passwords
if cleaned_data['password'] != cleaned_data['repeat_password']:
- self._errors['repeat_password'] = self.error_class([_("Passordene er ikke like.")])
+ self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")])
# Check username
username = cleaned_data['username']
if User.objects.filter(username=username).count() > 0:
- self._errors['username'] = self.error_class([_("Brukernavnet er allerede registrert.")])
+ self._errors['username'] = self.error_class([_(u"Brukernavnet er allerede registrert.")])
if not re.match("^[a-zA-Z0-9_-]+$", username):
- self._errors['username'] = self.error_class([_("Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _")])
+ self._errors['username'] = self.error_class([_(u"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _")])
# Check email
email = cleaned_data['email']
if User.objects.filter(email=email).count() > 0:
- self._errors['email'] = self.error_class([_("Det fins allerede en bruker med denne epostadressen.")])
+ self._errors['email'] = self.error_class([_(u"Det fins allerede en bruker med denne epostadressen.")])
# ZIP code digits only
zip_code = cleaned_data['zip_code']
if len(zip_code) != 4 or not zip_code.isdigit():
- self._errors['zip_code'] = self.error_class([_("Postnummer må bestå av fire siffer.")])
+ self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")])
return cleaned_data
@@ -83,8 +83,8 @@
email = forms.EmailField(label="Email", max_length=50)
class ChangePasswordForm(forms.Form):
- new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("nytt passord"))
- repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("gjenta passord"))
+ new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"nytt passord"))
+ repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"gjenta passord"))
def clean(self):
super(ChangePasswordForm, self).clean()
@@ -93,7 +93,7 @@
# Check passwords
if cleaned_data['new_password'] != cleaned_data['repeat_password']:
- self._errors['repeat_password'] = self.error_class([_("Passordene er ikke like.")])
+ self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")])
return cleaned_data
| {"golden_diff": "diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py\n--- a/apps/authentication/forms.py\n+++ b/apps/authentication/forms.py\n@@ -11,7 +11,7 @@\n \n class LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=_(\"Brukernavn\"), max_length=50)\n- password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Passord\"))\n+ password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Passord\"))\n user = None\n \n def clean(self):\n@@ -24,9 +24,9 @@\n if user.is_active:\n self.user = user\n else:\n- self._errors['username'] = self.error_class([_(\"Din konto er ikke aktiv. Fors\u00f8k gjenoppretning av passord.\")])\n+ self._errors['username'] = self.error_class([_(u\"Din konto er ikke aktiv. Fors\u00f8k gjenoppretning av passord.\")])\n else:\n- self._errors['username'] = self.error_class([_(\"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.\")])\n+ self._errors['username'] = self.error_class([_(u\"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.\")])\n return self.cleaned_data\n \n def login(self, request):\n@@ -58,24 +58,24 @@\n \n # Check passwords\n if cleaned_data['password'] != cleaned_data['repeat_password']:\n- self._errors['repeat_password'] = self.error_class([_(\"Passordene er ikke like.\")])\n+ self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n \n # Check username\n username = cleaned_data['username']\n if User.objects.filter(username=username).count() > 0:\n- self._errors['username'] = self.error_class([_(\"Brukernavnet er allerede registrert.\")])\n+ self._errors['username'] = self.error_class([_(u\"Brukernavnet er allerede registrert.\")])\n if not re.match(\"^[a-zA-Z0-9_-]+$\", username):\n- self._errors['username'] = self.error_class([_(\"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _\")])\n+ self._errors['username'] = self.error_class([_(u\"Ditt brukernavn inneholdt ulovlige tegn. 
Lovlige tegn: a-Z 0-9 - _\")])\n \n # Check email\n email = cleaned_data['email']\n if User.objects.filter(email=email).count() > 0:\n- self._errors['email'] = self.error_class([_(\"Det fins allerede en bruker med denne epostadressen.\")])\n+ self._errors['email'] = self.error_class([_(u\"Det fins allerede en bruker med denne epostadressen.\")])\n \n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 4 or not zip_code.isdigit():\n- self._errors['zip_code'] = self.error_class([_(\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n+ self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n \n return cleaned_data \n \n@@ -83,8 +83,8 @@\n email = forms.EmailField(label=\"Email\", max_length=50)\n \n class ChangePasswordForm(forms.Form):\n- new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"nytt passord\"))\n- repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"gjenta passord\"))\n+ new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"nytt passord\"))\n+ repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"gjenta passord\"))\n \n def clean(self):\n super(ChangePasswordForm, self).clean()\n@@ -93,7 +93,7 @@\n \n # Check passwords\n if cleaned_data['new_password'] != cleaned_data['repeat_password']:\n- self._errors['repeat_password'] = self.error_class([_(\"Passordene er ikke like.\")])\n+ self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n \n return cleaned_data\n", "issue": "Menu is missing link to admin page when user is logged in\nShould only be visible when it's a privileged user with access to the panel \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser as User\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=_(\"Brukernavn\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Passord\"))\n user = None\n\n def clean(self):\n if self._errors:\n return\n \n user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])\n\n if user:\n if user.is_active:\n self.user = user\n else:\n self._errors['username'] = self.error_class([_(\"Din konto er ikke aktiv. 
Fors\u00f8k gjenoppretning av passord.\")])\n else:\n self._errors['username'] = self.error_class([_(\"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.\")])\n return self.cleaned_data\n\n def login(self, request):\n try:\n User.objects.get(username=request.POST['username'])\n except:\n return False\n if self.is_valid():\n auth.login(request, self.user)\n request.session.set_expiry(0)\n return True\n return False\n\nclass RegisterForm(forms.Form):\n username = forms.CharField(label=_(\"brukernavn\"), max_length=20)\n first_name = forms.CharField(label=_(\"fornavn\"), max_length=50)\n last_name = forms.CharField(label=_(\"etternavn\"), max_length=50)\n email = forms.EmailField(label=_(\"epost\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"gjenta passord\"))\n address = forms.CharField(label=_(\"adresse\"), max_length=50)\n zip_code = forms.CharField(label=_(\"postnummer\"), max_length=4)\n phone = forms.CharField(label=_(\"telefon\"), max_length=20)\n \n def clean(self):\n super(RegisterForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(\"Passordene er ikke like.\")])\n\n # Check username\n username = cleaned_data['username']\n if User.objects.filter(username=username).count() > 0:\n self._errors['username'] = self.error_class([_(\"Brukernavnet er allerede registrert.\")])\n if not re.match(\"^[a-zA-Z0-9_-]+$\", username):\n self._errors['username'] = self.error_class([_(\"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _\")])\n\n # Check email\n email = cleaned_data['email']\n if User.objects.filter(email=email).count() > 0:\n self._errors['email'] = self.error_class([_(\"Det fins allerede en bruker med denne epostadressen.\")])\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 4 or not zip_code.isdigit():\n self._errors['zip_code'] = self.error_class([_(\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data \n\nclass RecoveryForm(forms.Form):\n email = forms.EmailField(label=\"Email\", max_length=50)\n\nclass ChangePasswordForm(forms.Form):\n new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"nytt passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"gjenta passord\"))\n\n def clean(self):\n super(ChangePasswordForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['new_password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(\"Passordene er ikke like.\")])\n\n return cleaned_data\n\n\nclass NewEmailForm(forms.Form):\n new_email = forms.EmailField(_(u\"ny epostadresse\"))\n", "path": "apps/authentication/forms.py"}]} | 1,706 | 1,002 |
gh_patches_debug_23037 | rasdani/github-patches | git_diff | e-valuation__EvaP-1221 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Contact modal broken in Firefox
The contact modal does not work in Firefox because `event` is undefined there. Chrome exposes the current event in the global scope, which is why it works in that browser (see https://stackoverflow.com/questions/18274383/ajax-post-working-in-chrome-but-not-in-firefox).
</issue>
<code>
[start of evap/evaluation/views.py]
1 import logging
2
3 from django.conf import settings
4 from django.contrib import messages, auth
5 from django.contrib.auth.decorators import login_required
6 from django.core.mail import EmailMessage
7 from django.http import HttpResponse
8 from django.shortcuts import redirect, render
9 from django.utils.translation import ugettext as _
10 from django.views.decorators.http import require_POST
11 from django.views.decorators.debug import sensitive_post_parameters
12 from django.views.i18n import set_language
13
14 from evap.evaluation.forms import NewKeyForm, LoginUsernameForm
15 from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester
16
17 logger = logging.getLogger(__name__)
18
19
20 @sensitive_post_parameters("password")
21 def index(request):
22 """Main entry page into EvaP providing all the login options available. The username/password
23 login is thought to be used for internal users, e.g. by connecting to a LDAP directory.
24 The login key mechanism is meant to be used to include external participants, e.g. visiting
25 students or visiting contributors.
26 """
27
28 # parse the form data into the respective form
29 submit_type = request.POST.get("submit_type", "no_submit")
30 new_key_form = NewKeyForm(request.POST if submit_type == "new_key" else None)
31 login_username_form = LoginUsernameForm(request, request.POST if submit_type == "login_username" else None)
32
33 # process form data
34 if request.method == 'POST':
35 if new_key_form.is_valid():
36 # user wants a new login key
37 profile = new_key_form.get_user()
38 profile.ensure_valid_login_key()
39 profile.save()
40
41 EmailTemplate.send_login_url_to_user(new_key_form.get_user())
42
43 messages.success(request, _("We sent you an email with a one-time login URL. Please check your inbox."))
44 return redirect('evaluation:index')
45 elif login_username_form.is_valid():
46 # user would like to login with username and password and passed password test
47 auth.login(request, login_username_form.get_user())
48
49 # clean up our test cookie
50 if request.session.test_cookie_worked():
51 request.session.delete_test_cookie()
52
53 # if not logged in by now, render form
54 if not request.user.is_authenticated:
55 # set test cookie to verify whether they work in the next step
56 request.session.set_test_cookie()
57
58 template_data = dict(new_key_form=new_key_form, login_username_form=login_username_form)
59 return render(request, "index.html", template_data)
60 else:
61 user, __ = UserProfile.objects.get_or_create(username=request.user.username)
62
63 # check for redirect variable
64 redirect_to = request.GET.get("next", None)
65 if redirect_to is not None:
66 return redirect(redirect_to)
67
68 # redirect user to appropriate start page
69 if request.user.is_reviewer:
70 return redirect('staff:semester_view', Semester.active_semester().id)
71 if request.user.is_staff:
72 return redirect('staff:index')
73 elif request.user.is_grade_publisher:
74 return redirect('grades:semester_view', Semester.active_semester().id)
75 elif user.is_student:
76 return redirect('student:index')
77 elif user.is_contributor_or_delegate:
78 return redirect('contributor:index')
79 else:
80 return redirect('results:index')
81
82
83 def faq(request):
84 return render(request, "faq.html", dict(sections=FaqSection.objects.all()))
85
86
87 def legal_notice(request):
88 return render(request, "legal_notice.html", dict())
89
90
91 @require_POST
92 @login_required
93 def contact(request):
94 message = request.POST.get("message")
95 title = request.POST.get("title")
96 subject = "[EvaP] Message from {}".format(request.user.username)
97
98 if message:
99 mail = EmailMessage(
100 subject=subject,
101 body="{}\n{} ({})\n\n{}".format(title, request.user.username, request.user.email, message),
102 to=[settings.CONTACT_EMAIL])
103 try:
104 mail.send()
105 logger.info('Sent contact email: \n{}\n'.format(mail.message()))
106 except Exception:
107 logger.exception('An exception occurred when sending the following contact email:\n{}\n'.format(mail.message()))
108 raise
109
110 return HttpResponse()
111
112
113 @require_POST
114 def set_lang(request):
115 if request.user.is_authenticated:
116 user = request.user
117 user.language = request.POST['language']
118 user.save()
119
120 return set_language(request)
121
[end of evap/evaluation/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py
--- a/evap/evaluation/views.py
+++ b/evap/evaluation/views.py
@@ -4,7 +4,7 @@
from django.contrib import messages, auth
from django.contrib.auth.decorators import login_required
from django.core.mail import EmailMessage
-from django.http import HttpResponse
+from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect, render
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
@@ -103,11 +103,12 @@
try:
mail.send()
logger.info('Sent contact email: \n{}\n'.format(mail.message()))
+ return HttpResponse()
except Exception:
logger.exception('An exception occurred when sending the following contact email:\n{}\n'.format(mail.message()))
raise
- return HttpResponse()
+ return HttpResponseBadRequest()
@require_POST
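
A quick way to exercise the new success/failure contract is a Django test-client call against the contact view. This is a minimal sketch, not taken from the repository's test suite: the URL path "/contact/" and the payload keys are assumptions based on the view code above, and the client is assumed to be logged in (the view is `@login_required`).

```
from django.test import Client

def check_contact_contract(logged_in_client: Client):
    # A non-empty message follows the happy path and now returns 200 ...
    ok = logged_in_client.post("/contact/", {"title": "t", "message": "hi"})
    assert ok.status_code == 200
    # ... while an empty message falls through to HttpResponseBadRequest (400).
    bad = logged_in_client.post("/contact/", {"title": "t", "message": ""})
    assert bad.status_code == 400
```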
| {"golden_diff": "diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py\n--- a/evap/evaluation/views.py\n+++ b/evap/evaluation/views.py\n@@ -4,7 +4,7 @@\n from django.contrib import messages, auth\n from django.contrib.auth.decorators import login_required\n from django.core.mail import EmailMessage\n-from django.http import HttpResponse\n+from django.http import HttpResponse, HttpResponseBadRequest\n from django.shortcuts import redirect, render\n from django.utils.translation import ugettext as _\n from django.views.decorators.http import require_POST\n@@ -103,11 +103,12 @@\n try:\n mail.send()\n logger.info('Sent contact email: \\n{}\\n'.format(mail.message()))\n+ return HttpResponse()\n except Exception:\n logger.exception('An exception occurred when sending the following contact email:\\n{}\\n'.format(mail.message()))\n raise\n \n- return HttpResponse()\n+ return HttpResponseBadRequest()\n \n \n @require_POST\n", "issue": "Contact modal broken in Firefox\nThe contact modal does not work in Firefox, because `event` is undefined. Chrome provides this in global scope, that's why it's working there (see https://stackoverflow.com/questions/18274383/ajax-post-working-in-chrome-but-not-in-firefox).\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages, auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import EmailMessage\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.i18n import set_language\n\nfrom evap.evaluation.forms import NewKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester\n\nlogger = logging.getLogger(__name__)\n\n\n@sensitive_post_parameters(\"password\")\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. The username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_user()\n profile.ensure_valid_login_key()\n profile.save()\n\n EmailTemplate.send_login_url_to_user(new_key_form.get_user())\n\n messages.success(request, _(\"We sent you an email with a one-time login URL. 
Please check your inbox.\"))\n return redirect('evaluation:index')\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth.login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_authenticated:\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n template_data = dict(new_key_form=new_key_form, login_username_form=login_username_form)\n return render(request, \"index.html\", template_data)\n else:\n user, __ = UserProfile.objects.get_or_create(username=request.user.username)\n\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_reviewer:\n return redirect('staff:semester_view', Semester.active_semester().id)\n if request.user.is_staff:\n return redirect('staff:index')\n elif request.user.is_grade_publisher:\n return redirect('grades:semester_view', Semester.active_semester().id)\n elif user.is_student:\n return redirect('student:index')\n elif user.is_contributor_or_delegate:\n return redirect('contributor:index')\n else:\n return redirect('results:index')\n\n\ndef faq(request):\n return render(request, \"faq.html\", dict(sections=FaqSection.objects.all()))\n\n\ndef legal_notice(request):\n return render(request, \"legal_notice.html\", dict())\n\n\n@require_POST\n@login_required\ndef contact(request):\n message = request.POST.get(\"message\")\n title = request.POST.get(\"title\")\n subject = \"[EvaP] Message from {}\".format(request.user.username)\n\n if message:\n mail = EmailMessage(\n subject=subject,\n body=\"{}\\n{} ({})\\n\\n{}\".format(title, request.user.username, request.user.email, message),\n to=[settings.CONTACT_EMAIL])\n try:\n mail.send()\n logger.info('Sent contact email: \\n{}\\n'.format(mail.message()))\n except Exception:\n logger.exception('An exception occurred when sending the following contact email:\\n{}\\n'.format(mail.message()))\n raise\n\n return HttpResponse()\n\n\n@require_POST\ndef set_lang(request):\n if request.user.is_authenticated:\n user = request.user\n user.language = request.POST['language']\n user.save()\n\n return set_language(request)\n", "path": "evap/evaluation/views.py"}]} | 1,783 | 208 |
gh_patches_debug_2976 | rasdani/github-patches | git_diff | qtile__qtile-1644 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't use asyncio event loop in widgets
I am creating a widget that uses asyncio to run some external command (with `asyncio.create_subprocess_exec`). It doesn't work, and raises the `RuntimeError("Cannot add child handler, the child watcher does not have a loop attached")` exception instead.
If my understanding of the code is correct, calling `set_event_loop` after `new_event_loop` should fix this issue, but I'm not sure whether it will cause other problems.
</issue>
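
For context, both the failure and the proposed fix reproduce outside qtile. The sketch below is plain Python on a Unix platform, where asyncio's default child watcher is only attached to a loop via `asyncio.set_event_loop()`:

```
import asyncio

async def run_cmd():
    proc = await asyncio.create_subprocess_exec("true")
    await proc.wait()

loop = asyncio.new_event_loop()
# Without this line the policy's child watcher has no loop attached, and
# create_subprocess_exec raises "Cannot add child handler, the child
# watcher does not have a loop attached" on Unix.
asyncio.set_event_loop(loop)
loop.run_until_complete(run_cmd())
loop.close()
```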
<code>
[start of libqtile/core/session_manager.py]
1 import asyncio
2 import os
3
4 from libqtile import ipc
5 from libqtile.backend import base
6 from libqtile.core.manager import Qtile
7
8
9 class SessionManager:
10 def __init__(
11 self, kore: base.Core, config, *, fname: str = None, no_spawn=False, state=None
12 ) -> None:
13 """Manages a qtile session
14
15 :param kore:
16 The core backend to use for the session.
17 :param config:
18 The configuration to use for the qtile instance.
19 :param fname:
20 The file name to use as the qtile socket file.
21 :param no_spawn:
22 If the instance has already been started, then don't re-run the
23 startup once hook.
24 :param state:
25 The state to restart the qtile instance with.
26 """
27 eventloop = asyncio.new_event_loop()
28
29 self.qtile = Qtile(kore, config, eventloop, no_spawn=no_spawn, state=state)
30
31 if fname is None:
32 # Dots might appear in the host part of the display name
33 # during remote X sessions. Let's strip the host part first
34 display_name = kore.display_name
35 display_number = display_name.partition(":")[2]
36 if "." not in display_number:
37 display_name += ".0"
38 fname = ipc.find_sockfile(display_name)
39
40 if os.path.exists(fname):
41 os.unlink(fname)
42 self.server = ipc.Server(fname, self.qtile.server.call, eventloop)
43
44 def loop(self) -> None:
45 """Run the event loop"""
46 with self.server:
47 self.qtile.loop()
48
[end of libqtile/core/session_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/core/session_manager.py b/libqtile/core/session_manager.py
--- a/libqtile/core/session_manager.py
+++ b/libqtile/core/session_manager.py
@@ -25,6 +25,7 @@
The state to restart the qtile instance with.
"""
eventloop = asyncio.new_event_loop()
+ asyncio.set_event_loop(eventloop)
self.qtile = Qtile(kore, config, eventloop, no_spawn=no_spawn, state=state)
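
The one-line fix works because the Unix event-loop policy's `set_event_loop()` also attaches its child watcher to the new loop; `new_event_loop()` alone leaves the watcher detached, which is exactly the state the reported `RuntimeError` guards against. This reading is based on CPython's `asyncio.unix_events` and is worth re-verifying against the Python versions qtile supports.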
| {"golden_diff": "diff --git a/libqtile/core/session_manager.py b/libqtile/core/session_manager.py\n--- a/libqtile/core/session_manager.py\n+++ b/libqtile/core/session_manager.py\n@@ -25,6 +25,7 @@\n The state to restart the qtile instance with.\n \"\"\"\n eventloop = asyncio.new_event_loop()\n+ asyncio.set_event_loop(eventloop)\n \n self.qtile = Qtile(kore, config, eventloop, no_spawn=no_spawn, state=state)\n", "issue": "Can't use asyncio event loop in widgets\nI am creating a widget that uses asyncio to run some external command (with `asyncio.create_subprocess_exec`). It doesn't work, and raises the `RuntimeError(\"Cannot add child handler, the child watcher does not have a loop attached\")` exception instead.\r\n\r\nIf my understanding of the code is correct, calling `set_event_loop` after `new_event_loop` should fix this issue, but I'm not sure whether it will cause other problems.\n", "before_files": [{"content": "import asyncio\nimport os\n\nfrom libqtile import ipc\nfrom libqtile.backend import base\nfrom libqtile.core.manager import Qtile\n\n\nclass SessionManager:\n def __init__(\n self, kore: base.Core, config, *, fname: str = None, no_spawn=False, state=None\n ) -> None:\n \"\"\"Manages a qtile session\n\n :param kore:\n The core backend to use for the session.\n :param config:\n The configuration to use for the qtile instance.\n :param fname:\n The file name to use as the qtile socket file.\n :param no_spawn:\n If the instance has already been started, then don't re-run the\n startup once hook.\n :param state:\n The state to restart the qtile instance with.\n \"\"\"\n eventloop = asyncio.new_event_loop()\n\n self.qtile = Qtile(kore, config, eventloop, no_spawn=no_spawn, state=state)\n\n if fname is None:\n # Dots might appear in the host part of the display name\n # during remote X sessions. Let's strip the host part first\n display_name = kore.display_name\n display_number = display_name.partition(\":\")[2]\n if \".\" not in display_number:\n display_name += \".0\"\n fname = ipc.find_sockfile(display_name)\n\n if os.path.exists(fname):\n os.unlink(fname)\n self.server = ipc.Server(fname, self.qtile.server.call, eventloop)\n\n def loop(self) -> None:\n \"\"\"Run the event loop\"\"\"\n with self.server:\n self.qtile.loop()\n", "path": "libqtile/core/session_manager.py"}]} | 1,079 | 109 |
gh_patches_debug_17499 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-7183 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hooks: Runtime hook for subprocess block launching standalone cmd
<!--
Welcome to the PyInstaller issue tracker! Before creating an issue, please heed the following:
1. This tracker should only be used to report bugs and request features / enhancements to PyInstaller
- For questions and general support, use the discussions forum.
2. Use the search function before creating a new issue. Duplicates will be closed and directed to
the original discussion.
3. When making a bug report, make sure you provide all required information. The easier it is for
maintainers to reproduce, the faster it'll be fixed.
-->
<!-- +++ ONLY TEXT +++ DO NOT POST IMAGES +++ -->
## Description of the issue
In a windowed build using `PySide2`, `subprocess.Popen(["cmd"])` no longer works (the cmd window opens, then closes immediately). I figured out that the issue comes from the subprocess runtime hook (introduced in v4.8, PR #6364). If I comment out this file, `cmd` starts showing again and stays alive.
### Context information (for bug reports)
* Output of `pyinstaller --version`: ```5.4.1```
* Version of Python: 3.7 / 3.8 / 3.9 / 3.10
* Platform: Windows
* How you installed Python: python.org/downloads
* Did you also try this on another platform? Does it work there? → not relevant on other platforms.
### A minimal example program which shows the error
A cmd window shows up at start; if you comment out the hook it stays alive, and if you don't, the cmd window disappears instantly.
```
import subprocess
import sys
from PySide2 import QtWidgets
class CmdExemple(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
p = subprocess.Popen(["cmd"])
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = CmdExemple()
window.show()
exitCode = app.exec_()
sys.exit(exitCode)
```
</issue>
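
The symptom is reproducible outside PyInstaller, which helps isolate the hook as the cause: the runtime hook effectively rewrites unset pipes to `DEVNULL`, and a `cmd` whose stdin is the NUL device reads EOF and exits immediately. A minimal sketch of that equivalence (plain Python on Windows):

```
import subprocess

# What the application wrote - in a plain console Python this keeps a
# cmd window alive:
subprocess.Popen(["cmd"])

# What the runtime hook turns it into for windowed builds - stdin is the
# NUL device, so cmd reads EOF and terminates at once (the reported bug):
subprocess.Popen(
    ["cmd"],
    stdin=subprocess.DEVNULL,
    stdout=subprocess.DEVNULL,
    stderr=subprocess.DEVNULL,
)
```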
<code>
[start of PyInstaller/hooks/rthooks/pyi_rth_subprocess.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2021-2022, PyInstaller Development Team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: Apache-2.0
10 #-----------------------------------------------------------------------------
11
12 import subprocess
13 import sys
14 import io
15
16
17 class Popen(subprocess.Popen):
18
19 # In windowed mode, force any unused pipes (stdin, stdout and stderr) to be DEVNULL instead of inheriting the
20 # invalid corresponding handles from this parent process.
21 if sys.platform == "win32" and not isinstance(sys.stdout, io.IOBase):
22
23 def _get_handles(self, stdin, stdout, stderr):
24 stdin, stdout, stderr = (subprocess.DEVNULL if pipe is None else pipe for pipe in (stdin, stdout, stderr))
25 return super()._get_handles(stdin, stdout, stderr)
26
27
28 subprocess.Popen = Popen
29
[end of PyInstaller/hooks/rthooks/pyi_rth_subprocess.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/rthooks/pyi_rth_subprocess.py b/PyInstaller/hooks/rthooks/pyi_rth_subprocess.py
deleted file mode 100644
--- a/PyInstaller/hooks/rthooks/pyi_rth_subprocess.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#-----------------------------------------------------------------------------
-# Copyright (c) 2021-2022, PyInstaller Development Team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#
-# SPDX-License-Identifier: Apache-2.0
-#-----------------------------------------------------------------------------
-
-import subprocess
-import sys
-import io
-
-
-class Popen(subprocess.Popen):
-
- # In windowed mode, force any unused pipes (stdin, stdout and stderr) to be DEVNULL instead of inheriting the
- # invalid corresponding handles from this parent process.
- if sys.platform == "win32" and not isinstance(sys.stdout, io.IOBase):
-
- def _get_handles(self, stdin, stdout, stderr):
- stdin, stdout, stderr = (subprocess.DEVNULL if pipe is None else pipe for pipe in (stdin, stdout, stderr))
- return super()._get_handles(stdin, stdout, stderr)
-
-
-subprocess.Popen = Popen
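
Note that the accepted patch resolves the regression by deleting the runtime hook outright rather than narrowing the `DEVNULL` substitution. The trade-off is that windowed builds again inherit the parent's invalid standard handles, which is the problem the hook was introduced to paper over in #6364.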
| {"golden_diff": "diff --git a/PyInstaller/hooks/rthooks/pyi_rth_subprocess.py b/PyInstaller/hooks/rthooks/pyi_rth_subprocess.py\ndeleted file mode 100644\n--- a/PyInstaller/hooks/rthooks/pyi_rth_subprocess.py\n+++ /dev/null\n@@ -1,28 +0,0 @@\n-#-----------------------------------------------------------------------------\n-# Copyright (c) 2021-2022, PyInstaller Development Team.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-#\n-# The full license is in the file COPYING.txt, distributed with this software.\n-#\n-# SPDX-License-Identifier: Apache-2.0\n-#-----------------------------------------------------------------------------\n-\n-import subprocess\n-import sys\n-import io\n-\n-\n-class Popen(subprocess.Popen):\n-\n- # In windowed mode, force any unused pipes (stdin, stdout and stderr) to be DEVNULL instead of inheriting the\n- # invalid corresponding handles from this parent process.\n- if sys.platform == \"win32\" and not isinstance(sys.stdout, io.IOBase):\n-\n- def _get_handles(self, stdin, stdout, stderr):\n- stdin, stdout, stderr = (subprocess.DEVNULL if pipe is None else pipe for pipe in (stdin, stdout, stderr))\n- return super()._get_handles(stdin, stdout, stderr)\n-\n-\n-subprocess.Popen = Popen\n", "issue": "Hooks: Runtime hook for subprocess block launching standalone cmd\n<!--\r\nWelcome to the PyInstaller issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to PyInstaller\r\n - For questions and general support, use the discussions forum.\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a bug report, make sure you provide all required information. The easier it is for\r\n maintainers to reproduce, the faster it'll be fixed.\r\n-->\r\n\r\n<!-- +++ ONLY TEXT +++ DO NOT POST IMAGES +++ -->\r\n\r\n## Description of the issue\r\nIn windowed build using `PySide2`, `subprocess.Popen([\"cmd\"])` no longer work (cmd open then close immediately). I figured out that the issue come from the subprocess hook (since v4.8, pr #6364). If I comment out this file, `cmd` start showing again and stay alive.\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```5.4.1```\r\n* Version of Python: 3.7 / 3.8 / 3.9 / 3.10\r\n* Platform: Windows\r\n* How you installed Python: python.org/downloads\r\n* Did you also try this on another platform? Does it work there? 
\u2192 not relevant on other platform.\r\n\r\n### A minimal example program which shows the error\r\nA cmd shows up at start, if you comment the hook it stays alive, if you don't the cmd disappear instantly.\r\n\r\n```\r\nimport subprocess\r\nimport sys\r\n\r\nfrom PySide2 import QtWidgets\r\n\r\nclass CmdExemple(QtWidgets.QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n p = subprocess.Popen([\"cmd\"])\r\n\r\nif __name__ == \"__main__\":\r\n app = QtWidgets.QApplication(sys.argv)\r\n window = CmdExemple()\r\n window.show()\r\n exitCode = app.exec_()\r\n sys.exit(exitCode)\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2021-2022, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\nimport subprocess\nimport sys\nimport io\n\n\nclass Popen(subprocess.Popen):\n\n # In windowed mode, force any unused pipes (stdin, stdout and stderr) to be DEVNULL instead of inheriting the\n # invalid corresponding handles from this parent process.\n if sys.platform == \"win32\" and not isinstance(sys.stdout, io.IOBase):\n\n def _get_handles(self, stdin, stdout, stderr):\n stdin, stdout, stderr = (subprocess.DEVNULL if pipe is None else pipe for pipe in (stdin, stdout, stderr))\n return super()._get_handles(stdin, stdout, stderr)\n\n\nsubprocess.Popen = Popen\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_subprocess.py"}]} | 1,260 | 323 |
gh_patches_debug_26936 | rasdani/github-patches | git_diff | kartoza__prj.app-435 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
500 on bad certificate number
# Problem
When I try to add a bad certificate number, I get a 500; I should get a 404.
See:
http://staging.changelog.qgis.org/en/qgis/certificate/0246242/
# Proposed Solution
Return a 404
</issue>
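
For reference, Django ships a shortcut that collapses the lookup-plus-404 pattern into one call. The sketch below is one possible shape of the fix, not the accepted patch; the `certificateID` lookup field matches the view code listed beneath:

```
from django.shortcuts import get_object_or_404

def get_object(self, queryset=None):
    # Unknown IDs raise Http404 here instead of letting an unhandled
    # Certificate.DoesNotExist bubble up as a 500.
    return get_object_or_404(Certificate, certificateID=self.kwargs.get("id"))
```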
<code>
[start of django_project/certification/views/certificate.py]
1 # coding=utf-8
2 from django.http import Http404
3 from django.views.generic import CreateView, DetailView
4 from django.core.urlresolvers import reverse
5 from braces.views import LoginRequiredMixin
6 from ..models import Certificate, Course, Attendee
7 from ..forms import CertificateForm
8
9
10 class CertificateMixin(object):
11 """Mixin class to provide standard settings for Certificate."""
12
13 model = Certificate
14 form_class = CertificateForm
15
16
17 class CertificateCreateView(
18 LoginRequiredMixin, CertificateMixin, CreateView):
19 """Create view for Certificate."""
20
21 context_object_name = 'certificate'
22 template_name = 'certificate/create.html'
23
24 def get_success_url(self):
25 """Define the redirect URL.
26
27 After successful creation of the object, the User will be redirected
28 to the Course detail page.
29
30 :returns: URL
31 :rtype: HttpResponse
32 """
33
34 return reverse('course-detail', kwargs={
35 'project_slug': self.project_slug,
36 'organisation_slug': self.organisation_slug,
37 'slug': self.course_slug
38 })
39
40 def get_context_data(self, **kwargs):
41 """Get the context data which is passed to a template.
42
43 :param kwargs: Any arguments to pass to the superclass.
44 :type kwargs: dict
45
46 :returns: Context data which will be passed to the template.
47 :rtype: dict
48 """
49
50 context = super(
51 CertificateCreateView, self).get_context_data(**kwargs)
52 context['course'] = Course.objects.get(slug=self.course_slug)
53 context['attendee'] = Attendee.objects.get(pk=self.pk)
54 return context
55
56 def get_form_kwargs(self):
57 """Get keyword arguments from form.
58
59 :returns keyword argument from the form
60 :rtype: dict
61 """
62
63 kwargs = super(CertificateCreateView, self).get_form_kwargs()
64 self.project_slug = self.kwargs.get('project_slug', None)
65 self.organisation_slug = self.kwargs.get('organisation_slug', None)
66 self.course_slug = self.kwargs.get('course_slug', None)
67 self.pk = self.kwargs.get('pk', None)
68 self.course = Course.objects.get(slug=self.course_slug)
69 self.attendee = Attendee.objects.get(pk=self.pk)
70 kwargs.update({
71 'user': self.request.user,
72 'course': self.course,
73 'attendee': self.attendee,
74 })
75 return kwargs
76
77
78 class CertificateDetailView(DetailView):
79 """Detail view for Certificate."""
80
81 model = Certificate
82 context_object_name = 'certificate'
83 template_name = 'certificate/detail.html'
84
85 def get_context_data(self, **kwargs):
86 """Get the context data which is passed to a template.
87
88 :param kwargs: Any arguments to pass to the superclass.
89 :type kwargs: dict
90
91 :returns: Context data which will be passed to the template.
92 :rtype: dict
93 """
94
95 self.certificateID = self.kwargs.get('id', None)
96 context = super(
97 CertificateDetailView, self).get_context_data(**kwargs)
98 context['certificate'] = \
99 Certificate.objects.get(certificateID=self.certificateID)
100 return context
101
102 def get_queryset(self):
103 """Get the queryset for this view.
104
105 :returns: Queryset which is all certificate in the
106 corresponding organisation.
107 :rtype: QuerySet
108 """
109
110 qs = Certificate.objects.all()
111 return qs
112
113 def get_object(self, queryset=None):
114 """Get the object for this view.
115
116 :param queryset: A query set
117 :type queryset: QuerySet
118
119 :returns: Queryset which is filtered to only show a certificate
120 depends on the input certificate ID.
121 :rtype: QuerySet
122 :raises: Http404
123 """
124
125 if queryset is None:
126 queryset = self.get_queryset()
127 certificateID = self.kwargs.get('id', None)
128 if certificateID:
129 obj = queryset.get(
130 certificateID=certificateID)
131 return obj
132 else:
133 raise Http404('Sorry! Certificate by this ID is not exist.')
134
[end of django_project/certification/views/certificate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/certification/views/certificate.py b/django_project/certification/views/certificate.py
--- a/django_project/certification/views/certificate.py
+++ b/django_project/certification/views/certificate.py
@@ -93,10 +93,15 @@
"""
self.certificateID = self.kwargs.get('id', None)
+ self.project_slug = self.kwargs.get('project_slug', None)
context = super(
CertificateDetailView, self).get_context_data(**kwargs)
- context['certificate'] = \
- Certificate.objects.get(certificateID=self.certificateID)
+ issued_id = \
+ Certificate.objects.all().values_list('certificateID', flat=True)
+ if self.certificateID in issued_id:
+ context['certificate'] = \
+ Certificate.objects.get(certificateID=self.certificateID)
+ context['project_slug'] = self.project_slug
return context
def get_queryset(self):
@@ -126,8 +131,10 @@
queryset = self.get_queryset()
certificateID = self.kwargs.get('id', None)
if certificateID:
- obj = queryset.get(
- certificateID=certificateID)
- return obj
+ try:
+ obj = queryset.get(certificateID=certificateID)
+ return obj
+ except Certificate.DoesNotExist:
+ return None
else:
raise Http404('Sorry! Certificate by this ID is not exist.')
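
Judging from the diff, the accepted patch takes a different route than a hard 404: `get_object()` returns `None` for unknown IDs, and `get_context_data()` only exposes `certificate` when the ID is in the issued list, so the detail template renders without a certificate rather than raising. Whether that should eventually become the 404 the issue asks for is left open.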
| {"golden_diff": "diff --git a/django_project/certification/views/certificate.py b/django_project/certification/views/certificate.py\n--- a/django_project/certification/views/certificate.py\n+++ b/django_project/certification/views/certificate.py\n@@ -93,10 +93,15 @@\n \"\"\"\n \n self.certificateID = self.kwargs.get('id', None)\n+ self.project_slug = self.kwargs.get('project_slug', None)\n context = super(\n CertificateDetailView, self).get_context_data(**kwargs)\n- context['certificate'] = \\\n- Certificate.objects.get(certificateID=self.certificateID)\n+ issued_id = \\\n+ Certificate.objects.all().values_list('certificateID', flat=True)\n+ if self.certificateID in issued_id:\n+ context['certificate'] = \\\n+ Certificate.objects.get(certificateID=self.certificateID)\n+ context['project_slug'] = self.project_slug\n return context\n \n def get_queryset(self):\n@@ -126,8 +131,10 @@\n queryset = self.get_queryset()\n certificateID = self.kwargs.get('id', None)\n if certificateID:\n- obj = queryset.get(\n- certificateID=certificateID)\n- return obj\n+ try:\n+ obj = queryset.get(certificateID=certificateID)\n+ return obj\n+ except Certificate.DoesNotExist:\n+ return None\n else:\n raise Http404('Sorry! Certificate by this ID is not exist.')\n", "issue": "500 on bad certificate number\n# Problem\r\n\r\nWhen I try and add a bad certificate number, then I get a 500, I should get a 404.\r\nSee:\r\nhttp://staging.changelog.qgis.org/en/qgis/certificate/0246242/\r\n\r\n# Proposed Solution\r\n\r\nReturn a 404\n", "before_files": [{"content": "# coding=utf-8\nfrom django.http import Http404\nfrom django.views.generic import CreateView, DetailView\nfrom django.core.urlresolvers import reverse\nfrom braces.views import LoginRequiredMixin\nfrom ..models import Certificate, Course, Attendee\nfrom ..forms import CertificateForm\n\n\nclass CertificateMixin(object):\n \"\"\"Mixin class to provide standard settings for Certificate.\"\"\"\n\n model = Certificate\n form_class = CertificateForm\n\n\nclass CertificateCreateView(\n LoginRequiredMixin, CertificateMixin, CreateView):\n \"\"\"Create view for Certificate.\"\"\"\n\n context_object_name = 'certificate'\n template_name = 'certificate/create.html'\n\n def get_success_url(self):\n \"\"\"Define the redirect URL.\n\n After successful creation of the object, the User will be redirected\n to the Course detail page.\n\n :returns: URL\n :rtype: HttpResponse\n \"\"\"\n\n return reverse('course-detail', kwargs={\n 'project_slug': self.project_slug,\n 'organisation_slug': self.organisation_slug,\n 'slug': self.course_slug\n })\n\n def get_context_data(self, **kwargs):\n \"\"\"Get the context data which is passed to a template.\n\n :param kwargs: Any arguments to pass to the superclass.\n :type kwargs: dict\n\n :returns: Context data which will be passed to the template.\n :rtype: dict\n \"\"\"\n\n context = super(\n CertificateCreateView, self).get_context_data(**kwargs)\n context['course'] = Course.objects.get(slug=self.course_slug)\n context['attendee'] = Attendee.objects.get(pk=self.pk)\n return context\n\n def get_form_kwargs(self):\n \"\"\"Get keyword arguments from form.\n\n :returns keyword argument from the form\n :rtype: dict\n \"\"\"\n\n kwargs = super(CertificateCreateView, self).get_form_kwargs()\n self.project_slug = self.kwargs.get('project_slug', None)\n self.organisation_slug = self.kwargs.get('organisation_slug', None)\n self.course_slug = self.kwargs.get('course_slug', None)\n self.pk = self.kwargs.get('pk', None)\n self.course = 
Course.objects.get(slug=self.course_slug)\n self.attendee = Attendee.objects.get(pk=self.pk)\n kwargs.update({\n 'user': self.request.user,\n 'course': self.course,\n 'attendee': self.attendee,\n })\n return kwargs\n\n\nclass CertificateDetailView(DetailView):\n \"\"\"Detail view for Certificate.\"\"\"\n\n model = Certificate\n context_object_name = 'certificate'\n template_name = 'certificate/detail.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Get the context data which is passed to a template.\n\n :param kwargs: Any arguments to pass to the superclass.\n :type kwargs: dict\n\n :returns: Context data which will be passed to the template.\n :rtype: dict\n \"\"\"\n\n self.certificateID = self.kwargs.get('id', None)\n context = super(\n CertificateDetailView, self).get_context_data(**kwargs)\n context['certificate'] = \\\n Certificate.objects.get(certificateID=self.certificateID)\n return context\n\n def get_queryset(self):\n \"\"\"Get the queryset for this view.\n\n :returns: Queryset which is all certificate in the\n corresponding organisation.\n :rtype: QuerySet\n \"\"\"\n\n qs = Certificate.objects.all()\n return qs\n\n def get_object(self, queryset=None):\n \"\"\"Get the object for this view.\n\n :param queryset: A query set\n :type queryset: QuerySet\n\n :returns: Queryset which is filtered to only show a certificate\n depends on the input certificate ID.\n :rtype: QuerySet\n :raises: Http404\n \"\"\"\n\n if queryset is None:\n queryset = self.get_queryset()\n certificateID = self.kwargs.get('id', None)\n if certificateID:\n obj = queryset.get(\n certificateID=certificateID)\n return obj\n else:\n raise Http404('Sorry! Certificate by this ID is not exist.')\n", "path": "django_project/certification/views/certificate.py"}]} | 1,787 | 324 |
gh_patches_debug_34950 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-2971 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
singleuser server version check spams the logs
**Describe the bug**
We have ~277 active single-user servers in our deployment right now, and on restart of the hub service we see this for each one:
> Mar 4 09:20:45 hub-7bccd48cd5-mp4fk hub [W 2020-03-04 15:20:45.996 JupyterHub _version:56] jupyterhub version 1.2.0dev != jupyterhub-singleuser version 1.1.0. This could cause failure to authenticate and result in redirect loops!
My only complaint is that logging that per server is redundant and spams the logs. Can we just log that once per restart of the hub?
**To Reproduce**
Have the jupyterhub and jupyterhub-singleuser services at different minor versions.
**Expected behavior**
Just log the warning once since there is no user/server specific context in the message.
**Compute Information**
- 1.2.0dev - we're running with a custom build based on b4391d0f796864a5b01167701d95eafce3ad987e so that we can pick up the performance fix for issue #2928.
</issue>
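
The de-duplication the issue asks for only needs a little module-level state. A minimal sketch of the idea, independent of JupyterHub's actual implementation:

```
_logged_mismatches = set()

def warn_version_mismatch(log, hub_version, singleuser_version):
    key = (hub_version, singleuser_version)
    if key in _logged_mismatches:
        return  # already warned for this combination since hub startup
    _logged_mismatches.add(key)
    log.warning(
        "jupyterhub version %s != jupyterhub-singleuser version %s.",
        hub_version,
        singleuser_version,
    )
```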
<code>
[start of jupyterhub/_version.py]
1 """JupyterHub version info"""
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4
5 version_info = (
6 1,
7 2,
8 0,
9 # "", # release (b1, rc1, or "" for final or dev)
10 "dev", # dev or nothing for beta/rc/stable releases
11 )
12
13 # pep 440 version: no dot before beta/rc, but before .dev
14 # 0.1.0rc1
15 # 0.1.0a1
16 # 0.1.0b1.dev
17 # 0.1.0.dev
18
19 __version__ = ".".join(map(str, version_info[:3])) + ".".join(version_info[3:])
20
21
22 def _check_version(hub_version, singleuser_version, log):
23 """Compare Hub and single-user server versions"""
24 if not hub_version:
25 log.warning(
26 "Hub has no version header, which means it is likely < 0.8. Expected %s",
27 __version__,
28 )
29 return
30
31 if not singleuser_version:
32 log.warning(
33 "Single-user server has no version header, which means it is likely < 0.8. Expected %s",
34 __version__,
35 )
36 return
37
38 # compare minor X.Y versions
39 if hub_version != singleuser_version:
40 from distutils.version import LooseVersion as V
41
42 hub_major_minor = V(hub_version).version[:2]
43 singleuser_major_minor = V(singleuser_version).version[:2]
44 extra = ""
45 if singleuser_major_minor == hub_major_minor:
46 # patch-level mismatch or lower, log difference at debug-level
47 # because this should be fine
48 log_method = log.debug
49 else:
50 # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.
51 log_method = log.warning
52 extra = " This could cause failure to authenticate and result in redirect loops!"
53 log_method(
54 "jupyterhub version %s != jupyterhub-singleuser version %s." + extra,
55 hub_version,
56 singleuser_version,
57 )
58 else:
59 log.debug(
60 "jupyterhub and jupyterhub-singleuser both on version %s" % hub_version
61 )
62
[end of jupyterhub/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py
--- a/jupyterhub/_version.py
+++ b/jupyterhub/_version.py
@@ -18,6 +18,15 @@
__version__ = ".".join(map(str, version_info[:3])) + ".".join(version_info[3:])
+# Singleton flag to only log the major/minor mismatch warning once per mismatch combo.
+_version_mismatch_warning_logged = {}
+
+
+def reset_globals():
+ """Used to reset globals between test cases."""
+ global _version_mismatch_warning_logged
+ _version_mismatch_warning_logged = {}
+
def _check_version(hub_version, singleuser_version, log):
"""Compare Hub and single-user server versions"""
@@ -42,19 +51,27 @@
hub_major_minor = V(hub_version).version[:2]
singleuser_major_minor = V(singleuser_version).version[:2]
extra = ""
+ do_log = True
if singleuser_major_minor == hub_major_minor:
# patch-level mismatch or lower, log difference at debug-level
# because this should be fine
log_method = log.debug
else:
# log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.
- log_method = log.warning
- extra = " This could cause failure to authenticate and result in redirect loops!"
- log_method(
- "jupyterhub version %s != jupyterhub-singleuser version %s." + extra,
- hub_version,
- singleuser_version,
- )
+ key = '%s-%s' % (hub_version, singleuser_version)
+ global _version_mismatch_warning_logged
+ if _version_mismatch_warning_logged.get(key):
+ do_log = False # We already logged this warning so don't log it again.
+ else:
+ log_method = log.warning
+ extra = " This could cause failure to authenticate and result in redirect loops!"
+ _version_mismatch_warning_logged[key] = True
+ if do_log:
+ log_method(
+ "jupyterhub version %s != jupyterhub-singleuser version %s." + extra,
+ hub_version,
+ singleuser_version,
+ )
else:
log.debug(
"jupyterhub and jupyterhub-singleuser both on version %s" % hub_version
| {"golden_diff": "diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py\n--- a/jupyterhub/_version.py\n+++ b/jupyterhub/_version.py\n@@ -18,6 +18,15 @@\n \n __version__ = \".\".join(map(str, version_info[:3])) + \".\".join(version_info[3:])\n \n+# Singleton flag to only log the major/minor mismatch warning once per mismatch combo.\n+_version_mismatch_warning_logged = {}\n+\n+\n+def reset_globals():\n+ \"\"\"Used to reset globals between test cases.\"\"\"\n+ global _version_mismatch_warning_logged\n+ _version_mismatch_warning_logged = {}\n+\n \n def _check_version(hub_version, singleuser_version, log):\n \"\"\"Compare Hub and single-user server versions\"\"\"\n@@ -42,19 +51,27 @@\n hub_major_minor = V(hub_version).version[:2]\n singleuser_major_minor = V(singleuser_version).version[:2]\n extra = \"\"\n+ do_log = True\n if singleuser_major_minor == hub_major_minor:\n # patch-level mismatch or lower, log difference at debug-level\n # because this should be fine\n log_method = log.debug\n else:\n # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.\n- log_method = log.warning\n- extra = \" This could cause failure to authenticate and result in redirect loops!\"\n- log_method(\n- \"jupyterhub version %s != jupyterhub-singleuser version %s.\" + extra,\n- hub_version,\n- singleuser_version,\n- )\n+ key = '%s-%s' % (hub_version, singleuser_version)\n+ global _version_mismatch_warning_logged\n+ if _version_mismatch_warning_logged.get(key):\n+ do_log = False # We already logged this warning so don't log it again.\n+ else:\n+ log_method = log.warning\n+ extra = \" This could cause failure to authenticate and result in redirect loops!\"\n+ _version_mismatch_warning_logged[key] = True\n+ if do_log:\n+ log_method(\n+ \"jupyterhub version %s != jupyterhub-singleuser version %s.\" + extra,\n+ hub_version,\n+ singleuser_version,\n+ )\n else:\n log.debug(\n \"jupyterhub and jupyterhub-singleuser both on version %s\" % hub_version\n", "issue": "singleuser server version check spams the logs\n**Describe the bug**\r\n\r\nWe have ~277 active single user servers in our deployment right now and on restart of the hub service we see this for each one:\r\n\r\n> Mar 4 09:20:45 hub-7bccd48cd5-mp4fk hub [W 2020-03-04 15:20:45.996 JupyterHub _version:56] jupyterhub version 1.2.0dev != jupyterhub-singleuser version 1.1.0. This could cause failure to authenticate and result in redirect loops! \r\n\r\nMy only complaint is that logging that per server is redundant and spams the logs. 
Can we just log that once per restart of the hub?\r\n\r\n**To Reproduce**\r\n\r\nHave the jupyterhub and jupyterhub-singleuser services at different minor versions.\r\n\r\n**Expected behavior**\r\n\r\nJust log the warning once since there is no user/server specific context in the message.\r\n\r\n**Compute Information**\r\n - 1.2.0dev - we're running with a custom build based on b4391d0f796864a5b01167701d95eafce3ad987e so that we can pick up the performance fix for issue #2928.\n", "before_files": [{"content": "\"\"\"JupyterHub version info\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nversion_info = (\n 1,\n 2,\n 0,\n # \"\", # release (b1, rc1, or \"\" for final or dev)\n \"dev\", # dev or nothing for beta/rc/stable releases\n)\n\n# pep 440 version: no dot before beta/rc, but before .dev\n# 0.1.0rc1\n# 0.1.0a1\n# 0.1.0b1.dev\n# 0.1.0.dev\n\n__version__ = \".\".join(map(str, version_info[:3])) + \".\".join(version_info[3:])\n\n\ndef _check_version(hub_version, singleuser_version, log):\n \"\"\"Compare Hub and single-user server versions\"\"\"\n if not hub_version:\n log.warning(\n \"Hub has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n if not singleuser_version:\n log.warning(\n \"Single-user server has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n # compare minor X.Y versions\n if hub_version != singleuser_version:\n from distutils.version import LooseVersion as V\n\n hub_major_minor = V(hub_version).version[:2]\n singleuser_major_minor = V(singleuser_version).version[:2]\n extra = \"\"\n if singleuser_major_minor == hub_major_minor:\n # patch-level mismatch or lower, log difference at debug-level\n # because this should be fine\n log_method = log.debug\n else:\n # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.\n log_method = log.warning\n extra = \" This could cause failure to authenticate and result in redirect loops!\"\n log_method(\n \"jupyterhub version %s != jupyterhub-singleuser version %s.\" + extra,\n hub_version,\n singleuser_version,\n )\n else:\n log.debug(\n \"jupyterhub and jupyterhub-singleuser both on version %s\" % hub_version\n )\n", "path": "jupyterhub/_version.py"}]} | 1,449 | 529 |
gh_patches_debug_35171 | rasdani/github-patches | git_diff | awslabs__gluonts-709 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tracking: mxnet 1.6
### To update
- [x] documentation
- [x] README.md
- [x] test-dependencies
### Fix
- [x] https://github.com/awslabs/gluon-ts/issues/583
### Other
- [x] Update `numpy~1.18`
</issue>
<code>
[start of src/gluonts/model/seq2seq/_forking_network.py]
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License").
4 # You may not use this file except in compliance with the License.
5 # A copy of the License is located at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # or in the "license" file accompanying this file. This file is distributed
10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 # express or implied. See the License for the specific language governing
12 # permissions and limitations under the License.
13
14 # Third-party imports
15 from mxnet import gluon, nd
16
17 # First-party imports
18 from gluonts.block.decoder import Seq2SeqDecoder
19 from gluonts.block.enc2dec import Seq2SeqEnc2Dec
20 from gluonts.block.encoder import Seq2SeqEncoder
21 from gluonts.block.quantile_output import QuantileOutput
22 from gluonts.core.component import validated
23 from gluonts.model.common import Tensor
24
25 nd_None = nd.array([])
26
27
28 class ForkingSeq2SeqNetworkBase(gluon.HybridBlock):
29 """
30 Base network for the :class:`ForkingSeq2SeqEstimator`.
31
32 Parameters
33 ----------
34 encoder: Seq2SeqEncoder
35 encoder block
36 enc2dec: Seq2SeqEnc2Dec
37 encoder to decoder mapping block
38 decoder: Seq2SeqDecoder
39 decoder block
40 quantile_output: QuantileOutput
41 quantile output block
42 kwargs: dict
43 dictionary of Gluon HybridBlock parameters
44 """
45
46 @validated()
47 def __init__(
48 self,
49 encoder: Seq2SeqEncoder,
50 enc2dec: Seq2SeqEnc2Dec,
51 decoder: Seq2SeqDecoder,
52 quantile_output: QuantileOutput,
53 **kwargs,
54 ) -> None:
55 super().__init__(**kwargs)
56
57 self.encoder = encoder
58 self.enc2dec = enc2dec
59 self.decoder = decoder
60 self.quantile_output = quantile_output
61
62 with self.name_scope():
63 self.quantile_proj = quantile_output.get_quantile_proj()
64 self.loss = quantile_output.get_loss()
65
66
67 class ForkingSeq2SeqTrainingNetwork(ForkingSeq2SeqNetworkBase):
68 # noinspection PyMethodOverriding
69 def hybrid_forward(
70 self, F, past_target: Tensor, future_target: Tensor
71 ) -> Tensor:
72 """
73 Parameters
74 ----------
75 F: mx.symbol or mx.ndarray
76 Gluon function space
77 past_target: Tensor
78 FIXME
79 future_target: Tensor
80 shape (num_ts, encoder_length, 1) FIXME
81
82 Returns
83 -------
84 loss with shape (FIXME, FIXME)
85 """
86
87 # FIXME: can we factor out a common prefix in the base network?
88 feat_static_real = nd_None
89 past_feat_dynamic_real = nd_None
90 future_feat_dynamic_real = nd_None
91
92 enc_output_static, enc_output_dynamic = self.encoder(
93 past_target, feat_static_real, past_feat_dynamic_real
94 )
95
96 dec_input_static, dec_input_dynamic, _ = self.enc2dec(
97 enc_output_static, enc_output_dynamic, future_feat_dynamic_real
98 )
99
100 dec_output = self.decoder(dec_input_dynamic, dec_input_static)
101 dec_dist_output = self.quantile_proj(dec_output)
102
103 loss = self.loss(future_target, dec_dist_output)
104 return loss.mean(axis=1)
105
106
107 class ForkingSeq2SeqPredictionNetwork(ForkingSeq2SeqNetworkBase):
108 # noinspection PyMethodOverriding
109 def hybrid_forward(self, F, past_target: Tensor) -> Tensor:
110 """
111 Parameters
112 ----------
113 F: mx.symbol or mx.ndarray
114 Gluon function space
115 past_target: Tensor
116 FIXME
117
118 Returns
119 -------
120 prediction tensor with shape (FIXME, FIXME)
121 """
122
123 # FIXME: can we factor out a common prefix in the base network?
124 feat_static_real = nd_None
125 past_feat_dynamic_real = nd_None
126 future_feat_dynamic_real = nd_None
127
128 enc_output_static, enc_output_dynamic = self.encoder(
129 past_target, feat_static_real, past_feat_dynamic_real
130 )
131
132 enc_output_static = (
133 nd_None if enc_output_static is None else enc_output_static
134 )
135
136 dec_inp_static, dec_inp_dynamic, _ = self.enc2dec(
137 enc_output_static, enc_output_dynamic, future_feat_dynamic_real
138 )
139
140 dec_output = self.decoder(dec_inp_dynamic, dec_inp_static)
141 fcst_output = F.slice_axis(dec_output, axis=1, begin=-1, end=None)
142 fcst_output = F.squeeze(fcst_output, axis=1)
143
144 predictions = self.quantile_proj(fcst_output).swapaxes(2, 1)
145 return predictions
146
[end of src/gluonts/model/seq2seq/_forking_network.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/gluonts/model/seq2seq/_forking_network.py b/src/gluonts/model/seq2seq/_forking_network.py
--- a/src/gluonts/model/seq2seq/_forking_network.py
+++ b/src/gluonts/model/seq2seq/_forking_network.py
@@ -12,7 +12,8 @@
# permissions and limitations under the License.
# Third-party imports
-from mxnet import gluon, nd
+import mxnet as mx
+from mxnet import gluon
# First-party imports
from gluonts.block.decoder import Seq2SeqDecoder
@@ -22,8 +23,6 @@
from gluonts.core.component import validated
from gluonts.model.common import Tensor
-nd_None = nd.array([])
-
class ForkingSeq2SeqNetworkBase(gluon.HybridBlock):
"""
@@ -85,9 +84,9 @@
"""
# FIXME: can we factor out a common prefix in the base network?
- feat_static_real = nd_None
- past_feat_dynamic_real = nd_None
- future_feat_dynamic_real = nd_None
+ feat_static_real = F.zeros(shape=(1,))
+ past_feat_dynamic_real = F.zeros(shape=(1,))
+ future_feat_dynamic_real = F.zeros(shape=(1,))
enc_output_static, enc_output_dynamic = self.encoder(
past_target, feat_static_real, past_feat_dynamic_real
@@ -121,16 +120,18 @@
"""
# FIXME: can we factor out a common prefix in the base network?
- feat_static_real = nd_None
- past_feat_dynamic_real = nd_None
- future_feat_dynamic_real = nd_None
+ feat_static_real = F.zeros(shape=(1,))
+ past_feat_dynamic_real = F.zeros(shape=(1,))
+ future_feat_dynamic_real = F.zeros(shape=(1,))
enc_output_static, enc_output_dynamic = self.encoder(
past_target, feat_static_real, past_feat_dynamic_real
)
enc_output_static = (
- nd_None if enc_output_static is None else enc_output_static
+ F.zeros(shape=(1,))
+ if enc_output_static is None
+ else enc_output_static
)
dec_inp_static, dec_inp_dynamic, _ = self.enc2dec(
| {"golden_diff": "diff --git a/src/gluonts/model/seq2seq/_forking_network.py b/src/gluonts/model/seq2seq/_forking_network.py\n--- a/src/gluonts/model/seq2seq/_forking_network.py\n+++ b/src/gluonts/model/seq2seq/_forking_network.py\n@@ -12,7 +12,8 @@\n # permissions and limitations under the License.\n \n # Third-party imports\n-from mxnet import gluon, nd\n+import mxnet as mx\n+from mxnet import gluon\n \n # First-party imports\n from gluonts.block.decoder import Seq2SeqDecoder\n@@ -22,8 +23,6 @@\n from gluonts.core.component import validated\n from gluonts.model.common import Tensor\n \n-nd_None = nd.array([])\n-\n \n class ForkingSeq2SeqNetworkBase(gluon.HybridBlock):\n \"\"\"\n@@ -85,9 +84,9 @@\n \"\"\"\n \n # FIXME: can we factor out a common prefix in the base network?\n- feat_static_real = nd_None\n- past_feat_dynamic_real = nd_None\n- future_feat_dynamic_real = nd_None\n+ feat_static_real = F.zeros(shape=(1,))\n+ past_feat_dynamic_real = F.zeros(shape=(1,))\n+ future_feat_dynamic_real = F.zeros(shape=(1,))\n \n enc_output_static, enc_output_dynamic = self.encoder(\n past_target, feat_static_real, past_feat_dynamic_real\n@@ -121,16 +120,18 @@\n \"\"\"\n \n # FIXME: can we factor out a common prefix in the base network?\n- feat_static_real = nd_None\n- past_feat_dynamic_real = nd_None\n- future_feat_dynamic_real = nd_None\n+ feat_static_real = F.zeros(shape=(1,))\n+ past_feat_dynamic_real = F.zeros(shape=(1,))\n+ future_feat_dynamic_real = F.zeros(shape=(1,))\n \n enc_output_static, enc_output_dynamic = self.encoder(\n past_target, feat_static_real, past_feat_dynamic_real\n )\n \n enc_output_static = (\n- nd_None if enc_output_static is None else enc_output_static\n+ F.zeros(shape=(1,))\n+ if enc_output_static is None\n+ else enc_output_static\n )\n \n dec_inp_static, dec_inp_dynamic, _ = self.enc2dec(\n", "issue": "Tracking: mxnet 1.6\n### To update\r\n\r\n- [x] documentation\r\n- [x] README.md\r\n- [x] test-dependencies\r\n\r\n### Fix\r\n\r\n- [x] https://github.com/awslabs/gluon-ts/issues/583\r\n\r\n### Other\r\n\r\n- [x] Update `numpy~1.18`\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Third-party imports\nfrom mxnet import gluon, nd\n\n# First-party imports\nfrom gluonts.block.decoder import Seq2SeqDecoder\nfrom gluonts.block.enc2dec import Seq2SeqEnc2Dec\nfrom gluonts.block.encoder import Seq2SeqEncoder\nfrom gluonts.block.quantile_output import QuantileOutput\nfrom gluonts.core.component import validated\nfrom gluonts.model.common import Tensor\n\nnd_None = nd.array([])\n\n\nclass ForkingSeq2SeqNetworkBase(gluon.HybridBlock):\n \"\"\"\n Base network for the :class:`ForkingSeq2SeqEstimator`.\n\n Parameters\n ----------\n encoder: Seq2SeqEncoder\n encoder block\n enc2dec: Seq2SeqEnc2Dec\n encoder to decoder mapping block\n decoder: Seq2SeqDecoder\n decoder block\n quantile_output: QuantileOutput\n quantile output block\n kwargs: dict\n dictionary of Gluon HybridBlock parameters\n \"\"\"\n\n @validated()\n def __init__(\n self,\n encoder: Seq2SeqEncoder,\n enc2dec: Seq2SeqEnc2Dec,\n decoder: Seq2SeqDecoder,\n quantile_output: QuantileOutput,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n\n self.encoder = encoder\n self.enc2dec = enc2dec\n self.decoder = decoder\n self.quantile_output = quantile_output\n\n with self.name_scope():\n self.quantile_proj = quantile_output.get_quantile_proj()\n self.loss = quantile_output.get_loss()\n\n\nclass ForkingSeq2SeqTrainingNetwork(ForkingSeq2SeqNetworkBase):\n # noinspection PyMethodOverriding\n def hybrid_forward(\n self, F, past_target: Tensor, future_target: Tensor\n ) -> Tensor:\n \"\"\"\n Parameters\n ----------\n F: mx.symbol or mx.ndarray\n Gluon function space\n past_target: Tensor\n FIXME\n future_target: Tensor\n shape (num_ts, encoder_length, 1) FIXME\n\n Returns\n -------\n loss with shape (FIXME, FIXME)\n \"\"\"\n\n # FIXME: can we factor out a common prefix in the base network?\n feat_static_real = nd_None\n past_feat_dynamic_real = nd_None\n future_feat_dynamic_real = nd_None\n\n enc_output_static, enc_output_dynamic = self.encoder(\n past_target, feat_static_real, past_feat_dynamic_real\n )\n\n dec_input_static, dec_input_dynamic, _ = self.enc2dec(\n enc_output_static, enc_output_dynamic, future_feat_dynamic_real\n )\n\n dec_output = self.decoder(dec_input_dynamic, dec_input_static)\n dec_dist_output = self.quantile_proj(dec_output)\n\n loss = self.loss(future_target, dec_dist_output)\n return loss.mean(axis=1)\n\n\nclass ForkingSeq2SeqPredictionNetwork(ForkingSeq2SeqNetworkBase):\n # noinspection PyMethodOverriding\n def hybrid_forward(self, F, past_target: Tensor) -> Tensor:\n \"\"\"\n Parameters\n ----------\n F: mx.symbol or mx.ndarray\n Gluon function space\n past_target: Tensor\n FIXME\n\n Returns\n -------\n prediction tensor with shape (FIXME, FIXME)\n \"\"\"\n\n # FIXME: can we factor out a common prefix in the base network?\n feat_static_real = nd_None\n past_feat_dynamic_real = nd_None\n future_feat_dynamic_real = nd_None\n\n enc_output_static, enc_output_dynamic = self.encoder(\n past_target, feat_static_real, past_feat_dynamic_real\n )\n\n enc_output_static = (\n nd_None if enc_output_static is None else enc_output_static\n )\n\n dec_inp_static, dec_inp_dynamic, _ = self.enc2dec(\n enc_output_static, enc_output_dynamic, future_feat_dynamic_real\n )\n\n dec_output = self.decoder(dec_inp_dynamic, dec_inp_static)\n fcst_output = F.slice_axis(dec_output, axis=1, begin=-1, end=None)\n fcst_output = F.squeeze(fcst_output, axis=1)\n\n predictions = self.quantile_proj(fcst_output).swapaxes(2, 1)\n 
return predictions\n", "path": "src/gluonts/model/seq2seq/_forking_network.py"}]} | 2,016 | 515 |
gh_patches_debug_35010 | rasdani/github-patches | git_diff | ephios-dev__ephios-884 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve calendar design
As a user, I expect the event calendar view to display the shifts in small boxes with times inside each calendar day (similar to Google Calendar etc.).
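A rough sketch of one way to get there (hedged: this mirrors the template-fragment approach in the patch at the bottom of this record, but the fragment name and context keys are only illustrative):

```python
# Sketch of a ShiftCalendar.formatday() that delegates the day cell to a
# small template fragment, so each shift can be rendered as a compact,
# linked box with its times. Template name and context keys are taken
# from the patch below; the fragment markup itself is an assumption.
from django.template.loader import render_to_string

def formatday(self, day, weekday):  # method of ShiftCalendar
    if day == 0:
        return self.day_cell("noday", "&nbsp;")
    content = render_to_string(
        "core/fragments/calendar_day.html",
        {"day": day, "shifts": self.shifts.get(day)},
    )
    return self.day_cell(self.cssclasses[weekday], content)
```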
</issue>
<code>
[start of ephios/core/calendar.py]
1 from calendar import HTMLCalendar, day_abbr
2 from datetime import date, datetime
3 from itertools import groupby
4
5 from django.utils.formats import date_format
6 from django.utils.translation import gettext as _
7
8
9 class ShiftCalendar(HTMLCalendar):
10 cssclass_month = "table table-fixed"
11
12 def __init__(self, shifts, *args, **kwargs):
13 super().__init__(*args, **kwargs)
14 self.shifts = {
15 k: list(v) for (k, v) in groupby(shifts, lambda shift: shift.start_time.date().day)
16 }
17
18 def formatmonth(self, theyear, themonth, withyear=True):
19 self.year, self.month = theyear, themonth
20 return super().formatmonth(theyear, themonth)
21
22 def formatmonthname(self, theyear, themonth, withyear=True):
23 dt = datetime(theyear, themonth, 1)
24 return f'<tr><th colspan="7" class="month">{date_format(dt, format="b Y")}</th></tr>'
25
26 def formatweekday(self, day):
27 return f'<th class="{self.cssclasses[day]}">{_(day_abbr[day])}</th>'
28
29 def formatday(self, day, weekday):
30 if day != 0:
31 cssclass = self.cssclasses[weekday]
32 if date.today() == date(self.year, self.month, day):
33 cssclass += " calendar-today"
34 if day in self.shifts:
35 cssclass += " filled"
36 body = ["<br />"]
37 for shift in self.shifts[day]:
38 body.append(f'<a href="{shift.event.get_absolute_url()}">')
39 body.append(shift.event.title)
40 body.append("</a><br />")
41 return self.day_cell(cssclass, f"{day} {''.join(body)}")
42 return self.day_cell(cssclass, day)
43 return self.day_cell("noday", " ")
44
45 def day_cell(self, cssclass, body):
46 return f'<td class="calendar-row-height p-1 break-word {cssclass}">{body}</td>'
47
[end of ephios/core/calendar.py]
[start of ephios/extra/colors.py]
1 # inspired by https://jfelix.info/blog/how-to-make-a-text-color-fit-any-background-color
2 from math import sqrt
3
4 from ephios.core.models import EventType
5
6
7 def calculate_luminance(rgb: tuple):
8 r, g, b = map(
9 lambda channel: channel / 255 / 12.92
10 if channel / 255 <= 0.03928
11 else ((channel / 255 + 0.055) / 1.055) ** 2.4,
12 rgb,
13 )
14 return 0.2126 * r + 0.7152 * g + 0.0722 * b
15
16
17 def get_text_color_for_background(background_luminance: int):
18 return "#000000" if background_luminance > sqrt(1.05 * 0.05) - 0.05 else "#ffffff"
19
20
21 def get_eventtype_color_style(eventtype: EventType):
22 luminance = calculate_luminance(
23 (
24 int(eventtype.color[1:3], 16),
25 int(eventtype.color[3:5], 16),
26 int(eventtype.color[5:7], 16),
27 )
28 )
29 text_color = get_text_color_for_background(luminance)
30 return f".badge-{eventtype.pk}-color{{background-color:{eventtype.color};color:{text_color}}}"
31
[end of ephios/extra/colors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ephios/core/calendar.py b/ephios/core/calendar.py
--- a/ephios/core/calendar.py
+++ b/ephios/core/calendar.py
@@ -2,6 +2,7 @@
from datetime import date, datetime
from itertools import groupby
+from django.template.loader import render_to_string
from django.utils.formats import date_format
from django.utils.translation import gettext as _
@@ -24,23 +25,20 @@
return f'<tr><th colspan="7" class="month">{date_format(dt, format="b Y")}</th></tr>'
def formatweekday(self, day):
- return f'<th class="{self.cssclasses[day]}">{_(day_abbr[day])}</th>'
+ return f'<th class="text-center {self.cssclasses[day]}">{_(day_abbr[day])}</th>'
def formatday(self, day, weekday):
if day != 0:
cssclass = self.cssclasses[weekday]
- if date.today() == date(self.year, self.month, day):
- cssclass += " calendar-today"
+ today = date.today() == date(self.year, self.month, day)
if day in self.shifts:
cssclass += " filled"
- body = ["<br />"]
- for shift in self.shifts[day]:
- body.append(f'<a href="{shift.event.get_absolute_url()}">')
- body.append(shift.event.title)
- body.append("</a><br />")
- return self.day_cell(cssclass, f"{day} {''.join(body)}")
- return self.day_cell(cssclass, day)
+ content = render_to_string(
+ "core/fragments/calendar_day.html",
+ {"day": day, "shifts": self.shifts.get(day, None), "today": today},
+ )
+ return self.day_cell(cssclass, content)
return self.day_cell("noday", " ")
def day_cell(self, cssclass, body):
- return f'<td class="calendar-row-height p-1 break-word {cssclass}">{body}</td>'
+ return f'<td class="calendar-row-height p-0 pe-1 p-lg-1 {cssclass}">{body}</td>'
diff --git a/ephios/extra/colors.py b/ephios/extra/colors.py
--- a/ephios/extra/colors.py
+++ b/ephios/extra/colors.py
@@ -27,4 +27,6 @@
)
)
text_color = get_text_color_for_background(luminance)
- return f".badge-{eventtype.pk}-color{{background-color:{eventtype.color};color:{text_color}}}"
+ return (
+ f".eventtype-{eventtype.pk}-color{{background-color:{eventtype.color};color:{text_color}}}"
+ )
| {"golden_diff": "diff --git a/ephios/core/calendar.py b/ephios/core/calendar.py\n--- a/ephios/core/calendar.py\n+++ b/ephios/core/calendar.py\n@@ -2,6 +2,7 @@\n from datetime import date, datetime\n from itertools import groupby\n \n+from django.template.loader import render_to_string\n from django.utils.formats import date_format\n from django.utils.translation import gettext as _\n \n@@ -24,23 +25,20 @@\n return f'<tr><th colspan=\"7\" class=\"month\">{date_format(dt, format=\"b Y\")}</th></tr>'\n \n def formatweekday(self, day):\n- return f'<th class=\"{self.cssclasses[day]}\">{_(day_abbr[day])}</th>'\n+ return f'<th class=\"text-center {self.cssclasses[day]}\">{_(day_abbr[day])}</th>'\n \n def formatday(self, day, weekday):\n if day != 0:\n cssclass = self.cssclasses[weekday]\n- if date.today() == date(self.year, self.month, day):\n- cssclass += \" calendar-today\"\n+ today = date.today() == date(self.year, self.month, day)\n if day in self.shifts:\n cssclass += \" filled\"\n- body = [\"<br />\"]\n- for shift in self.shifts[day]:\n- body.append(f'<a href=\"{shift.event.get_absolute_url()}\">')\n- body.append(shift.event.title)\n- body.append(\"</a><br />\")\n- return self.day_cell(cssclass, f\"{day} {''.join(body)}\")\n- return self.day_cell(cssclass, day)\n+ content = render_to_string(\n+ \"core/fragments/calendar_day.html\",\n+ {\"day\": day, \"shifts\": self.shifts.get(day, None), \"today\": today},\n+ )\n+ return self.day_cell(cssclass, content)\n return self.day_cell(\"noday\", \" \")\n \n def day_cell(self, cssclass, body):\n- return f'<td class=\"calendar-row-height p-1 break-word {cssclass}\">{body}</td>'\n+ return f'<td class=\"calendar-row-height p-0 pe-1 p-lg-1 {cssclass}\">{body}</td>'\ndiff --git a/ephios/extra/colors.py b/ephios/extra/colors.py\n--- a/ephios/extra/colors.py\n+++ b/ephios/extra/colors.py\n@@ -27,4 +27,6 @@\n )\n )\n text_color = get_text_color_for_background(luminance)\n- return f\".badge-{eventtype.pk}-color{{background-color:{eventtype.color};color:{text_color}}}\"\n+ return (\n+ f\".eventtype-{eventtype.pk}-color{{background-color:{eventtype.color};color:{text_color}}}\"\n+ )\n", "issue": "Improve calendar design\nAs a user, I expect the event calendar view to display the shifts in small boxes with times inside of each calendar day (similiar to Google Calendar etc.)\n", "before_files": [{"content": "from calendar import HTMLCalendar, day_abbr\nfrom datetime import date, datetime\nfrom itertools import groupby\n\nfrom django.utils.formats import date_format\nfrom django.utils.translation import gettext as _\n\n\nclass ShiftCalendar(HTMLCalendar):\n cssclass_month = \"table table-fixed\"\n\n def __init__(self, shifts, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.shifts = {\n k: list(v) for (k, v) in groupby(shifts, lambda shift: shift.start_time.date().day)\n }\n\n def formatmonth(self, theyear, themonth, withyear=True):\n self.year, self.month = theyear, themonth\n return super().formatmonth(theyear, themonth)\n\n def formatmonthname(self, theyear, themonth, withyear=True):\n dt = datetime(theyear, themonth, 1)\n return f'<tr><th colspan=\"7\" class=\"month\">{date_format(dt, format=\"b Y\")}</th></tr>'\n\n def formatweekday(self, day):\n return f'<th class=\"{self.cssclasses[day]}\">{_(day_abbr[day])}</th>'\n\n def formatday(self, day, weekday):\n if day != 0:\n cssclass = self.cssclasses[weekday]\n if date.today() == date(self.year, self.month, day):\n cssclass += \" calendar-today\"\n if day in self.shifts:\n cssclass += \" 
filled\"\n body = [\"<br />\"]\n for shift in self.shifts[day]:\n body.append(f'<a href=\"{shift.event.get_absolute_url()}\">')\n body.append(shift.event.title)\n body.append(\"</a><br />\")\n return self.day_cell(cssclass, f\"{day} {''.join(body)}\")\n return self.day_cell(cssclass, day)\n return self.day_cell(\"noday\", \" \")\n\n def day_cell(self, cssclass, body):\n return f'<td class=\"calendar-row-height p-1 break-word {cssclass}\">{body}</td>'\n", "path": "ephios/core/calendar.py"}, {"content": "# inspired by https://jfelix.info/blog/how-to-make-a-text-color-fit-any-background-color\nfrom math import sqrt\n\nfrom ephios.core.models import EventType\n\n\ndef calculate_luminance(rgb: tuple):\n r, g, b = map(\n lambda channel: channel / 255 / 12.92\n if channel / 255 <= 0.03928\n else ((channel / 255 + 0.055) / 1.055) ** 2.4,\n rgb,\n )\n return 0.2126 * r + 0.7152 * g + 0.0722 * b\n\n\ndef get_text_color_for_background(background_luminance: int):\n return \"#000000\" if background_luminance > sqrt(1.05 * 0.05) - 0.05 else \"#ffffff\"\n\n\ndef get_eventtype_color_style(eventtype: EventType):\n luminance = calculate_luminance(\n (\n int(eventtype.color[1:3], 16),\n int(eventtype.color[3:5], 16),\n int(eventtype.color[5:7], 16),\n )\n )\n text_color = get_text_color_for_background(luminance)\n return f\".badge-{eventtype.pk}-color{{background-color:{eventtype.color};color:{text_color}}}\"\n", "path": "ephios/extra/colors.py"}]} | 1,508 | 628 |
gh_patches_debug_15713 | rasdani/github-patches | git_diff | netket__netket-122 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Save and load objects from python
The last main design issue to be solved for v2.0 concerns saving and loading objects from Python.
Pybind11 has some [pickling support](https://pybind11.readthedocs.io/en/stable/advanced/classes.html#pickling-support).
However, the design issue to be addressed is how to serialize objects stored internally as pointers.
Basically, each picklable object needs to define a GetState function, returning a Python tuple of the arguments needed to construct the object.
```c++
py::tuple GetState(const Pickleable &p) {
return py::make_tuple(p.Field1(),p.Field2(),...);
}
```
However, if the `Pickleable` stores a pointer to some abstract object (say Hilbert), then one obviously cannot do:
```c++
py::tuple GetState(const Pickleable &p) {
auto hilbert= p.GetHilbert(); //NO!
return py::make_tuple(p.Field1(),p.Field2(),hilbert);
}
```
Suggestions are welcome.
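One hedged sketch of a possible direction, written here as plain Python rather than pybind11 C++ (all names illustrative): make the state of the pointee part of the owner's state, so only plain data crosses the pickle boundary, never a pointer. (The patch further down in this record ultimately sidesteps pickling and exposes `save()`/`load()` plus a `parameters` property instead.)

```python
# Illustrative Python analogue only; the real objects are C++ classes
# exposed through pybind11, and "Hilbert"/"Machine" here are stand-ins.
class Hilbert:
    def __init__(self, s, graph):
        self.s, self.graph = s, graph

    def __getstate__(self):
        # State = constructor arguments, no pointers.
        return {"s": self.s, "graph": self.graph}

    def __setstate__(self, state):
        self.__init__(state["s"], state["graph"])

class Machine:
    def __init__(self, hilbert, alpha):
        self.hilbert, self.alpha = hilbert, alpha

    def __getstate__(self):
        # Recurse into the owned object instead of pickling its pointer.
        return {"hilbert": self.hilbert.__getstate__(), "alpha": self.alpha}

    def __setstate__(self, state):
        h = Hilbert.__new__(Hilbert)
        h.__setstate__(state["hilbert"])
        self.__init__(h, state["alpha"])
```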
</issue>
<code>
[start of Tutorials/PyNetKet/machine.py]
1 # Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import print_function
16 import netket as nk
17 import networkx as nx
18 import numpy as np
19 from mpi4py import MPI
20 import scipy.sparse as sparse
21
22 #Constructing a 1d lattice
23 g = nk.graph.Hypercube(L=4, ndim=1)
24
25 # Hilbert space of spins from given graph
26 hi = nk.hilbert.Spin(s=0.5, graph=g)
27
28 #Hamiltonian
29 ha = nk.operator.Ising(h=1.0, hilbert=hi)
30
31 #Machine
32 ma = nk.machine.RbmSpin(hilbert=hi, alpha=1)
33 ma.InitRandomPars(seed=1234, sigma=0.1)
34 print(ma.GetParameters())
35
36 #Layer
37 a = np.ones(3, dtype=complex)
38 b = np.zeros(3, dtype=complex)
39 act = nk.activation.Tanh()
40
41 act(a, b)
42 print(b)
43
[end of Tutorials/PyNetKet/machine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Tutorials/PyNetKet/machine.py b/Tutorials/PyNetKet/machine.py
--- a/Tutorials/PyNetKet/machine.py
+++ b/Tutorials/PyNetKet/machine.py
@@ -19,24 +19,20 @@
from mpi4py import MPI
import scipy.sparse as sparse
-#Constructing a 1d lattice
-g = nk.graph.Hypercube(L=4, ndim=1)
+# Constructing a 1d lattice
+g = nk.graph.Hypercube(length=4, n_dim=1)
# Hilbert space of spins from given graph
hi = nk.hilbert.Spin(s=0.5, graph=g)
-#Hamiltonian
+# Hamiltonian
ha = nk.operator.Ising(h=1.0, hilbert=hi)
-#Machine
+# Machine
ma = nk.machine.RbmSpin(hilbert=hi, alpha=1)
-ma.InitRandomPars(seed=1234, sigma=0.1)
-print(ma.GetParameters())
+ma.init_random_parameters(seed=1234, sigma=0.1)
-#Layer
-a = np.ones(3, dtype=complex)
-b = np.zeros(3, dtype=complex)
-act = nk.activation.Tanh()
-
-act(a, b)
-print(b)
+ma.save("test.wf")
+ma.parameters = np.zeros(ma.n_par)
+ma.load("test.wf")
+print(ma.parameters)
| {"golden_diff": "diff --git a/Tutorials/PyNetKet/machine.py b/Tutorials/PyNetKet/machine.py\n--- a/Tutorials/PyNetKet/machine.py\n+++ b/Tutorials/PyNetKet/machine.py\n@@ -19,24 +19,20 @@\n from mpi4py import MPI\n import scipy.sparse as sparse\n \n-#Constructing a 1d lattice\n-g = nk.graph.Hypercube(L=4, ndim=1)\n+# Constructing a 1d lattice\n+g = nk.graph.Hypercube(length=4, n_dim=1)\n \n # Hilbert space of spins from given graph\n hi = nk.hilbert.Spin(s=0.5, graph=g)\n \n-#Hamiltonian\n+# Hamiltonian\n ha = nk.operator.Ising(h=1.0, hilbert=hi)\n \n-#Machine\n+# Machine\n ma = nk.machine.RbmSpin(hilbert=hi, alpha=1)\n-ma.InitRandomPars(seed=1234, sigma=0.1)\n-print(ma.GetParameters())\n+ma.init_random_parameters(seed=1234, sigma=0.1)\n \n-#Layer\n-a = np.ones(3, dtype=complex)\n-b = np.zeros(3, dtype=complex)\n-act = nk.activation.Tanh()\n-\n-act(a, b)\n-print(b)\n+ma.save(\"test.wf\")\n+ma.parameters = np.zeros(ma.n_par)\n+ma.load(\"test.wf\")\n+print(ma.parameters)\n", "issue": "Save and load objects from python\nThe last main design issue to be solved for v2.0 concerns saving and loading objects from python.\r\n\r\nPybind11 has some [pickling support](https://pybind11.readthedocs.io/en/stable/advanced/classes.html#pickling-support).\r\n\r\nHowever, the design issue to be addressed is how to serialize objects stored internally as pointers.\r\nBasically, each pickable object needs to define a GetState function, returning a python tuple of the arguments needed to construct the object. \r\n\r\n```c++\r\npy::tuple GetState(const Pickleable &p) { \r\n return py::make_tuple(p.Field1(),p.Field2(),...);\r\n}\r\n```\r\nHowever, if the `Pickeable` stores a pointer to some abstract object (say Hilbert), then one obviously cannot do: \r\n```c++\r\npy::tuple GetState(const Pickleable &p) { \r\n auto hilbert= p.GetHilbert(); //NO! \r\n return py::make_tuple(p.Field1(),p.Field2(),hilbert);\r\n}\r\n```\r\n\r\nSuggestions are welcome. \n", "before_files": [{"content": "# Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport netket as nk\nimport networkx as nx\nimport numpy as np\nfrom mpi4py import MPI\nimport scipy.sparse as sparse\n\n#Constructing a 1d lattice\ng = nk.graph.Hypercube(L=4, ndim=1)\n\n# Hilbert space of spins from given graph\nhi = nk.hilbert.Spin(s=0.5, graph=g)\n\n#Hamiltonian\nha = nk.operator.Ising(h=1.0, hilbert=hi)\n\n#Machine\nma = nk.machine.RbmSpin(hilbert=hi, alpha=1)\nma.InitRandomPars(seed=1234, sigma=0.1)\nprint(ma.GetParameters())\n\n#Layer\na = np.ones(3, dtype=complex)\nb = np.zeros(3, dtype=complex)\nact = nk.activation.Tanh()\n\nact(a, b)\nprint(b)\n", "path": "Tutorials/PyNetKet/machine.py"}]} | 1,195 | 333 |
gh_patches_debug_16817 | rasdani/github-patches | git_diff | microsoft__ptvsd-1992 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using Ctrl+C causes launcher to print traceback
## Environment data
- PTVSD version: master
- OS and version: linux
- Python version (& distribution if applicable, e.g. Anaconda): 3.7
- Using VS Code or Visual Studio: VSC
## Actual behavior
```console
Traceback (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/kanadig/GIT/ptvsd/src/ptvsd/launcher/__main__.py", line 74, in <module>
main()
File "/home/kanadig/GIT/ptvsd/src/ptvsd/launcher/__main__.py", line 37, in main
adapter.channel.wait()
File "/home/kanadig/GIT/ptvsd/src/ptvsd/launcher/../../ptvsd/common/messaging.py", line 1231, in wait
parser_thread.join()
File "/usr/lib/python3.7/threading.py", line 1032, in join
self._wait_for_tstate_lock()
File "/usr/lib/python3.7/threading.py", line 1048, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
KeyboardInterrupt
```
## Expected behavior
This is the normal way to shut down the servers. The launcher should not print an exception.
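A minimal sketch of what that could look like in the launcher (this matches the direction of the diff further down in this record, which ignores SIGINT so only the debuggee reacts to Ctrl+C):

```python
import signal

# Let the debuggee decide what Ctrl+C means; if it exits, the launcher
# exits too, so the launcher itself never needs to observe SIGINT.
signal.signal(signal.SIGINT, signal.SIG_IGN)
```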
</issue>
<code>
[start of src/ptvsd/launcher/__main__.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 __all__ = ["main"]
8
9 import locale
10 import os
11 import sys
12
13 # WARNING: ptvsd and submodules must not be imported on top level in this module,
14 # and should be imported locally inside main() instead.
15
16 # Force absolute path on Python 2.
17 __file__ = os.path.abspath(__file__)
18
19
20 def main():
21 from ptvsd.common import log
22 from ptvsd import launcher
23 from ptvsd.launcher import debuggee
24
25 log.to_file(prefix="ptvsd.launcher")
26 log.describe_environment("ptvsd.launcher startup environment:")
27
28 def option(name, type, *args):
29 try:
30 return type(os.environ.pop(name, *args))
31 except Exception:
32 raise log.exception("Error parsing {0!r}:", name)
33
34 launcher_port = option("PTVSD_LAUNCHER_PORT", int)
35
36 launcher.connect(launcher_port)
37 launcher.channel.wait()
38
39 if debuggee.process is not None:
40 sys.exit(debuggee.process.returncode)
41
42
43 if __name__ == "__main__":
44 # ptvsd can also be invoked directly rather than via -m. In this case, the first
45 # entry on sys.path is the one added automatically by Python for the directory
46 # containing this file. This means that import ptvsd will not work, since we need
47 # the parent directory of ptvsd/ to be in sys.path, rather than ptvsd/launcher/.
48 #
49 # The other issue is that many other absolute imports will break, because they
50 # will be resolved relative to ptvsd/launcher/ - e.g. `import state` will then try
51 # to import ptvsd/launcher/state.py.
52 #
53 # To fix both, we need to replace the automatically added entry such that it points
54 # at parent directory of ptvsd/ instead of ptvsd/launcher, import ptvsd with that
55 # in sys.path, and then remove the first entry entry altogether, so that it doesn't
56 # affect any further imports we might do. For example, suppose the user did:
57 #
58 # python /foo/bar/ptvsd/launcher ...
59 #
60 # At the beginning of this script, sys.path will contain "/foo/bar/ptvsd/launcher"
61 # as the first entry. What we want is to replace it with "/foo/bar', then import
62 # ptvsd with that in effect, and then remove the replaced entry before any more
63 # code runs. The imported ptvsd module will remain in sys.modules, and thus all
64 # future imports of it or its submodules will resolve accordingly.
65 if "ptvsd" not in sys.modules:
66 # Do not use dirname() to walk up - this can be a relative path, e.g. ".".
67 sys.path[0] = sys.path[0] + "/../../"
68 __import__("ptvsd")
69 del sys.path[0]
70
71 # Load locale settings.
72 locale.setlocale(locale.LC_ALL, "")
73
74 main()
75
[end of src/ptvsd/launcher/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/ptvsd/launcher/__main__.py b/src/ptvsd/launcher/__main__.py
--- a/src/ptvsd/launcher/__main__.py
+++ b/src/ptvsd/launcher/__main__.py
@@ -8,6 +8,7 @@
import locale
import os
+import signal
import sys
# WARNING: ptvsd and submodules must not be imported on top level in this module,
@@ -25,6 +26,11 @@
log.to_file(prefix="ptvsd.launcher")
log.describe_environment("ptvsd.launcher startup environment:")
+ # Disable exceptions on Ctrl+C - we want to allow the debuggee process to handle
+ # these, or not, as it sees fit. If the debuggee exits on Ctrl+C, the launcher
+ # will also exit, so it doesn't need to observe the signal directly.
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
def option(name, type, *args):
try:
return type(os.environ.pop(name, *args))
| {"golden_diff": "diff --git a/src/ptvsd/launcher/__main__.py b/src/ptvsd/launcher/__main__.py\n--- a/src/ptvsd/launcher/__main__.py\n+++ b/src/ptvsd/launcher/__main__.py\n@@ -8,6 +8,7 @@\n \n import locale\n import os\n+import signal\n import sys\n \n # WARNING: ptvsd and submodules must not be imported on top level in this module,\n@@ -25,6 +26,11 @@\n log.to_file(prefix=\"ptvsd.launcher\")\n log.describe_environment(\"ptvsd.launcher startup environment:\")\n \n+ # Disable exceptions on Ctrl+C - we want to allow the debuggee process to handle\n+ # these, or not, as it sees fit. If the debuggee exits on Ctrl+C, the launcher\n+ # will also exit, so it doesn't need to observe the signal directly.\n+ signal.signal(signal.SIGINT, signal.SIG_IGN)\n+\n def option(name, type, *args):\n try:\n return type(os.environ.pop(name, *args))\n", "issue": "Using Ctrl+C causes launcher to print traceback\n## Environment data\r\n\r\n- PTVSD version: master\r\n- OS and version: linux\r\n- Python version (& distribution if applicable, e.g. Anaconda): 3.7\r\n- Using VS Code or Visual Studio: VSC\r\n\r\n## Actual behavior\r\n\r\n```console\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib/python3.7/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/kanadig/GIT/ptvsd/src/ptvsd/launcher/__main__.py\", line 74, in <module>\r\n main()\r\n File \"/home/kanadig/GIT/ptvsd/src/ptvsd/launcher/__main__.py\", line 37, in main\r\n adapter.channel.wait()\r\n File \"/home/kanadig/GIT/ptvsd/src/ptvsd/launcher/../../ptvsd/common/messaging.py\", line 1231, in wait\r\n parser_thread.join()\r\n File \"/usr/lib/python3.7/threading.py\", line 1032, in join\r\n self._wait_for_tstate_lock()\r\n File \"/usr/lib/python3.7/threading.py\", line 1048, in _wait_for_tstate_lock\r\n elif lock.acquire(block, timeout):\r\nKeyboardInterrupt\r\n```\r\n\r\n## Expected behavior\r\n\r\nThis is normal way to shutdown the servers. Show not print exception in launcher.\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__all__ = [\"main\"]\n\nimport locale\nimport os\nimport sys\n\n# WARNING: ptvsd and submodules must not be imported on top level in this module,\n# and should be imported locally inside main() instead.\n\n# Force absolute path on Python 2.\n__file__ = os.path.abspath(__file__)\n\n\ndef main():\n from ptvsd.common import log\n from ptvsd import launcher\n from ptvsd.launcher import debuggee\n\n log.to_file(prefix=\"ptvsd.launcher\")\n log.describe_environment(\"ptvsd.launcher startup environment:\")\n\n def option(name, type, *args):\n try:\n return type(os.environ.pop(name, *args))\n except Exception:\n raise log.exception(\"Error parsing {0!r}:\", name)\n\n launcher_port = option(\"PTVSD_LAUNCHER_PORT\", int)\n\n launcher.connect(launcher_port)\n launcher.channel.wait()\n\n if debuggee.process is not None:\n sys.exit(debuggee.process.returncode)\n\n\nif __name__ == \"__main__\":\n # ptvsd can also be invoked directly rather than via -m. In this case, the first\n # entry on sys.path is the one added automatically by Python for the directory\n # containing this file. 
This means that import ptvsd will not work, since we need\n # the parent directory of ptvsd/ to be in sys.path, rather than ptvsd/launcher/.\n #\n # The other issue is that many other absolute imports will break, because they\n # will be resolved relative to ptvsd/launcher/ - e.g. `import state` will then try\n # to import ptvsd/launcher/state.py.\n #\n # To fix both, we need to replace the automatically added entry such that it points\n # at parent directory of ptvsd/ instead of ptvsd/launcher, import ptvsd with that\n # in sys.path, and then remove the first entry entry altogether, so that it doesn't\n # affect any further imports we might do. For example, suppose the user did:\n #\n # python /foo/bar/ptvsd/launcher ...\n #\n # At the beginning of this script, sys.path will contain \"/foo/bar/ptvsd/launcher\"\n # as the first entry. What we want is to replace it with \"/foo/bar', then import\n # ptvsd with that in effect, and then remove the replaced entry before any more\n # code runs. The imported ptvsd module will remain in sys.modules, and thus all\n # future imports of it or its submodules will resolve accordingly.\n if \"ptvsd\" not in sys.modules:\n # Do not use dirname() to walk up - this can be a relative path, e.g. \".\".\n sys.path[0] = sys.path[0] + \"/../../\"\n __import__(\"ptvsd\")\n del sys.path[0]\n\n # Load locale settings.\n locale.setlocale(locale.LC_ALL, \"\")\n\n main()\n", "path": "src/ptvsd/launcher/__main__.py"}]} | 1,757 | 241 |
gh_patches_debug_58138 | rasdani/github-patches | git_diff | gammapy__gammapy-3911 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
string representation of various Maker classes may cause a TypeError
This was tested against Gammapy 0.19 and the development version.
In some cases, printing (or using the string representation in another way) of an instance of a `gammapy.makers.Maker` subclass may cause a TypeError.
An example directly from the tutorials (introduction, low-level analysis):
```
from astropy import units
from regions import CircleSkyRegion
from gammapy.makers import FoVBackgroundMaker
from astropy.coordinates import SkyCoord
from gammapy.maps import WcsGeom, MapAxis
energy_axis = MapAxis.from_energy_bounds(1.0, 10.0, 4, unit="TeV")
geom = WcsGeom.create(
skydir=(83.633, 22.014),
binsz=0.02,
width=(2, 2),
frame="icrs",
proj="CAR",
axes=[],
)
circle = CircleSkyRegion(
center=SkyCoord("83.63 deg", "22.14 deg"), radius=0.2 * units.deg
)
exclusion_mask = ~geom.region_mask(regions=[circle])
maker_fov = FoVBackgroundMaker(method="fit", exclusion_mask=exclusion_mask)
str(maker_fov)
```
will cause a
```
TypeError: Cannot parse "not available" as a Quantity. It does not start with a number.
```
(full traceback at the bottom).
The reason is in the `__str__` implementation of the `gammapy.makers.Maker` abstract class:
```
def __str__(self):
# <snip>
for name in names:
value = getattr(self, name, "not available")
if value == "not available":
continue
else:
s += f"\t{name:{max_len}s}: {value}\n"
return s.expandtabs(tabsize=2)
```
When an attribute is not found, it is set to the string "not available". Otherwise, the attribute's value is retrieved.
The resulting value is then compared to the string "not available" to determine whether it is an existing attribute. But some classes can't be compared to a string. In this particular case, comparing an instance of `WcsNDMap` to a string fails, resulting (indirectly) in the `TypeError`.
Perhaps the most Pythonic solution is to have `WcsNDMap` handle comparisons with any arbitrary type, and return `False`. This is what Python does: `1 == "abc"` is valid and returns `False`; similarly for e.g. `1 == ["abc", 5.5]`.
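A sketch of that first option (hypothetical, not the actual gammapy code): returning `NotImplemented` for foreign types makes `==` fall back to the default comparison, which yields `False` instead of raising.

```python
import numpy as np

class Map:
    def __eq__(self, other):
        if not isinstance(other, Map):
            # Python then tries the reflected comparison; if that also
            # returns NotImplemented, == falls back to identity -> False.
            return NotImplemented
        # _arithmetics is the existing gammapy helper visible in the
        # traceback below; shown here only to anchor the sketch.
        return self._arithmetics(np.equal, other, copy=True)
```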
Perhaps easier, and in my opinion semantically better, is to use
```
value = getattr(self, name, None)
if value is None:
continue
s += f"\t{name:{max_len}s}: {value}\n"
```
since `None` signifies the optional type in Python.
Though even better, in my opinion, is to simply use
```
for name in names:
try:
value = getattr(self, name)
except AttributeError:
continue
s += f"\t{name:{max_len}s}: {value}\n"
```
After all, this is what exceptions are for. (People sometimes mention speed reasons if the lookup fails a lot of times, but I don't think that's relevant here for a `__str__` implementation.)
I would even simply use `self.name`, but that'll fail because `Maker` is an abstract class, so more dynamic retrieval of attributes is required. I assume this is why it's implemented in its current way.
-----
Full traceback:
```
Traceback (most recent call last):
File "/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/astropy/units/quantity.py", line 333, in __new__
value = float(v.group())
AttributeError: 'NoneType' object has no attribute 'group'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "gammapy_maker_str.py", line 22, in <module>
str(maker_fov)
File "/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/makers/core.py", line 31, in __str__
if value == "not available":
File "/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/maps/core.py", line 1612, in __eq__
return self._arithmetics(np.equal, other, copy=True)
File "/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/maps/core.py", line 1552, in _arithmetics
q = u.Quantity(other, copy=False)
File "/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/astropy/units/quantity.py", line 338, in __new__
.format(value, cls.__name__))
TypeError: Cannot parse "not available" as a Quantity. It does not start with a number.
```
</issue>
<code>
[start of gammapy/makers/core.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 import abc
3 import numpy as np
4
5 __all__ = ["Maker"]
6
7
8 class Maker(abc.ABC):
9 """Abstract maker base class."""
10
11 @property
12 @abc.abstractmethod
13 def tag(self):
14 pass
15
16 @abc.abstractmethod
17 def run(self):
18 pass
19
20 def __str__(self):
21 s = f"{self.__class__.__name__}\n"
22 s += "-" * (len(s) - 1) + "\n\n"
23
24 names = self.__init__.__code__.co_varnames
25
26 max_len = np.max([len(_) for _ in names]) + 1
27
28 for name in names:
29 value = getattr(self, name, "not available")
30
31 if value == "not available":
32 continue
33 else:
34                 s += f"\t{name:{max_len}s}: {value}\n"
35
36 return s.expandtabs(tabsize=2)
37
[end of gammapy/makers/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gammapy/makers/core.py b/gammapy/makers/core.py
--- a/gammapy/makers/core.py
+++ b/gammapy/makers/core.py
@@ -26,9 +26,9 @@
max_len = np.max([len(_) for _ in names]) + 1
for name in names:
- value = getattr(self, name, "not available")
+ value = getattr(self, name, None)
- if value == "not available":
+ if value is None:
continue
else:
s += f"\t{name:{max_len}s}: {value}\n"
| {"golden_diff": "diff --git a/gammapy/makers/core.py b/gammapy/makers/core.py\n--- a/gammapy/makers/core.py\n+++ b/gammapy/makers/core.py\n@@ -26,9 +26,9 @@\n max_len = np.max([len(_) for _ in names]) + 1\n \n for name in names:\n- value = getattr(self, name, \"not available\")\n+ value = getattr(self, name, None)\n \n- if value == \"not available\":\n+ if value is None:\n continue\n else:\n s += f\"\\t{name:{max_len}s}: {value}\\n\"\n", "issue": "string representation of various Maker classes may cause a TypeError\nThis was tested against Gammapy 0.19 and the development version.\r\n\r\nIn some cases, printing (or using the string representation in another way) of an instance of a `gammapy.makers.Maker` subclass may cause a TypeError.\r\n\r\nAn example directly from the tutorials (introduction, low-level analysis):\r\n```\r\nfrom astropy import units\r\nfrom regions import CircleSkyRegion\r\nfrom gammapy.makers import FoVBackgroundMaker\r\nfrom astropy.coordinates import SkyCoord\r\nfrom gammapy.maps import WcsGeom, MapAxis\r\n\r\nenergy_axis = MapAxis.from_energy_bounds(1.0, 10.0, 4, unit=\"TeV\")\r\ngeom = WcsGeom.create(\r\n skydir=(83.633, 22.014),\r\n binsz=0.02,\r\n width=(2, 2),\r\n frame=\"icrs\",\r\n proj=\"CAR\",\r\n axes=[],\r\n)\r\ncircle = CircleSkyRegion(\r\n center=SkyCoord(\"83.63 deg\", \"22.14 deg\"), radius=0.2 * units.deg\r\n)\r\nexclusion_mask = ~geom.region_mask(regions=[circle])\r\nmaker_fov = FoVBackgroundMaker(method=\"fit\", exclusion_mask=exclusion_mask)\r\nstr(maker_fov)\r\n```\r\n\r\nwill cause a \r\n```\r\nTypeError: Cannot parse \"not available\" as a Quantity. It does not start with a number.\r\n```\r\n(full traceback at the bottom).\r\n\r\nThe reason is in the `__str__` implementation of the `gammapy.makers.Maker` abstract class:\r\n```\r\n def __str__(self):\r\n # <snip>\r\n for name in names:\r\n value = getattr(self, name, \"not available\")\r\n\r\n if value == \"not available\":\r\n continue\r\n else:\r\n s += f\"\\t{name:{max_len}s}: {value}\\n\"\r\n\r\n return s.expandtabs(tabsize=2)\r\n```\r\n\r\nWhen an attribute is not found, it is set to the string \"not available\". Otherwise, the attribute's value is retrieved.\r\nThe resulting value is then compared to the string \"not available\" to determine whether it is an existing attribute. But some classes can't compare to string. In this particular case, comparing an instance of `WcsNDMap` fails this comparison, resulting (indirectly) in the `TypeError`.\r\n\r\nPerhaps the most Pythonic solution is to have `WcsNDMap` handle comparisons with any arbirtrary type, and return `False`. This is what Python does: `1 == \"abc\"` is valid and returns `False`; similar for e.g. `1 = [\"abc\", 5.5]`.\r\n\r\nPerhaps easier, and in my opinion semantically better, is to use\r\n```\r\nvalue = getattr(self, name, None)\r\nif value is None:\r\n continue\r\ns += f\"\\t{name:{max_len}s}: {value}\\n\"\r\n```\r\nsince `None` signifies the optional type in Python.\r\n\r\nThough even better, in my opinion, is to simply use\r\n```\r\nfor name in names:\r\n try:\r\n value = getattr(self, name)\r\n except AttributeError:\r\n continue\r\n s += f\"\\t{name:{max_len}s}: {value}\\n\" \r\n```\r\nAfter all, this is what exceptions are for. 
(People sometimes mention speed reasons if the lookup fails a lot of times, but I don't think that's relevant here for a `__str__` implementation.)\r\n\r\nI would even simply use `self.name`, but that'll fail because `Maker` is an abstract class, so more dynamic retrieval of attributes is required. I assume this is why it's implemented in its current way.\r\n\r\n\r\n-----\r\n\r\nFull traceback:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/astropy/units/quantity.py\", line 333, in __new__\r\n value = float(v.group())\r\nAttributeError: 'NoneType' object has no attribute 'group'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"gammapy_maker_str.py\", line 22, in <module>\r\n str(maker_fov)\r\n File \"/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/makers/core.py\", line 31, in __str__\r\n if value == \"not available\":\r\n File \"/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/maps/core.py\", line 1612, in __eq__\r\n return self._arithmetics(np.equal, other, copy=True)\r\n File \"/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/gammapy/maps/core.py\", line 1552, in _arithmetics\r\n q = u.Quantity(other, copy=False)\r\n File \"/home/user/.miniconda3/envs/gammapy-dev/lib/python3.7/site-packages/astropy/units/quantity.py\", line 338, in __new__\r\n .format(value, cls.__name__))\r\nTypeError: Cannot parse \"not available\" as a Quantity. It does not start with a number.\r\n```\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport abc\nimport numpy as np\n\n__all__ = [\"Maker\"]\n\n\nclass Maker(abc.ABC):\n \"\"\"Abstract maker base class.\"\"\"\n\n @property\n @abc.abstractmethod\n def tag(self):\n pass\n\n @abc.abstractmethod\n def run(self):\n pass\n\n def __str__(self):\n s = f\"{self.__class__.__name__}\\n\"\n s += \"-\" * (len(s) - 1) + \"\\n\\n\"\n\n names = self.__init__.__code__.co_varnames\n\n max_len = np.max([len(_) for _ in names]) + 1\n\n for name in names:\n value = getattr(self, name, \"not available\")\n\n if value == \"not available\":\n continue\n else:\n s += f\"\\t{name:{max_len}s}: {value}\\n\"\n\n return s.expandtabs(tabsize=2)\n", "path": "gammapy/makers/core.py"}]} | 1,972 | 145 |
gh_patches_debug_30680 | rasdani/github-patches | git_diff | Kinto__kinto-1941 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Optimize use of jsonschema.validate()
It could be interesting to investigate whether the fastjsonschema lib would give us better performance :)
I marked this as easy-pick since everything happens in `schema_validation.py`
https://github.com/horejsek/python-fastjsonschema/
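For reference, a hedged sketch of what that could look like (assuming fastjsonschema's `compile()` API; the cache-key choice and error mapping are illustrative only):

```python
import fastjsonschema

_validator_cache = {}  # serialized schema -> compiled validator

def fast_validate(data, schema):
    key = str(schema)  # cheap, non-canonical cache key
    if key not in _validator_cache:
        # Compilation is the expensive step, so do it once per schema.
        _validator_cache[key] = fastjsonschema.compile(schema)
    # Raises fastjsonschema.JsonSchemaException on invalid data; callers
    # would still need to map this onto the jsonschema.ValidationError
    # fields (e.field, e.message) used elsewhere in schema_validation.py.
    _validator_cache[key](data)
```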
</issue>
<code>
[start of kinto/schema_validation.py]
1 import colander
2 from jsonschema import ValidationError, SchemaError, RefResolutionError, validate
3
4 try: # pragma: no cover
5 from jsonschema import Draft7Validator as DraftValidator
6 except ImportError:
7 from jsonschema import Draft4Validator as DraftValidator
8
9 from pyramid.settings import asbool
10
11 from kinto.core import utils
12 from kinto.core.errors import raise_invalid
13 from kinto.views import object_exists_or_404
14
15
16 class JSONSchemaMapping(colander.SchemaNode):
17 def schema_type(self, **kw):
18 return colander.Mapping(unknown="preserve")
19
20 def deserialize(self, cstruct=colander.null):
21 # Start by deserializing a simple mapping.
22 validated = super().deserialize(cstruct)
23
24 # In case it is optional in parent schema.
25 if not validated or validated in (colander.null, colander.drop):
26 return validated
27 try:
28 check_schema(validated)
29 except ValidationError as e:
30 self.raise_invalid(e.message)
31 return validated
32
33
34 def check_schema(data):
35 try:
36 DraftValidator.check_schema(data)
37 except SchemaError as e:
38 message = e.path.pop() + e.message
39 raise ValidationError(message)
40
41
42 def validate_schema(data, schema, ignore_fields=[]):
43 required_fields = [f for f in schema.get("required", []) if f not in ignore_fields]
44 # jsonschema doesn't accept 'required': [] yet.
45 # See https://github.com/Julian/jsonschema/issues/337.
46 # In the meantime, strip out 'required' if no other fields are required.
47 if required_fields:
48 schema = {**schema, "required": required_fields}
49 else:
50 schema = {f: v for f, v in schema.items() if f != "required"}
51
52 data = {f: v for f, v in data.items() if f not in ignore_fields}
53
54 try:
55 validate(data, schema)
56 except ValidationError as e:
57 if e.path:
58 field = e.path[-1]
59 elif e.validator_value:
60 field = e.validator_value[-1]
61 else:
62 field = e.schema_path[-1]
63 e.field = field
64 raise e
65 # Raise an error here if a reference in the schema doesn't resolve.
66 # jsonschema doesn't provide schema validation checking upon creation yet,
67 # it must be validated against data.
68 # See https://github.com/Julian/jsonschema/issues/399
69 # For future support https://github.com/Julian/jsonschema/issues/346.
70 except RefResolutionError as e:
71 raise e
72
73
74 def validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):
75 """Lookup in the parent objects if a schema was defined for this resource.
76
77 If the schema validation feature is enabled, if a schema is/are defined, and if the
78 data does not validate it/them, then it raises a 400 exception.
79 """
80 settings = request.registry.settings
81 schema_validation = "experimental_collection_schema_validation"
82 # If disabled from settings, do nothing.
83 if not asbool(settings.get(schema_validation)):
84 return
85
86 bucket_id = request.matchdict["bucket_id"]
87 bucket_uri = utils.instance_uri(request, "bucket", id=bucket_id)
88 buckets = request.bound_data.setdefault("buckets", {})
89 if bucket_uri not in buckets:
90 # Unknown yet, fetch from storage.
91 bucket = object_exists_or_404(
92 request, resource_name="bucket", parent_id="", object_id=bucket_id
93 )
94 buckets[bucket_uri] = bucket
95
96 # Let's see if the bucket defines a schema for this resource.
97 metadata_field = f"{resource_name}:schema"
98 bucket = buckets[bucket_uri]
99 if metadata_field not in bucket:
100 return
101
102 # Validate or fail with 400.
103 schema = bucket[metadata_field]
104 try:
105 validate_schema(data, schema, ignore_fields=ignore_fields)
106 except ValidationError as e:
107 raise_invalid(request, name=e.field, description=e.message)
108 except RefResolutionError as e:
109 raise_invalid(request, name="schema", description=str(e))
110
[end of kinto/schema_validation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/schema_validation.py b/kinto/schema_validation.py
--- a/kinto/schema_validation.py
+++ b/kinto/schema_validation.py
@@ -1,5 +1,6 @@
import colander
-from jsonschema import ValidationError, SchemaError, RefResolutionError, validate
+from jsonschema import ValidationError, SchemaError, RefResolutionError
+from jsonschema.validators import validator_for
try: # pragma: no cover
from jsonschema import Draft7Validator as DraftValidator
@@ -39,6 +40,34 @@
raise ValidationError(message)
+# Module level global that stores a version of every possible schema (as a <class 'dict'>)
+# turned into a jsonschema instance (as <class 'jsonschema.validators.Validator'>).
+_schema_cache = {}
+
+
+def validate(data, schema):
+ """Raise a ValidationError or a RefResolutionError if the data doesn't validate
+ with the given schema.
+
+ Note that this function is just a "wrapper" on `jsonschema.validate()` but with
+ some memoization based on the schema for better repeat performance.
+ """
+ # Because the schema is a dict, it can't be used as a hash key so it needs to be
+ # "transformed" to something that is hashable. The quickest solution is to convert
+ # it to a string.
+ # Note that the order of the dict will determine the string it becomes. The solution
+ # to that would a canonical serializer like `json.dumps(..., sort_keys=True)` but it's
+ # overkill since the assumption is that the schema is very unlikely to be exactly
+ # the same but different order.
+ cache_key = str(schema)
+ if cache_key not in _schema_cache:
+ # This is essentially what the `jsonschema.validate()` shortcut function does.
+ cls = validator_for(schema)
+ cls.check_schema(schema)
+ _schema_cache[cache_key] = cls(schema)
+ return _schema_cache[cache_key].validate(data)
+
+
def validate_schema(data, schema, ignore_fields=[]):
required_fields = [f for f in schema.get("required", []) if f not in ignore_fields]
# jsonschema doesn't accept 'required': [] yet.
| {"golden_diff": "diff --git a/kinto/schema_validation.py b/kinto/schema_validation.py\n--- a/kinto/schema_validation.py\n+++ b/kinto/schema_validation.py\n@@ -1,5 +1,6 @@\n import colander\n-from jsonschema import ValidationError, SchemaError, RefResolutionError, validate\n+from jsonschema import ValidationError, SchemaError, RefResolutionError\n+from jsonschema.validators import validator_for\n \n try: # pragma: no cover\n from jsonschema import Draft7Validator as DraftValidator\n@@ -39,6 +40,34 @@\n raise ValidationError(message)\n \n \n+# Module level global that stores a version of every possible schema (as a <class 'dict'>)\n+# turned into a jsonschema instance (as <class 'jsonschema.validators.Validator'>).\n+_schema_cache = {}\n+\n+\n+def validate(data, schema):\n+ \"\"\"Raise a ValidationError or a RefResolutionError if the data doesn't validate\n+ with the given schema.\n+\n+ Note that this function is just a \"wrapper\" on `jsonschema.validate()` but with\n+ some memoization based on the schema for better repeat performance.\n+ \"\"\"\n+ # Because the schema is a dict, it can't be used as a hash key so it needs to be\n+ # \"transformed\" to something that is hashable. The quickest solution is to convert\n+ # it to a string.\n+ # Note that the order of the dict will determine the string it becomes. The solution\n+ # to that would a canonical serializer like `json.dumps(..., sort_keys=True)` but it's\n+ # overkill since the assumption is that the schema is very unlikely to be exactly\n+ # the same but different order.\n+ cache_key = str(schema)\n+ if cache_key not in _schema_cache:\n+ # This is essentially what the `jsonschema.validate()` shortcut function does.\n+ cls = validator_for(schema)\n+ cls.check_schema(schema)\n+ _schema_cache[cache_key] = cls(schema)\n+ return _schema_cache[cache_key].validate(data)\n+\n+\n def validate_schema(data, schema, ignore_fields=[]):\n required_fields = [f for f in schema.get(\"required\", []) if f not in ignore_fields]\n # jsonschema doesn't accept 'required': [] yet.\n", "issue": "Optimize use of jsonschema.validate()\nIt could be interesting to investigate if the fastjsonschema lib would give us better perfs :)\r\n\r\nI marked this as easy-pick since everything happens in `schema_validation.py`\r\n\r\nhttps://github.com/horejsek/python-fastjsonschema/\n", "before_files": [{"content": "import colander\nfrom jsonschema import ValidationError, SchemaError, RefResolutionError, validate\n\ntry: # pragma: no cover\n from jsonschema import Draft7Validator as DraftValidator\nexcept ImportError:\n from jsonschema import Draft4Validator as DraftValidator\n\nfrom pyramid.settings import asbool\n\nfrom kinto.core import utils\nfrom kinto.core.errors import raise_invalid\nfrom kinto.views import object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown=\"preserve\")\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super().deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n try:\n check_schema(validated)\n except ValidationError as e:\n self.raise_invalid(e.message)\n return validated\n\n\ndef check_schema(data):\n try:\n DraftValidator.check_schema(data)\n except SchemaError as e:\n message = e.path.pop() + e.message\n raise ValidationError(message)\n\n\ndef validate_schema(data, schema, ignore_fields=[]):\n 
required_fields = [f for f in schema.get(\"required\", []) if f not in ignore_fields]\n # jsonschema doesn't accept 'required': [] yet.\n # See https://github.com/Julian/jsonschema/issues/337.\n # In the meantime, strip out 'required' if no other fields are required.\n if required_fields:\n schema = {**schema, \"required\": required_fields}\n else:\n schema = {f: v for f, v in schema.items() if f != \"required\"}\n\n data = {f: v for f, v in data.items() if f not in ignore_fields}\n\n try:\n validate(data, schema)\n except ValidationError as e:\n if e.path:\n field = e.path[-1]\n elif e.validator_value:\n field = e.validator_value[-1]\n else:\n field = e.schema_path[-1]\n e.field = field\n raise e\n # Raise an error here if a reference in the schema doesn't resolve.\n # jsonschema doesn't provide schema validation checking upon creation yet,\n # it must be validated against data.\n # See https://github.com/Julian/jsonschema/issues/399\n # For future support https://github.com/Julian/jsonschema/issues/346.\n except RefResolutionError as e:\n raise e\n\n\ndef validate_from_bucket_schema_or_400(data, resource_name, request, ignore_fields=[]):\n \"\"\"Lookup in the parent objects if a schema was defined for this resource.\n\n If the schema validation feature is enabled, if a schema is/are defined, and if the\n data does not validate it/them, then it raises a 400 exception.\n \"\"\"\n settings = request.registry.settings\n schema_validation = \"experimental_collection_schema_validation\"\n # If disabled from settings, do nothing.\n if not asbool(settings.get(schema_validation)):\n return\n\n bucket_id = request.matchdict[\"bucket_id\"]\n bucket_uri = utils.instance_uri(request, \"bucket\", id=bucket_id)\n buckets = request.bound_data.setdefault(\"buckets\", {})\n if bucket_uri not in buckets:\n # Unknown yet, fetch from storage.\n bucket = object_exists_or_404(\n request, resource_name=\"bucket\", parent_id=\"\", object_id=bucket_id\n )\n buckets[bucket_uri] = bucket\n\n # Let's see if the bucket defines a schema for this resource.\n metadata_field = f\"{resource_name}:schema\"\n bucket = buckets[bucket_uri]\n if metadata_field not in bucket:\n return\n\n # Validate or fail with 400.\n schema = bucket[metadata_field]\n try:\n validate_schema(data, schema, ignore_fields=ignore_fields)\n except ValidationError as e:\n raise_invalid(request, name=e.field, description=e.message)\n except RefResolutionError as e:\n raise_invalid(request, name=\"schema\", description=str(e))\n", "path": "kinto/schema_validation.py"}]} | 1,716 | 493 |
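The caching trick in the golden diff generalizes: `jsonschema.validators.validator_for` selects the right validator class for a schema, and building the validator once skips the repeated `check_schema` pass that the plain `jsonschema.validate()` shortcut performs on every call. A minimal standalone sketch of the same pattern follows; the schema, data and cache here are illustrative, not Kinto code.

```python
from jsonschema.validators import validator_for

_schema_cache = {}  # str(schema) -> compiled validator instance


def cached_validate(data, schema):
    # str(schema) is a cheap, order-sensitive cache key, mirroring the diff above.
    cache_key = str(schema)
    if cache_key not in _schema_cache:
        cls = validator_for(schema)   # e.g. Draft7Validator for a draft-07 schema
        cls.check_schema(schema)      # fail fast if the schema itself is invalid
        _schema_cache[cache_key] = cls(schema)
    return _schema_cache[cache_key].validate(data)


schema = {"type": "object", "required": ["title"]}
cached_validate({"title": "first"}, schema)   # compiles and caches the validator
cached_validate({"title": "again"}, schema)   # hits the cache, no re-compilation
```

The trade-off named in the diff applies here too: two schemas that differ only in key order produce different cache keys, which wastes a cache slot but never validates incorrectly.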
gh_patches_debug_6142 | rasdani/github-patches | git_diff | Textualize__textual-4424 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs change - use `pane` instead of `tab` for an event listener on tab change
We tried adding an event listener for a tab change using the docs here, but couldn't get it to work: https://textual.textualize.io/guide/events/#applying-css-selectors-to-arbitrary-attributes. Should the docs be updated to use `pane` instead of `tab`, or are we doing something wrong? Specifically this snippet:
```python
@on(TabbedContent.TabActivated, tab="#home")
def home_tab(self) -> None:
self.log("Switched back to home tab.")
```
I got it working, I think it's related to the breaking change described in https://github.com/Textualize/textual/blob/main/CHANGELOG.md#0460---2023-12-17
> Breaking change: tab is no longer a @on decorator selector for TabbedContent.TabActivated -- use pane instead https://github.com/Textualize/textual/pull/3815
</issue>
<code>
[start of src/textual/_on.py]
1 from __future__ import annotations
2
3 from typing import Callable, TypeVar
4
5 from .css.model import SelectorSet
6 from .css.parse import parse_selectors
7 from .css.tokenizer import TokenError
8 from .message import Message
9
10 DecoratedType = TypeVar("DecoratedType")
11
12
13 class OnDecoratorError(Exception):
14 """Errors related to the `on` decorator.
15
16 Typically raised at import time as an early warning system.
17 """
18
19
20 class OnNoWidget(Exception):
21 """A selector was applied to an attribute that isn't a widget."""
22
23
24 def on(
25 message_type: type[Message], selector: str | None = None, **kwargs: str
26 ) -> Callable[[DecoratedType], DecoratedType]:
27 """Decorator to declare that the method is a message handler.
28
29 The decorator accepts an optional CSS selector that will be matched against a widget exposed by
30 a `control` property on the message.
31
32 Example:
33 ```python
34 # Handle the press of buttons with ID "#quit".
35 @on(Button.Pressed, "#quit")
36 def quit_button(self) -> None:
37 self.app.quit()
38 ```
39
40 Keyword arguments can be used to match additional selectors for attributes
41 listed in [`ALLOW_SELECTOR_MATCH`][textual.message.Message.ALLOW_SELECTOR_MATCH].
42
43 Example:
44 ```python
45 # Handle the activation of the tab "#home" within the `TabbedContent` "#tabs".
46 @on(TabbedContent.TabActivated, "#tabs", tab="#home")
47 def switch_to_home(self) -> None:
48 self.log("Switching back to the home tab.")
49 ...
50 ```
51
52 Args:
53 message_type: The message type (i.e. the class).
54 selector: An optional [selector](/guide/CSS#selectors). If supplied, the handler will only be called if `selector`
55 matches the widget from the `control` attribute of the message.
56 **kwargs: Additional selectors for other attributes of the message.
57 """
58
59 selectors: dict[str, str] = {}
60 if selector is not None:
61 selectors["control"] = selector
62 if kwargs:
63 selectors.update(kwargs)
64
65 parsed_selectors: dict[str, tuple[SelectorSet, ...]] = {}
66 for attribute, css_selector in selectors.items():
67 if attribute == "control":
68 if message_type.control == Message.control:
69 raise OnDecoratorError(
70 "The message class must have a 'control' to match with the on decorator"
71 )
72 elif attribute not in message_type.ALLOW_SELECTOR_MATCH:
73 raise OnDecoratorError(
74 f"The attribute {attribute!r} can't be matched; have you added it to "
75 + f"{message_type.__name__}.ALLOW_SELECTOR_MATCH?"
76 )
77 try:
78 parsed_selectors[attribute] = parse_selectors(css_selector)
79 except TokenError:
80 raise OnDecoratorError(
81 f"Unable to parse selector {css_selector!r} for {attribute}; check for syntax errors"
82 ) from None
83
84 def decorator(method: DecoratedType) -> DecoratedType:
85 """Store message and selector in function attribute, return callable unaltered."""
86
87 if not hasattr(method, "_textual_on"):
88 setattr(method, "_textual_on", [])
89 getattr(method, "_textual_on").append((message_type, parsed_selectors))
90
91 return method
92
93 return decorator
94
[end of src/textual/_on.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/_on.py b/src/textual/_on.py
--- a/src/textual/_on.py
+++ b/src/textual/_on.py
@@ -43,7 +43,7 @@
Example:
```python
# Handle the activation of the tab "#home" within the `TabbedContent` "#tabs".
- @on(TabbedContent.TabActivated, "#tabs", tab="#home")
+ @on(TabbedContent.TabActivated, "#tabs", pane="#home")
def switch_to_home(self) -> None:
self.log("Switching back to the home tab.")
...
| {"golden_diff": "diff --git a/src/textual/_on.py b/src/textual/_on.py\n--- a/src/textual/_on.py\n+++ b/src/textual/_on.py\n@@ -43,7 +43,7 @@\n Example:\n ```python\n # Handle the activation of the tab \"#home\" within the `TabbedContent` \"#tabs\".\n- @on(TabbedContent.TabActivated, \"#tabs\", tab=\"#home\")\n+ @on(TabbedContent.TabActivated, \"#tabs\", pane=\"#home\")\n def switch_to_home(self) -> None:\n self.log(\"Switching back to the home tab.\")\n ...\n", "issue": "Docs change - use `pane` instead of `tab` for an event listener on tab change\nWe tried adding an event listener for a tab change using the docs here, but couldn't get it to work: https://textual.textualize.io/guide/events/#applying-css-selectors-to-arbitrary-attributes. Should the docs be updated to use `pane` instead of `tab`, or are we doing something wrong? Specifically this snippet:\r\n\r\n```python\r\n@on(TabbedContent.TabActivated, tab=\"#home\")\r\ndef home_tab(self) -> None:\r\n self.log(\"Switched back to home tab.\")\r\n```\r\n\r\nI got it working, I think it's related to the breaking change described in https://github.com/Textualize/textual/blob/main/CHANGELOG.md#0460---2023-12-17\r\n\r\n> Breaking change: tab is no longer a @on decorator selector for TabbedContent.TabActivated -- use pane instead https://github.com/Textualize/textual/pull/3815\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Callable, TypeVar\n\nfrom .css.model import SelectorSet\nfrom .css.parse import parse_selectors\nfrom .css.tokenizer import TokenError\nfrom .message import Message\n\nDecoratedType = TypeVar(\"DecoratedType\")\n\n\nclass OnDecoratorError(Exception):\n \"\"\"Errors related to the `on` decorator.\n\n Typically raised at import time as an early warning system.\n \"\"\"\n\n\nclass OnNoWidget(Exception):\n \"\"\"A selector was applied to an attribute that isn't a widget.\"\"\"\n\n\ndef on(\n message_type: type[Message], selector: str | None = None, **kwargs: str\n) -> Callable[[DecoratedType], DecoratedType]:\n \"\"\"Decorator to declare that the method is a message handler.\n\n The decorator accepts an optional CSS selector that will be matched against a widget exposed by\n a `control` property on the message.\n\n Example:\n ```python\n # Handle the press of buttons with ID \"#quit\".\n @on(Button.Pressed, \"#quit\")\n def quit_button(self) -> None:\n self.app.quit()\n ```\n\n Keyword arguments can be used to match additional selectors for attributes\n listed in [`ALLOW_SELECTOR_MATCH`][textual.message.Message.ALLOW_SELECTOR_MATCH].\n\n Example:\n ```python\n # Handle the activation of the tab \"#home\" within the `TabbedContent` \"#tabs\".\n @on(TabbedContent.TabActivated, \"#tabs\", tab=\"#home\")\n def switch_to_home(self) -> None:\n self.log(\"Switching back to the home tab.\")\n ...\n ```\n\n Args:\n message_type: The message type (i.e. the class).\n selector: An optional [selector](/guide/CSS#selectors). 
If supplied, the handler will only be called if `selector`\n matches the widget from the `control` attribute of the message.\n **kwargs: Additional selectors for other attributes of the message.\n \"\"\"\n\n selectors: dict[str, str] = {}\n if selector is not None:\n selectors[\"control\"] = selector\n if kwargs:\n selectors.update(kwargs)\n\n parsed_selectors: dict[str, tuple[SelectorSet, ...]] = {}\n for attribute, css_selector in selectors.items():\n if attribute == \"control\":\n if message_type.control == Message.control:\n raise OnDecoratorError(\n \"The message class must have a 'control' to match with the on decorator\"\n )\n elif attribute not in message_type.ALLOW_SELECTOR_MATCH:\n raise OnDecoratorError(\n f\"The attribute {attribute!r} can't be matched; have you added it to \"\n + f\"{message_type.__name__}.ALLOW_SELECTOR_MATCH?\"\n )\n try:\n parsed_selectors[attribute] = parse_selectors(css_selector)\n except TokenError:\n raise OnDecoratorError(\n f\"Unable to parse selector {css_selector!r} for {attribute}; check for syntax errors\"\n ) from None\n\n def decorator(method: DecoratedType) -> DecoratedType:\n \"\"\"Store message and selector in function attribute, return callable unaltered.\"\"\"\n\n if not hasattr(method, \"_textual_on\"):\n setattr(method, \"_textual_on\", [])\n getattr(method, \"_textual_on\").append((message_type, parsed_selectors))\n\n return method\n\n return decorator\n", "path": "src/textual/_on.py"}]} | 1,655 | 137 |
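The corrected keyword can be exercised end to end with a small app; this is a sketch assuming Textual 0.46 or later, where `pane=` replaced `tab=` as the `@on` selector for `TabbedContent.TabActivated`, and the widget IDs are made up for the example.

```python
from textual import on
from textual.app import App, ComposeResult
from textual.widgets import Label, TabbedContent, TabPane


class TabsApp(App):
    def compose(self) -> ComposeResult:
        with TabbedContent(id="tabs"):
            with TabPane("Home", id="home"):
                yield Label("home content")
            with TabPane("About", id="about"):
                yield Label("about content")

    # `pane="#home"` matches the activated TabPane, per the fixed docstring above;
    # the old `tab="#home"` spelling fails the ALLOW_SELECTOR_MATCH check in _on.py
    # and raises OnDecoratorError on current Textual.
    @on(TabbedContent.TabActivated, "#tabs", pane="#home")
    def switch_to_home(self) -> None:
        self.log("Switched back to the home tab.")


if __name__ == "__main__":
    TabsApp().run()
```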
gh_patches_debug_9111 | rasdani/github-patches | git_diff | e2nIEE__pandapower-291 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fixed rundcpp with single bus network
csr_matrix dimensions have to be specified explicitly.
Issue #288
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
4 # and Energy System Technology (IEE), Kassel. All rights reserved.
5
6
7
8 from setuptools import setup, find_packages
9
10 with open('README.rst', 'rb') as f:
11 install = f.read().decode('utf-8')
12 with open('CHANGELOG.rst', 'rb') as f:
13 changelog = f.read().decode('utf-8')
14
15 long_description = '\n\n'.join((install, changelog))
16
17 setup(
18 name='pandapower',
19 version='1.6.1',
20 author='Leon Thurner, Alexander Scheidler',
21 author_email='[email protected], [email protected]',
22 description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',
23 long_description=long_description,
24 url='www.pandapower.org',
25 license='BSD',
26 install_requires=["pypower>=5.0.1",
27 "pandas>=0.17.0",
28 "networkx",
29 "numpy",
30 "scipy"],
31 extras_require = {":python_version<'3.0'": ["future"]},
32 packages=find_packages(),
33 include_package_data=True,
34 classifiers=[
35 'Development Status :: 5 - Production/Stable',
36 'Environment :: Console',
37 'Intended Audience :: Developers',
38 'Intended Audience :: Education',
39 'Intended Audience :: Science/Research',
40 'License :: OSI Approved :: BSD License',
41 'Natural Language :: English',
42 'Operating System :: OS Independent',
43 'Programming Language :: Python',
44 'Programming Language :: Python :: 2',
45 'Programming Language :: Python :: 2.7',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.4',
48 'Programming Language :: Python :: 3.5',
49 'Programming Language :: Python :: 3.6',
50 'Topic :: Scientific/Engineering',
51 ],
52 )
53
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
author_email='[email protected], [email protected]',
description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',
long_description=long_description,
- url='www.pandapower.org',
+ url='http://www.pandapower.org',
license='BSD',
install_requires=["pypower>=5.0.1",
"pandas>=0.17.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,7 @@\n author_email='[email protected], [email protected]',\n description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',\n long_description=long_description,\n- url='www.pandapower.org',\n+ url='http://www.pandapower.org',\n license='BSD',\n install_requires=[\"pypower>=5.0.1\",\n \"pandas>=0.17.0\",\n", "issue": "fixed rundcpp with single bus network\ncsr_matrix dimesions has to be specified explicitly.\r\n\r\nIssue #288 \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst', 'rb') as f:\n install = f.read().decode('utf-8')\nwith open('CHANGELOG.rst', 'rb') as f:\n changelog = f.read().decode('utf-8')\n\nlong_description = '\\n\\n'.join((install, changelog))\n\nsetup(\n name='pandapower',\n version='1.6.1',\n author='Leon Thurner, Alexander Scheidler',\n author_email='[email protected], [email protected]',\n description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',\n long_description=long_description,\n url='www.pandapower.org',\n license='BSD',\n install_requires=[\"pypower>=5.0.1\",\n \"pandas>=0.17.0\",\n \"networkx\",\n \"numpy\",\n \"scipy\"],\n extras_require = {\":python_version<'3.0'\": [\"future\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n ],\n)\n", "path": "setup.py"}]} | 1,115 | 142 |
gh_patches_debug_1964 | rasdani/github-patches | git_diff | kserve__kserve-1137 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installed KFServing SDK 0.4 but getting import error while running the custom built image
/kind bug
**What steps did you take and what happened:**
Run a custom built image with KFServing SDK 0.4.
```
Traceback (most recent call last):
File "/python3/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/python3/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/job/blambda-function/image_transformer_v2/__main__.py", line 15, in <module>
import kfserving
File "/python3/lib/python3.7/site-packages/kfserving/__init__.py", line 18, in <module>
from .storage import Storage
File "/python3/lib/python3.7/site-packages/kfserving/storage.py", line 23, in <module>
from google.cloud import storage
File "/python3/lib/python3.7/site-packages/google/cloud/storage/__init__.py", line 39, in <module>
from google.cloud.storage.batch import Batch
File "/python3/lib/python3.7/site-packages/google/cloud/storage/batch.py", line 31, in <module>
from google.cloud.storage._http import Connection
File "/python3/lib/python3.7/site-packages/google/cloud/storage/_http.py", line 17, in <module>
from google.cloud import _http
File "/python3/lib/python3.7/site-packages/google/cloud/_http.py", line 22, in <module>
from six.moves import collections_abc
ImportError: cannot import name 'collections_abc' from 'six.moves' (unknown location)
```
**What did you expect to happen:**
**Anything else you would like to add:**
We have fixed this in master branch but looks like we need to patch the setup.py in 0.4 branch and release a new minor version
**Environment:**
- Istio Version:
- Knative Version:
- KFServing Version:
- Kubeflow version:
- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]
- Minikube version:
- Kubernetes version: (use `kubectl version`):
- OS (e.g. from `/etc/os-release`):
</issue>
<code>
[start of python/alibiexplainer/setup.py]
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup, find_packages
16
17 tests_require = [
18 'pytest',
19 'pytest-tornasync',
20 'mypy'
21 ]
22
23 setup(
24 name='alibiexplainer',
25 version='0.4.0',
26 author_email='[email protected]',
27 license='../../LICENSE.txt',
28 url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',
29 description='Model Explaination Server. \
30 Not intended for use outside KFServing Frameworks Images',
31 long_description=open('README.md').read(),
32 python_requires='>=3.6',
33 packages=find_packages("alibiexplainer"),
34 install_requires=[
35 "kfserving>=0.4.0",
36 "alibi==0.4.0",
37 "scikit-learn>=0.20.3",
38 "argparse>=1.4.0",
39 "requests>=2.22.0",
40 "joblib>=0.13.2",
41 "pandas>=0.24.2",
42 "numpy>=1.16.3",
43 "dill>=0.3.0",
44 "spacy>=2.1.4"
45 ],
46 tests_require=tests_require,
47 extras_require={'test': tests_require}
48 )
49
[end of python/alibiexplainer/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/alibiexplainer/setup.py b/python/alibiexplainer/setup.py
--- a/python/alibiexplainer/setup.py
+++ b/python/alibiexplainer/setup.py
@@ -32,6 +32,7 @@
python_requires='>=3.6',
packages=find_packages("alibiexplainer"),
install_requires=[
+ "shap==0.35",
"kfserving>=0.4.0",
"alibi==0.4.0",
"scikit-learn>=0.20.3",
| {"golden_diff": "diff --git a/python/alibiexplainer/setup.py b/python/alibiexplainer/setup.py\n--- a/python/alibiexplainer/setup.py\n+++ b/python/alibiexplainer/setup.py\n@@ -32,6 +32,7 @@\n python_requires='>=3.6',\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n+ \"shap==0.35\",\n \"kfserving>=0.4.0\",\n \"alibi==0.4.0\",\n \"scikit-learn>=0.20.3\",\n", "issue": "Installed KFServing SDK 0.4 but getting import error while running the custom built image\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nRun a custom built image with KFServing SDK 0.4.\r\n```\r\nTraceback (most recent call last):\r\n File \"/python3/lib/python3.7/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/python3/lib/python3.7/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/job/blambda-function/image_transformer_v2/__main__.py\", line 15, in <module>\r\n import kfserving\r\n File \"/python3/lib/python3.7/site-packages/kfserving/__init__.py\", line 18, in <module>\r\n from .storage import Storage\r\n File \"/python3/lib/python3.7/site-packages/kfserving/storage.py\", line 23, in <module>\r\n from google.cloud import storage\r\n File \"/python3/lib/python3.7/site-packages/google/cloud/storage/__init__.py\", line 39, in <module>\r\n from google.cloud.storage.batch import Batch\r\n File \"/python3/lib/python3.7/site-packages/google/cloud/storage/batch.py\", line 31, in <module>\r\n from google.cloud.storage._http import Connection\r\n File \"/python3/lib/python3.7/site-packages/google/cloud/storage/_http.py\", line 17, in <module>\r\n from google.cloud import _http\r\n File \"/python3/lib/python3.7/site-packages/google/cloud/_http.py\", line 22, in <module>\r\n from six.moves import collections_abc\r\nImportError: cannot import name 'collections_abc' from 'six.moves' (unknown location)\r\n```\r\n\r\n\r\n**What did you expect to happen:**\r\n\r\n\r\n**Anything else you would like to add:**\r\nWe have fixed this in master branch but looks like we need to patch the setup.py in 0.4 branch and release a new minor version\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KFServing Version:\r\n- Kubeflow version:\r\n- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- Minikube version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='alibiexplainer',\n version='0.4.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',\n description='Model Explaination Server. 
\\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n \"kfserving>=0.4.0\",\n \"alibi==0.4.0\",\n \"scikit-learn>=0.20.3\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n \"joblib>=0.13.2\",\n \"pandas>=0.24.2\",\n \"numpy>=1.16.3\",\n \"dill>=0.3.0\",\n \"spacy>=2.1.4\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/alibiexplainer/setup.py"}]} | 1,579 | 124 |
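The traceback bottoms out in `six.moves.collections_abc`, a shim that only newer releases of `six` provide, so the image was resolving an old `six` somewhere in the dependency set; pinning `shap` in the diff is one way to keep that set compatible. A small diagnostic sketch for the symptom itself, with the 1.13.0 threshold taken from six's changelog as an assumption:

```python
import six

print("six version:", six.__version__)
try:
    # The shim google-cloud-storage imports; assumed to appear in six 1.13.0.
    from six.moves import collections_abc
except ImportError:
    print("six is too old for google-cloud-storage; try `pip install 'six>=1.13'`")
else:
    print("collections_abc shim available:", collections_abc)
```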
gh_patches_debug_53356 | rasdani/github-patches | git_diff | facebookresearch__hydra-1887 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Ray-Plugin] Add support for Python 3.9
Python 3.9 support depends on https://github.com/ray-project/ray/issues/12788
Related to #1062
</issue>
<code>
[start of plugins/hydra_ray_launcher/setup.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 # type: ignore
3 from pathlib import Path
4
5 from read_version import read_version
6 from setuptools import find_namespace_packages, setup
7
8 setup(
9 name="hydra-ray-launcher",
10 version=read_version("hydra_plugins/hydra_ray_launcher", "__init__.py"),
11 author="Jieru Hu",
12 author_email="[email protected]",
13 description="Hydra Ray launcher plugin",
14 long_description=(Path(__file__).parent / "README.md").read_text(),
15 long_description_content_type="text/markdown",
16 url="https://github.com/facebookresearch/hydra/",
17 packages=find_namespace_packages(include=["hydra_plugins.*"]),
18 classifiers=[
19 "License :: OSI Approved :: MIT License",
20 "Programming Language :: Python :: 3.7",
21 "Programming Language :: Python :: 3.8",
22 # "Programming Language :: Python :: 3.9",
23 "Operating System :: MacOS",
24 "Operating System :: POSIX :: Linux",
25 ],
26 install_requires=[
27 "boto3==1.17.17",
28 "hydra-core>=1.1.0.dev7",
29 "ray[default]==1.6.0",
30 # https://github.com/aio-libs/aiohttp/issues/6203
31 "aiohttp!=3.8.0",
32 "cloudpickle==1.6.0",
33 "pickle5==0.0.11",
34 ],
35 include_package_data=True,
36 )
37
[end of plugins/hydra_ray_launcher/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/hydra_ray_launcher/setup.py b/plugins/hydra_ray_launcher/setup.py
--- a/plugins/hydra_ray_launcher/setup.py
+++ b/plugins/hydra_ray_launcher/setup.py
@@ -19,7 +19,7 @@
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
- # "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.9",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
],
| {"golden_diff": "diff --git a/plugins/hydra_ray_launcher/setup.py b/plugins/hydra_ray_launcher/setup.py\n--- a/plugins/hydra_ray_launcher/setup.py\n+++ b/plugins/hydra_ray_launcher/setup.py\n@@ -19,7 +19,7 @@\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n- # \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX :: Linux\",\n ],\n", "issue": "[Ray-Plugin] Add support for Python 3.9\nPython 3.9 support depends on https://github.com/ray-project/ray/issues/12788\r\n\r\nRelated to #1062 \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom pathlib import Path\n\nfrom read_version import read_version\nfrom setuptools import find_namespace_packages, setup\n\nsetup(\n name=\"hydra-ray-launcher\",\n version=read_version(\"hydra_plugins/hydra_ray_launcher\", \"__init__.py\"),\n author=\"Jieru Hu\",\n author_email=\"[email protected]\",\n description=\"Hydra Ray launcher plugin\",\n long_description=(Path(__file__).parent / \"README.md\").read_text(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n # \"Programming Language :: Python :: 3.9\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX :: Linux\",\n ],\n install_requires=[\n \"boto3==1.17.17\",\n \"hydra-core>=1.1.0.dev7\",\n \"ray[default]==1.6.0\",\n # https://github.com/aio-libs/aiohttp/issues/6203\n \"aiohttp!=3.8.0\",\n \"cloudpickle==1.6.0\",\n \"pickle5==0.0.11\",\n ],\n include_package_data=True,\n)\n", "path": "plugins/hydra_ray_launcher/setup.py"}]} | 985 | 136 |
gh_patches_debug_24241 | rasdani/github-patches | git_diff | Mailu__Mailu-2144 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot send email after upgrade to 1.9 `non DNSSEC destination`
## Before you open your issue
- [x] Check if no issue or pull-request for this already exists.
- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [x] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
Thanks for all the work! 1.9 seems like an amazing release overall ❤️
I upgraded to 1.9, generated the new docker-compose file with the tool.
Copied the relevant env variables over and started everything.
I can still receive mails and send internal ones (from and to my own domain).
I cannot send mails to external providers.
After restoring from backup, everything works in 1.8.
```
Final-Recipient: rfc822; ******
Original-Recipient: rfc822;******
Action: delayed
Status: 4.7.5
Diagnostic-Code: X-Postfix; non DNSSEC destination
Will-Retry-Until: Fri, 7 Jan 2022 18:38:34 +0000 (UTC)
Return-Path: <*****>
From: Cloud <*******>
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=nicco.io; s=dkim;
t=1641148715;
h=from:from:reply-to:subject:subject:date:date:message-id:message-id:
to:to:cc:mime-version:mime-version:content-type:content-type:
in-reply-to:in-reply-to:references:references;
bh=Q0j3Ph9l8nLeBMIzdq6aOtNcsZOyiD8WuiQSGgc2oeY=;
b=nF+9OZeRSSEDZ995inbj/6nDYgbTDMhPGc63Gab3FN1A984PxY7rDoHIhkO5nvh2wzNZG6
5jIAmfCyYfHriJawNrtcKeUA4wBO5YgYPWag6CbGmWQ8sGSIty2fjIO0W4dWfWy+OjsksX
sJ2TK8uft8Ax9F9QmQKMuZHBP3Myh/I=
Content-Type: multipart/mixed;
boundary="Apple-Mail=_DD0DBE9A-59E8-455E-B4DD-1317DF1D24F9"
Mime-Version: 1.0
Subject: =*****
Date: Sun, 2 Jan 2022 19:38:33 +0100
References: <****>
<****>
<****>
<****>
To: ***
In-Reply-To: ****
Message-Id: ****
X-Spam-Level: *
X-Spamd-Bar: +
Authentication-Results: *****;
auth=pass smtp.mailfrom=*****
```
</issue>
<code>
[start of core/admin/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import logging as log
5 import sys
6
7 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "INFO"))
8
9 os.system("flask mailu advertise")
10 os.system("flask db upgrade")
11
12 account = os.environ.get("INITIAL_ADMIN_ACCOUNT")
13 domain = os.environ.get("INITIAL_ADMIN_DOMAIN")
14 password = os.environ.get("INITIAL_ADMIN_PW")
15
16 if account is not None and domain is not None and password is not None:
17 mode = os.environ.get("INITIAL_ADMIN_MODE", default="ifmissing")
18 log.info("Creating initial admin accout %s@%s with mode %s",account,domain,mode)
19 os.system("flask mailu admin %s %s '%s' --mode %s" % (account, domain, password, mode))
20
21 start_command="".join([
22 "gunicorn --threads ", str(os.cpu_count()),
23 " -b :80 ",
24 "--access-logfile - " if (log.root.level<=log.INFO) else "",
25 "--error-logfile - ",
26 "--preload ",
27 "'mailu:create_app()'"])
28
29 os.system(start_command)
30
[end of core/admin/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/start.py b/core/admin/start.py
--- a/core/admin/start.py
+++ b/core/admin/start.py
@@ -18,6 +18,34 @@
log.info("Creating initial admin accout %s@%s with mode %s",account,domain,mode)
os.system("flask mailu admin %s %s '%s' --mode %s" % (account, domain, password, mode))
+def test_DNS():
+ import dns.resolver
+ import dns.exception
+ import dns.flags
+ import dns.rdtypes
+ import dns.rdatatype
+ import dns.rdataclass
+ import time
+ # DNS stub configured to do DNSSEC enabled queries
+ resolver = dns.resolver.Resolver()
+ resolver.use_edns(0, 0, 1232)
+ resolver.flags = dns.flags.AD | dns.flags.RD
+ nameservers = resolver.nameservers
+ for ns in nameservers:
+ resolver.nameservers=[ns]
+ while True:
+ try:
+ result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)
+ except Exception as e:
+ log.critical("Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.", ns, e);
+ else:
+ if result.response.flags & dns.flags.AD:
+ break
+ log.critical("Your DNS resolver at %s isn't doing DNSSEC validation; Please use another resolver or enable unbound via https://setup.mailu.io.", ns)
+ time.sleep(5)
+
+test_DNS()
+
start_command="".join([
"gunicorn --threads ", str(os.cpu_count()),
" -b :80 ",
| {"golden_diff": "diff --git a/core/admin/start.py b/core/admin/start.py\n--- a/core/admin/start.py\n+++ b/core/admin/start.py\n@@ -18,6 +18,34 @@\n log.info(\"Creating initial admin accout %s@%s with mode %s\",account,domain,mode)\n os.system(\"flask mailu admin %s %s '%s' --mode %s\" % (account, domain, password, mode))\n \n+def test_DNS():\n+ import dns.resolver\n+ import dns.exception\n+ import dns.flags\n+ import dns.rdtypes\n+ import dns.rdatatype\n+ import dns.rdataclass\n+ import time\n+ # DNS stub configured to do DNSSEC enabled queries\n+ resolver = dns.resolver.Resolver()\n+ resolver.use_edns(0, 0, 1232)\n+ resolver.flags = dns.flags.AD | dns.flags.RD\n+ nameservers = resolver.nameservers\n+ for ns in nameservers:\n+ resolver.nameservers=[ns]\n+ while True:\n+ try:\n+ result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)\n+ except Exception as e:\n+ log.critical(\"Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.\", ns, e);\n+ else:\n+ if result.response.flags & dns.flags.AD:\n+ break\n+ log.critical(\"Your DNS resolver at %s isn't doing DNSSEC validation; Please use another resolver or enable unbound via https://setup.mailu.io.\", ns)\n+ time.sleep(5)\n+\n+test_DNS()\n+\n start_command=\"\".join([\n \"gunicorn --threads \", str(os.cpu_count()),\n \" -b :80 \",\n", "issue": "Cannot send email after upgrade to 1.9 `non DNSSEC destination`\n## Before you open your issue\r\n- [x] Check if no issue or pull-request for this already exists.\r\n- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [x] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [x] docker-compose\r\n - [ ] kubernetes\r\n - [ ] docker swarm\r\n\r\n### Versions\r\n1.9\r\n\r\n## Description\r\nThanks for all the work! 
1.9 seems like an amazing release overall \u2764\ufe0f \r\n\r\nI upgraded to 1.9, generated the new docker-compose file with the tool.\r\nCopied the relevant env variables over and started everything.\r\nI can still receive mails, and send internal one (from and to my own domain)\r\nI cannot send mails to external providers.\r\n\r\nafter restoring from backup everything works in 1.8\r\n\r\n```\r\nFinal-Recipient: rfc822; ******\r\nOriginal-Recipient: rfc822;******\r\nAction: delayed\r\nStatus: 4.7.5\r\nDiagnostic-Code: X-Postfix; non DNSSEC destination\r\nWill-Retry-Until: Fri, 7 Jan 2022 18:38:34 +0000 (UTC)\r\nReturn-Path: <*****>\r\nFrom: Cloud <*******>\r\nDKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=nicco.io; s=dkim;\r\n\tt=1641148715;\r\n\th=from:from:reply-to:subject:subject:date:date:message-id:message-id:\r\n\t to:to:cc:mime-version:mime-version:content-type:content-type:\r\n\t in-reply-to:in-reply-to:references:references;\r\n\tbh=Q0j3Ph9l8nLeBMIzdq6aOtNcsZOyiD8WuiQSGgc2oeY=;\r\n\tb=nF+9OZeRSSEDZ995inbj/6nDYgbTDMhPGc63Gab3FN1A984PxY7rDoHIhkO5nvh2wzNZG6\r\n\t5jIAmfCyYfHriJawNrtcKeUA4wBO5YgYPWag6CbGmWQ8sGSIty2fjIO0W4dWfWy+OjsksX\r\n\tsJ2TK8uft8Ax9F9QmQKMuZHBP3Myh/I=\r\nContent-Type: multipart/mixed;\r\n\tboundary=\"Apple-Mail=_DD0DBE9A-59E8-455E-B4DD-1317DF1D24F9\"\r\nMime-Version: 1.0\r\nSubject: =*****\r\nDate: Sun, 2 Jan 2022 19:38:33 +0100\r\nReferences: <****>\r\n<****>\r\n<****>\r\n<****>\r\nTo: ***\r\nIn-Reply-To: ****\r\nMessage-Id: ****\r\nX-Spam-Level: *\r\nX-Spamd-Bar: +\r\nAuthentication-Results: *****;\r\n\tauth=pass smtp.mailfrom=*****\r\n```\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport logging as log\nimport sys\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"INFO\"))\n\nos.system(\"flask mailu advertise\")\nos.system(\"flask db upgrade\")\n\naccount = os.environ.get(\"INITIAL_ADMIN_ACCOUNT\")\ndomain = os.environ.get(\"INITIAL_ADMIN_DOMAIN\")\npassword = os.environ.get(\"INITIAL_ADMIN_PW\")\n\nif account is not None and domain is not None and password is not None:\n mode = os.environ.get(\"INITIAL_ADMIN_MODE\", default=\"ifmissing\")\n log.info(\"Creating initial admin accout %s@%s with mode %s\",account,domain,mode)\n os.system(\"flask mailu admin %s %s '%s' --mode %s\" % (account, domain, password, mode))\n\nstart_command=\"\".join([\n \"gunicorn --threads \", str(os.cpu_count()),\n \" -b :80 \",\n \"--access-logfile - \" if (log.root.level<=log.INFO) else \"\",\n \"--error-logfile - \",\n \"--preload \",\n \"'mailu:create_app()'\"])\n\nos.system(start_command)\n", "path": "core/admin/start.py"}]} | 1,615 | 408 |
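The check the diff adds reduces to one question: does the configured resolver set the AD (Authenticated Data) flag on a DNSSEC-signed answer? Postfix's `non DNSSEC destination` deferral is what you see when it does not. A trimmed standalone version using dnspython; note that current dnspython spells the call `resolve()` where the diff uses the older `query()` alias, and the test name is illustrative:

```python
import dns.flags
import dns.rdataclass
import dns.rdatatype
import dns.resolver

resolver = dns.resolver.Resolver()          # uses the system's configured nameservers
resolver.use_edns(0, 0, 1232)               # EDNS0 with a DNSSEC-friendly payload size
resolver.flags = dns.flags.AD | dns.flags.RD

answer = resolver.resolve("example.org", dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)
if answer.response.flags & dns.flags.AD:
    print("resolver validates DNSSEC")
else:
    print("resolver does NOT validate DNSSEC; Postfix will defer with "
          "'non DNSSEC destination' when DANE is enabled")
```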
gh_patches_debug_18079 | rasdani/github-patches | git_diff | modin-project__modin-3016 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document modin.experimental.sklearn
This should include docstrings and purposes of everything inside modin.experimental.sklearn.
</issue>
<code>
[start of modin/experimental/sklearn/model_selection/__init__.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 from .train_test_split import train_test_split
15
16 __all__ = ["train_test_split"]
17
[end of modin/experimental/sklearn/model_selection/__init__.py]
[start of modin/experimental/sklearn/__init__.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
[end of modin/experimental/sklearn/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/experimental/sklearn/__init__.py b/modin/experimental/sklearn/__init__.py
--- a/modin/experimental/sklearn/__init__.py
+++ b/modin/experimental/sklearn/__init__.py
@@ -10,3 +10,5 @@
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
+
+"""Module holds experimental scikit-learn specific functionality for Modin."""
diff --git a/modin/experimental/sklearn/model_selection/__init__.py b/modin/experimental/sklearn/model_selection/__init__.py
--- a/modin/experimental/sklearn/model_selection/__init__.py
+++ b/modin/experimental/sklearn/model_selection/__init__.py
@@ -11,6 +11,8 @@
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
+"""Module holds model selection specific functionality."""
+
from .train_test_split import train_test_split
__all__ = ["train_test_split"]
| {"golden_diff": "diff --git a/modin/experimental/sklearn/__init__.py b/modin/experimental/sklearn/__init__.py\n--- a/modin/experimental/sklearn/__init__.py\n+++ b/modin/experimental/sklearn/__init__.py\n@@ -10,3 +10,5 @@\n # the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n # ANY KIND, either express or implied. See the License for the specific language\n # governing permissions and limitations under the License.\n+\n+\"\"\"Module holds experimental scikit-learn specific functionality for Modin.\"\"\"\ndiff --git a/modin/experimental/sklearn/model_selection/__init__.py b/modin/experimental/sklearn/model_selection/__init__.py\n--- a/modin/experimental/sklearn/model_selection/__init__.py\n+++ b/modin/experimental/sklearn/model_selection/__init__.py\n@@ -11,6 +11,8 @@\n # ANY KIND, either express or implied. See the License for the specific language\n # governing permissions and limitations under the License.\n \n+\"\"\"Module holds model selection specific functionality.\"\"\"\n+\n from .train_test_split import train_test_split\n \n __all__ = [\"train_test_split\"]\n", "issue": "Document modin.experimental.sklearn\nThis should include docstrings and purposes of everything inside modin.experimental.sklearn.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom .train_test_split import train_test_split\n\n__all__ = [\"train_test_split\"]\n", "path": "modin/experimental/sklearn/model_selection/__init__.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n", "path": "modin/experimental/sklearn/__init__.py"}]} | 995 | 262 |
gh_patches_debug_5648 | rasdani/github-patches | git_diff | pydantic__pydantic-8341 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade ruff target version to Python 3.8
### Initial Checks
- [X] I confirm that I'm using Pydantic V2
### Description
I consider this an action that was left out of [Dropping Python 3.7 Support](https://github.com/pydantic/pydantic/issues/7187)
1. Update ruff's `target-version` to `py38`
2. Run `make format` and adopt minimal changes to conform
### Example Code
```Python
(pydantic-3.12) PS D:\dev\Elkiwa\pydantic> make format
PDM, version 2.10.4
pdm run ruff --fix pydantic tests docs/plugins
pdm run ruff format pydantic tests docs/plugins
164 files left unchanged
```
### Python, Pydantic & OS Version
```Text
pydantic version: 2.6.0a1
pydantic-core version: 2.14.5
pydantic-core build: profile=release pgo=true
install path: D:\dev\Elkiwa\pydantic\pydantic
python version: 3.12.1 (tags/v3.12.1:2305ca5, Dec 7 2023, 22:03:25) [MSC v.1937 64 bit (AMD64)]
platform: Windows-11-10.0.22631-SP0
related packages: email-validator-2.1.0.post1 mypy-1.1.1 pydantic-extra-types-2.1.0 pydantic-settings-2.1.0 typing_extensions-4.8.0
commit: a3c3037f
```
</issue>
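For context, here is a minimal sketch of what the import block in `pydantic/plugin/_loader.py` could collapse to once Python 3.8 is the guaranteed floor. It mirrors the reference fix further below; the only assumption is that `typing.Final` (stdlib since 3.8) fully replaces the `typing_extensions` import.

```python
# Sketch: with Python >= 3.8 guaranteed, the version-gated fallback import
# is dead code and the stdlib module can be imported unconditionally.
import importlib.metadata as importlib_metadata
from typing import TYPE_CHECKING, Final, Iterable  # Final is stdlib from 3.8 on

if TYPE_CHECKING:
    from . import PydanticPluginProtocol  # type-checking-only import, unchanged
```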
<code>
[start of pydantic/plugin/_loader.py]
1 from __future__ import annotations
2
3 import sys
4 import warnings
5 from typing import TYPE_CHECKING, Iterable
6
7 from typing_extensions import Final
8
9 if sys.version_info >= (3, 8):
10 import importlib.metadata as importlib_metadata
11 else:
12 import importlib_metadata
13
14
15 if TYPE_CHECKING:
16 from . import PydanticPluginProtocol
17
18
19 PYDANTIC_ENTRY_POINT_GROUP: Final[str] = 'pydantic'
20
21 # cache of plugins
22 _plugins: dict[str, PydanticPluginProtocol] | None = None
23 # return no plugins while loading plugins to avoid recursion and errors while import plugins
24 # this means that if plugins use pydantic
25 _loading_plugins: bool = False
26
27
28 def get_plugins() -> Iterable[PydanticPluginProtocol]:
29 """Load plugins for Pydantic.
30
31 Inspired by: https://github.com/pytest-dev/pluggy/blob/1.3.0/src/pluggy/_manager.py#L376-L402
32 """
33 global _plugins, _loading_plugins
34 if _loading_plugins:
35 # this happens when plugins themselves use pydantic, we return no plugins
36 return ()
37 elif _plugins is None:
38 _plugins = {}
39 # set _loading_plugins so any plugins that use pydantic don't themselves use plugins
40 _loading_plugins = True
41 try:
42 for dist in importlib_metadata.distributions():
43 for entry_point in dist.entry_points:
44 if entry_point.group != PYDANTIC_ENTRY_POINT_GROUP:
45 continue
46 if entry_point.value in _plugins:
47 continue
48 try:
49 _plugins[entry_point.value] = entry_point.load()
50 except (ImportError, AttributeError) as e:
51 warnings.warn(
52 f'{e.__class__.__name__} while loading the `{entry_point.name}` Pydantic plugin, '
53 f'this plugin will not be installed.\n\n{e!r}'
54 )
55 finally:
56 _loading_plugins = False
57
58 return _plugins.values()
59
[end of pydantic/plugin/_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydantic/plugin/_loader.py b/pydantic/plugin/_loader.py
--- a/pydantic/plugin/_loader.py
+++ b/pydantic/plugin/_loader.py
@@ -1,16 +1,8 @@
from __future__ import annotations
-import sys
+import importlib.metadata as importlib_metadata
import warnings
-from typing import TYPE_CHECKING, Iterable
-
-from typing_extensions import Final
-
-if sys.version_info >= (3, 8):
- import importlib.metadata as importlib_metadata
-else:
- import importlib_metadata
-
+from typing import TYPE_CHECKING, Final, Iterable
if TYPE_CHECKING:
from . import PydanticPluginProtocol
| {"golden_diff": "diff --git a/pydantic/plugin/_loader.py b/pydantic/plugin/_loader.py\n--- a/pydantic/plugin/_loader.py\n+++ b/pydantic/plugin/_loader.py\n@@ -1,16 +1,8 @@\n from __future__ import annotations\n \n-import sys\n+import importlib.metadata as importlib_metadata\n import warnings\n-from typing import TYPE_CHECKING, Iterable\n-\n-from typing_extensions import Final\n-\n-if sys.version_info >= (3, 8):\n- import importlib.metadata as importlib_metadata\n-else:\n- import importlib_metadata\n-\n+from typing import TYPE_CHECKING, Final, Iterable\n \n if TYPE_CHECKING:\n from . import PydanticPluginProtocol\n", "issue": "Upgrade ruff target version to Python 3.8\n### Initial Checks\r\n\r\n- [X] I confirm that I'm using Pydantic V2\r\n\r\n### Description\r\n\r\nI deemed this as an action left out from [Dropping Python 3.7 Support](https://github.com/pydantic/pydantic/issues/7187)\r\n\r\n1. Update ruff's `target-version` to `py38`\r\n2. Run `make format` and adopt minimal changes to conform\r\n\r\n### Example Code\r\n\r\n```Python\r\n(pydantic-3.12) PS D:\\dev\\Elkiwa\\pydantic> make format\r\nPDM, version 2.10.4\r\npdm run ruff --fix pydantic tests docs/plugins\r\npdm run ruff format pydantic tests docs/plugins\r\n164 files left unchanged\r\n```\r\n\r\n\r\n### Python, Pydantic & OS Version\r\n\r\n```Text\r\n pydantic version: 2.6.0a1\r\n pydantic-core version: 2.14.5\r\n pydantic-core build: profile=release pgo=true\r\n install path: D:\\dev\\Elkiwa\\pydantic\\pydantic\r\n python version: 3.12.1 (tags/v3.12.1:2305ca5, Dec 7 2023, 22:03:25) [MSC v.1937 64 bit (AMD64)]\r\n platform: Windows-11-10.0.22631-SP0\r\n related packages: email-validator-2.1.0.post1 mypy-1.1.1 pydantic-extra-types-2.1.0 pydantic-settings-2.1.0 typing_extensions-4.8.0\r\n commit: a3c3037f\r\n```\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\nimport warnings\nfrom typing import TYPE_CHECKING, Iterable\n\nfrom typing_extensions import Final\n\nif sys.version_info >= (3, 8):\n import importlib.metadata as importlib_metadata\nelse:\n import importlib_metadata\n\n\nif TYPE_CHECKING:\n from . 
import PydanticPluginProtocol\n\n\nPYDANTIC_ENTRY_POINT_GROUP: Final[str] = 'pydantic'\n\n# cache of plugins\n_plugins: dict[str, PydanticPluginProtocol] | None = None\n# return no plugins while loading plugins to avoid recursion and errors while import plugins\n# this means that if plugins use pydantic\n_loading_plugins: bool = False\n\n\ndef get_plugins() -> Iterable[PydanticPluginProtocol]:\n \"\"\"Load plugins for Pydantic.\n\n Inspired by: https://github.com/pytest-dev/pluggy/blob/1.3.0/src/pluggy/_manager.py#L376-L402\n \"\"\"\n global _plugins, _loading_plugins\n if _loading_plugins:\n # this happens when plugins themselves use pydantic, we return no plugins\n return ()\n elif _plugins is None:\n _plugins = {}\n # set _loading_plugins so any plugins that use pydantic don't themselves use plugins\n _loading_plugins = True\n try:\n for dist in importlib_metadata.distributions():\n for entry_point in dist.entry_points:\n if entry_point.group != PYDANTIC_ENTRY_POINT_GROUP:\n continue\n if entry_point.value in _plugins:\n continue\n try:\n _plugins[entry_point.value] = entry_point.load()\n except (ImportError, AttributeError) as e:\n warnings.warn(\n f'{e.__class__.__name__} while loading the `{entry_point.name}` Pydantic plugin, '\n f'this plugin will not be installed.\\n\\n{e!r}'\n )\n finally:\n _loading_plugins = False\n\n return _plugins.values()\n", "path": "pydantic/plugin/_loader.py"}]} | 1,494 | 152 |
gh_patches_debug_11234 | rasdani/github-patches | git_diff | Mailu__Mailu-1184 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mailu 1.7 : Issue when moving mail to the Junk folder
When I move a mail to the Junk folder, I see the following in the logs:
imap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu
imap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu
imap_1 | Sep 22 16:51:26 imap: Error: tee: /dev/fd/63: I/O error
imap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: program exec:/conf/bin/spam (27655): Terminated with non-zero exit code 1
imap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: sieve: left message in mailbox 'Junk'
</issue>
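One plausible shape of the missing piece (it matches the reference diff further below) is to render the `/conf/*.script` templates into executables under `/conf/bin` during container start-up, so that sieve's `/conf/bin/spam` hook actually exists and can be exec'd. The `conf.jinja` helper and the paths are taken from the start script shown below; treat this as a sketch, not the exact shipped code.

```python
# Sketch of the extra start-up step for core/dovecot/start.py:
import glob
import os

from socrate import conf

os.makedirs("/conf/bin", exist_ok=True)
for script_file in glob.glob("/conf/*.script"):
    # e.g. /conf/spam.script -> /conf/bin/spam
    out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace(".script", ""))
    conf.jinja(script_file, os.environ, out_file)  # substitute environment values
    os.chmod(out_file, 0o555)  # world-readable and executable so dovecot can run it
```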
<code>
[start of core/dovecot/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import multiprocessing
6 import logging as log
7 import sys
8
9 from podop import run_server
10 from socrate import system, conf
11
12 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
13
14 def start_podop():
15 os.setuid(8)
16 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
17 run_server(0, "dovecot", "/tmp/podop.socket", [
18 ("quota", "url", url ),
19 ("auth", "url", url),
20 ("sieve", "url", url),
21 ])
22
23 # Actual startup script
24 os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front"))
25 os.environ["REDIS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_REDIS", "redis"))
26 os.environ["ADMIN_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ADMIN", "admin"))
27 os.environ["ANTISPAM_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ANTISPAM", "antispam:11334"))
28 if os.environ["WEBMAIL"] != "none":
29 os.environ["WEBMAIL_ADDRESS"] = system.resolve_address(os.environ.get("HOST_WEBMAIL", "webmail"))
30
31 for dovecot_file in glob.glob("/conf/*.conf"):
32 conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
33
34 # Run Podop, then postfix
35 multiprocessing.Process(target=start_podop).start()
36 os.system("chown mail:mail /mail")
37 os.system("chown -R mail:mail /var/lib/dovecot /conf")
38 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
39
[end of core/dovecot/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/dovecot/start.py b/core/dovecot/start.py
--- a/core/dovecot/start.py
+++ b/core/dovecot/start.py
@@ -31,6 +31,12 @@
for dovecot_file in glob.glob("/conf/*.conf"):
conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
+os.makedirs("/conf/bin", exist_ok=True)
+for script_file in glob.glob("/conf/*.script"):
+ out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script',''))
+ conf.jinja(script_file, os.environ, out_file)
+ os.chmod(out_file, 0o555)
+
# Run Podop, then postfix
multiprocessing.Process(target=start_podop).start()
os.system("chown mail:mail /mail")
| {"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -31,6 +31,12 @@\n for dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n \n+os.makedirs(\"/conf/bin\", exist_ok=True)\n+for script_file in glob.glob(\"/conf/*.script\"):\n+ out_file = os.path.join(\"/conf/bin/\", os.path.basename(script_file).replace('.script',''))\n+ conf.jinja(script_file, os.environ, out_file)\n+ os.chmod(out_file, 0o555)\n+\n # Run Podop, then postfix\n multiprocessing.Process(target=start_podop).start()\n os.system(\"chown mail:mail /mail\")\n", "issue": "Mailu 1.7 : Issue when moving mail to the Junk folder\nWhen I move a mail to the Junk Folder, i see the following in the logs:\r\nimap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu\r\nimap_1 | Sep 22 16:51:26 imap: Error: cannot stat file mailu\r\nimap_1 | Sep 22 16:51:26 imap: Error: tee: /dev/fd/63: I/O error\r\nimap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: program exec:/conf/bin/spam (27655): Terminated with non-zero exit code 1\r\nimap_1 | Sep 22 16:51:26 imap([email protected])<27629><hn93GCeTsresEgAC>: Info: sieve: left message in mailbox 'Junk'\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(8)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_FRONT\", \"front\"))\nos.environ[\"REDIS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_REDIS\", \"redis\"))\nos.environ[\"ADMIN_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_ADMIN\", \"admin\"))\nos.environ[\"ANTISPAM_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_ANTISPAM\", \"antispam:11334\"))\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_WEBMAIL\", \"webmail\"))\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}]} | 1,260 | 197 |
gh_patches_debug_2118 | rasdani/github-patches | git_diff | mdn__kuma-6978 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SystemError: <method 'get' of 'dict' objects> returned a result with an error set
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/6659909/
```
timeout: timeout
SystemError: <method 'get' of 'dict' objects> returned a result with an error set
File "meinheld/mlogging.py", line 187, in _access
'h': environ.get('REMOTE_ADDR', '-'),
SystemError: <method 'get' of 'dict' objects> returned a result with an error set
```
Low priority: 12x in 2 yrs, but it might be worth looking into with spare time.
</issue>
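Since the traceback points at meinheld's access logger failing after a request timeout, one mitigation, sketched from the reference diff further below, is to bound the autosuggest loop so a very broad `term` cannot keep the worker busy past the timeout. The 100-item cap is the value the fix settles on; the helper name `summarize_docs` is invented here for illustration (the shipped change slices inline in the view).

```python
def summarize_docs(docs, limit=100):
    """Return JSON-ready rows for at most `limit` matching documents."""
    docs_list = []
    for doc in docs[:limit]:  # bounded slice instead of the full queryset
        data = doc.get_json_data()
        data["label"] += " [" + doc.locale + "]"
        docs_list.append(data)
    return docs_list
```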
<code>
[start of kuma/wiki/views/misc.py]
1 import newrelic.agent
2 from django.http import HttpResponseBadRequest, JsonResponse
3 from django.shortcuts import render
4 from django.utils.translation import gettext_lazy as _
5 from django.views.decorators.http import require_GET
6
7 from kuma.core.decorators import (
8 block_user_agents,
9 ensure_wiki_domain,
10 shared_cache_control,
11 )
12
13 from ..constants import ALLOWED_TAGS, REDIRECT_CONTENT
14 from ..decorators import allow_CORS_GET
15 from ..models import Document, EditorToolbar
16
17
18 @ensure_wiki_domain
19 @shared_cache_control
20 @require_GET
21 def ckeditor_config(request):
22 """
23 Return ckeditor config from database
24 """
25 default_config = EditorToolbar.objects.filter(name="default")
26 if default_config.exists():
27 code = default_config[0].code
28 else:
29 code = ""
30
31 context = {
32 "editor_config": code,
33 "redirect_pattern": REDIRECT_CONTENT,
34 "allowed_tags": " ".join(ALLOWED_TAGS),
35 }
36 return render(
37 request,
38 "wiki/ckeditor_config.js",
39 context,
40 content_type="application/x-javascript",
41 )
42
43
44 @shared_cache_control
45 @newrelic.agent.function_trace()
46 @block_user_agents
47 @require_GET
48 @allow_CORS_GET
49 def autosuggest_documents(request):
50 """
51 Returns the closest title matches for front-end autosuggests
52 """
53 partial_title = request.GET.get("term", "")
54 locale = request.GET.get("locale", False)
55 current_locale = request.GET.get("current_locale", False)
56 exclude_current_locale = request.GET.get("exclude_current_locale", False)
57
58 if not partial_title:
59 # Only handle actual autosuggest requests, not requests for a
60 # memory-busting list of all documents.
61 return HttpResponseBadRequest(
62 _(
63 "Autosuggest requires a partial "
64 "title. For a full document "
65 "index, see the main page."
66 )
67 )
68
69 # Retrieve all documents that aren't redirects
70 docs = (
71 Document.objects.extra(select={"length": "Length(slug)"})
72 .filter(title__icontains=partial_title, is_redirect=0)
73 .exclude(slug__icontains="Talk:") # Remove old talk pages
74 .order_by("title", "length")
75 )
76
77 # All locales are assumed, unless a specific locale is requested or banned
78 if locale:
79 docs = docs.filter(locale=locale)
80 if current_locale:
81 docs = docs.filter(locale=request.LANGUAGE_CODE)
82 if exclude_current_locale:
83 docs = docs.exclude(locale=request.LANGUAGE_CODE)
84
85 # Generates a list of acceptable docs
86 docs_list = []
87 for doc in docs:
88 data = doc.get_json_data()
89 data["label"] += " [" + doc.locale + "]"
90 docs_list.append(data)
91
92 return JsonResponse(docs_list, safe=False)
93
[end of kuma/wiki/views/misc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/wiki/views/misc.py b/kuma/wiki/views/misc.py
--- a/kuma/wiki/views/misc.py
+++ b/kuma/wiki/views/misc.py
@@ -84,7 +84,7 @@
# Generates a list of acceptable docs
docs_list = []
- for doc in docs:
+ for doc in docs[:100]:
data = doc.get_json_data()
data["label"] += " [" + doc.locale + "]"
docs_list.append(data)
| {"golden_diff": "diff --git a/kuma/wiki/views/misc.py b/kuma/wiki/views/misc.py\n--- a/kuma/wiki/views/misc.py\n+++ b/kuma/wiki/views/misc.py\n@@ -84,7 +84,7 @@\n \n # Generates a list of acceptable docs\n docs_list = []\n- for doc in docs:\n+ for doc in docs[:100]:\n data = doc.get_json_data()\n data[\"label\"] += \" [\" + doc.locale + \"]\"\n docs_list.append(data)\n", "issue": "SystemError: <method 'get' of 'dict' objects> returned a result with an error set\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/6659909/\r\n\r\n```\r\ntimeout: timeout\r\nSystemError: <method 'get' of 'dict' objects> returned a result with an error set\r\n File \"meinheld/mlogging.py\", line 187, in _access\r\n 'h': environ.get('REMOTE_ADDR', '-'),\r\n\r\nSystemError: <method 'get' of 'dict' objects> returned a result with an error set\r\n```\r\n\r\nLow priority: 12x in 2yrs, but might worth looking into with spare time.\n", "before_files": [{"content": "import newrelic.agent\nfrom django.http import HttpResponseBadRequest, JsonResponse\nfrom django.shortcuts import render\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_GET\n\nfrom kuma.core.decorators import (\n block_user_agents,\n ensure_wiki_domain,\n shared_cache_control,\n)\n\nfrom ..constants import ALLOWED_TAGS, REDIRECT_CONTENT\nfrom ..decorators import allow_CORS_GET\nfrom ..models import Document, EditorToolbar\n\n\n@ensure_wiki_domain\n@shared_cache_control\n@require_GET\ndef ckeditor_config(request):\n \"\"\"\n Return ckeditor config from database\n \"\"\"\n default_config = EditorToolbar.objects.filter(name=\"default\")\n if default_config.exists():\n code = default_config[0].code\n else:\n code = \"\"\n\n context = {\n \"editor_config\": code,\n \"redirect_pattern\": REDIRECT_CONTENT,\n \"allowed_tags\": \" \".join(ALLOWED_TAGS),\n }\n return render(\n request,\n \"wiki/ckeditor_config.js\",\n context,\n content_type=\"application/x-javascript\",\n )\n\n\n@shared_cache_control\[email protected]_trace()\n@block_user_agents\n@require_GET\n@allow_CORS_GET\ndef autosuggest_documents(request):\n \"\"\"\n Returns the closest title matches for front-end autosuggests\n \"\"\"\n partial_title = request.GET.get(\"term\", \"\")\n locale = request.GET.get(\"locale\", False)\n current_locale = request.GET.get(\"current_locale\", False)\n exclude_current_locale = request.GET.get(\"exclude_current_locale\", False)\n\n if not partial_title:\n # Only handle actual autosuggest requests, not requests for a\n # memory-busting list of all documents.\n return HttpResponseBadRequest(\n _(\n \"Autosuggest requires a partial \"\n \"title. 
For a full document \"\n \"index, see the main page.\"\n )\n )\n\n # Retrieve all documents that aren't redirects\n docs = (\n Document.objects.extra(select={\"length\": \"Length(slug)\"})\n .filter(title__icontains=partial_title, is_redirect=0)\n .exclude(slug__icontains=\"Talk:\") # Remove old talk pages\n .order_by(\"title\", \"length\")\n )\n\n # All locales are assumed, unless a specific locale is requested or banned\n if locale:\n docs = docs.filter(locale=locale)\n if current_locale:\n docs = docs.filter(locale=request.LANGUAGE_CODE)\n if exclude_current_locale:\n docs = docs.exclude(locale=request.LANGUAGE_CODE)\n\n # Generates a list of acceptable docs\n docs_list = []\n for doc in docs:\n data = doc.get_json_data()\n data[\"label\"] += \" [\" + doc.locale + \"]\"\n docs_list.append(data)\n\n return JsonResponse(docs_list, safe=False)\n", "path": "kuma/wiki/views/misc.py"}]} | 1,465 | 110 |
gh_patches_debug_50138 | rasdani/github-patches | git_diff | archlinux__archinstall-1322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Please remove packagekit from GNOME profile
Speaking as the maintainer of GNOME on Arch, please remove packagekit from the `gnome` profile.
It's split out from the rest of gnome-software for a reason - packagekit is utterly unsupported. I've never seen it work reliably and it breaks our assumptions about user attention during upgrades.
</issue>
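For reference, the trimmed package sets would look roughly as follows. The package names are copied from the two profile files shown below; the variable names `GNOME_PACKAGES`/`KDE_PACKAGES` are stand-ins for illustration (both files actually use `__packages__`), and everything else in the profiles stays untouched.

```python
# profiles/gnome.py -- without the packagekit plugin
GNOME_PACKAGES = ["gnome", "gnome-tweaks", "gdm"]

# profiles/kde.py -- without packagekit-qt5
KDE_PACKAGES = [
    "plasma-meta", "konsole", "kwrite", "dolphin",
    "ark", "sddm", "plasma-wayland-session", "egl-wayland",
]
```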
<code>
[start of profiles/gnome.py]
1 # A desktop environment using "Gnome"
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 # Note: GDM should be part of the gnome group, but adding it here for clarity
8 __packages__ = [
9 "gnome",
10 "gnome-tweaks",
11 "gdm",
12 "gnome-software-packagekit-plugin",
13 ]
14
15
16 def _prep_function(*args, **kwargs):
17 """
18 Magic function called by the importing installer
19 before continuing any further. It also avoids executing any
20 other code in this stage. So it's a safe way to ask the user
21 for more input before any other installer steps start.
22 """
23
24 # Gnome optionally supports xorg, we'll install it since it also
25 # includes graphic driver setups (this might change in the future)
26 profile = archinstall.Profile(None, 'xorg')
27 with profile.load_instructions(namespace='xorg.py') as imported:
28 if hasattr(imported, '_prep_function'):
29 return imported._prep_function()
30 else:
31 print('Deprecated (??): xorg profile has no _prep_function() anymore')
32
33
34 # Ensures that this code only gets executed if executed
35 # through importlib.util.spec_from_file_location("gnome", "/somewhere/gnome.py")
36 # or through conventional import gnome
37 if __name__ == 'gnome':
38 # Install dependency profiles
39 archinstall.storage['installation_session'].install_profile('xorg')
40
41 # Install the GNOME packages
42 archinstall.storage['installation_session'].add_additional_packages(__packages__)
43
44 archinstall.storage['installation_session'].enable_service('gdm') # Gnome Display Manager
45 # We could also start it via xinitrc since we do have Xorg,
46 # but for gnome that's deprecated and wayland is preferred.
47
[end of profiles/gnome.py]
[start of profiles/kde.py]
1 # A desktop environment using "KDE".
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 __packages__ = [
8 "plasma-meta",
9 "konsole",
10 "kwrite",
11 "dolphin",
12 "ark",
13 "sddm",
14 "plasma-wayland-session",
15 "egl-wayland",
16 "packagekit-qt5",
17 ]
18
19
20 # TODO: Remove hard dependency of bash (due to .bash_profile)
21
22
23 def _prep_function(*args, **kwargs):
24 """
25 Magic function called by the importing installer
26 before continuing any further. It also avoids executing any
27 other code in this stage. So it's a safe way to ask the user
28 for more input before any other installer steps start.
29 """
30
31 # KDE requires a functioning Xorg installation.
32 profile = archinstall.Profile(None, 'xorg')
33 with profile.load_instructions(namespace='xorg.py') as imported:
34 if hasattr(imported, '_prep_function'):
35 return imported._prep_function()
36 else:
37 print('Deprecated (??): xorg profile has no _prep_function() anymore')
38
39
40 """
41 def _post_install(*args, **kwargs):
42 if "nvidia" in _gfx_driver_packages:
43 print("Plasma Wayland has known compatibility issues with the proprietary Nvidia driver")
44 print("After booting, you can choose between Wayland and Xorg using the drop-down menu")
45 return True
46 """
47
48 # Ensures that this code only gets executed if executed
49 # through importlib.util.spec_from_file_location("kde", "/somewhere/kde.py")
50 # or through conventional import kde
51 if __name__ == 'kde':
52 # Install dependency profiles
53 archinstall.storage['installation_session'].install_profile('xorg')
54
55 # Install the KDE packages
56 archinstall.storage['installation_session'].add_additional_packages(__packages__)
57
58 # Enable autostart of KDE for all users
59 archinstall.storage['installation_session'].enable_service('sddm')
60
[end of profiles/kde.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/profiles/gnome.py b/profiles/gnome.py
--- a/profiles/gnome.py
+++ b/profiles/gnome.py
@@ -8,8 +8,7 @@
__packages__ = [
"gnome",
"gnome-tweaks",
- "gdm",
- "gnome-software-packagekit-plugin",
+ "gdm"
]
diff --git a/profiles/kde.py b/profiles/kde.py
--- a/profiles/kde.py
+++ b/profiles/kde.py
@@ -12,8 +12,7 @@
"ark",
"sddm",
"plasma-wayland-session",
- "egl-wayland",
- "packagekit-qt5",
+ "egl-wayland"
]
| {"golden_diff": "diff --git a/profiles/gnome.py b/profiles/gnome.py\n--- a/profiles/gnome.py\n+++ b/profiles/gnome.py\n@@ -8,8 +8,7 @@\n __packages__ = [\n \t\"gnome\",\n \t\"gnome-tweaks\",\n-\t\"gdm\",\n-\t\"gnome-software-packagekit-plugin\",\n+\t\"gdm\"\n ]\n \n \ndiff --git a/profiles/kde.py b/profiles/kde.py\n--- a/profiles/kde.py\n+++ b/profiles/kde.py\n@@ -12,8 +12,7 @@\n \t\"ark\",\n \t\"sddm\",\n \t\"plasma-wayland-session\",\n-\t\"egl-wayland\",\n-\t\"packagekit-qt5\",\n+\t\"egl-wayland\"\n ]\n", "issue": "Please remove packagekit from GNOME profile\nSpeaking as the maintainer of GNOME on Arch, please remove packagekit from the `gnome` profile.\r\n\r\nIt's split out from the rest of gnome-software for a reason - packagekit is utterly unsupported. I've never seen it work reliably and it breaks our assumptions about user attention during upgrades.\n", "before_files": [{"content": "# A desktop environment using \"Gnome\"\n\nimport archinstall\n\nis_top_level_profile = False\n\n# Note: GDM should be part of the gnome group, but adding it here for clarity\n__packages__ = [\n\t\"gnome\",\n\t\"gnome-tweaks\",\n\t\"gdm\",\n\t\"gnome-software-packagekit-plugin\",\n]\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# Gnome optionally supports xorg, we'll install it since it also\n\t# includes graphic driver setups (this might change in the future)\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"gnome\", \"/somewhere/gnome.py\")\n# or through conventional import gnome\nif __name__ == 'gnome':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the GNOME packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\tarchinstall.storage['installation_session'].enable_service('gdm') # Gnome Display Manager\n# We could also start it via xinitrc since we do have Xorg,\n# but for gnome that's deprecated and wayland is preferred.\n", "path": "profiles/gnome.py"}, {"content": "# A desktop environment using \"KDE\".\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"plasma-meta\",\n\t\"konsole\",\n\t\"kwrite\",\n\t\"dolphin\",\n\t\"ark\",\n\t\"sddm\",\n\t\"plasma-wayland-session\",\n\t\"egl-wayland\",\n\t\"packagekit-qt5\",\n]\n\n\n# TODO: Remove hard dependency of bash (due to .bash_profile)\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# KDE requires a functioning Xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n\"\"\"\ndef _post_install(*args, **kwargs):\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tprint(\"Plasma Wayland has known compatibility issues with the proprietary Nvidia driver\")\n\tprint(\"After booting, you can choose between Wayland and Xorg using the drop-down menu\")\n\treturn True\n\"\"\"\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"kde\", \"/somewhere/kde.py\")\n# or through conventional import kde\nif __name__ == 'kde':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the KDE packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\t# Enable autostart of KDE for all users\n\tarchinstall.storage['installation_session'].enable_service('sddm')\n", "path": "profiles/kde.py"}]} | 1,660 | 174 |
gh_patches_debug_18126 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-504 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support Django BASE_DIR being a pathlib.Path
As per [my blog post today](https://adamj.eu/tech/2020/03/16/use-pathlib-in-your-django-project/), this is changing in Django 3.1 but users may already be changing it now.
</issue>
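A minimal sketch of the accommodation, assuming, as the reference diff below does, that Scout only ever needs the string form of the path. The helper name `to_scout_value` is invented for illustration; the shipped fix inlines the cast instead.

```python
from pathlib import Path


def to_scout_value(setting_name, value):
    """Normalize Django setting values before forwarding them to Scout.

    BASE_DIR may be a pathlib.Path on newer Django projects, while Scout's
    application_root expects a plain string.
    """
    if setting_name == "BASE_DIR" and isinstance(value, Path):
        return str(value)
    return value


# to_scout_value("BASE_DIR", Path("/srv/app")) == "/srv/app"
```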
<code>
[start of src/scout_apm/django/apps.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from django.apps import AppConfig
5 from django.conf import settings
6 from django.test.signals import setting_changed
7
8 import scout_apm.core
9 from scout_apm.core.config import scout_config
10 from scout_apm.django.instruments.huey import ensure_huey_instrumented
11 from scout_apm.django.instruments.sql import ensure_sql_instrumented
12 from scout_apm.django.instruments.template import ensure_templates_instrumented
13
14
15 class ScoutApmDjangoConfig(AppConfig):
16 name = "scout_apm"
17 verbose_name = "Scout Apm (Django)"
18
19 def ready(self):
20 self.update_scout_config_from_django_settings()
21 setting_changed.connect(self.on_setting_changed)
22
23 # Finish installing the agent. If the agent isn't installed for any
24 # reason, return without installing instruments
25 installed = scout_apm.core.install()
26 if not installed:
27 return
28
29 self.install_middleware()
30
31 # Setup Instruments
32 ensure_huey_instrumented()
33 ensure_sql_instrumented()
34 ensure_templates_instrumented()
35
36 def update_scout_config_from_django_settings(self, **kwargs):
37 for name in dir(settings):
38 self.on_setting_changed(name)
39
40 def on_setting_changed(self, setting, **kwargs):
41 if setting == "BASE_DIR":
42 scout_name = "application_root"
43 elif setting.startswith("SCOUT_"):
44 scout_name = setting.replace("SCOUT_", "").lower()
45 else:
46 return
47
48 try:
49 value = getattr(settings, setting)
50 except AttributeError:
51 # It was removed
52 scout_config.unset(scout_name)
53 else:
54 scout_config.set(**{scout_name: value})
55
56 def install_middleware(self):
57 """
58 Attempts to insert the ScoutApm middleware as the first middleware
59 (first on incoming requests, last on outgoing responses).
60 """
61 from django.conf import settings
62
63 # If MIDDLEWARE is set, update that, with handling of tuple vs array forms
64 if getattr(settings, "MIDDLEWARE", None) is not None:
65 timing_middleware = "scout_apm.django.middleware.MiddlewareTimingMiddleware"
66 view_middleware = "scout_apm.django.middleware.ViewTimingMiddleware"
67
68 if isinstance(settings.MIDDLEWARE, tuple):
69 if timing_middleware not in settings.MIDDLEWARE:
70 settings.MIDDLEWARE = (timing_middleware,) + settings.MIDDLEWARE
71 if view_middleware not in settings.MIDDLEWARE:
72 settings.MIDDLEWARE = settings.MIDDLEWARE + (view_middleware,)
73 else:
74 if timing_middleware not in settings.MIDDLEWARE:
75 settings.MIDDLEWARE.insert(0, timing_middleware)
76 if view_middleware not in settings.MIDDLEWARE:
77 settings.MIDDLEWARE.append(view_middleware)
78
79 # Otherwise, we're doing old style middleware, do the same thing with
80 # the same handling of tuple vs array forms
81 else:
82 timing_middleware = (
83 "scout_apm.django.middleware.OldStyleMiddlewareTimingMiddleware"
84 )
85 view_middleware = "scout_apm.django.middleware.OldStyleViewMiddleware"
86
87 if isinstance(settings.MIDDLEWARE_CLASSES, tuple):
88 if timing_middleware not in settings.MIDDLEWARE_CLASSES:
89 settings.MIDDLEWARE_CLASSES = (
90 timing_middleware,
91 ) + settings.MIDDLEWARE_CLASSES
92
93 if view_middleware not in settings.MIDDLEWARE_CLASSES:
94 settings.MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES + (
95 view_middleware,
96 )
97 else:
98 if timing_middleware not in settings.MIDDLEWARE_CLASSES:
99 settings.MIDDLEWARE_CLASSES.insert(0, timing_middleware)
100 if view_middleware not in settings.MIDDLEWARE_CLASSES:
101 settings.MIDDLEWARE_CLASSES.append(view_middleware)
102
[end of src/scout_apm/django/apps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/django/apps.py b/src/scout_apm/django/apps.py
--- a/src/scout_apm/django/apps.py
+++ b/src/scout_apm/django/apps.py
@@ -38,8 +38,10 @@
self.on_setting_changed(name)
def on_setting_changed(self, setting, **kwargs):
+ cast = None
if setting == "BASE_DIR":
scout_name = "application_root"
+ cast = str
elif setting.startswith("SCOUT_"):
scout_name = setting.replace("SCOUT_", "").lower()
else:
@@ -51,6 +53,8 @@
# It was removed
scout_config.unset(scout_name)
else:
+ if cast is not None:
+ value = cast(value)
scout_config.set(**{scout_name: value})
def install_middleware(self):
| {"golden_diff": "diff --git a/src/scout_apm/django/apps.py b/src/scout_apm/django/apps.py\n--- a/src/scout_apm/django/apps.py\n+++ b/src/scout_apm/django/apps.py\n@@ -38,8 +38,10 @@\n self.on_setting_changed(name)\n \n def on_setting_changed(self, setting, **kwargs):\n+ cast = None\n if setting == \"BASE_DIR\":\n scout_name = \"application_root\"\n+ cast = str\n elif setting.startswith(\"SCOUT_\"):\n scout_name = setting.replace(\"SCOUT_\", \"\").lower()\n else:\n@@ -51,6 +53,8 @@\n # It was removed\n scout_config.unset(scout_name)\n else:\n+ if cast is not None:\n+ value = cast(value)\n scout_config.set(**{scout_name: value})\n \n def install_middleware(self):\n", "issue": "Support Django BASE_DIR being a pathlib.Path\nAs per [my blog post today](https://adamj.eu/tech/2020/03/16/use-pathlib-in-your-django-project/), this is changing in Django 3.1 but users may already be changing it now.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom django.apps import AppConfig\nfrom django.conf import settings\nfrom django.test.signals import setting_changed\n\nimport scout_apm.core\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.django.instruments.huey import ensure_huey_instrumented\nfrom scout_apm.django.instruments.sql import ensure_sql_instrumented\nfrom scout_apm.django.instruments.template import ensure_templates_instrumented\n\n\nclass ScoutApmDjangoConfig(AppConfig):\n name = \"scout_apm\"\n verbose_name = \"Scout Apm (Django)\"\n\n def ready(self):\n self.update_scout_config_from_django_settings()\n setting_changed.connect(self.on_setting_changed)\n\n # Finish installing the agent. If the agent isn't installed for any\n # reason, return without installing instruments\n installed = scout_apm.core.install()\n if not installed:\n return\n\n self.install_middleware()\n\n # Setup Instruments\n ensure_huey_instrumented()\n ensure_sql_instrumented()\n ensure_templates_instrumented()\n\n def update_scout_config_from_django_settings(self, **kwargs):\n for name in dir(settings):\n self.on_setting_changed(name)\n\n def on_setting_changed(self, setting, **kwargs):\n if setting == \"BASE_DIR\":\n scout_name = \"application_root\"\n elif setting.startswith(\"SCOUT_\"):\n scout_name = setting.replace(\"SCOUT_\", \"\").lower()\n else:\n return\n\n try:\n value = getattr(settings, setting)\n except AttributeError:\n # It was removed\n scout_config.unset(scout_name)\n else:\n scout_config.set(**{scout_name: value})\n\n def install_middleware(self):\n \"\"\"\n Attempts to insert the ScoutApm middleware as the first middleware\n (first on incoming requests, last on outgoing responses).\n \"\"\"\n from django.conf import settings\n\n # If MIDDLEWARE is set, update that, with handling of tuple vs array forms\n if getattr(settings, \"MIDDLEWARE\", None) is not None:\n timing_middleware = \"scout_apm.django.middleware.MiddlewareTimingMiddleware\"\n view_middleware = \"scout_apm.django.middleware.ViewTimingMiddleware\"\n\n if isinstance(settings.MIDDLEWARE, tuple):\n if timing_middleware not in settings.MIDDLEWARE:\n settings.MIDDLEWARE = (timing_middleware,) + settings.MIDDLEWARE\n if view_middleware not in settings.MIDDLEWARE:\n settings.MIDDLEWARE = settings.MIDDLEWARE + (view_middleware,)\n else:\n if timing_middleware not in settings.MIDDLEWARE:\n settings.MIDDLEWARE.insert(0, timing_middleware)\n if view_middleware not in settings.MIDDLEWARE:\n settings.MIDDLEWARE.append(view_middleware)\n\n # 
Otherwise, we're doing old style middleware, do the same thing with\n # the same handling of tuple vs array forms\n else:\n timing_middleware = (\n \"scout_apm.django.middleware.OldStyleMiddlewareTimingMiddleware\"\n )\n view_middleware = \"scout_apm.django.middleware.OldStyleViewMiddleware\"\n\n if isinstance(settings.MIDDLEWARE_CLASSES, tuple):\n if timing_middleware not in settings.MIDDLEWARE_CLASSES:\n settings.MIDDLEWARE_CLASSES = (\n timing_middleware,\n ) + settings.MIDDLEWARE_CLASSES\n\n if view_middleware not in settings.MIDDLEWARE_CLASSES:\n settings.MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES + (\n view_middleware,\n )\n else:\n if timing_middleware not in settings.MIDDLEWARE_CLASSES:\n settings.MIDDLEWARE_CLASSES.insert(0, timing_middleware)\n if view_middleware not in settings.MIDDLEWARE_CLASSES:\n settings.MIDDLEWARE_CLASSES.append(view_middleware)\n", "path": "src/scout_apm/django/apps.py"}]} | 1,635 | 200 |
gh_patches_debug_19527 | rasdani/github-patches | git_diff | twisted__twisted-512 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
twisted.internet._ssl is unused and can be deleted
|[<img alt="alex's avatar" src="https://avatars.githubusercontent.com/u/772?s=50" width="50" height="50">](https://github.com/alex)| @alex reported|
|-|-|
|Trac ID|trac#8775|
|Type|enhancement|
|Created|2016-08-20 12:51:20Z|
LGTM
<details><summary>Searchable metadata</summary>
```
trac-id__8775 8775
type__enhancement enhancement
reporter__alex alex
priority__normal normal
milestone__None None
branch__
branch_author__
status__closed closed
resolution__fixed fixed
component__core core
keywords__None None
time__1471697480022394 1471697480022394
changetime__1471699165043033 1471699165043033
version__None None
owner__hawkowl hawkowl
```
</details>
</issue>
<code>
[start of src/twisted/internet/_ssl.py]
1 # -*- test-case-name: twisted.test.test_ssl -*-
2 # Copyright (c) Twisted Matrix Laboratories.
3 # See LICENSE for details.
4
5 """
6 This module implements helpers for switching to TLS on an existing transport.
7
8 @since: 11.1
9 """
10
11 class _TLSDelayed(object):
12 """
13 State tracking record for TLS startup parameters. Used to remember how
14 TLS should be started when starting it is delayed to wait for the output
15 buffer to be flushed.
16
17 @ivar bufferedData: A C{list} which contains all the data which was
18 written to the transport after an attempt to start TLS was made but
19 before the buffers outstanding at that time could be flushed and TLS
20 could really be started. This is appended to by the transport's
21 write and writeSequence methods until it is possible to actually
22 start TLS, then it is written to the TLS-enabled transport.
23
24 @ivar context: An SSL context factory object to use to start TLS.
25
26 @ivar extra: An extra argument to pass to the transport's C{startTLS}
27 method.
28 """
29 def __init__(self, bufferedData, context, extra):
30 self.bufferedData = bufferedData
31 self.context = context
32 self.extra = extra
33
[end of src/twisted/internet/_ssl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/twisted/internet/_ssl.py b/src/twisted/internet/_ssl.py
deleted file mode 100644
--- a/src/twisted/internet/_ssl.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# -*- test-case-name: twisted.test.test_ssl -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-This module implements helpers for switching to TLS on an existing transport.
-
-@since: 11.1
-"""
-
-class _TLSDelayed(object):
- """
- State tracking record for TLS startup parameters. Used to remember how
- TLS should be started when starting it is delayed to wait for the output
- buffer to be flushed.
-
- @ivar bufferedData: A C{list} which contains all the data which was
- written to the transport after an attempt to start TLS was made but
- before the buffers outstanding at that time could be flushed and TLS
- could really be started. This is appended to by the transport's
- write and writeSequence methods until it is possible to actually
- start TLS, then it is written to the TLS-enabled transport.
-
- @ivar context: An SSL context factory object to use to start TLS.
-
- @ivar extra: An extra argument to pass to the transport's C{startTLS}
- method.
- """
- def __init__(self, bufferedData, context, extra):
- self.bufferedData = bufferedData
- self.context = context
- self.extra = extra
| {"golden_diff": "diff --git a/src/twisted/internet/_ssl.py b/src/twisted/internet/_ssl.py\ndeleted file mode 100644\n--- a/src/twisted/internet/_ssl.py\n+++ /dev/null\n@@ -1,32 +0,0 @@\n-# -*- test-case-name: twisted.test.test_ssl -*-\n-# Copyright (c) Twisted Matrix Laboratories.\n-# See LICENSE for details.\n-\n-\"\"\"\n-This module implements helpers for switching to TLS on an existing transport.\n-\n-@since: 11.1\n-\"\"\"\n-\n-class _TLSDelayed(object):\n- \"\"\"\n- State tracking record for TLS startup parameters. Used to remember how\n- TLS should be started when starting it is delayed to wait for the output\n- buffer to be flushed.\n-\n- @ivar bufferedData: A C{list} which contains all the data which was\n- written to the transport after an attempt to start TLS was made but\n- before the buffers outstanding at that time could be flushed and TLS\n- could really be started. This is appended to by the transport's\n- write and writeSequence methods until it is possible to actually\n- start TLS, then it is written to the TLS-enabled transport.\n-\n- @ivar context: An SSL context factory object to use to start TLS.\n-\n- @ivar extra: An extra argument to pass to the transport's C{startTLS}\n- method.\n- \"\"\"\n- def __init__(self, bufferedData, context, extra):\n- self.bufferedData = bufferedData\n- self.context = context\n- self.extra = extra\n", "issue": "twisted.internet._ssl is unused and can be deleted\n|[<img alt=\"alex's avatar\" src=\"https://avatars.githubusercontent.com/u/772?s=50\" width=\"50\" height=\"50\">](https://github.com/alex)| @alex reported|\n|-|-|\n|Trac ID|trac#8775|\n|Type|enhancement|\n|Created|2016-08-20 12:51:20Z|\n\nLGTM\n\n<details><summary>Searchable metadata</summary>\n\n```\ntrac-id__8775 8775\ntype__enhancement enhancement\nreporter__alex alex\npriority__normal normal\nmilestone__None None\nbranch__ \nbranch_author__ \nstatus__closed closed\nresolution__fixed fixed\ncomponent__core core\nkeywords__None None\ntime__1471697480022394 1471697480022394\nchangetime__1471699165043033 1471699165043033\nversion__None None\nowner__hawkowl hawkowl\n\n```\n</details>\n\n", "before_files": [{"content": "# -*- test-case-name: twisted.test.test_ssl -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nThis module implements helpers for switching to TLS on an existing transport.\n\n@since: 11.1\n\"\"\"\n\nclass _TLSDelayed(object):\n \"\"\"\n State tracking record for TLS startup parameters. Used to remember how\n TLS should be started when starting it is delayed to wait for the output\n buffer to be flushed.\n\n @ivar bufferedData: A C{list} which contains all the data which was\n written to the transport after an attempt to start TLS was made but\n before the buffers outstanding at that time could be flushed and TLS\n could really be started. This is appended to by the transport's\n write and writeSequence methods until it is possible to actually\n start TLS, then it is written to the TLS-enabled transport.\n\n @ivar context: An SSL context factory object to use to start TLS.\n\n @ivar extra: An extra argument to pass to the transport's C{startTLS}\n method.\n \"\"\"\n def __init__(self, bufferedData, context, extra):\n self.bufferedData = bufferedData\n self.context = context\n self.extra = extra\n", "path": "src/twisted/internet/_ssl.py"}]} | 1,148 | 359 |
gh_patches_debug_5035 | rasdani/github-patches | git_diff | wagtail__wagtail-1019 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove built dependencies
This commit removes libsass and Pillow from the setup.py dependency list. This greatly improves install performance and also means that the basic Wagtail installation is pure Python (so no build tools need to be present on the end user's host machine).
Neither of these dependencies is called directly from within Wagtail, so the `start` project command continues to work correctly.
</issue>
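For reference, a sketch of the trimmed dependency list once the two compiled packages are gone; the entries are copied from the `setup.py` shown below (the reference diff also drops `django-libsass`, which is what pulls in libsass).

```python
# install_requires without the compiled bits (libsass via django-libsass,
# and Pillow); everything else stays exactly as before.
install_requires = [
    "Django>=1.7.0,<1.8",
    "django-compressor>=1.4",
    "django-modelcluster>=0.5",
    "django-taggit==0.12.2",
    "django-treebeard==3.0",
    "beautifulsoup4>=4.3.2",
    "html5lib==0.999",
    "Unidecode>=0.04.14",
    "six>=1.7.0",
    "requests>=2.0.0",
    "Willow==0.1",
]
```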
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 import sys, os
4
5 from wagtail.wagtailcore import __version__
6
7
8 try:
9 from setuptools import setup, find_packages
10 except ImportError:
11 from distutils.core import setup
12
13
14 # Hack to prevent "TypeError: 'NoneType' object is not callable" error
15 # in multiprocessing/util.py _exit_function when setup.py exits
16 # (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
17 try:
18 import multiprocessing
19 except ImportError:
20 pass
21
22
23 # Disable parallel builds, because Pillow 2.5.3 does some crazy monkeypatching of
24 # the build process on multicore systems, which breaks installation of libsass
25 os.environ['MAX_CONCURRENCY'] = '1'
26
27 PY3 = sys.version_info[0] == 3
28
29
30 install_requires = [
31 "Django>=1.7.0,<1.8",
32 "django-compressor>=1.4",
33 "django-libsass>=0.2",
34 "django-modelcluster>=0.5",
35 "django-taggit==0.12.2",
36 "django-treebeard==3.0",
37 "Pillow>=2.6.1",
38 "beautifulsoup4>=4.3.2",
39 "html5lib==0.999",
40 "Unidecode>=0.04.14",
41 "six>=1.7.0",
42 'requests>=2.0.0',
43 "Willow==0.1",
44 ]
45
46
47 if not PY3:
48 install_requires += [
49 "unicodecsv>=0.9.4"
50 ]
51
52
53 setup(
54 name='wagtail',
55 version=__version__,
56 description='A Django content management system focused on flexibility and user experience',
57 author='Matthew Westcott',
58 author_email='[email protected]',
59 url='http://wagtail.io/',
60 packages=find_packages(),
61 include_package_data=True,
62 license='BSD',
63 long_description=open('README.rst').read(),
64 classifiers=[
65 'Development Status :: 5 - Production/Stable',
66 'Environment :: Web Environment',
67 'Intended Audience :: Developers',
68 'License :: OSI Approved :: BSD License',
69 'Operating System :: OS Independent',
70 'Programming Language :: Python',
71 'Programming Language :: Python :: 2',
72 'Programming Language :: Python :: 2.7',
73 'Programming Language :: Python :: 3',
74 'Programming Language :: Python :: 3.3',
75 'Programming Language :: Python :: 3.4',
76 'Framework :: Django',
77 'Topic :: Internet :: WWW/HTTP :: Site Management',
78 ],
79 install_requires=install_requires,
80 entry_points="""
81 [console_scripts]
82 wagtail=wagtail.bin.wagtail:main
83 """,
84 zip_safe=False,
85 )
86
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,11 +30,9 @@
install_requires = [
"Django>=1.7.0,<1.8",
"django-compressor>=1.4",
- "django-libsass>=0.2",
"django-modelcluster>=0.5",
"django-taggit==0.12.2",
"django-treebeard==3.0",
- "Pillow>=2.6.1",
"beautifulsoup4>=4.3.2",
"html5lib==0.999",
"Unidecode>=0.04.14",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,11 +30,9 @@\n install_requires = [\n \"Django>=1.7.0,<1.8\",\n \"django-compressor>=1.4\",\n- \"django-libsass>=0.2\",\n \"django-modelcluster>=0.5\",\n \"django-taggit==0.12.2\",\n \"django-treebeard==3.0\",\n- \"Pillow>=2.6.1\",\n \"beautifulsoup4>=4.3.2\",\n \"html5lib==0.999\",\n \"Unidecode>=0.04.14\",\n", "issue": "Remove built dependencies\nThis commit removes libsass and Pillow from the setup.py dependency list. This greatly improves install performance and also means that the basic Wagtail installation is pure-python (so no build tools need to be on the end users host machine).\n\nNone of these dependencies are directly called from within Wagtail so the start project command continues to work correctly.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport sys, os\n\nfrom wagtail.wagtailcore import __version__\n\n\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n\n\n# Hack to prevent \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when setup.py exits\n# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing\nexcept ImportError:\n pass\n\n\n# Disable parallel builds, because Pillow 2.5.3 does some crazy monkeypatching of\n# the build process on multicore systems, which breaks installation of libsass\nos.environ['MAX_CONCURRENCY'] = '1'\n\nPY3 = sys.version_info[0] == 3\n\n\ninstall_requires = [\n \"Django>=1.7.0,<1.8\",\n \"django-compressor>=1.4\",\n \"django-libsass>=0.2\",\n \"django-modelcluster>=0.5\",\n \"django-taggit==0.12.2\",\n \"django-treebeard==3.0\",\n \"Pillow>=2.6.1\",\n \"beautifulsoup4>=4.3.2\",\n \"html5lib==0.999\",\n \"Unidecode>=0.04.14\",\n \"six>=1.7.0\",\n 'requests>=2.0.0',\n \"Willow==0.1\",\n]\n\n\nif not PY3:\n install_requires += [\n \"unicodecsv>=0.9.4\"\n ]\n\n\nsetup(\n name='wagtail',\n version=__version__,\n description='A Django content management system focused on flexibility and user experience',\n author='Matthew Westcott',\n author_email='[email protected]',\n url='http://wagtail.io/',\n packages=find_packages(),\n include_package_data=True,\n license='BSD',\n long_description=open('README.rst').read(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Framework :: Django',\n 'Topic :: Internet :: WWW/HTTP :: Site Management',\n ],\n install_requires=install_requires,\n entry_points=\"\"\"\n [console_scripts]\n wagtail=wagtail.bin.wagtail:main\n \"\"\",\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,394 | 159 |
gh_patches_debug_32672 | rasdani/github-patches | git_diff | encode__uvicorn-591 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Proxy Middleware grabs the last IP in `x-forwarded-for`, not the first
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For says the first IP in the list is the client IP, but ProxyHeadersMiddleware grabs the last IP:
https://github.com/encode/uvicorn/blob/9d9f8820a8155e36dcb5e4d4023f470e51aa4e03/uvicorn/middleware/proxy_headers.py#L41
</issue>
<code>
[start of uvicorn/middleware/proxy_headers.py]
1 """
2 This middleware can be used when a known proxy is fronting the application,
3 and is trusted to be properly setting the `X-Forwarded-Proto` and
4 `X-Forwarded-For` headers with the connecting client information.
5
6 Modifies the `client` and `scheme` information so that they reference
7 the connecting client, rather that the connecting proxy.
8
9 https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies
10 """
11
12
13 class ProxyHeadersMiddleware:
14 def __init__(self, app, trusted_hosts="127.0.0.1"):
15 self.app = app
16 if isinstance(trusted_hosts, str):
17 self.trusted_hosts = [item.strip() for item in trusted_hosts.split(",")]
18 else:
19 self.trusted_hosts = trusted_hosts
20 self.always_trust = "*" in self.trusted_hosts
21
22 async def __call__(self, scope, receive, send):
23 if scope["type"] in ("http", "websocket"):
24 client_addr = scope.get("client")
25 client_host = client_addr[0] if client_addr else None
26
27 if self.always_trust or client_host in self.trusted_hosts:
28 headers = dict(scope["headers"])
29
30 if b"x-forwarded-proto" in headers:
31 # Determine if the incoming request was http or https based on
32 # the X-Forwarded-Proto header.
33 x_forwarded_proto = headers[b"x-forwarded-proto"].decode("latin1")
34 scope["scheme"] = x_forwarded_proto.strip()
35
36 if b"x-forwarded-for" in headers:
37 # Determine the client address from the last trusted IP in the
38 # X-Forwarded-For header. We've lost the connecting client's port
39 # information by now, so only include the host.
40 x_forwarded_for = headers[b"x-forwarded-for"].decode("latin1")
41 host = x_forwarded_for.split(",")[-1].strip()
42 port = 0
43 scope["client"] = (host, port)
44
45 return await self.app(scope, receive, send)
46
[end of uvicorn/middleware/proxy_headers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/uvicorn/middleware/proxy_headers.py b/uvicorn/middleware/proxy_headers.py
--- a/uvicorn/middleware/proxy_headers.py
+++ b/uvicorn/middleware/proxy_headers.py
@@ -8,17 +8,28 @@
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies
"""
+from typing import List
class ProxyHeadersMiddleware:
def __init__(self, app, trusted_hosts="127.0.0.1"):
self.app = app
if isinstance(trusted_hosts, str):
- self.trusted_hosts = [item.strip() for item in trusted_hosts.split(",")]
+ self.trusted_hosts = {item.strip() for item in trusted_hosts.split(",")}
else:
- self.trusted_hosts = trusted_hosts
+ self.trusted_hosts = set(trusted_hosts)
self.always_trust = "*" in self.trusted_hosts
+ def get_trusted_client_host(
+ self, x_forwarded_for_hosts
+ ): # type: (List[str]) -> str
+ if self.always_trust:
+ return x_forwarded_for_hosts[0]
+
+ for host in reversed(x_forwarded_for_hosts):
+ if host not in self.trusted_hosts:
+ return host
+
async def __call__(self, scope, receive, send):
if scope["type"] in ("http", "websocket"):
client_addr = scope.get("client")
@@ -38,7 +49,10 @@
# X-Forwarded-For header. We've lost the connecting client's port
# information by now, so only include the host.
x_forwarded_for = headers[b"x-forwarded-for"].decode("latin1")
- host = x_forwarded_for.split(",")[-1].strip()
+ x_forwarded_for_hosts = [
+ item.strip() for item in x_forwarded_for.split(",")
+ ]
+ host = self.get_trusted_client_host(x_forwarded_for_hosts)
port = 0
scope["client"] = (host, port)
| {"golden_diff": "diff --git a/uvicorn/middleware/proxy_headers.py b/uvicorn/middleware/proxy_headers.py\n--- a/uvicorn/middleware/proxy_headers.py\n+++ b/uvicorn/middleware/proxy_headers.py\n@@ -8,17 +8,28 @@\n \n https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies\n \"\"\"\n+from typing import List\n \n \n class ProxyHeadersMiddleware:\n def __init__(self, app, trusted_hosts=\"127.0.0.1\"):\n self.app = app\n if isinstance(trusted_hosts, str):\n- self.trusted_hosts = [item.strip() for item in trusted_hosts.split(\",\")]\n+ self.trusted_hosts = {item.strip() for item in trusted_hosts.split(\",\")}\n else:\n- self.trusted_hosts = trusted_hosts\n+ self.trusted_hosts = set(trusted_hosts)\n self.always_trust = \"*\" in self.trusted_hosts\n \n+ def get_trusted_client_host(\n+ self, x_forwarded_for_hosts\n+ ): # type: (List[str]) -> str\n+ if self.always_trust:\n+ return x_forwarded_for_hosts[0]\n+\n+ for host in reversed(x_forwarded_for_hosts):\n+ if host not in self.trusted_hosts:\n+ return host\n+\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] in (\"http\", \"websocket\"):\n client_addr = scope.get(\"client\")\n@@ -38,7 +49,10 @@\n # X-Forwarded-For header. We've lost the connecting client's port\n # information by now, so only include the host.\n x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"latin1\")\n- host = x_forwarded_for.split(\",\")[-1].strip()\n+ x_forwarded_for_hosts = [\n+ item.strip() for item in x_forwarded_for.split(\",\")\n+ ]\n+ host = self.get_trusted_client_host(x_forwarded_for_hosts)\n port = 0\n scope[\"client\"] = (host, port)\n", "issue": "Proxy Middleware grabs the last IP in `x-forwarded-for`, not the first\nhttps://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For says the first IP in is a client IP, but ProxyHeadersMiddleware grabs the last IP:\r\nhttps://github.com/encode/uvicorn/blob/9d9f8820a8155e36dcb5e4d4023f470e51aa4e03/uvicorn/middleware/proxy_headers.py#L41\n", "before_files": [{"content": "\"\"\"\nThis middleware can be used when a known proxy is fronting the application,\nand is trusted to be properly setting the `X-Forwarded-Proto` and\n`X-Forwarded-For` headers with the connecting client information.\n\nModifies the `client` and `scheme` information so that they reference\nthe connecting client, rather that the connecting proxy.\n\nhttps://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies\n\"\"\"\n\n\nclass ProxyHeadersMiddleware:\n def __init__(self, app, trusted_hosts=\"127.0.0.1\"):\n self.app = app\n if isinstance(trusted_hosts, str):\n self.trusted_hosts = [item.strip() for item in trusted_hosts.split(\",\")]\n else:\n self.trusted_hosts = trusted_hosts\n self.always_trust = \"*\" in self.trusted_hosts\n\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] in (\"http\", \"websocket\"):\n client_addr = scope.get(\"client\")\n client_host = client_addr[0] if client_addr else None\n\n if self.always_trust or client_host in self.trusted_hosts:\n headers = dict(scope[\"headers\"])\n\n if b\"x-forwarded-proto\" in headers:\n # Determine if the incoming request was http or https based on\n # the X-Forwarded-Proto header.\n x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"latin1\")\n scope[\"scheme\"] = x_forwarded_proto.strip()\n\n if b\"x-forwarded-for\" in headers:\n # Determine the client address from the last trusted IP in the\n # X-Forwarded-For header. 
We've lost the connecting client's port\n # information by now, so only include the host.\n x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"latin1\")\n host = x_forwarded_for.split(\",\")[-1].strip()\n port = 0\n scope[\"client\"] = (host, port)\n\n return await self.app(scope, receive, send)\n", "path": "uvicorn/middleware/proxy_headers.py"}]} | 1,191 | 462 |
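As a sanity check on the behavior the golden diff introduces, here is a standalone sketch of the right-to-left walk over `X-Forwarded-For`. The function name and the sample addresses are illustrative, not uvicorn's actual API:

```python
# Trusted proxies append themselves at the end of the chain, so the first
# address (scanning from the right) that is NOT a trusted proxy is the
# real client; the old split(",")[-1].strip() returned a proxy instead.
def trusted_client_host(x_forwarded_for, trusted_hosts):
    hosts = [item.strip() for item in x_forwarded_for.split(",")]
    for host in reversed(hosts):
        if host not in trusted_hosts:
            return host

assert trusted_client_host(
    "203.0.113.7, 10.0.0.2, 10.0.0.1", {"10.0.0.1", "10.0.0.2"}
) == "203.0.113.7"  # the old code would have produced "10.0.0.1"
```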
gh_patches_debug_29773 | rasdani/github-patches | git_diff | streamlink__streamlink-3940 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.euronews: www.euronews.com and ru.euronews.com provide different APIs
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
The main Euronews site has started using the new API some time ago, and the current streamlink's plugin, euronews.py, indeed works correctly with the main site. However, some (?) language specific subdomains, like https://ru.euronews.com/, still use the old API (compare, e.g., the following responses: https://www.euronews.com/api/watchlive.json vs. https://ru.euronews.com/api/watchlive.json).
[This previous version of euronews.py](https://github.com/streamlink/streamlink/blob/fcda5b681422718cc0a95b3de45d3fe2698d8e29/src/streamlink/plugins/euronews.py) works fine with the old API but obviously doesn't support the new one. It would be great if the plugin could support both versions.
### Debug log
```text
[cli][info] Found matching plugin euronews for URL https://ru.euronews.com/live
error: No playable streams found on this URL: https://ru.euronews.com/live
```
</issue>
<code>
[start of src/streamlink/plugins/euronews.py]
1 import re
2 from urllib.parse import urlparse
3
4 from streamlink.plugin import Plugin, pluginmatcher
5 from streamlink.plugin.api import validate
6 from streamlink.plugin.api.utils import itertags
7 from streamlink.stream import HTTPStream
8
9
10 @pluginmatcher(re.compile(
11 r'https?://(?:\w+\.)*euronews\.com/'
12 ))
13 class Euronews(Plugin):
14 def _get_vod_stream(self):
15 def find_video_url(content):
16 for elem in itertags(content, "meta"):
17 if elem.attributes.get("property") == "og:video":
18 return elem.attributes.get("content")
19
20 video_url = self.session.http.get(self.url, schema=validate.Schema(
21 validate.transform(find_video_url),
22 validate.any(None, validate.url())
23 ))
24
25 if video_url is not None:
26 return dict(vod=HTTPStream(self.session, video_url))
27
28 def _get_live_streams(self):
29 def find_video_id(content):
30 for elem in itertags(content, "div"):
31 if elem.attributes.get("id") == "pfpPlayer" and elem.attributes.get("data-google-src") is not None:
32 return elem.attributes.get("data-video-id")
33
34 video_id = self.session.http.get(self.url, schema=validate.Schema(
35 validate.transform(find_video_id),
36 validate.any(None, str)
37 ))
38
39 if video_id is not None:
40 return self.session.streams(f"https://www.youtube.com/watch?v={video_id}")
41
42 def _get_streams(self):
43 parsed = urlparse(self.url)
44
45 if parsed.path == "/live":
46 return self._get_live_streams()
47 else:
48 return self._get_vod_stream()
49
50
51 __plugin__ = Euronews
52
[end of src/streamlink/plugins/euronews.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/euronews.py b/src/streamlink/plugins/euronews.py
--- a/src/streamlink/plugins/euronews.py
+++ b/src/streamlink/plugins/euronews.py
@@ -3,14 +3,17 @@
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
-from streamlink.plugin.api.utils import itertags
-from streamlink.stream import HTTPStream
+from streamlink.plugin.api.utils import itertags, parse_json
+from streamlink.stream import HLSStream, HTTPStream
+from streamlink.utils import update_scheme
@pluginmatcher(re.compile(
- r'https?://(?:\w+\.)*euronews\.com/'
+ r'https?://(?:(?P<subdomain>\w+)\.)?euronews\.com/'
))
class Euronews(Plugin):
+ API_URL = "https://{subdomain}.euronews.com/api/watchlive.json"
+
def _get_vod_stream(self):
def find_video_url(content):
for elem in itertags(content, "meta"):
@@ -39,6 +42,24 @@
if video_id is not None:
return self.session.streams(f"https://www.youtube.com/watch?v={video_id}")
+ info_url = self.session.http.get(self.API_URL.format(subdomain=self.match.group("subdomain")), schema=validate.Schema(
+ validate.transform(parse_json),
+ {"url": validate.url()},
+ validate.get("url"),
+ validate.transform(lambda url: update_scheme("https://", url))
+ ))
+ hls_url = self.session.http.get(info_url, schema=validate.Schema(
+ validate.transform(parse_json),
+ {
+ "status": "ok",
+ "protocol": "hls",
+ "primary": validate.url()
+ },
+ validate.get("primary")
+ ))
+
+ return HLSStream.parse_variant_playlist(self.session, hls_url)
+
def _get_streams(self):
parsed = urlparse(self.url)
| {"golden_diff": "diff --git a/src/streamlink/plugins/euronews.py b/src/streamlink/plugins/euronews.py\n--- a/src/streamlink/plugins/euronews.py\n+++ b/src/streamlink/plugins/euronews.py\n@@ -3,14 +3,17 @@\n \n from streamlink.plugin import Plugin, pluginmatcher\n from streamlink.plugin.api import validate\n-from streamlink.plugin.api.utils import itertags\n-from streamlink.stream import HTTPStream\n+from streamlink.plugin.api.utils import itertags, parse_json\n+from streamlink.stream import HLSStream, HTTPStream\n+from streamlink.utils import update_scheme\n \n \n @pluginmatcher(re.compile(\n- r'https?://(?:\\w+\\.)*euronews\\.com/'\n+ r'https?://(?:(?P<subdomain>\\w+)\\.)?euronews\\.com/'\n ))\n class Euronews(Plugin):\n+ API_URL = \"https://{subdomain}.euronews.com/api/watchlive.json\"\n+\n def _get_vod_stream(self):\n def find_video_url(content):\n for elem in itertags(content, \"meta\"):\n@@ -39,6 +42,24 @@\n if video_id is not None:\n return self.session.streams(f\"https://www.youtube.com/watch?v={video_id}\")\n \n+ info_url = self.session.http.get(self.API_URL.format(subdomain=self.match.group(\"subdomain\")), schema=validate.Schema(\n+ validate.transform(parse_json),\n+ {\"url\": validate.url()},\n+ validate.get(\"url\"),\n+ validate.transform(lambda url: update_scheme(\"https://\", url))\n+ ))\n+ hls_url = self.session.http.get(info_url, schema=validate.Schema(\n+ validate.transform(parse_json),\n+ {\n+ \"status\": \"ok\",\n+ \"protocol\": \"hls\",\n+ \"primary\": validate.url()\n+ },\n+ validate.get(\"primary\")\n+ ))\n+\n+ return HLSStream.parse_variant_playlist(self.session, hls_url)\n+\n def _get_streams(self):\n parsed = urlparse(self.url)\n", "issue": "plugins.euronews: www.euronews.com and ru.euronews.com provide different APIs\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nThe main Euronews site has started using the new API some time ago, and the current streamlink's plugin, euronews.py, indeed works correctly with the main site. However, some (?) language specific subdomains, like https://ru.euronews.com/, still use the old API (compare, e.g., the following responses: https://www.euronews.com/api/watchlive.json vs. https://ru.euronews.com/api/watchlive.json).\r\n\r\n[This previous version of euronews.py](https://github.com/streamlink/streamlink/blob/fcda5b681422718cc0a95b3de45d3fe2698d8e29/src/streamlink/plugins/euronews.py) works fine with the old API but obviously doesn't support the new one. 
Would be great if the plugin could support both versions.\n\n### Debug log\n\n```text\n[cli][info] Found matching plugin euronews for URL https://ru.euronews.com/live\r\nerror: No playable streams found on this URL: https://ru.euronews.com/live\n```\n\n", "before_files": [{"content": "import re\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.plugin.api.utils import itertags\nfrom streamlink.stream import HTTPStream\n\n\n@pluginmatcher(re.compile(\n r'https?://(?:\\w+\\.)*euronews\\.com/'\n))\nclass Euronews(Plugin):\n def _get_vod_stream(self):\n def find_video_url(content):\n for elem in itertags(content, \"meta\"):\n if elem.attributes.get(\"property\") == \"og:video\":\n return elem.attributes.get(\"content\")\n\n video_url = self.session.http.get(self.url, schema=validate.Schema(\n validate.transform(find_video_url),\n validate.any(None, validate.url())\n ))\n\n if video_url is not None:\n return dict(vod=HTTPStream(self.session, video_url))\n\n def _get_live_streams(self):\n def find_video_id(content):\n for elem in itertags(content, \"div\"):\n if elem.attributes.get(\"id\") == \"pfpPlayer\" and elem.attributes.get(\"data-google-src\") is not None:\n return elem.attributes.get(\"data-video-id\")\n\n video_id = self.session.http.get(self.url, schema=validate.Schema(\n validate.transform(find_video_id),\n validate.any(None, str)\n ))\n\n if video_id is not None:\n return self.session.streams(f\"https://www.youtube.com/watch?v={video_id}\")\n\n def _get_streams(self):\n parsed = urlparse(self.url)\n\n if parsed.path == \"/live\":\n return self._get_live_streams()\n else:\n return self._get_vod_stream()\n\n\n__plugin__ = Euronews\n", "path": "src/streamlink/plugins/euronews.py"}]} | 1,406 | 445 |
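For readers who want the two-step lookup from the streamlink patch spelled out, here is a rough `requests`-based sketch. The URL template and JSON field names come from the validate schemas in the diff; the error handling and scheme fix-up are simplified and are not streamlink code:

```python
import requests

def resolve_hls_url(subdomain="ru"):
    # Step 1: watchlive.json returns a pointer to a second JSON document.
    info = requests.get(
        f"https://{subdomain}.euronews.com/api/watchlive.json"
    ).json()
    info_url = info["url"]
    if info_url.startswith("//"):  # simplified update_scheme("https://", url)
        info_url = "https:" + info_url
    # Step 2: that document carries the primary HLS playlist URL.
    data = requests.get(info_url).json()
    if data.get("status") == "ok" and data.get("protocol") == "hls":
        return data["primary"]  # what HLSStream.parse_variant_playlist consumes
    return None
```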
gh_patches_debug_8386 | rasdani/github-patches | git_diff | WeblateOrg__weblate-1471 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide example hooks in pip installations
### Steps to reproduce
1. `pip install Weblate`
2. `ls path/to/site-packages/weblate`
### Actual behaviour
Example hooks are missing. We have to download the examples manually and copy them where we want on the server.
### Expected behaviour
Example hooks should be in site-packages, packaged as data inside Weblate package. This would allow to use examples, even when Weblate is installed using pip.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright © 2012 - 2017 Michal Čihař <[email protected]>
5 #
6 # This file is part of Weblate <https://weblate.org/>
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with this program. If not, see <https://www.gnu.org/licenses/>.
20 #
21
22 import os
23 import sys
24 from setuptools import setup
25
26 # allow setup.py to be run from any path
27 os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
28
29 with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
30 README = readme.read()
31
32 with open('requirements.txt') as requirements:
33 REQUIRES = requirements.read().splitlines()
34
35 setup(
36 name='Weblate',
37 version='2.14',
38 packages=[
39 'weblate',
40 'weblate.api',
41 'weblate.api.migrations',
42 'weblate.accounts',
43 'weblate.accounts.management',
44 'weblate.accounts.management.commands',
45 'weblate.accounts.migrations',
46 'weblate.accounts.templatetags',
47 'weblate.accounts.tests',
48 'weblate.billing',
49 'weblate.billing.management',
50 'weblate.billing.management.commands',
51 'weblate.billing.migrations',
52 'weblate.lang',
53 'weblate.lang.management',
54 'weblate.lang.management.commands',
55 'weblate.lang.migrations',
56 'weblate.trans',
57 'weblate.trans.autofixes',
58 'weblate.trans.checks',
59 'weblate.trans.machine',
60 'weblate.trans.management',
61 'weblate.trans.management.commands',
62 'weblate.trans.migrations',
63 'weblate.trans.models',
64 'weblate.trans.templatetags',
65 'weblate.trans.tests',
66 'weblate.trans.views',
67 ],
68 include_package_data=True,
69 license='GPLv3+',
70 description=(
71 'A web-based translation tool with tight version control integration'
72 ),
73 long_description=README,
74 keywords='i18n l10n gettext git mercurial translate',
75 url='https://weblate.org/',
76 download_url='https://weblate.org/download/',
77 bugtrack_url='https://github.com/WeblateOrg/weblate/issues',
78 author='Michal Čihař',
79 author_email='[email protected]',
80 install_requires=REQUIRES,
81 zip_safe=False,
82 extras_require={
83 'Mercurial': ['Mercurial>=2.8'],
84 'Unicode': ['pyuca>=1.1', 'python-bidi>=0.4.0', 'chardet'],
85 'Avatars': [
86 'pyLibravatar',
87 'pydns' if sys.version_info[0] == 2 else 'py3dns'
88 ],
89 'Android': ['Babel'],
90 'YAML': ['PyYAML>=3.0'],
91 'OCR': ['tesserocr>=1.2'],
92 },
93 classifiers=[
94 'Environment :: Web Environment',
95 'Framework :: Django',
96 'Intended Audience :: Developers',
97 'Intended Audience :: System Administrators',
98 'License :: OSI Approved :: '
99 'GNU General Public License v3 or later (GPLv3+)',
100 'Operating System :: OS Independent',
101 'Development Status :: 5 - Production/Stable',
102 'Programming Language :: Python',
103 'Programming Language :: Python :: 2',
104 'Programming Language :: Python :: 2.7',
105 'Programming Language :: Python :: 3',
106 'Programming Language :: Python :: 3.4',
107 'Programming Language :: Python :: 3.5',
108 'Topic :: Software Development :: Internationalization',
109 'Topic :: Software Development :: Localization',
110 'Topic :: Internet :: WWW/HTTP',
111 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
112 ],
113 entry_points={
114 'console_scripts': [
115 'weblate = weblate.runner:main',
116 ],
117 },
118 tests_require=(
119 'selenium',
120 'httpretty',
121 ),
122 test_suite='runtests.runtests',
123 )
124
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -32,6 +32,11 @@
with open('requirements.txt') as requirements:
REQUIRES = requirements.read().splitlines()
+DATA_FILES = [
+ ('share/weblate/' + root, [os.path.join(root, f) for f in files])
+ for root, dirs, files in os.walk('examples')
+]
+
setup(
name='Weblate',
version='2.14',
@@ -120,4 +125,5 @@
'httpretty',
),
test_suite='runtests.runtests',
+ data_files=DATA_FILES,
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,6 +32,11 @@\n with open('requirements.txt') as requirements:\n REQUIRES = requirements.read().splitlines()\n \n+DATA_FILES = [\n+ ('share/weblate/' + root, [os.path.join(root, f) for f in files])\n+ for root, dirs, files in os.walk('examples')\n+]\n+\n setup(\n name='Weblate',\n version='2.14',\n@@ -120,4 +125,5 @@\n 'httpretty',\n ),\n test_suite='runtests.runtests',\n+ data_files=DATA_FILES,\n )\n", "issue": "Provide example hooks in pip installations\n### Steps to reproduce\r\n1. `pip install Weblate`\r\n2. `ls path/to/site-packages/weblate`\r\n\r\n### Actual behaviour\r\nExample hooks are missing. We have to download the examples manually and copy them where we want on the server.\r\n\r\n### Expected behaviour\r\nExample hooks should be in site-packages, packaged as data inside Weblate package. This would allow to use examples, even when Weblate is installed using pip.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2017 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\nimport os\nimport sys\nfrom setuptools import setup\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:\n README = readme.read()\n\nwith open('requirements.txt') as requirements:\n REQUIRES = requirements.read().splitlines()\n\nsetup(\n name='Weblate',\n version='2.14',\n packages=[\n 'weblate',\n 'weblate.api',\n 'weblate.api.migrations',\n 'weblate.accounts',\n 'weblate.accounts.management',\n 'weblate.accounts.management.commands',\n 'weblate.accounts.migrations',\n 'weblate.accounts.templatetags',\n 'weblate.accounts.tests',\n 'weblate.billing',\n 'weblate.billing.management',\n 'weblate.billing.management.commands',\n 'weblate.billing.migrations',\n 'weblate.lang',\n 'weblate.lang.management',\n 'weblate.lang.management.commands',\n 'weblate.lang.migrations',\n 'weblate.trans',\n 'weblate.trans.autofixes',\n 'weblate.trans.checks',\n 'weblate.trans.machine',\n 'weblate.trans.management',\n 'weblate.trans.management.commands',\n 'weblate.trans.migrations',\n 'weblate.trans.models',\n 'weblate.trans.templatetags',\n 'weblate.trans.tests',\n 'weblate.trans.views',\n ],\n include_package_data=True,\n license='GPLv3+',\n description=(\n 'A web-based translation tool with tight version control integration'\n ),\n long_description=README,\n keywords='i18n l10n gettext git mercurial translate',\n url='https://weblate.org/',\n download_url='https://weblate.org/download/',\n bugtrack_url='https://github.com/WeblateOrg/weblate/issues',\n author='Michal \u010ciha\u0159',\n author_email='[email protected]',\n install_requires=REQUIRES,\n zip_safe=False,\n extras_require={\n 'Mercurial': ['Mercurial>=2.8'],\n 'Unicode': ['pyuca>=1.1', 'python-bidi>=0.4.0', 'chardet'],\n 'Avatars': [\n 'pyLibravatar',\n 'pydns' if sys.version_info[0] == 2 else 'py3dns'\n ],\n 'Android': ['Babel'],\n 'YAML': ['PyYAML>=3.0'],\n 'OCR': ['tesserocr>=1.2'],\n },\n classifiers=[\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: '\n 'GNU General Public License v3 or later (GPLv3+)',\n 'Operating System :: OS Independent',\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Software Development :: Internationalization',\n 'Topic :: Software Development :: Localization',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n entry_points={\n 'console_scripts': [\n 'weblate = weblate.runner:main',\n ],\n },\n tests_require=(\n 'selenium',\n 'httpretty',\n ),\n test_suite='runtests.runtests',\n)\n", "path": "setup.py"}]} | 1,944 | 156 |
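The heart of the weblate fix is the `os.walk` comprehension. A toy run, with hypothetical hook file names since the real `examples/` contents are not shown here, makes the `(target_dir, [source_files])` tuples it hands to setuptools visible:

```python
import os

DATA_FILES = [
    ("share/weblate/" + root, [os.path.join(root, f) for f in files])
    for root, dirs, files in os.walk("examples")
]
# With examples/hook-unwrap-po and examples/sub/hook-json on disk this yields:
# [('share/weblate/examples', ['examples/hook-unwrap-po']),
#  ('share/weblate/examples/sub', ['examples/sub/hook-json'])]
# so a plain `pip install Weblate` now ships the example hooks
# under share/weblate/examples/.
```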
gh_patches_debug_52237 | rasdani/github-patches | git_diff | easybuilders__easybuild-framework-3584 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
-lpthread is missing from $LIBBLAS_MT
When building with the `foss` toolchain, I noticed that `$LIBBLAS_MT` is defined identically to `$LIBBLAS` as `-lopenblas -lgfortran`.
We should make sure `-lpthread` is included as well.
</issue>
<code>
[start of easybuild/toolchains/linalg/openblas.py]
1 ##
2 # Copyright 2013-2021 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 Support for OpenBLAS as toolchain linear algebra library.
27
28 :author: Kenneth Hoste (Ghent University)
29 """
30
31 from easybuild.tools.toolchain.linalg import LinAlg
32
33
34 TC_CONSTANT_OPENBLAS = 'OpenBLAS'
35
36
37 class OpenBLAS(LinAlg):
38 """
39 Trivial class, provides OpenBLAS support.
40 """
41 BLAS_MODULE_NAME = ['OpenBLAS']
42 BLAS_LIB = ['openblas']
43 BLAS_FAMILY = TC_CONSTANT_OPENBLAS
44
45 LAPACK_MODULE_NAME = ['OpenBLAS']
46 LAPACK_IS_BLAS = True
47 LAPACK_FAMILY = TC_CONSTANT_OPENBLAS
48
[end of easybuild/toolchains/linalg/openblas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/easybuild/toolchains/linalg/openblas.py b/easybuild/toolchains/linalg/openblas.py
--- a/easybuild/toolchains/linalg/openblas.py
+++ b/easybuild/toolchains/linalg/openblas.py
@@ -40,6 +40,7 @@
"""
BLAS_MODULE_NAME = ['OpenBLAS']
BLAS_LIB = ['openblas']
+ BLAS_LIB_MT = ['openblas']
BLAS_FAMILY = TC_CONSTANT_OPENBLAS
LAPACK_MODULE_NAME = ['OpenBLAS']
| {"golden_diff": "diff --git a/easybuild/toolchains/linalg/openblas.py b/easybuild/toolchains/linalg/openblas.py\n--- a/easybuild/toolchains/linalg/openblas.py\n+++ b/easybuild/toolchains/linalg/openblas.py\n@@ -40,6 +40,7 @@\n \"\"\"\n BLAS_MODULE_NAME = ['OpenBLAS']\n BLAS_LIB = ['openblas']\n+ BLAS_LIB_MT = ['openblas']\n BLAS_FAMILY = TC_CONSTANT_OPENBLAS\n \n LAPACK_MODULE_NAME = ['OpenBLAS']\n", "issue": "-lpthtread is missing from $LIBBLAS_MT\nwhen building with the `foss` toolchain, I noticed that `$LIBBLAS_MT` is defined identical to `$LIBBLAS` as `-lopenblas -lgfortran`\r\n\r\nwe should make sure `-lpthread` is included as well.\r\n\r\n\n", "before_files": [{"content": "##\n# Copyright 2013-2021 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nSupport for OpenBLAS as toolchain linear algebra library.\n\n:author: Kenneth Hoste (Ghent University)\n\"\"\"\n\nfrom easybuild.tools.toolchain.linalg import LinAlg\n\n\nTC_CONSTANT_OPENBLAS = 'OpenBLAS'\n\n\nclass OpenBLAS(LinAlg):\n \"\"\"\n Trivial class, provides OpenBLAS support.\n \"\"\"\n BLAS_MODULE_NAME = ['OpenBLAS']\n BLAS_LIB = ['openblas']\n BLAS_FAMILY = TC_CONSTANT_OPENBLAS\n\n LAPACK_MODULE_NAME = ['OpenBLAS']\n LAPACK_IS_BLAS = True\n LAPACK_FAMILY = TC_CONSTANT_OPENBLAS\n", "path": "easybuild/toolchains/linalg/openblas.py"}]} | 1,106 | 120 |
gh_patches_debug_25082 | rasdani/github-patches | git_diff | Zeroto521__my-data-toolkit-615 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MAINT: Set only positional parameter for `(geo)accessor`
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [ ] closes #xxxx
- [x] whatsnew entry
The first parameter of `(geo)accessor` methods must be made a positional-only parameter.
</issue>
<code>
[start of dtoolkit/accessor/dataframe/repeat.py]
1 from __future__ import annotations
2
3 import numpy as np
4 import pandas as pd
5
6 from dtoolkit._typing import Axis
7 from dtoolkit.accessor.register import register_dataframe_method
8
9
10 @register_dataframe_method
11 def repeat(
12 df: pd.DataFrame,
13 repeat: int | list[int],
14 /,
15 axis: Axis = 0,
16 ) -> pd.DataFrame:
17 """
18 Repeat row or column of a :obj:`~pandas.DataFrame`.
19
20 Returns a new DataFrame where each row/column is repeated
21 consecutively a given number of times.
22
23 A sugary syntax wraps :meth:`numpy.repeat`.
24
25 Parameters
26 ----------
27 repeat : int or array of ints
28 The number of repetitions for each element. This should be a
29 non-negative integer. Repeating 0 times will return an empty
30 :obj:`~pandas.DataFrame`.
31
32 axis : {0 or 'index', 1 or 'columns'}, default 0
33 The axis along which to repeat.
34
35 * 0, or 'index' : Along the row to repeat.
36 * 1, or 'columns' : Along the column to repeat.
37
38 Returns
39 -------
40 DataFrame
41 Newly created DataFrame with repeated elements.
42
43 See Also
44 --------
45 numpy.repeat : This transformer's prototype method.
46
47 Examples
48 --------
49 >>> import pandas as pd
50 >>> import dtoolkit.accessor
51 >>> df = pd.DataFrame({'a': [1, 2], 'b':[3, 4]})
52 >>> df
53 a b
54 0 1 3
55 1 2 4
56
57 Each row repeat two times.
58
59 >>> df.repeat(2)
60 a b
61 0 1 3
62 0 1 3
63 1 2 4
64 1 2 4
65
66 Each column repeat two times.
67
68 >>> df.repeat(2, 1)
69 a a b b
70 0 1 1 3 3
71 1 2 2 4 4
72
73 ``a`` column repeat 1 times, ``b`` column repeat 2 times.
74
75 >>> df.repeat([1, 2], 1)
76 a b b
77 0 1 3 3
78 1 2 4 4
79 """
80
81 axis = df._get_axis_number(axis)
82 return pd.DataFrame(
83 np.repeat(
84 df._values,
85 repeat,
86 axis=axis,
87 ),
88 index=df.index.repeat(repeat) if axis == 0 else df.index,
89 columns=df.columns.repeat(repeat) if axis == 1 else df.columns,
90 )
91
[end of dtoolkit/accessor/dataframe/repeat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dtoolkit/accessor/dataframe/repeat.py b/dtoolkit/accessor/dataframe/repeat.py
--- a/dtoolkit/accessor/dataframe/repeat.py
+++ b/dtoolkit/accessor/dataframe/repeat.py
@@ -10,7 +10,7 @@
@register_dataframe_method
def repeat(
df: pd.DataFrame,
- repeat: int | list[int],
+ repeats: int | list[int],
/,
axis: Axis = 0,
) -> pd.DataFrame:
@@ -24,7 +24,7 @@
Parameters
----------
- repeat : int or array of ints
+ repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
:obj:`~pandas.DataFrame`.
@@ -78,13 +78,12 @@
1 2 4 4
"""
- axis = df._get_axis_number(axis)
return pd.DataFrame(
np.repeat(
df._values,
- repeat,
- axis=axis,
+ repeats,
+ axis=df._get_axis_number(axis),
),
- index=df.index.repeat(repeat) if axis == 0 else df.index,
- columns=df.columns.repeat(repeat) if axis == 1 else df.columns,
+ index=df.index.repeat(repeats) if axis == 0 else df.index,
+ columns=df.columns.repeat(repeats) if axis == 1 else df.columns,
)
| {"golden_diff": "diff --git a/dtoolkit/accessor/dataframe/repeat.py b/dtoolkit/accessor/dataframe/repeat.py\n--- a/dtoolkit/accessor/dataframe/repeat.py\n+++ b/dtoolkit/accessor/dataframe/repeat.py\n@@ -10,7 +10,7 @@\n @register_dataframe_method\n def repeat(\n df: pd.DataFrame,\n- repeat: int | list[int],\n+ repeats: int | list[int],\n /,\n axis: Axis = 0,\n ) -> pd.DataFrame:\n@@ -24,7 +24,7 @@\n \n Parameters\n ----------\n- repeat : int or array of ints\n+ repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n :obj:`~pandas.DataFrame`.\n@@ -78,13 +78,12 @@\n 1 2 4 4\n \"\"\"\n \n- axis = df._get_axis_number(axis)\n return pd.DataFrame(\n np.repeat(\n df._values,\n- repeat,\n- axis=axis,\n+ repeats,\n+ axis=df._get_axis_number(axis),\n ),\n- index=df.index.repeat(repeat) if axis == 0 else df.index,\n- columns=df.columns.repeat(repeat) if axis == 1 else df.columns,\n+ index=df.index.repeat(repeats) if axis == 0 else df.index,\n+ columns=df.columns.repeat(repeats) if axis == 1 else df.columns,\n )\n", "issue": "MAINT: Set only positional parameter for `(geo)accessor`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [ ] closes #xxxx\r\n- [x] whatsnew entry\r\n\r\nThe first parameter of `(geo)accessor` methods must be set as only positional parameter.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport numpy as np\nimport pandas as pd\n\nfrom dtoolkit._typing import Axis\nfrom dtoolkit.accessor.register import register_dataframe_method\n\n\n@register_dataframe_method\ndef repeat(\n df: pd.DataFrame,\n repeat: int | list[int],\n /,\n axis: Axis = 0,\n) -> pd.DataFrame:\n \"\"\"\n Repeat row or column of a :obj:`~pandas.DataFrame`.\n\n Returns a new DataFrame where each row/column is repeated\n consecutively a given number of times.\n\n A sugary syntax wraps :meth:`numpy.repeat`.\n\n Parameters\n ----------\n repeat : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. 
Repeating 0 times will return an empty\n :obj:`~pandas.DataFrame`.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis along which to repeat.\n\n * 0, or 'index' : Along the row to repeat.\n * 1, or 'columns' : Along the column to repeat.\n\n Returns\n -------\n DataFrame\n Newly created DataFrame with repeated elements.\n\n See Also\n --------\n numpy.repeat : This transformer's prototype method.\n\n Examples\n --------\n >>> import pandas as pd\n >>> import dtoolkit.accessor\n >>> df = pd.DataFrame({'a': [1, 2], 'b':[3, 4]})\n >>> df\n a b\n 0 1 3\n 1 2 4\n\n Each row repeat two times.\n\n >>> df.repeat(2)\n a b\n 0 1 3\n 0 1 3\n 1 2 4\n 1 2 4\n\n Each column repeat two times.\n\n >>> df.repeat(2, 1)\n a a b b\n 0 1 1 3 3\n 1 2 2 4 4\n\n ``a`` column repeat 1 times, ``b`` column repeat 2 times.\n\n >>> df.repeat([1, 2], 1)\n a b b\n 0 1 3 3\n 1 2 4 4\n \"\"\"\n\n axis = df._get_axis_number(axis)\n return pd.DataFrame(\n np.repeat(\n df._values,\n repeat,\n axis=axis,\n ),\n index=df.index.repeat(repeat) if axis == 0 else df.index,\n columns=df.columns.repeat(repeat) if axis == 1 else df.columns,\n )\n", "path": "dtoolkit/accessor/dataframe/repeat.py"}]} | 1,545 | 343 |
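To make the renamed `repeats` parameter concrete, the accessor's column-wise docstring example can be reproduced with plain numpy/pandas calls. It leans on the same private pandas helpers the accessor itself uses, so treat it as illustration only:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
repeats, axis = [1, 2], 1  # repeat column "a" once and column "b" twice

out = pd.DataFrame(
    np.repeat(df._values, repeats, axis=df._get_axis_number(axis)),
    index=df.index,
    columns=df.columns.repeat(repeats),  # labels repeat alongside the data
)
print(out)
#    a  b  b
# 0  1  3  3
# 1  2  4  4
```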
gh_patches_debug_4871 | rasdani/github-patches | git_diff | ansible__molecule-3521 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing `f` prefix on f-strings
Some strings look like they're meant to be f-strings but are missing the `f` prefix, meaning variable interpolation won't happen.
https://github.com/ansible-community/molecule/blob/19381a8a564f8013453b8dfb08b677fd81c2e358/src/molecule/scenarios.py#L85
I found this issue automatically. I'm a bot. Beep Boop 🦊. See other issues I found in your repo [here](https://codereview.doctor/ansible-community/molecule)
</issue>
<code>
[start of src/molecule/scenarios.py]
1 # Copyright (c) 2015-2018 Cisco Systems, Inc.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to
5 # deal in the Software without restriction, including without limitation the
6 # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 # sell copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20 """Scenarios Module."""
21 import logging
22 from typing import List
23
24 from molecule import util
25
26 LOG = logging.getLogger(__name__)
27
28
29 class Scenarios(object):
30 """The Scenarios groups one or more scenario objects Molecule will execute."""
31
32 def __init__(self, configs, scenario_name=None):
33 """
34 Initialize a new scenarios class and returns None.
35
36 :param configs: A list containing Molecule config instances.
37 :param scenario_name: A string containing the name of the scenario.
38 :return: None
39 """
40 self._configs = configs
41 self._scenario_name = scenario_name
42 self._scenarios = self.all
43
44 def next(self):
45 if not self._scenarios:
46 raise StopIteration
47 return self._scenarios.pop(0)
48
49 def __iter__(self):
50 """Make object iterable."""
51 return self
52
53 __next__ = next # Python 3.X compatibility
54
55 @property
56 def all(self):
57 """
58 Return a list containing all scenario objects.
59
60 :return: list
61 """
62 if self._scenario_name:
63 scenarios = self._filter_for_scenario()
64 self._verify()
65
66 return scenarios
67
68 scenarios = [c.scenario for c in self._configs]
69 scenarios.sort(key=lambda x: x.directory)
70 return scenarios
71
72 def print_matrix(self):
73 msg = "Test matrix"
74 LOG.info(msg)
75
76 tree = {}
77 for scenario in self.all:
78 tree[scenario.name] = [action for action in scenario.sequence]
79 util.print_as_yaml(tree)
80
81 def sequence(self, scenario_name: str) -> List[str]:
82 for scenario in self.all:
83 if scenario.name == scenario_name:
84 return [action for action in scenario.sequence]
85 raise RuntimeError("Unable to find sequence for {scenario_name} scenario.")
86
87 def _verify(self):
88 """
89 Verify the specified scenario was found and returns None.
90
91 :return: None
92 """
93 scenario_names = [c.scenario.name for c in self._configs]
94 if self._scenario_name not in scenario_names:
95 msg = f"Scenario '{self._scenario_name}' not found. Exiting."
96 util.sysexit_with_message(msg)
97
98 def _filter_for_scenario(self):
99 """
100 Find the scenario matching the provided scenario name and returns a \
101 list.
102
103 :return: list
104 """
105 return [
106 c.scenario for c in self._configs if c.scenario.name == self._scenario_name
107 ]
108
109 def _get_matrix(self):
110 """
111 Build a matrix of scenarios with sequence to include and returns a \
112 dict.
113
114 {
115 scenario_1: {
116 'subcommand': [
117 'action-1',
118 'action-2',
119 ],
120 },
121 scenario_2: {
122 'subcommand': [
123 'action-1',
124 ],
125 },
126 }
127
128 :returns: dict
129 """
130 return dict(
131 {
132 scenario.name: {
133 "check": scenario.check_sequence,
134 "cleanup": scenario.cleanup_sequence,
135 "converge": scenario.converge_sequence,
136 "create": scenario.create_sequence,
137 "dependency": scenario.dependency_sequence,
138 "destroy": scenario.destroy_sequence,
139 "idempotence": scenario.idempotence_sequence,
140 "lint": scenario.lint_sequence,
141 "prepare": scenario.prepare_sequence,
142 "side_effect": scenario.side_effect_sequence,
143 "syntax": scenario.syntax_sequence,
144 "test": scenario.test_sequence,
145 "verify": scenario.verify_sequence,
146 }
147 for scenario in self.all
148 }
149 )
150
[end of src/molecule/scenarios.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/molecule/scenarios.py b/src/molecule/scenarios.py
--- a/src/molecule/scenarios.py
+++ b/src/molecule/scenarios.py
@@ -82,7 +82,7 @@
for scenario in self.all:
if scenario.name == scenario_name:
return [action for action in scenario.sequence]
- raise RuntimeError("Unable to find sequence for {scenario_name} scenario.")
+ raise RuntimeError(f"Unable to find sequence for {scenario_name} scenario.")
def _verify(self):
"""
| {"golden_diff": "diff --git a/src/molecule/scenarios.py b/src/molecule/scenarios.py\n--- a/src/molecule/scenarios.py\n+++ b/src/molecule/scenarios.py\n@@ -82,7 +82,7 @@\n for scenario in self.all:\n if scenario.name == scenario_name:\n return [action for action in scenario.sequence]\n- raise RuntimeError(\"Unable to find sequence for {scenario_name} scenario.\")\n+ raise RuntimeError(f\"Unable to find sequence for {scenario_name} scenario.\")\n \n def _verify(self):\n \"\"\"\n", "issue": "Missing `f` prefix on f-strings\nSome strings looks like they're meant to be f-strings but are missing the `f` prefix meaning variable interpolation won't happen.\n\nhttps://github.com/ansible-community/molecule/blob/19381a8a564f8013453b8dfb08b677fd81c2e358/src/molecule/scenarios.py#L85\n\nI found this issue automatically. I'm a bot. Beep Boop \ud83e\udd8a. See other issues I found in your repo [here](https://codereview.doctor/ansible-community/molecule)\n", "before_files": [{"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Scenarios Module.\"\"\"\nimport logging\nfrom typing import List\n\nfrom molecule import util\n\nLOG = logging.getLogger(__name__)\n\n\nclass Scenarios(object):\n \"\"\"The Scenarios groups one or more scenario objects Molecule will execute.\"\"\"\n\n def __init__(self, configs, scenario_name=None):\n \"\"\"\n Initialize a new scenarios class and returns None.\n\n :param configs: A list containing Molecule config instances.\n :param scenario_name: A string containing the name of the scenario.\n :return: None\n \"\"\"\n self._configs = configs\n self._scenario_name = scenario_name\n self._scenarios = self.all\n\n def next(self):\n if not self._scenarios:\n raise StopIteration\n return self._scenarios.pop(0)\n\n def __iter__(self):\n \"\"\"Make object iterable.\"\"\"\n return self\n\n __next__ = next # Python 3.X compatibility\n\n @property\n def all(self):\n \"\"\"\n Return a list containing all scenario objects.\n\n :return: list\n \"\"\"\n if self._scenario_name:\n scenarios = self._filter_for_scenario()\n self._verify()\n\n return scenarios\n\n scenarios = [c.scenario for c in self._configs]\n scenarios.sort(key=lambda x: x.directory)\n return scenarios\n\n def print_matrix(self):\n msg = \"Test matrix\"\n LOG.info(msg)\n\n tree = {}\n for scenario in self.all:\n tree[scenario.name] = [action for action in scenario.sequence]\n util.print_as_yaml(tree)\n\n def sequence(self, scenario_name: str) -> List[str]:\n for scenario in self.all:\n if scenario.name == scenario_name:\n return [action for action in scenario.sequence]\n raise RuntimeError(\"Unable to find sequence for {scenario_name} scenario.\")\n\n def _verify(self):\n \"\"\"\n Verify the specified scenario was found and returns None.\n\n :return: None\n \"\"\"\n scenario_names = [c.scenario.name for c in self._configs]\n if self._scenario_name not in scenario_names:\n msg = f\"Scenario '{self._scenario_name}' not found. Exiting.\"\n util.sysexit_with_message(msg)\n\n def _filter_for_scenario(self):\n \"\"\"\n Find the scenario matching the provided scenario name and returns a \\\n list.\n\n :return: list\n \"\"\"\n return [\n c.scenario for c in self._configs if c.scenario.name == self._scenario_name\n ]\n\n def _get_matrix(self):\n \"\"\"\n Build a matrix of scenarios with sequence to include and returns a \\\n dict.\n\n {\n scenario_1: {\n 'subcommand': [\n 'action-1',\n 'action-2',\n ],\n },\n scenario_2: {\n 'subcommand': [\n 'action-1',\n ],\n },\n }\n\n :returns: dict\n \"\"\"\n return dict(\n {\n scenario.name: {\n \"check\": scenario.check_sequence,\n \"cleanup\": scenario.cleanup_sequence,\n \"converge\": scenario.converge_sequence,\n \"create\": scenario.create_sequence,\n \"dependency\": scenario.dependency_sequence,\n \"destroy\": scenario.destroy_sequence,\n \"idempotence\": scenario.idempotence_sequence,\n \"lint\": scenario.lint_sequence,\n \"prepare\": scenario.prepare_sequence,\n \"side_effect\": scenario.side_effect_sequence,\n \"syntax\": scenario.syntax_sequence,\n \"test\": scenario.test_sequence,\n \"verify\": scenario.verify_sequence,\n }\n for scenario in self.all\n }\n )\n", "path": "src/molecule/scenarios.py"}]} | 2,046 | 116 |
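The molecule bug is easiest to see in isolation: without the `f` prefix the braces survive literally instead of interpolating.

```python
scenario_name = "default"

print("Unable to find sequence for {scenario_name} scenario.")
# Unable to find sequence for {scenario_name} scenario.   <- the reported bug

print(f"Unable to find sequence for {scenario_name} scenario.")
# Unable to find sequence for default scenario.           <- after the patch
```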
gh_patches_debug_2101 | rasdani/github-patches | git_diff | carpentries__amy-2028 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update colors of progress states
The current color scheme for our progress states makes it hard to know when something needs further attention and when it does not. Previously we had three colors: Red-Failed, Yellow-Not evaluated yet and Green-Passed.
The new 'Asked to Repeat' progress state option is also yellow and so it conflicts with the 'Not Evaluated Yet' option.
**Please update the color for 'Asked to Repeat' to blue. Any shade will do.**
Progress state colors can be viewed on the [More>Trainees page](https://amy.carpentries.org/trainings/trainees/).
</issue>
<code>
[start of amy/workshops/templatetags/training_progress.py]
1 from django import template
2 from django.template.defaultfilters import escape
3 from django.utils.safestring import mark_safe
4
5 from workshops.models import TrainingProgress
6
7 register = template.Library()
8
9
10 @register.simple_tag
11 def progress_label(progress):
12 assert isinstance(progress, TrainingProgress)
13
14 if progress.discarded:
15 additional_label = "dark"
16
17 else:
18 switch = {
19 "n": "warning",
20 "f": "danger",
21 "a": "warning",
22 "p": "success",
23 }
24 additional_label = switch[progress.state]
25
26 fmt = "badge badge-{}".format(additional_label)
27 return mark_safe(fmt)
28
29
30 @register.simple_tag
31 def progress_description(progress):
32 assert isinstance(progress, TrainingProgress)
33
34 text = "{discarded}{state} {type}<br />{evaluated_by}<br />on {day}.{notes}".format(
35 discarded="discarded " if progress.discarded else "",
36 state=progress.get_state_display(),
37 type=progress.requirement,
38 evaluated_by=(
39 "evaluated by {}".format(progress.evaluated_by.full_name)
40 if progress.evaluated_by is not None
41 else "submitted"
42 ),
43 day=progress.created_at.strftime("%A %d %B %Y at %H:%M"),
44 notes="<br />Notes: {}".format(escape(progress.notes))
45 if progress.notes
46 else "",
47 )
48 text = text[0].upper() + text[1:]
49 return mark_safe(text)
50
[end of amy/workshops/templatetags/training_progress.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/amy/workshops/templatetags/training_progress.py b/amy/workshops/templatetags/training_progress.py
--- a/amy/workshops/templatetags/training_progress.py
+++ b/amy/workshops/templatetags/training_progress.py
@@ -18,7 +18,7 @@
switch = {
"n": "warning",
"f": "danger",
- "a": "warning",
+ "a": "info",
"p": "success",
}
additional_label = switch[progress.state]
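
As a sanity check on the new mapping, the class strings the template tag emits can be exercised without Django at all; a minimal standalone sketch (state codes taken from the patch, colour comments assuming stock Bootstrap 4 theming):

STATE_BADGE = {
    "n": "warning",  # not evaluated yet -> yellow
    "f": "danger",   # failed            -> red
    "a": "info",     # asked to repeat   -> blue (previously "warning")
    "p": "success",  # passed            -> green
}

def badge_class(state, discarded=False):
    # Mirrors progress_label(): a discarded record overrides the per-state colour.
    return "badge badge-dark" if discarded else "badge badge-" + STATE_BADGE[state]

assert badge_class("a") == "badge badge-info"
assert badge_class("a", discarded=True) == "badge badge-dark"
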
| {"golden_diff": "diff --git a/amy/workshops/templatetags/training_progress.py b/amy/workshops/templatetags/training_progress.py\n--- a/amy/workshops/templatetags/training_progress.py\n+++ b/amy/workshops/templatetags/training_progress.py\n@@ -18,7 +18,7 @@\n switch = {\n \"n\": \"warning\",\n \"f\": \"danger\",\n- \"a\": \"warning\",\n+ \"a\": \"info\",\n \"p\": \"success\",\n }\n additional_label = switch[progress.state]\n", "issue": "Update colors of progress states\nThe current color scheme for our progress states makes it hard to know when something needs further attention and when it does not. Previously we had three colors: Red-Failed, Yellow-Not evaluated yet and Green-Passed. \r\n\r\nThe new 'Asked to Repeat' progress state option is also yellow and so it conflicts with the 'Not Evaluated Yet' option.\r\n\r\n**Please update the color for 'Asked to Repeat' to blue.. Any shade will do.**\r\n\r\nProgress state colors can be viewed on the [More>Trainees page ](https://amy.carpentries.org/trainings/trainees/)\n", "before_files": [{"content": "from django import template\nfrom django.template.defaultfilters import escape\nfrom django.utils.safestring import mark_safe\n\nfrom workshops.models import TrainingProgress\n\nregister = template.Library()\n\n\[email protected]_tag\ndef progress_label(progress):\n assert isinstance(progress, TrainingProgress)\n\n if progress.discarded:\n additional_label = \"dark\"\n\n else:\n switch = {\n \"n\": \"warning\",\n \"f\": \"danger\",\n \"a\": \"warning\",\n \"p\": \"success\",\n }\n additional_label = switch[progress.state]\n\n fmt = \"badge badge-{}\".format(additional_label)\n return mark_safe(fmt)\n\n\[email protected]_tag\ndef progress_description(progress):\n assert isinstance(progress, TrainingProgress)\n\n text = \"{discarded}{state} {type}<br />{evaluated_by}<br />on {day}.{notes}\".format(\n discarded=\"discarded \" if progress.discarded else \"\",\n state=progress.get_state_display(),\n type=progress.requirement,\n evaluated_by=(\n \"evaluated by {}\".format(progress.evaluated_by.full_name)\n if progress.evaluated_by is not None\n else \"submitted\"\n ),\n day=progress.created_at.strftime(\"%A %d %B %Y at %H:%M\"),\n notes=\"<br />Notes: {}\".format(escape(progress.notes))\n if progress.notes\n else \"\",\n )\n text = text[0].upper() + text[1:]\n return mark_safe(text)\n", "path": "amy/workshops/templatetags/training_progress.py"}]} | 1,085 | 129 |
gh_patches_debug_11216 | rasdani/github-patches | git_diff | OCA__server-tools-75 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[7.0] [base_optional_quick_create] AttributeError: 'NoneType' object has no attribute 'name_create'
Error at startup, before a migration, if a model has been removed.
</issue>
<code>
[start of base_optional_quick_create/model.py]
1 # -*- coding: utf-8 -*-
2 ##############################################################################
3 #
4 # Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as published
8 # by the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 ##############################################################################
20
21 from openerp.osv import orm, fields
22 from openerp import SUPERUSER_ID
23 from openerp.tools.translate import _
24
25
26 class ir_model(orm.Model):
27 _inherit = 'ir.model'
28
29 _columns = {
30 'avoid_quick_create': fields.boolean('Avoid quick create'),
31 }
32
33 def _wrap_name_create(self, old_create, model):
34 def wrapper(cr, uid, name, context=None):
35 raise orm.except_orm(
36 _('Error'),
37 _("Can't create quickly. Opening create form"))
38 return wrapper
39
40 def _register_hook(self, cr, ids=None):
41 if ids is None:
42 ids = self.search(cr, SUPERUSER_ID, [])
43 for model in self.browse(cr, SUPERUSER_ID, ids):
44 if model.avoid_quick_create:
45 model_name = model.model
46 model_obj = self.pool.get(model_name)
47 if not hasattr(model_obj, 'check_quick_create'):
48 model_obj.name_create = self._wrap_name_create(
49 model_obj.name_create, model_name)
50 model_obj.check_quick_create = True
51 return True
52
53 def create(self, cr, uid, vals, context=None):
54 res_id = super(ir_model, self).create(cr, uid, vals, context=context)
55 self._register_hook(cr, [res_id])
56 return res_id
57
58 def write(self, cr, uid, ids, vals, context=None):
59 if isinstance(ids, (int, long)):
60 ids = [ids]
61 res = super(ir_model, self).write(cr, uid, ids, vals, context=context)
62 self._register_hook(cr, ids)
63 return res
64
[end of base_optional_quick_create/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/base_optional_quick_create/model.py b/base_optional_quick_create/model.py
--- a/base_optional_quick_create/model.py
+++ b/base_optional_quick_create/model.py
@@ -44,7 +44,7 @@
if model.avoid_quick_create:
model_name = model.model
model_obj = self.pool.get(model_name)
- if not hasattr(model_obj, 'check_quick_create'):
+ if model_obj and not hasattr(model_obj, 'check_quick_create'):
model_obj.name_create = self._wrap_name_create(
model_obj.name_create, model_name)
model_obj.check_quick_create = True
| {"golden_diff": "diff --git a/base_optional_quick_create/model.py b/base_optional_quick_create/model.py\n--- a/base_optional_quick_create/model.py\n+++ b/base_optional_quick_create/model.py\n@@ -44,7 +44,7 @@\n if model.avoid_quick_create:\n model_name = model.model\n model_obj = self.pool.get(model_name)\n- if not hasattr(model_obj, 'check_quick_create'):\n+ if model_obj and not hasattr(model_obj, 'check_quick_create'):\n model_obj.name_create = self._wrap_name_create(\n model_obj.name_create, model_name)\n model_obj.check_quick_create = True\n", "issue": "[7.0] [base_optional_quick_create] AttributeError: 'NoneType' object has no attribute 'name_create'\nError at starting before a migration if a model has been removed\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp.osv import orm, fields\nfrom openerp import SUPERUSER_ID\nfrom openerp.tools.translate import _\n\n\nclass ir_model(orm.Model):\n _inherit = 'ir.model'\n\n _columns = {\n 'avoid_quick_create': fields.boolean('Avoid quick create'),\n }\n\n def _wrap_name_create(self, old_create, model):\n def wrapper(cr, uid, name, context=None):\n raise orm.except_orm(\n _('Error'),\n _(\"Can't create quickly. Opening create form\"))\n return wrapper\n\n def _register_hook(self, cr, ids=None):\n if ids is None:\n ids = self.search(cr, SUPERUSER_ID, [])\n for model in self.browse(cr, SUPERUSER_ID, ids):\n if model.avoid_quick_create:\n model_name = model.model\n model_obj = self.pool.get(model_name)\n if not hasattr(model_obj, 'check_quick_create'):\n model_obj.name_create = self._wrap_name_create(\n model_obj.name_create, model_name)\n model_obj.check_quick_create = True\n return True\n\n def create(self, cr, uid, vals, context=None):\n res_id = super(ir_model, self).create(cr, uid, vals, context=context)\n self._register_hook(cr, [res_id])\n return res_id\n\n def write(self, cr, uid, ids, vals, context=None):\n if isinstance(ids, (int, long)):\n ids = [ids]\n res = super(ir_model, self).write(cr, uid, ids, vals, context=context)\n self._register_hook(cr, ids)\n return res\n", "path": "base_optional_quick_create/model.py"}]} | 1,240 | 133 |
gh_patches_debug_33358 | rasdani/github-patches | git_diff | vaexio__vaex-1299 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Displaying full integers instead of scientific notation when printing out Vaex HDF5 data
Hi,
Can you help me?
my code:
`myfile = vaex.open('myfile.hdf5')`
`myfile['customer_id']`
output:
`Length: 4,259,376 dtype: int64 (column)`
`0 9.4618e+08`
` 1 9.43324e+08`
` 2 9.43325e+08`
` 3 9.43333e+08`
` 4 9.43333e+08`
...
How can I get the output as full integers instead of scientific notation? Thank you.
</issue>
<code>
[start of packages/vaex-core/vaex/formatting.py]
1 import numpy as np
2 import numbers
3 import six
4 import datetime
5 import pyarrow as pa
6
7
8 MAX_LENGTH = 50
9
10
11 def _format_value(value):
12 if isinstance(value, six.string_types):
13 value = str(value)
14 elif isinstance(value, pa.lib.Scalar):
15 value = value.as_py()
16 if value is None:
17 value = '--'
18 else:
19 value = repr(value)
20 elif isinstance(value, bytes):
21 value = repr(value)
22 elif isinstance(value, np.ma.core.MaskedConstant):
23 value = str(value)
24 if isinstance(value, np.datetime64):
25 if np.isnat(value):
26 value = 'NaT'
27 else:
28 value = ' '.join(str(value).split('T'))
29 if isinstance(value, np.timedelta64):
30 if np.isnat(value):
31 value = 'NaT'
32 else:
33 tmp = datetime.timedelta(seconds=value / np.timedelta64(1, 's'))
34 ms = tmp.microseconds
35 s = np.mod(tmp.seconds, 60)
36 m = np.mod(tmp.seconds//60, 60)
37 h = tmp.seconds // 3600
38 d = tmp.days
39 if ms:
40 value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))
41 else:
42 value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))
43 elif not isinstance(value, numbers.Number):
44 value = str(value)
45 if isinstance(value, float):
46 value = repr(value)
47 if isinstance(value, (str, bytes)):
48 if len(value) > MAX_LENGTH:
49 value = repr(value[:MAX_LENGTH-3])[:-1] + '...'
50 return value
51
[end of packages/vaex-core/vaex/formatting.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/vaex-core/vaex/formatting.py b/packages/vaex-core/vaex/formatting.py
--- a/packages/vaex-core/vaex/formatting.py
+++ b/packages/vaex-core/vaex/formatting.py
@@ -8,25 +8,32 @@
MAX_LENGTH = 50
+def _trim_string(value):
+ if len(value) > MAX_LENGTH:
+ value = repr(value[:MAX_LENGTH-3])[:-1] + '...'
+ return value
+
def _format_value(value):
- if isinstance(value, six.string_types):
- value = str(value)
- elif isinstance(value, pa.lib.Scalar):
+ # print("value = ", value, type(value), isinstance(value, numbers.Number))
+ if isinstance(value, pa.lib.Scalar):
value = value.as_py()
if value is None:
- value = '--'
+ return '--'
else:
- value = repr(value)
+ return _trim_string(str(value))
+ if isinstance(value, str):
+ return _trim_string(str(value))
elif isinstance(value, bytes):
- value = repr(value)
+ value = _trim_string(repr(value))
elif isinstance(value, np.ma.core.MaskedConstant):
- value = str(value)
- if isinstance(value, np.datetime64):
+ return str(value)
+ elif isinstance(value, np.datetime64):
if np.isnat(value):
value = 'NaT'
else:
value = ' '.join(str(value).split('T'))
- if isinstance(value, np.timedelta64):
+ return value
+ elif isinstance(value, np.timedelta64):
if np.isnat(value):
value = 'NaT'
else:
@@ -40,11 +47,10 @@
value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))
else:
value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))
- elif not isinstance(value, numbers.Number):
+ return value
+ elif isinstance(value, numbers.Number):
value = str(value)
- if isinstance(value, float):
+ else:
value = repr(value)
- if isinstance(value, (str, bytes)):
- if len(value) > MAX_LENGTH:
- value = repr(value[:MAX_LENGTH-3])[:-1] + '...'
+ value = _trim_string(value)
return value
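
The decisive change is the reordered branch for numbers: numpy scalars register with the numbers ABCs, so an int64 now takes the str() path and keeps all its digits instead of falling through to a %g-style rendering. A quick illustration of the difference (requires numpy; "{:g}" stands in here for the old lossy display path):

import numpy as np

v = np.int64(946180000)
print("{:g}".format(v))  # 9.4618e+08 -- six significant digits, as in the report
print(str(v))            # 946180000  -- what _format_value now returns
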
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/formatting.py b/packages/vaex-core/vaex/formatting.py\n--- a/packages/vaex-core/vaex/formatting.py\n+++ b/packages/vaex-core/vaex/formatting.py\n@@ -8,25 +8,32 @@\n MAX_LENGTH = 50\n \n \n+def _trim_string(value):\n+ if len(value) > MAX_LENGTH:\n+ value = repr(value[:MAX_LENGTH-3])[:-1] + '...'\n+ return value\n+\n def _format_value(value):\n- if isinstance(value, six.string_types):\n- value = str(value)\n- elif isinstance(value, pa.lib.Scalar):\n+ # print(\"value = \", value, type(value), isinstance(value, numbers.Number))\n+ if isinstance(value, pa.lib.Scalar):\n value = value.as_py()\n if value is None:\n- value = '--'\n+ return '--'\n else:\n- value = repr(value)\n+ return _trim_string(str(value))\n+ if isinstance(value, str):\n+ return _trim_string(str(value))\n elif isinstance(value, bytes):\n- value = repr(value)\n+ value = _trim_string(repr(value))\n elif isinstance(value, np.ma.core.MaskedConstant):\n- value = str(value)\n- if isinstance(value, np.datetime64):\n+ return str(value)\n+ elif isinstance(value, np.datetime64):\n if np.isnat(value):\n value = 'NaT'\n else:\n value = ' '.join(str(value).split('T'))\n- if isinstance(value, np.timedelta64):\n+ return value\n+ elif isinstance(value, np.timedelta64):\n if np.isnat(value):\n value = 'NaT'\n else:\n@@ -40,11 +47,10 @@\n value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))\n else:\n value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))\n- elif not isinstance(value, numbers.Number):\n+ return value\n+ elif isinstance(value, numbers.Number):\n value = str(value)\n- if isinstance(value, float):\n+ else:\n value = repr(value)\n- if isinstance(value, (str, bytes)):\n- if len(value) > MAX_LENGTH:\n- value = repr(value[:MAX_LENGTH-3])[:-1] + '...'\n+ value = _trim_string(value)\n return value\n", "issue": "Displaying full integers instead of scientific notiation when printing out Vaex HDF5 data\nHi,\r\nCan you help me:\r\nmy code:\r\n`myfile = vaex.open('myfile.hdf5')`\r\n`myfile['customer_id']`\r\n\r\noutput:\r\n`Length: 4,259,376 dtype: int64 (column)`\r\n`0 9.4618e+08`\r\n` 1 9.43324e+08`\r\n` 2 9.43325e+08`\r\n` 3 9.43333e+08`\r\n` 4 9.43333e+08`\r\n ...\r\n\r\nHow can get output with full integer instead of scientific notation? 
Thank you.\r\n\n", "before_files": [{"content": "import numpy as np\nimport numbers\nimport six\nimport datetime\nimport pyarrow as pa\n\n\nMAX_LENGTH = 50\n\n\ndef _format_value(value):\n if isinstance(value, six.string_types):\n value = str(value)\n elif isinstance(value, pa.lib.Scalar):\n value = value.as_py()\n if value is None:\n value = '--'\n else:\n value = repr(value)\n elif isinstance(value, bytes):\n value = repr(value)\n elif isinstance(value, np.ma.core.MaskedConstant):\n value = str(value)\n if isinstance(value, np.datetime64):\n if np.isnat(value):\n value = 'NaT'\n else:\n value = ' '.join(str(value).split('T'))\n if isinstance(value, np.timedelta64):\n if np.isnat(value):\n value = 'NaT'\n else:\n tmp = datetime.timedelta(seconds=value / np.timedelta64(1, 's'))\n ms = tmp.microseconds\n s = np.mod(tmp.seconds, 60)\n m = np.mod(tmp.seconds//60, 60)\n h = tmp.seconds // 3600\n d = tmp.days\n if ms:\n value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))\n else:\n value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))\n elif not isinstance(value, numbers.Number):\n value = str(value)\n if isinstance(value, float):\n value = repr(value)\n if isinstance(value, (str, bytes)):\n if len(value) > MAX_LENGTH:\n value = repr(value[:MAX_LENGTH-3])[:-1] + '...'\n return value\n", "path": "packages/vaex-core/vaex/formatting.py"}]} | 1,209 | 563 |
gh_patches_debug_26582 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1741 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prevent automatic publishing of projects
## Test plan
GIVEN a project that is going to be published
WHEN the project is published
THEN a notification mail will be sent to Kasper
## Issue description
Somehow there are projects (mainly from Commonsites and Akvo) that get published automatically, even though they shouldn't be. This should be prevented and fixed.
Note: I can't find the reason why this happens. I suspect the API, but then again, this only happens for Akvo and Commonsites projects. Therefore I'll monitor it for now.
</issue>
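
The GIVEN/WHEN/THEN above maps directly onto Django's locmem mail outbox; a rough sketch of such a test, assuming a project factory exists (make_project is a hypothetical helper, and the subject text follows the patch shown later in this record):

from django.core import mail
from django.test import TestCase

class PublishNotificationTest(TestCase):
    def test_publishing_sends_notification(self):
        project = make_project()  # hypothetical factory for a valid project
        project.publishingstatus.status = 'published'
        project.publishingstatus.save()  # fires the post_save receiver
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('has been published', mail.outbox[0].subject)
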
<code>
[start of akvo/rsr/models/publishing_status.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.core.exceptions import ValidationError
8 from django.db import models
9 from django.utils.translation import ugettext_lazy as _
10
11 from ..fields import ValidXMLCharField
12
13
14 class PublishingStatus(models.Model):
15 """Keep track of publishing status."""
16 STATUS_PUBLISHED = 'published'
17 STATUS_UNPUBLISHED = 'unpublished'
18 PUBLISHING_STATUS = (
19 (STATUS_UNPUBLISHED, _(u'Unpublished')),
20 (STATUS_PUBLISHED, _(u'Published')),
21 )
22
23 project = models.OneToOneField('Project',)
24 status = ValidXMLCharField(max_length=30,
25 choices=PUBLISHING_STATUS,
26 db_index=True, default=STATUS_UNPUBLISHED)
27
28 def clean(self):
29 """Projects can only be published, when several checks have been performed."""
30 if self.status == 'published':
31 validation_errors = []
32
33 if not self.project.title:
34 validation_errors.append(
35 ValidationError(_('Project needs to have a title.'),
36 code='title')
37 )
38
39 if not self.project.subtitle:
40 validation_errors.append(
41 ValidationError(_('Project needs to have a subtitle.'),
42 code='subtitle')
43 )
44
45 if not self.project.project_plan_summary:
46 validation_errors.append(
47 ValidationError(_('Project needs to have the project plan summary filled in.'),
48 code='summary')
49 )
50
51 if not self.project.goals_overview:
52 validation_errors.append(
53 ValidationError(_('Project needs to have the goals overview field filled in.'),
54 code='goals_overview')
55 )
56
57 if not self.project.date_start_planned:
58 validation_errors.append(
59 ValidationError(
60 _('Project needs to have the planned start date field filled in.'),
61 code='goals_overview')
62 )
63
64 if not self.project.partners:
65 validation_errors.append(
66 ValidationError(_('Project needs to have at least one valid partner.'),
67 code='partners')
68 )
69 elif not self.project.partnerships.filter(
70 partner_type__in=['field', 'funding', 'support']
71 ).exists():
72 validation_errors.append(
73 ValidationError(
74 _('Project needs to have at least one field, funding or support partner.'),
75 code='partners'
76 )
77 )
78 else:
79 for funding_partner in self.project.partnerships.filter(partner_type='funding'):
80 if not funding_partner.funding_amount:
81 validation_errors.append(
82 ValidationError(_('All funding partners should have a funding amount.'),
83 code='partners'
84 )
85 )
86 break
87
88 if not self.project.sync_owner:
89 validation_errors.append(
90 ValidationError(_('Project needs to have a reporting organisation.'),
91 code='reporting_org')
92 )
93
94 if not self.project.current_image:
95 validation_errors.append(
96 ValidationError(_('Project needs to have a photo.'),
97 code='current_image')
98 )
99
100 if not self.project.locations.all():
101 validation_errors.append(
102 ValidationError(_('Project needs to have at least one location.'),
103 code='location')
104 )
105 else:
106 for location in self.project.locations.all():
107 if not location.latitude or not location.longitude or not location.country:
108 validation_errors.append(
109 ValidationError(
110 _('All locations need to have a latitude, longitude and country '
111 'specified.'),
112 code='location')
113 )
114 break
115
116 if not self.project.budget_items.all():
117 validation_errors.append(
118 ValidationError(_('Project needs to have at least one budget item.'),
119 code='budget_item')
120 )
121 elif not self.project.budget_items.filter(amount__gt=0).exists():
122 validation_errors.append(
123 ValidationError(
124 _('Project needs to have at least one budget item with an amount.'),
125 code='budget_item'
126 )
127 )
128
129 if not self.project.sectors.all():
130 validation_errors.append(
131 ValidationError(_('Project needs to have at least one sector.'),
132 code='sector')
133 )
134 else:
135 for sector in self.project.sectors.all():
136 if not sector.sector_code:
137 validation_errors.append(
138 ValidationError(_('All sectors need to have a sector code.'),
139 code='sector')
140 )
141 break
142
143 if validation_errors:
144 raise ValidationError(validation_errors)
145
146 class Meta:
147 app_label = 'rsr'
148 verbose_name = _(u'publishing status')
149 verbose_name_plural = _(u'publishing statuses')
150 ordering = ('-status', 'project')
151
[end of akvo/rsr/models/publishing_status.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/models/publishing_status.py b/akvo/rsr/models/publishing_status.py
--- a/akvo/rsr/models/publishing_status.py
+++ b/akvo/rsr/models/publishing_status.py
@@ -4,8 +4,12 @@
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
+from django.conf import settings
from django.core.exceptions import ValidationError
+from django.core.mail import send_mail
from django.db import models
+from django.db.models.signals import post_save
+from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from ..fields import ValidXMLCharField
@@ -148,3 +152,15 @@
verbose_name = _(u'publishing status')
verbose_name_plural = _(u'publishing statuses')
ordering = ('-status', 'project')
+
+
+@receiver(post_save, sender=PublishingStatus)
+def update_denormalized_project(sender, **kwargs):
+ "Send notification that a project is published."
+ publishing_status = kwargs['instance']
+ if publishing_status.status == PublishingStatus.STATUS_PUBLISHED:
+ send_mail(
+ 'Project %s has been published' % str(publishing_status.project.pk),
+ '', getattr(settings, "DEFAULT_FROM_EMAIL", "[email protected]"),
+ getattr(settings, "NOTIFY_PUBLISH", ["[email protected]"])
+ )
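
Note that the receiver fires on every save of a row whose status is 'published', not only on the unpublished-to-published transition, which suits the monitoring intent of the issue (repeat saves simply mean repeat notifications). The two settings it reads are optional thanks to getattr; a sketch of how a deployment would pin them (addresses are placeholders):

DEFAULT_FROM_EMAIL = '[email protected]'     # sender for the notification
NOTIFY_PUBLISH = ['[email protected]']  # list of recipients
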
| {"golden_diff": "diff --git a/akvo/rsr/models/publishing_status.py b/akvo/rsr/models/publishing_status.py\n--- a/akvo/rsr/models/publishing_status.py\n+++ b/akvo/rsr/models/publishing_status.py\n@@ -4,8 +4,12 @@\n # See more details in the license.txt file located at the root folder of the Akvo RSR module.\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \n+from django.conf import settings\n from django.core.exceptions import ValidationError\n+from django.core.mail import send_mail\n from django.db import models\n+from django.db.models.signals import post_save\n+from django.dispatch import receiver\n from django.utils.translation import ugettext_lazy as _\n \n from ..fields import ValidXMLCharField\n@@ -148,3 +152,15 @@\n verbose_name = _(u'publishing status')\n verbose_name_plural = _(u'publishing statuses')\n ordering = ('-status', 'project')\n+\n+\n+@receiver(post_save, sender=PublishingStatus)\n+def update_denormalized_project(sender, **kwargs):\n+ \"Send notification that a project is published.\"\n+ publishing_status = kwargs['instance']\n+ if publishing_status.status == PublishingStatus.STATUS_PUBLISHED:\n+ send_mail(\n+ 'Project %s has been published' % str(publishing_status.project.pk),\n+ '', getattr(settings, \"DEFAULT_FROM_EMAIL\", \"[email protected]\"),\n+ getattr(settings, \"NOTIFY_PUBLISH\", [\"[email protected]\"])\n+ )\n", "issue": "Prevent automatic publishing of projects\n## Test plan\n\nGIVEN a project that is going to be published\nWHEN the project is published\nTHEN a notification mail will be sent to Kasper\n## Issue description\n\nSomehow there are projects (mainly from Commonsites and Akvo) that get published automatically, even though they shouldn't be. This should be prevented and fixed.\n\nNote; I can't find the reason why this happens. I suspect the API, but then again, this only happens for Akvo and Commonsites projects. 
Therefore I'll monitor it for now.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import ValidXMLCharField\n\n\nclass PublishingStatus(models.Model):\n \"\"\"Keep track of publishing status.\"\"\"\n STATUS_PUBLISHED = 'published'\n STATUS_UNPUBLISHED = 'unpublished'\n PUBLISHING_STATUS = (\n (STATUS_UNPUBLISHED, _(u'Unpublished')),\n (STATUS_PUBLISHED, _(u'Published')),\n )\n\n project = models.OneToOneField('Project',)\n status = ValidXMLCharField(max_length=30,\n choices=PUBLISHING_STATUS,\n db_index=True, default=STATUS_UNPUBLISHED)\n\n def clean(self):\n \"\"\"Projects can only be published, when several checks have been performed.\"\"\"\n if self.status == 'published':\n validation_errors = []\n\n if not self.project.title:\n validation_errors.append(\n ValidationError(_('Project needs to have a title.'),\n code='title')\n )\n\n if not self.project.subtitle:\n validation_errors.append(\n ValidationError(_('Project needs to have a subtitle.'),\n code='subtitle')\n )\n\n if not self.project.project_plan_summary:\n validation_errors.append(\n ValidationError(_('Project needs to have the project plan summary filled in.'),\n code='summary')\n )\n\n if not self.project.goals_overview:\n validation_errors.append(\n ValidationError(_('Project needs to have the goals overview field filled in.'),\n code='goals_overview')\n )\n\n if not self.project.date_start_planned:\n validation_errors.append(\n ValidationError(\n _('Project needs to have the planned start date field filled in.'),\n code='goals_overview')\n )\n\n if not self.project.partners:\n validation_errors.append(\n ValidationError(_('Project needs to have at least one valid partner.'),\n code='partners')\n )\n elif not self.project.partnerships.filter(\n partner_type__in=['field', 'funding', 'support']\n ).exists():\n validation_errors.append(\n ValidationError(\n _('Project needs to have at least one field, funding or support partner.'),\n code='partners'\n )\n )\n else:\n for funding_partner in self.project.partnerships.filter(partner_type='funding'):\n if not funding_partner.funding_amount:\n validation_errors.append(\n ValidationError(_('All funding partners should have a funding amount.'),\n code='partners'\n )\n )\n break\n\n if not self.project.sync_owner:\n validation_errors.append(\n ValidationError(_('Project needs to have a reporting organisation.'),\n code='reporting_org')\n )\n\n if not self.project.current_image:\n validation_errors.append(\n ValidationError(_('Project needs to have a photo.'),\n code='current_image')\n )\n\n if not self.project.locations.all():\n validation_errors.append(\n ValidationError(_('Project needs to have at least one location.'),\n code='location')\n )\n else:\n for location in self.project.locations.all():\n if not location.latitude or not location.longitude or not location.country:\n validation_errors.append(\n ValidationError(\n _('All locations need to have a latitude, longitude and country '\n 'specified.'),\n code='location')\n )\n break\n\n if not self.project.budget_items.all():\n validation_errors.append(\n ValidationError(_('Project needs to have at least one 
budget item.'),\n code='budget_item')\n )\n elif not self.project.budget_items.filter(amount__gt=0).exists():\n validation_errors.append(\n ValidationError(\n _('Project needs to have at least one budget item with an amount.'),\n code='budget_item'\n )\n )\n\n if not self.project.sectors.all():\n validation_errors.append(\n ValidationError(_('Project needs to have at least one sector.'),\n code='sector')\n )\n else:\n for sector in self.project.sectors.all():\n if not sector.sector_code:\n validation_errors.append(\n ValidationError(_('All sectors need to have a sector code.'),\n code='sector')\n )\n break\n\n if validation_errors:\n raise ValidationError(validation_errors)\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'publishing status')\n verbose_name_plural = _(u'publishing statuses')\n ordering = ('-status', 'project')\n", "path": "akvo/rsr/models/publishing_status.py"}]} | 1,979 | 352 |
gh_patches_debug_4692 | rasdani/github-patches | git_diff | watchdogpolska__feder-322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No queries shown in the SQL tab of the Django debug toolbar

</issue>
<code>
[start of config/settings/local.py]
1 # -*- coding: utf-8 -*-
2 '''
3 Local settings
4
5 - Run in Debug mode
6 - Use console backend for emails
7 - Add Django Debug Toolbar
8 - Add django-extensions as app
9 '''
10
11 from .common import * # noqa
12
13 # DEBUG
14 # ------------------------------------------------------------------------------
15 DEBUG = env.bool('DJANGO_DEBUG', default=True)
16 TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
17
18 # SECRET CONFIGURATION
19 # ------------------------------------------------------------------------------
20 # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
21 # Note: This key only used for development and testing.
22 SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
23
24 # Mail settings
25 # ------------------------------------------------------------------------------
26 EMAIL_HOST = 'localhost'
27 EMAIL_PORT = 1025
28 EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
29 default='django.core.mail.backends.console.EmailBackend')
30 EMAIL_NOTIFICATION = '[email protected]'
31
32 # CACHING
33 # ------------------------------------------------------------------------------
34 CACHES = {
35 'default': {
36 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
37 'LOCATION': ''
38 }
39 }
40
41 # django-debug-toolbar
42 # ------------------------------------------------------------------------------
43 MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
44 INSTALLED_APPS += ('debug_toolbar', )
45
46 INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
47
48 DEBUG_TOOLBAR_CONFIG = {
49 'DISABLE_PANELS': [
50 'debug_toolbar.panels.redirects.RedirectsPanel',
51 'debug_toolbar.panels.redirects.RedirectsPanel',
52 ],
53 'SHOW_TEMPLATE_CONTEXT': True,
54 }
55
56 # django-extensions
57 # ------------------------------------------------------------------------------
58 INSTALLED_APPS += ('django_extensions', )
59
60 # TESTING
61 # ------------------------------------------------------------------------------
62 TEST_RUNNER = 'django.test.runner.DiscoverRunner'
63
64 # Your local stuff: Below this line define 3rd party library settings
65 # To get all sql queries sent by Django from py shell
66 EMAILLABS_APP_KEY = env('EMAILLABS_APP_KEY', default="Dummy")
67
68 EMAILLABS_SECRET_KEY = env('EMAILLABS_SECRET_KEY', default="Dummy")
69
[end of config/settings/local.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/config/settings/local.py b/config/settings/local.py
--- a/config/settings/local.py
+++ b/config/settings/local.py
@@ -40,7 +40,7 @@
# django-debug-toolbar
# ------------------------------------------------------------------------------
-MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
+# MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
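
Commenting the append out (while leaving the app installed) points at a duplicate registration: if DebugToolbarMiddleware ends up in the middleware chain twice, each instance builds its own toolbar, and the instance that renders may hold empty panels, hence the missing SQL. A duplicate-safe alternative, in the same tuple-based style as the file (a sketch; only needed if the base settings may also add it):

if 'debug_toolbar.middleware.DebugToolbarMiddleware' not in MIDDLEWARE_CLASSES:
    MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
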
| {"golden_diff": "diff --git a/config/settings/local.py b/config/settings/local.py\n--- a/config/settings/local.py\n+++ b/config/settings/local.py\n@@ -40,7 +40,7 @@\n \n # django-debug-toolbar\n # ------------------------------------------------------------------------------\n-MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)\n+# MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)\n INSTALLED_APPS += ('debug_toolbar', )\n \n INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)\n", "issue": "brak zapyta\u0144 w zak\u0142adce SQL w Django debug toolbar \n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n'''\nLocal settings\n\n- Run in Debug mode\n- Use console backend for emails\n- Add Django Debug Toolbar\n- Add django-extensions as app\n'''\n\nfrom .common import * # noqa\n\n# DEBUG\n# ------------------------------------------------------------------------------\nDEBUG = env.bool('DJANGO_DEBUG', default=True)\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Note: This key only used for development and testing.\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\", default='CHANGEME!!!')\n\n# Mail settings\n# ------------------------------------------------------------------------------\nEMAIL_HOST = 'localhost'\nEMAIL_PORT = 1025\nEMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',\n default='django.core.mail.backends.console.EmailBackend')\nEMAIL_NOTIFICATION = '[email protected]'\n\n# CACHING\n# ------------------------------------------------------------------------------\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': ''\n }\n}\n\n# django-debug-toolbar\n# ------------------------------------------------------------------------------\nMIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)\nINSTALLED_APPS += ('debug_toolbar', )\n\nINTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)\n\nDEBUG_TOOLBAR_CONFIG = {\n 'DISABLE_PANELS': [\n 'debug_toolbar.panels.redirects.RedirectsPanel',\n 'debug_toolbar.panels.redirects.RedirectsPanel',\n ],\n 'SHOW_TEMPLATE_CONTEXT': True,\n}\n\n# django-extensions\n# ------------------------------------------------------------------------------\nINSTALLED_APPS += ('django_extensions', )\n\n# TESTING\n# ------------------------------------------------------------------------------\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n# Your local stuff: Below this line define 3rd party library settings\n# To get all sql queries sent by Django from py shell\nEMAILLABS_APP_KEY = env('EMAILLABS_APP_KEY', default=\"Dummy\")\n\nEMAILLABS_SECRET_KEY = env('EMAILLABS_SECRET_KEY', default=\"Dummy\")\n", "path": "config/settings/local.py"}]} | 1,195 | 112 |
gh_patches_debug_1380 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-4633 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dunelm spider output is missing 41 branches (dunelm_gb)
The Dunelm spider dunelm_gb is consistently returning 138 branches for the last few weeks. However, Dunelm's own online store-finder at https://www.dunelm.com/stores/a-z lists 179 branches. All of the 138 are included in the 179, meaning the spider is missing 41.
For example, the following branches appear on Dunelm's website, but aren't returned by the spider:
- https://www.dunelm.com/stores/altrincham
- https://www.dunelm.com/stores/basildon
- https://www.dunelm.com/stores/beckton
- https://www.dunelm.com/stores/beverley
I'm afraid I can't figure out how to manually replicate the spider's request, to check whether the missing branches are missing from the API return, or are just not being picked up by the spider for some reason.
I don't know if there's any connection between the missing stores. The Basildon one only opened recently in April 2022 ([source](https://www.echo-news.co.uk/news/20100489.dunelm-opens-mayflower-retail-park-basildon/)) but the Altrincham store has been around since 2017 ([source](https://www.messengernewspapers.co.uk/news/whereyoulive/15122706.customers-attend-opening-of-dunelms-new-altrincham-store/)). I've checked a few of the missing branches and found Facebook support groups with recent posts, suggesting that the stores are indeed still open.
If the API isn't returning all the stores, then perhaps the online list at https://www.dunelm.com/stores/a-z could be used by the spider instead, or maybe https://www.dunelm.com/sitemap/static-sitemap.xml (which also seems to include all 179).
</issue>
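
Of the fallbacks suggested above, the static sitemap is the most direct to consume from scrapy; a rough sketch, assuming the sitemap URL quoted in the issue and that store pages live under /stores/ (the parse callback is left as a stub, since selectors would have to be written against the live pages):

from scrapy.spiders import SitemapSpider

class DunelmSitemapSpider(SitemapSpider):
    name = "dunelm_gb_sitemap"
    sitemap_urls = ["https://www.dunelm.com/sitemap/static-sitemap.xml"]
    sitemap_rules = [("/stores/", "parse_store")]  # only follow store pages

    def parse_store(self, response):
        # Extract name, address, coordinates, etc. from the store page here.
        pass
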
<code>
[start of locations/spiders/dunelm_gb.py]
1 from scrapy.http import JsonRequest
2 from scrapy.spiders import Spider
3
4 from locations.dict_parser import DictParser
5 from locations.hours import OpeningHours
6
7
8 class DunelmGB(Spider):
9 name = "dunelm_gb"
10 item_attributes = {"brand": "Dunelm", "brand_wikidata": "Q5315020"}
11
12 def start_requests(self):
13 yield JsonRequest(
14 url="https://fy8plebn34-dsn.algolia.net/1/indexes/*/queries?x-algolia-application-id=FY8PLEBN34&x-algolia-api-key=ae9bc9ca475f6c3d7579016da0305a33",
15 data={
16 "requests": [
17 {
18 "indexName": "stores_prod",
19 "params": "hitsPerPage=300",
20 }
21 ]
22 },
23 )
24
25 def parse(self, response, **kwargs):
26 for store in response.json()["results"][0]["hits"]:
27 store["location"] = store["_geoloc"]
28
29 item = DictParser.parse(store)
30
31 item["ref"] = store["sapStoreId"]
32 item["website"] = "https://www.dunelm.com/stores/" + store["uri"]
33
34 oh = OpeningHours()
35 for rule in store["openingHours"]:
36 oh.add_range(rule["day"], rule["open"], rule["close"])
37
38 item["opening_hours"] = oh.as_opening_hours()
39
40 item["email"] = store["email"]
41 item["extras"] = {"storeType": store.get("storeType")}
42
43 yield item
44
[end of locations/spiders/dunelm_gb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/dunelm_gb.py b/locations/spiders/dunelm_gb.py
--- a/locations/spiders/dunelm_gb.py
+++ b/locations/spiders/dunelm_gb.py
@@ -37,7 +37,6 @@
item["opening_hours"] = oh.as_opening_hours()
- item["email"] = store["email"]
item["extras"] = {"storeType": store.get("storeType")}
yield item
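
Dropping the email line is consistent with the count mismatch: a hit without an "email" key raises KeyError inside the loop and aborts the rest of the parse, which would silently cost a block of branches. If the field were worth keeping, the defensive spelling would presumably be:

item["email"] = store.get("email")  # None instead of KeyError when absent
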
| {"golden_diff": "diff --git a/locations/spiders/dunelm_gb.py b/locations/spiders/dunelm_gb.py\n--- a/locations/spiders/dunelm_gb.py\n+++ b/locations/spiders/dunelm_gb.py\n@@ -37,7 +37,6 @@\n \n item[\"opening_hours\"] = oh.as_opening_hours()\n \n- item[\"email\"] = store[\"email\"]\n item[\"extras\"] = {\"storeType\": store.get(\"storeType\")}\n \n yield item\n", "issue": "Dunelm spider output is missing 41 branches (dunelm_gb)\nThe Dunelm spider dunelm_gb is consistently returning 138 branches for the last few weeks. However, Dunelm's own online store-finder at https://www.dunelm.com/stores/a-z lists 179 branches. All of the 138 are included in the 179, meaning the spider is missing 41.\r\n\r\nFor example, the following branches appear on Dunelm's website, but aren't returned by the spider:\r\n- https://www.dunelm.com/stores/altrincham\r\n- https://www.dunelm.com/stores/basildon\r\n- https://www.dunelm.com/stores/beckton\r\n- https://www.dunelm.com/stores/beverley\r\n\r\nI'm afraid I can't figure out how to manually replicate the spider's request, to check whether the missing branches are missing from the API return, or are just not being picked up by the spider for some reason.\r\n\r\nI don't know if there's any connection between the missing stores. The Basildon one only opened recently in April 2022 ([source](https://www.echo-news.co.uk/news/20100489.dunelm-opens-mayflower-retail-park-basildon/)) but the Altrincham store has been around since 2017 ([source](https://www.messengernewspapers.co.uk/news/whereyoulive/15122706.customers-attend-opening-of-dunelms-new-altrincham-store/)). I've checked a few of the missing branches and found facebook supprt groupswith recent posts, suggesting that the stores are indeed still open.\r\n\r\nIf the API isn't returning all the stores, then perhaps the online list at https://www.dunelm.com/stores/a-z could be used by the spider instead, or maybe https://www.dunelm.com/sitemap/static-sitemap.xml (which also seems to include all 179).\n", "before_files": [{"content": "from scrapy.http import JsonRequest\nfrom scrapy.spiders import Spider\n\nfrom locations.dict_parser import DictParser\nfrom locations.hours import OpeningHours\n\n\nclass DunelmGB(Spider):\n name = \"dunelm_gb\"\n item_attributes = {\"brand\": \"Dunelm\", \"brand_wikidata\": \"Q5315020\"}\n\n def start_requests(self):\n yield JsonRequest(\n url=\"https://fy8plebn34-dsn.algolia.net/1/indexes/*/queries?x-algolia-application-id=FY8PLEBN34&x-algolia-api-key=ae9bc9ca475f6c3d7579016da0305a33\",\n data={\n \"requests\": [\n {\n \"indexName\": \"stores_prod\",\n \"params\": \"hitsPerPage=300\",\n }\n ]\n },\n )\n\n def parse(self, response, **kwargs):\n for store in response.json()[\"results\"][0][\"hits\"]:\n store[\"location\"] = store[\"_geoloc\"]\n\n item = DictParser.parse(store)\n\n item[\"ref\"] = store[\"sapStoreId\"]\n item[\"website\"] = \"https://www.dunelm.com/stores/\" + store[\"uri\"]\n\n oh = OpeningHours()\n for rule in store[\"openingHours\"]:\n oh.add_range(rule[\"day\"], rule[\"open\"], rule[\"close\"])\n\n item[\"opening_hours\"] = oh.as_opening_hours()\n\n item[\"email\"] = store[\"email\"]\n item[\"extras\"] = {\"storeType\": store.get(\"storeType\")}\n\n yield item\n", "path": "locations/spiders/dunelm_gb.py"}]} | 1,412 | 108 |
gh_patches_debug_23682 | rasdani/github-patches | git_diff | sunpy__sunpy-3960 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove sunpy.instr.aia.aiaprep
The `aiaprep` function should be removed from the `sunpy.instr` subpackage. Any AIA-specific functionality should transition to the new [`aiapy` package](https://gitlab.com/LMSAL_HUB/aia_hub/aiapy).
This is part of the broader goal of moving instrument-specific functionality out of the core sunpy package and into affiliated packages.
</issue>
<code>
[start of sunpy/instr/aia.py]
1 """
2 This module provides processing routines for data captured with the AIA
3 instrument on SDO.
4 """
5 import numpy as np
6
7 import astropy.units as u
8
9 from sunpy.map.sources.sdo import AIAMap, HMIMap
10
11 __all__ = ['aiaprep']
12
13
14 def aiaprep(aiamap):
15 """
16 Processes a level 1 `~sunpy.map.sources.sdo.AIAMap` into a level 1.5
17 `~sunpy.map.sources.sdo.AIAMap`.
18
19 Rotates, scales and translates the image so that solar North is aligned
20 with the y axis, each pixel is 0.6 arcsec across, and the center of the
21 Sun is at the center of the image. The actual transformation is done by Map's `~sunpy.map.mapbase.GenericMap.rotate` method.
22
23 This function is similar in functionality to ``aia_prep`` in SSWIDL, but
24 it does not use the same transformation to rotate the image and it handles
25 the meta data differently. It should therefore not be expected to produce
26 the same results.
27
28 Parameters
29 ----------
30 aiamap : `~sunpy.map.sources.sdo.AIAMap`
31 A `sunpy.map.Map` from AIA.
32
33 Returns
34 -------
35 `~sunpy.map.sources.sdo.AIAMap`:
36 A level 1.5 copy of `~sunpy.map.sources.sdo.AIAMap`.
37
38 Notes
39 -----
40 This routine modifies the header information to the standard PCi_j WCS
41 formalism. The FITS header resulting in saving a file after this
42 procedure will therefore differ from the original file.
43 """
44
45 if not isinstance(aiamap, (AIAMap, HMIMap)):
46 raise ValueError("Input must be an AIAMap or HMIMap.")
47
48 # Target scale is 0.6 arcsec/pixel, but this needs to be adjusted if the map
49 # has already been rescaled.
50 if ((aiamap.scale[0] / 0.6).round() != 1.0 * u.arcsec / u.pix
51 and aiamap.data.shape != (4096, 4096)):
52 scale = (aiamap.scale[0] / 0.6).round() * 0.6 * u.arcsec
53 else:
54 scale = 0.6 * u.arcsec # pragma: no cover # can't test this because it needs a full res image
55 scale_factor = aiamap.scale[0] / scale
56
57 tempmap = aiamap.rotate(recenter=True, scale=scale_factor.value, missing=aiamap.min())
58
59 # extract center from padded aiamap.rotate output
60 # crpix1 and crpix2 will be equal (recenter=True), as aiaprep does not work with submaps
61 center = np.floor(tempmap.meta['crpix1'])
62 range_side = (center + np.array([-1, 1]) * aiamap.data.shape[0] / 2) * u.pix
63 newmap = tempmap.submap(u.Quantity([range_side[0], range_side[0]]),
64 u.Quantity([range_side[1], range_side[1]]))
65
66 newmap.meta['r_sun'] = newmap.meta['rsun_obs'] / newmap.meta['cdelt1']
67 newmap.meta['lvl_num'] = 1.5
68 newmap.meta['bitpix'] = -64
69
70 return newmap
71
[end of sunpy/instr/aia.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/instr/aia.py b/sunpy/instr/aia.py
--- a/sunpy/instr/aia.py
+++ b/sunpy/instr/aia.py
@@ -7,10 +7,13 @@
import astropy.units as u
from sunpy.map.sources.sdo import AIAMap, HMIMap
+from sunpy.util.decorators import deprecated
__all__ = ['aiaprep']
+@deprecated("2.0", alternative="`register` in aiapy (https://aiapy.readthedocs.io) for converting \
+AIA images to level 1.5")
def aiaprep(aiamap):
"""
Processes a level 1 `~sunpy.map.sources.sdo.AIAMap` into a level 1.5
@@ -18,7 +21,8 @@
Rotates, scales and translates the image so that solar North is aligned
with the y axis, each pixel is 0.6 arcsec across, and the center of the
- Sun is at the center of the image. The actual transformation is done by Map's `~sunpy.map.mapbase.GenericMap.rotate` method.
+ Sun is at the center of the image. The actual transformation is done by Map's
+ `~sunpy.map.mapbase.GenericMap.rotate` method.
This function is similar in functionality to ``aia_prep`` in SSWIDL, but
it does not use the same transformation to rotate the image and it handles
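
With the decorator applied, existing callers keep working but are steered toward aiapy via a deprecation warning emitted at call time (sunpy's decorator follows the astropy pattern). Roughly what a caller would observe, assuming a sunpy build that includes this patch:

import warnings
from sunpy.instr.aia import aiaprep

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    try:
        aiaprep(None)  # the warning fires before the isinstance check fails
    except ValueError:
        pass
    print(caught[0].category.__name__, caught[0].message)
    # expected: a deprecation warning pointing at `register` in aiapy
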
| {"golden_diff": "diff --git a/sunpy/instr/aia.py b/sunpy/instr/aia.py\n--- a/sunpy/instr/aia.py\n+++ b/sunpy/instr/aia.py\n@@ -7,10 +7,13 @@\n import astropy.units as u\n \n from sunpy.map.sources.sdo import AIAMap, HMIMap\n+from sunpy.util.decorators import deprecated\n \n __all__ = ['aiaprep']\n \n \n+@deprecated(\"2.0\", alternative=\"`register` in aiapy (https://aiapy.readthedocs.io) for converting \\\n+AIA images to level 1.5\")\n def aiaprep(aiamap):\n \"\"\"\n Processes a level 1 `~sunpy.map.sources.sdo.AIAMap` into a level 1.5\n@@ -18,7 +21,8 @@\n \n Rotates, scales and translates the image so that solar North is aligned\n with the y axis, each pixel is 0.6 arcsec across, and the center of the\n- Sun is at the center of the image. The actual transformation is done by Map's `~sunpy.map.mapbase.GenericMap.rotate` method.\n+ Sun is at the center of the image. The actual transformation is done by Map's\n+ `~sunpy.map.mapbase.GenericMap.rotate` method.\n \n This function is similar in functionality to ``aia_prep`` in SSWIDL, but\n it does not use the same transformation to rotate the image and it handles\n", "issue": "Remove sunpy.instr.aia.aiaprep\nThe `aiaprep` function should be removed from the `sunpy.instr` subpackage. Any AIA specific functionality should transition to the new [`aiapy` package](https://gitlab.com/LMSAL_HUB/aia_hub/aiapy).\r\n\r\nThis is part of the broader goal of moving instrument-specific functionality out of the core sunpy package and into affiliated packages.\n", "before_files": [{"content": "\"\"\"\nThis module provides processing routines for data captured with the AIA\ninstrument on SDO.\n\"\"\"\nimport numpy as np\n\nimport astropy.units as u\n\nfrom sunpy.map.sources.sdo import AIAMap, HMIMap\n\n__all__ = ['aiaprep']\n\n\ndef aiaprep(aiamap):\n \"\"\"\n Processes a level 1 `~sunpy.map.sources.sdo.AIAMap` into a level 1.5\n `~sunpy.map.sources.sdo.AIAMap`.\n\n Rotates, scales and translates the image so that solar North is aligned\n with the y axis, each pixel is 0.6 arcsec across, and the center of the\n Sun is at the center of the image. The actual transformation is done by Map's `~sunpy.map.mapbase.GenericMap.rotate` method.\n\n This function is similar in functionality to ``aia_prep`` in SSWIDL, but\n it does not use the same transformation to rotate the image and it handles\n the meta data differently. It should therefore not be expected to produce\n the same results.\n\n Parameters\n ----------\n aiamap : `~sunpy.map.sources.sdo.AIAMap`\n A `sunpy.map.Map` from AIA.\n\n Returns\n -------\n `~sunpy.map.sources.sdo.AIAMap`:\n A level 1.5 copy of `~sunpy.map.sources.sdo.AIAMap`.\n\n Notes\n -----\n This routine modifies the header information to the standard PCi_j WCS\n formalism. 
The FITS header resulting in saving a file after this\n procedure will therefore differ from the original file.\n \"\"\"\n\n if not isinstance(aiamap, (AIAMap, HMIMap)):\n raise ValueError(\"Input must be an AIAMap or HMIMap.\")\n\n # Target scale is 0.6 arcsec/pixel, but this needs to be adjusted if the map\n # has already been rescaled.\n if ((aiamap.scale[0] / 0.6).round() != 1.0 * u.arcsec / u.pix\n and aiamap.data.shape != (4096, 4096)):\n scale = (aiamap.scale[0] / 0.6).round() * 0.6 * u.arcsec\n else:\n scale = 0.6 * u.arcsec # pragma: no cover # can't test this because it needs a full res image\n scale_factor = aiamap.scale[0] / scale\n\n tempmap = aiamap.rotate(recenter=True, scale=scale_factor.value, missing=aiamap.min())\n\n # extract center from padded aiamap.rotate output\n # crpix1 and crpix2 will be equal (recenter=True), as aiaprep does not work with submaps\n center = np.floor(tempmap.meta['crpix1'])\n range_side = (center + np.array([-1, 1]) * aiamap.data.shape[0] / 2) * u.pix\n newmap = tempmap.submap(u.Quantity([range_side[0], range_side[0]]),\n u.Quantity([range_side[1], range_side[1]]))\n\n newmap.meta['r_sun'] = newmap.meta['rsun_obs'] / newmap.meta['cdelt1']\n newmap.meta['lvl_num'] = 1.5\n newmap.meta['bitpix'] = -64\n\n return newmap\n", "path": "sunpy/instr/aia.py"}]} | 1,530 | 330 |
gh_patches_debug_1436 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1303 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bump azure-cosmos to v3.2.0
**Is your feature request related to a problem? Please describe.**
We're currently on `azure-cosmos` v3.1.2. Not a ton of changes in 3.2.0, but it looks like it will be their last stable version, now that they're working on v4:

**Additional context**
Need to ensure all Cosmos tests are run live before merging (they're skipped by default).
[enhancement]
</issue>
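Since the change is a one-line version pin in `REQUIRES`, the main practical risk is an environment that silently keeps the old wheel. A minimal sanity check along these lines can be run after reinstalling; this is an illustrative sketch, not repository code, and `importlib.metadata` assumes Python 3.8+:
```python
# Confirm the environment actually resolved the bumped pin after reinstall.
from importlib.metadata import version  # stdlib on Python 3.8+

installed = version("azure-cosmos")
if installed != "3.2.0":
    raise SystemExit(f"expected azure-cosmos 3.2.0, found {installed}")
print(f"azure-cosmos {installed} looks good")
```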
<code>
[start of libraries/botbuilder-azure/setup.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "azure-cosmos==3.1.2",
9 "azure-storage-blob==2.1.0",
10 "botbuilder-schema==4.10.0",
11 "botframework-connector==4.10.0",
12 "jsonpickle==1.2",
13 ]
14 TEST_REQUIRES = ["aiounittest==1.3.0"]
15
16 root = os.path.abspath(os.path.dirname(__file__))
17
18 with open(os.path.join(root, "botbuilder", "azure", "about.py")) as f:
19 package_info = {}
20 info = f.read()
21 exec(info, package_info)
22
23 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
24 long_description = f.read()
25
26 setup(
27 name=package_info["__title__"],
28 version=package_info["__version__"],
29 url=package_info["__uri__"],
30 author=package_info["__author__"],
31 description=package_info["__description__"],
32 keywords=["BotBuilderAzure", "bots", "ai", "botframework", "botbuilder", "azure"],
33 long_description=long_description,
34 long_description_content_type="text/x-rst",
35 license=package_info["__license__"],
36 packages=["botbuilder.azure"],
37 install_requires=REQUIRES + TEST_REQUIRES,
38 tests_require=TEST_REQUIRES,
39 classifiers=[
40 "Programming Language :: Python :: 3.7",
41 "Intended Audience :: Developers",
42 "License :: OSI Approved :: MIT License",
43 "Operating System :: OS Independent",
44 "Development Status :: 5 - Production/Stable",
45 "Topic :: Scientific/Engineering :: Artificial Intelligence",
46 ],
47 )
48
[end of libraries/botbuilder-azure/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-azure/setup.py b/libraries/botbuilder-azure/setup.py
--- a/libraries/botbuilder-azure/setup.py
+++ b/libraries/botbuilder-azure/setup.py
@@ -5,7 +5,7 @@
from setuptools import setup
REQUIRES = [
- "azure-cosmos==3.1.2",
+ "azure-cosmos==3.2.0",
"azure-storage-blob==2.1.0",
"botbuilder-schema==4.10.0",
"botframework-connector==4.10.0",
| {"golden_diff": "diff --git a/libraries/botbuilder-azure/setup.py b/libraries/botbuilder-azure/setup.py\n--- a/libraries/botbuilder-azure/setup.py\n+++ b/libraries/botbuilder-azure/setup.py\n@@ -5,7 +5,7 @@\n from setuptools import setup\n \n REQUIRES = [\n- \"azure-cosmos==3.1.2\",\n+ \"azure-cosmos==3.2.0\",\n \"azure-storage-blob==2.1.0\",\n \"botbuilder-schema==4.10.0\",\n \"botframework-connector==4.10.0\",\n", "issue": "Bump azure-cosmos to v3.2.0\n**Is your feature request related to a problem? Please describe.**\r\n\r\nWe're currently on `azure-cosmos` v3.1.2. Not a ton of changes in 3.2.0, but it looks like it will be their last stable version, now that they're working on v4:\r\n\r\n\r\n\r\n**Additional context**\r\n\r\nNeed to ensure all Cosmos tests are run live before merging (they're skipped by default).\r\n\r\n[enhancement]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"azure-cosmos==3.1.2\",\n \"azure-storage-blob==2.1.0\",\n \"botbuilder-schema==4.10.0\",\n \"botframework-connector==4.10.0\",\n \"jsonpickle==1.2\",\n]\nTEST_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"azure\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderAzure\", \"bots\", \"ai\", \"botframework\", \"botbuilder\", \"azure\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.azure\"],\n install_requires=REQUIRES + TEST_REQUIRES,\n tests_require=TEST_REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-azure/setup.py"}]} | 1,188 | 136 |
gh_patches_debug_365 | rasdani/github-patches | git_diff | pypa__pipenv-5495 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include missing package data for Safety
### The issue
#5491
### The fix
Include the missing package data for Safety.
### The checklist
* [ ] Build wheels and test if it is working fine.
<!--
### If this is a patch to the `vendor` directory...
Please try to refrain from submitting patches directly to `vendor` or `patched`, but raise your issue to the upstream project instead, and inform Pipenv to upgrade when the upstream project accepts the fix.
A pull request to upgrade vendor packages is strongly discouraged, unless there is a very good reason (e.g. you need to test Pipenv’s integration to a new vendor feature). Pipenv audits and performs vendor upgrades regularly, generally before a new release is about to drop.
If your patch is not or cannot be accepted by upstream, but is essential to Pipenv (make sure to discuss this with maintainers!), please remember to attach a patch file in `tasks/vendoring/patched`, so this divergence from upstream can be recorded and replayed afterwards.
-->
</issue>
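For background on the mechanism: setuptools only ships non-Python files that are explicitly declared, so a vendored data file can exist in the repository yet be missing from built wheels. The sketch below is a generic illustration with hypothetical package names and patterns, not pipenv's actual layout:
```python
# package_data maps a package to glob patterns; matching files are copied
# into the built wheel next to that package's modules.
from setuptools import setup

setup(
    name="example",
    packages=["example", "example.vendored"],
    package_data={
        "example.vendored": ["*.yml", "templates/*.yml"],  # hypothetical patterns
    },
    include_package_data=True,
)
```
Whether a given file actually landed in the artifact can then be verified by listing the wheel contents, e.g. `unzip -l dist/example-*.whl`.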
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import codecs
3 import os
4 import sys
5
6 from setuptools import find_packages, setup
7
8 here = os.path.abspath(os.path.dirname(__file__))
9
10 with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
11 long_description = "\n" + f.read()
12
13 about = {}
14
15 with open(os.path.join(here, "pipenv", "__version__.py")) as f:
16 exec(f.read(), about)
17
18 if sys.argv[-1] == "publish":
19 os.system("python setup.py sdist bdist_wheel upload")
20 sys.exit()
21
22 required = [
23 "certifi",
24 "setuptools>=36.2.1",
25 "virtualenv-clone>=0.2.5",
26 "virtualenv",
27 ]
28 extras = {
29 "dev": [
30 "towncrier",
31 "bs4",
32 "sphinx",
33 "flake8>=3.3.0,<4.0",
34 "black;python_version>='3.7'",
35 "parver",
36 "invoke",
37 ],
38 "tests": ["pytest>=5.0", "pytest-timeout", "pytest-xdist", "flaky", "mock"],
39 }
40
41
42 setup(
43 name="pipenv",
44 version=about["__version__"],
45 description="Python Development Workflow for Humans.",
46 long_description=long_description,
47 long_description_content_type="text/markdown",
48 author="Pipenv maintainer team",
49 author_email="[email protected]",
50 url="https://github.com/pypa/pipenv",
51 packages=find_packages(exclude=["tests", "tests.*", "tasks", "tasks.*"]),
52 entry_points={
53 "console_scripts": [
54 "pipenv=pipenv:cli",
55 "pipenv-resolver=pipenv.resolver:main",
56 ]
57 },
58 package_data={
59 "": ["LICENSE", "NOTICES"],
60 "pipenv.patched.safety": ["VERSION", "safety-policy-template.yml"],
61 "pipenv.patched.pip._vendor.certifi": ["*.pem"],
62 "pipenv.patched.pip._vendor.requests": ["*.pem"],
63 "pipenv.patched.pip._vendor.distlib._backport": ["sysconfig.cfg"],
64 "pipenv.patched.pip._vendor.distlib": [
65 "t32.exe",
66 "t64.exe",
67 "w32.exe",
68 "w64.exe",
69 ],
70 },
71 python_requires=">=3.7",
72 zip_safe=True,
73 setup_requires=[],
74 install_requires=required,
75 extras_require=extras,
76 include_package_data=True,
77 license="MIT",
78 classifiers=[
79 "License :: OSI Approved :: MIT License",
80 "Programming Language :: Python",
81 "Programming Language :: Python :: 3",
82 "Programming Language :: Python :: 3.7",
83 "Programming Language :: Python :: 3.8",
84 "Programming Language :: Python :: 3.9",
85 "Programming Language :: Python :: 3.10",
86 "Programming Language :: Python :: 3.11",
87 "Programming Language :: Python :: Implementation :: CPython",
88 "Programming Language :: Python :: Implementation :: PyPy",
89 ],
90 )
91
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -67,6 +67,7 @@
"w32.exe",
"w64.exe",
],
+ "pipenv.vendor.ruamel": ["yaml"],
},
python_requires=">=3.7",
zip_safe=True,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -67,6 +67,7 @@\n \"w32.exe\",\n \"w64.exe\",\n ],\n+ \"pipenv.vendor.ruamel\": [\"yaml\"],\n },\n python_requires=\">=3.7\",\n zip_safe=True,\n", "issue": "Include missing package data for Safety\n### The issue\r\n\r\n#5491 \r\n\r\n### The fix\r\n\r\nInclude the missing package data for Safety.\r\n\r\n### The checklist\r\n\r\n* [ ] Build wheels and test if it is working fine.\r\n\r\n<!--\r\n### If this is a patch to the `vendor` directory...\r\n\r\nPlease try to refrain from submitting patches directly to `vendor` or `patched`, but raise your issue to the upstream project instead, and inform Pipenv to upgrade when the upstream project accepts the fix.\r\n\r\nA pull request to upgrade vendor packages is strongly discouraged, unless there is a very good reason (e.g. you need to test Pipenv\u2019s integration to a new vendor feature). Pipenv audits and performs vendor upgrades regularly, generally before a new release is about to drop.\r\n\r\nIf your patch is not or cannot be accepted by upstream, but is essential to Pipenv (make sure to discuss this with maintainers!), please remember to attach a patch file in `tasks/vendoring/patched`, so this divergence from upstream can be recorded and replayed afterwards.\r\n-->\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = \"\\n\" + f.read()\n\nabout = {}\n\nwith open(os.path.join(here, \"pipenv\", \"__version__.py\")) as f:\n exec(f.read(), about)\n\nif sys.argv[-1] == \"publish\":\n os.system(\"python setup.py sdist bdist_wheel upload\")\n sys.exit()\n\nrequired = [\n \"certifi\",\n \"setuptools>=36.2.1\",\n \"virtualenv-clone>=0.2.5\",\n \"virtualenv\",\n]\nextras = {\n \"dev\": [\n \"towncrier\",\n \"bs4\",\n \"sphinx\",\n \"flake8>=3.3.0,<4.0\",\n \"black;python_version>='3.7'\",\n \"parver\",\n \"invoke\",\n ],\n \"tests\": [\"pytest>=5.0\", \"pytest-timeout\", \"pytest-xdist\", \"flaky\", \"mock\"],\n}\n\n\nsetup(\n name=\"pipenv\",\n version=about[\"__version__\"],\n description=\"Python Development Workflow for Humans.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Pipenv maintainer team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pypa/pipenv\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tasks\", \"tasks.*\"]),\n entry_points={\n \"console_scripts\": [\n \"pipenv=pipenv:cli\",\n \"pipenv-resolver=pipenv.resolver:main\",\n ]\n },\n package_data={\n \"\": [\"LICENSE\", \"NOTICES\"],\n \"pipenv.patched.safety\": [\"VERSION\", \"safety-policy-template.yml\"],\n \"pipenv.patched.pip._vendor.certifi\": [\"*.pem\"],\n \"pipenv.patched.pip._vendor.requests\": [\"*.pem\"],\n \"pipenv.patched.pip._vendor.distlib._backport\": [\"sysconfig.cfg\"],\n \"pipenv.patched.pip._vendor.distlib\": [\n \"t32.exe\",\n \"t64.exe\",\n \"w32.exe\",\n \"w64.exe\",\n ],\n },\n python_requires=\">=3.7\",\n zip_safe=True,\n setup_requires=[],\n install_requires=required,\n extras_require=extras,\n include_package_data=True,\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming 
Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n)\n", "path": "setup.py"}]} | 1,620 | 77 |
gh_patches_debug_2459 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1190 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No module named 'botbuilder.ai.qna.dialogs' - Python QnA Sample 49
## Version
botbuilder-ai - 4.9.1
## Describe the bug
I was trying out the QnA Maker Sample - 49.qnamaker-all-features. I've configured my QnA KB and also config.py with the necessary info. However, the module botbuilder.ai.qna.dialogs does not seem to exist. I've also manually verified that the class QnAMakerDialog does not exist:
> from botbuilder.ai.qna.dialogs import QnAMakerDialog
## To Reproduce
Steps to reproduce the behavior:
1. Download the sample 49.qnamaker-all-features
2. Install the necessary requirements and configure QnAMaker.
3. Run python app.py in the folder
## Expected behavior
The sample should've run successfully.
[bug]
</issue>
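The underlying mechanism: setuptools installs only the subpackages enumerated in `packages`, so a module can exist in the source tree and still be absent from the installed distribution. A stripped-down sketch with hypothetical names shows how omitting one entry reproduces exactly this kind of import error:
```python
# Every importable subpackage must be listed explicitly, or it is silently
# left out of the wheel even though it exists in the repository.
from setuptools import setup

setup(
    name="example-ai",
    packages=[
        "example.ai",
        "example.ai.qna",
        "example.ai.qna.models",
        "example.ai.qna.utils",
        "example.ai.qna.dialogs",  # dropping this line makes the import fail after install
    ],
)
```
Using `find_packages()` instead of a hand-maintained list avoids this class of bug entirely.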
<code>
[start of libraries/botbuilder-ai/setup.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "azure-cognitiveservices-language-luis==0.2.0",
9 "botbuilder-schema>=4.7.1",
10 "botbuilder-core>=4.7.1",
11 "aiohttp==3.6.2",
12 ]
13
14 TESTS_REQUIRES = ["aiounittest>=1.1.0"]
15
16 root = os.path.abspath(os.path.dirname(__file__))
17
18 with open(os.path.join(root, "botbuilder", "ai", "about.py")) as f:
19 package_info = {}
20 info = f.read()
21 exec(info, package_info)
22
23 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
24 long_description = f.read()
25
26 setup(
27 name=package_info["__title__"],
28 version=package_info["__version__"],
29 url=package_info["__uri__"],
30 author=package_info["__author__"],
31 description=package_info["__description__"],
32 keywords="botbuilder-ai LUIS QnAMaker bots ai botframework botbuilder",
33 long_description=long_description,
34 long_description_content_type="text/x-rst",
35 license=package_info["__license__"],
36 packages=[
37 "botbuilder.ai",
38 "botbuilder.ai.qna",
39 "botbuilder.ai.luis",
40 "botbuilder.ai.qna.models",
41 "botbuilder.ai.qna.utils",
42 ],
43 install_requires=REQUIRES + TESTS_REQUIRES,
44 tests_require=TESTS_REQUIRES,
45 include_package_data=True,
46 classifiers=[
47 "Programming Language :: Python :: 3.7",
48 "Intended Audience :: Developers",
49 "License :: OSI Approved :: MIT License",
50 "Operating System :: OS Independent",
51 "Development Status :: 5 - Production/Stable",
52 "Topic :: Scientific/Engineering :: Artificial Intelligence",
53 ],
54 )
55
[end of libraries/botbuilder-ai/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-ai/setup.py b/libraries/botbuilder-ai/setup.py
--- a/libraries/botbuilder-ai/setup.py
+++ b/libraries/botbuilder-ai/setup.py
@@ -39,6 +39,7 @@
"botbuilder.ai.luis",
"botbuilder.ai.qna.models",
"botbuilder.ai.qna.utils",
+ "botbuilder.ai.qna.dialogs",
],
install_requires=REQUIRES + TESTS_REQUIRES,
tests_require=TESTS_REQUIRES,
| {"golden_diff": "diff --git a/libraries/botbuilder-ai/setup.py b/libraries/botbuilder-ai/setup.py\n--- a/libraries/botbuilder-ai/setup.py\n+++ b/libraries/botbuilder-ai/setup.py\n@@ -39,6 +39,7 @@\n \"botbuilder.ai.luis\",\n \"botbuilder.ai.qna.models\",\n \"botbuilder.ai.qna.utils\",\n+ \"botbuilder.ai.qna.dialogs\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n", "issue": "No module named 'botbuilder.ai.qna.dialogs' - Python QnA Sample 49\n## Version\r\nbotbuilder-ai - 4.9.1\r\n\r\n## Describe the bug\r\nI was trying out the QnA Maker Sample - 49.qnamaker-all-features . I've configured my QnA KB and also the config.py with the necessary info. However the module botbuilder.ai.qna.dialogs does not seem to exist. I've manually verified for the class QnAMakermDialog and it does not exist\r\n\r\n> from botbuilder.ai.qna.dialogs import QnAMakermDialog\r\n\r\n## To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Download the sample 49.qnamaker-all-features\r\n2. Install the necessary requirements and configure QnAMaker.\r\n3. Run python app.py in the folder\r\n\r\n## Expected behavior\r\nThe sample should've run successfully.\r\n\r\n\r\n[bug]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"azure-cognitiveservices-language-luis==0.2.0\",\n \"botbuilder-schema>=4.7.1\",\n \"botbuilder-core>=4.7.1\",\n \"aiohttp==3.6.2\",\n]\n\nTESTS_REQUIRES = [\"aiounittest>=1.1.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"ai\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=\"botbuilder-ai LUIS QnAMaker bots ai botframework botbuilder\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.ai\",\n \"botbuilder.ai.qna\",\n \"botbuilder.ai.luis\",\n \"botbuilder.ai.qna.models\",\n \"botbuilder.ai.qna.utils\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-ai/setup.py"}]} | 1,267 | 121 |
gh_patches_debug_60755 | rasdani/github-patches | git_diff | MTES-MCT__aides-territoires-174 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong article and wrong capitalization in the error message for the email address
On the `/comptes/connexion/` page, we see:
```
Saisissez un Adresse e-mail…
```
Whereas we should have:
```
Saisissez une adresse e-mail…
```

The base of the error message comes from [Django's translation files](https://github.com/django/django/blob/6376278a904e2f8b34893a7166508dfd205fdceb/django/contrib/auth/locale/fr/LC_MESSAGES/django.po):
```py
msgid ""
"Please enter a correct %(username)s and password. Note that both fields may "
"be case-sensitive."
msgstr ""
"Saisissez un %(username)s et un mot de passe valides. Remarquez que chacun "
"de ces champs est sensible à la casse (différenciation des majuscules/"
"minuscules)."
```
And in place of the `%(username)s` placeholder, this project substitutes `Adresse e-mail`.
In the current project's translation file (`django.po`), we see:
```py
msgid "Email address"
msgstr "Adresse e-mail"
```
</issue>
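For reference, Django's `AuthenticationForm` exposes an `error_messages` mapping, so a subclass can replace the generic `%(username)s`-based sentence instead of fighting the upstream translation. A minimal sketch of that approach, with illustrative wording:
```python
# Overriding "invalid_login" avoids interpolating the field label into
# Django's generic, translated error sentence.
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy as _


class LoginForm(AuthenticationForm):
    error_messages = {
        "invalid_login": _("Please enter a correct email address and password."),
        "inactive": _("This account is inactive."),
    }

    username = forms.EmailField(label=_("Your email address"), required=True)
```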
<code>
[start of src/accounts/forms.py]
1 from django import forms
2 from django.utils.translation import ugettext_lazy as _
3 from django.contrib.auth.forms import AuthenticationForm
4 from django.contrib.auth import password_validation
5
6 from accounts.models import User
7
8
9 class RegisterForm(forms.ModelForm):
10 """Form used to create new user accounts."""
11
12 email = forms.EmailField(
13 label=_('Your email address'),
14 required=True,
15 help_text=_('We will send a confirmation link to '
16 'this address before creating the account.'))
17 full_name = forms.CharField(
18 label=_('Your full name'),
19 required=True,
20 help_text=_('This is how we will address you in our communications.'))
21 ml_consent = forms.BooleanField(
22 label=_('I want to receive news and communications from the service.'),
23 required=False,
24 help_text=_('You will be able to unsubscribe at any time.'))
25
26 class Meta:
27 model = User
28 fields = ['full_name', 'email', 'ml_consent']
29
30 def __init__(self, *args, **kwargs):
31 super().__init__(*args, **kwargs)
32 self.fields['full_name'].widget.attrs.update({'autofocus': True})
33 self.fields['email'].widget.attrs.update({
34 'placeholder': _('Please double-check this value.')})
35
36 def clean_email(self):
37 email = self.cleaned_data['email']
38 return email.lower()
39
40
41 class LoginForm(AuthenticationForm):
42 username = forms.EmailField(
43 label=_('Your email address'),
44 required=True)
45 password = forms.CharField(
46 label=_('Your password'),
47 required=True,
48 strip=False,
49 widget=forms.PasswordInput)
50
51 def clean_username(self):
52 """Don't prevent users to login when they user uppercase emails."""
53
54 username = self.cleaned_data['username']
55 return username.lower()
56
57
58 class PasswordResetForm(forms.Form):
59 """Password reset request form."""
60
61 username = forms.EmailField(
62 label=_('Your email address'),
63 required=True)
64
65
66 class ProfileForm(forms.ModelForm):
67 """Edit profile related user data."""
68
69 new_password = forms.CharField(
70 label=_('Choose a new password'),
71 required=False,
72 strip=False,
73 help_text=password_validation.password_validators_help_text_html(),
74 widget=forms.PasswordInput(attrs={
75 'placeholder': _('Leave empty to keep your existing password')
76 }))
77
78 class Meta:
79 model = User
80 fields = ['full_name', 'new_password', 'ml_consent']
81 labels = {
82 'full_name': _('Your full name'),
83 'ml_consent':
84 _('Yes, I want to receive news about the service.'),
85 }
86 help_texts = {
87 'full_name':
88 _('This is how we will address you in our ' 'communications.'),
89 'ml_consent':
90 _('We will send regular updates (no more than once a month) '
91 'about the new features and updates about our service.'),
92 }
93
94 def _post_clean(self):
95 super()._post_clean()
96 # Validate the password after self.instance is updated with form data
97 # by super().
98 password = self.cleaned_data.get('new_password')
99 if password:
100 try:
101 password_validation.validate_password(password, self.instance)
102 except forms.ValidationError as error:
103 self.add_error('new_password', error)
104
105 def save(self, commit=True):
106 user = super().save(commit=False)
107
108 new_password = self.cleaned_data['new_password']
109 if new_password:
110 user.set_password(new_password)
111
112 if commit:
113 user.save()
114 return user
115
116
117 class ContributorProfileForm(forms.ModelForm):
118 """Edit contributor profile related user data."""
119
120 class Meta:
121 model = User
122 fields = ['organization', 'role', 'contact_phone']
123 labels = {
124 'organization': _('Your organization'),
125 'role': _('Your position'),
126 }
127
[end of src/accounts/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/accounts/forms.py b/src/accounts/forms.py
--- a/src/accounts/forms.py
+++ b/src/accounts/forms.py
@@ -39,6 +39,13 @@
class LoginForm(AuthenticationForm):
+ error_messages = {
+ 'invalid_login': _(
+ 'Please enter a correct email address and password.'
+ ),
+ 'inactive': _('This account is inactive.'),
+ }
+
username = forms.EmailField(
label=_('Your email address'),
required=True)
| {"golden_diff": "diff --git a/src/accounts/forms.py b/src/accounts/forms.py\n--- a/src/accounts/forms.py\n+++ b/src/accounts/forms.py\n@@ -39,6 +39,13 @@\n \n \n class LoginForm(AuthenticationForm):\n+ error_messages = {\n+ 'invalid_login': _(\n+ 'Please enter a correct email address and password.'\n+ ),\n+ 'inactive': _('This account is inactive.'),\n+ }\n+\n username = forms.EmailField(\n label=_('Your email address'),\n required=True)\n", "issue": "Mauvais article et mauvaise casse pour message d'erreur sur adresse mail\nSur la page `/comptes/connexion/`, on voit : \r\n\r\n```\r\nSaisissez un Adresse e-mail\u2026\r\n```\r\n\r\nAlors que l'on devrait avoir : \r\n\r\n```\r\nSaisissez une adresse e-mail\u2026\r\n```\r\n\r\n\r\n\r\nLa base du message d'erreur vient des [fichiers de traduction de Django](https://github.com/django/django/blob/6376278a904e2f8b34893a7166508dfd205fdceb/django/contrib/auth/locale/fr/LC_MESSAGES/django.po) : \r\n\r\n```py\r\nmsgid \"\"\r\n\"Please enter a correct %(username)s and password. Note that both fields may \"\r\n\"be case-sensitive.\"\r\nmsgstr \"\"\r\n\"Saisissez un %(username)s et un mot de passe valides. Remarquez que chacun \"\r\n\"de ces champs est sensible \u00e0 la casse (diff\u00e9renciation des majuscules/\"\r\n\"minuscules).\"\r\n```\r\n\r\nEt \u00e0 la place du placeholder `%(username)s`, on a `Adresse e-mail` dans ce projet.\r\n\r\nDans le fichier de traduction (`django.po`) du projet actuel, on voit : \r\n\r\n```py\r\nmsgid \"Email address\"\r\nmsgstr \"Adresse e-mail\"\r\n```\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import password_validation\n\nfrom accounts.models import User\n\n\nclass RegisterForm(forms.ModelForm):\n \"\"\"Form used to create new user accounts.\"\"\"\n\n email = forms.EmailField(\n label=_('Your email address'),\n required=True,\n help_text=_('We will send a confirmation link to '\n 'this address before creating the account.'))\n full_name = forms.CharField(\n label=_('Your full name'),\n required=True,\n help_text=_('This is how we will address you in our communications.'))\n ml_consent = forms.BooleanField(\n label=_('I want to receive news and communications from the service.'),\n required=False,\n help_text=_('You will be able to unsubscribe at any time.'))\n\n class Meta:\n model = User\n fields = ['full_name', 'email', 'ml_consent']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['full_name'].widget.attrs.update({'autofocus': True})\n self.fields['email'].widget.attrs.update({\n 'placeholder': _('Please double-check this value.')})\n\n def clean_email(self):\n email = self.cleaned_data['email']\n return email.lower()\n\n\nclass LoginForm(AuthenticationForm):\n username = forms.EmailField(\n label=_('Your email address'),\n required=True)\n password = forms.CharField(\n label=_('Your password'),\n required=True,\n strip=False,\n widget=forms.PasswordInput)\n\n def clean_username(self):\n \"\"\"Don't prevent users to login when they user uppercase emails.\"\"\"\n\n username = self.cleaned_data['username']\n return username.lower()\n\n\nclass PasswordResetForm(forms.Form):\n \"\"\"Password reset request form.\"\"\"\n\n username = forms.EmailField(\n label=_('Your email address'),\n required=True)\n\n\nclass ProfileForm(forms.ModelForm):\n \"\"\"Edit profile related user data.\"\"\"\n\n new_password = 
forms.CharField(\n label=_('Choose a new password'),\n required=False,\n strip=False,\n help_text=password_validation.password_validators_help_text_html(),\n widget=forms.PasswordInput(attrs={\n 'placeholder': _('Leave empty to keep your existing password')\n }))\n\n class Meta:\n model = User\n fields = ['full_name', 'new_password', 'ml_consent']\n labels = {\n 'full_name': _('Your full name'),\n 'ml_consent':\n _('Yes, I want to receive news about the service.'),\n }\n help_texts = {\n 'full_name':\n _('This is how we will address you in our ' 'communications.'),\n 'ml_consent':\n _('We will send regular updates (no more than once a month) '\n 'about the new features and updates about our service.'),\n }\n\n def _post_clean(self):\n super()._post_clean()\n # Validate the password after self.instance is updated with form data\n # by super().\n password = self.cleaned_data.get('new_password')\n if password:\n try:\n password_validation.validate_password(password, self.instance)\n except forms.ValidationError as error:\n self.add_error('new_password', error)\n\n def save(self, commit=True):\n user = super().save(commit=False)\n\n new_password = self.cleaned_data['new_password']\n if new_password:\n user.set_password(new_password)\n\n if commit:\n user.save()\n return user\n\n\nclass ContributorProfileForm(forms.ModelForm):\n \"\"\"Edit contributor profile related user data.\"\"\"\n\n class Meta:\n model = User\n fields = ['organization', 'role', 'contact_phone']\n labels = {\n 'organization': _('Your organization'),\n 'role': _('Your position'),\n }\n", "path": "src/accounts/forms.py"}]} | 1,958 | 111 |
gh_patches_debug_35502 | rasdani/github-patches | git_diff | rasterio__rasterio-457 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a function to easily display a matplotlib histogram in rio-insp
See #455 for background
Might be useful to surface this both in `rio insp` and as `rasterio.show_histogram()`.
</issue>
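As a starting point, the core of such a helper is a per-band matplotlib histogram over a shared value range. The sketch below assumes a plain `(bands, rows, cols)` NumPy array; a real helper would also need to accept `rasterio.Band` inputs and handle masked reads:
```python
import matplotlib.pyplot as plt
import numpy as np


def show_hist(arr, bins=10):
    """Plot one semi-transparent histogram per band of a raster array."""
    rng = float(arr.min()), float(arr.max())  # shared range keeps bands comparable
    bands = arr if arr.ndim == 3 else arr[np.newaxis, :, :]
    for i, band in enumerate(bands, start=1):
        plt.hist(band.flatten(), bins=bins, range=rng, alpha=0.5, label=str(i))
    plt.legend(loc="upper right")
    plt.xlabel("DN")
    plt.ylabel("Frequency")
    plt.show()
```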
<code>
[start of rasterio/tool.py]
1
2 import code
3 import collections
4 import logging
5
6 try:
7 import matplotlib.pyplot as plt
8 except ImportError:
9 plt = None
10
11 import numpy
12
13 import rasterio
14
15
16 logger = logging.getLogger('rasterio')
17
18 Stats = collections.namedtuple('Stats', ['min', 'max', 'mean'])
19
20 # Collect dictionary of functions for use in the interpreter in main()
21 funcs = locals()
22
23
24 def show(source, cmap='gray'):
25 """Show a raster using matplotlib.
26
27 The raster may be either an ndarray or a (dataset, bidx)
28 tuple.
29 """
30 if isinstance(source, tuple):
31 arr = source[0].read(source[1])
32 else:
33 arr = source
34 if plt is not None:
35 plt.imshow(arr, cmap=cmap)
36 plt.show()
37 else:
38 raise ImportError("matplotlib could not be imported")
39
40
41 def stats(source):
42 """Return a tuple with raster min, max, and mean.
43 """
44 if isinstance(source, tuple):
45 arr = source[0].read(source[1])
46 else:
47 arr = source
48 return Stats(numpy.min(arr), numpy.max(arr), numpy.mean(arr))
49
50
51 def main(banner, dataset, alt_interpreter=None):
52 """ Main entry point for use with python interpreter """
53 local = dict(funcs, src=dataset, np=numpy, rio=rasterio, plt=plt)
54 if not alt_interpreter:
55 code.interact(banner, local=local)
56 elif alt_interpreter == 'ipython':
57 import IPython
58 IPython.InteractiveShell.banner1 = banner
59 IPython.start_ipython(argv=[], user_ns=local)
60 else:
61 raise ValueError("Unsupported interpreter '%s'" % alt_interpreter)
62
63 return 0
64
[end of rasterio/tool.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rasterio/tool.py b/rasterio/tool.py
--- a/rasterio/tool.py
+++ b/rasterio/tool.py
@@ -11,6 +11,7 @@
import numpy
import rasterio
+from rasterio.five import zip_longest
logger = logging.getLogger('rasterio')
@@ -48,6 +49,76 @@
return Stats(numpy.min(arr), numpy.max(arr), numpy.mean(arr))
+def show_hist(source, bins=10, masked=True, title='Histogram'):
+
+ """
+ Easily display a histogram with matplotlib.
+
+ Parameters
+ ----------
+ bins : int, optional
+ Compute histogram across N bins.
+ data : np.array or rasterio.Band or tuple(dataset, bidx)
+ Input data to display. The first three arrays in multi-dimensional
+ arrays are plotted as red, green, and blue.
+ masked : bool, optional
+ When working with a `rasterio.Band()` object, specifies if the data
+ should be masked on read.
+ title : str, optional
+ Title for the figure.
+ """
+
+ if plt is None:
+ raise ImportError("Could not import matplotlib")
+
+ if isinstance(source, (tuple, rasterio.Band)):
+ arr = source[0].read(source[1], masked=masked)
+ else:
+ arr = source
+
+ # The histogram is computed individually for each 'band' in the array
+ # so we need the overall min/max to constrain the plot
+ rng = arr.min(), arr.max()
+
+ if len(arr.shape) is 2:
+ arr = [arr]
+ colors = ['gold']
+ else:
+ colors = ('red', 'green', 'blue', 'violet', 'gold', 'saddlebrown')
+
+ # If a rasterio.Band() is given make sure the proper index is displayed
+ # in the legend.
+ if isinstance(source, (tuple, rasterio.Band)):
+ labels = [str(source[1])]
+ else:
+ labels = (str(i + 1) for i in range(len(arr)))
+
+ # This loop should add a single plot each band in the input array,
+ # regardless of if the number of bands exceeds the number of colors.
+ # The colors slicing ensures that the number of iterations always
+ # matches the number of bands.
+ # The goal is to provide a curated set of colors for working with
+ # smaller datasets and let matplotlib define additional colors when
+ # working with larger datasets.
+ for bnd, color, label in zip_longest(arr, colors[:len(arr)], labels):
+
+ plt.hist(
+ bnd.flatten(),
+ bins=bins,
+ alpha=0.5,
+ color=color,
+ label=label,
+ range=rng
+ )
+
+ plt.legend(loc="upper right")
+ plt.title(title, fontweight='bold')
+ plt.grid(True)
+ plt.xlabel('DN')
+ plt.ylabel('Frequency')
+ plt.show()
+
+
def main(banner, dataset, alt_interpreter=None):
""" Main entry point for use with python interpreter """
local = dict(funcs, src=dataset, np=numpy, rio=rasterio, plt=plt)
| {"golden_diff": "diff --git a/rasterio/tool.py b/rasterio/tool.py\n--- a/rasterio/tool.py\n+++ b/rasterio/tool.py\n@@ -11,6 +11,7 @@\n import numpy\n \n import rasterio\n+from rasterio.five import zip_longest\n \n \n logger = logging.getLogger('rasterio')\n@@ -48,6 +49,76 @@\n return Stats(numpy.min(arr), numpy.max(arr), numpy.mean(arr))\n \n \n+def show_hist(source, bins=10, masked=True, title='Histogram'):\n+\n+ \"\"\"\n+ Easily display a histogram with matplotlib.\n+\n+ Parameters\n+ ----------\n+ bins : int, optional\n+ Compute histogram across N bins.\n+ data : np.array or rasterio.Band or tuple(dataset, bidx)\n+ Input data to display. The first three arrays in multi-dimensional\n+ arrays are plotted as red, green, and blue.\n+ masked : bool, optional\n+ When working with a `rasterio.Band()` object, specifies if the data\n+ should be masked on read.\n+ title : str, optional\n+ Title for the figure.\n+ \"\"\"\n+\n+ if plt is None:\n+ raise ImportError(\"Could not import matplotlib\")\n+\n+ if isinstance(source, (tuple, rasterio.Band)):\n+ arr = source[0].read(source[1], masked=masked)\n+ else:\n+ arr = source\n+\n+ # The histogram is computed individually for each 'band' in the array\n+ # so we need the overall min/max to constrain the plot\n+ rng = arr.min(), arr.max()\n+\n+ if len(arr.shape) is 2:\n+ arr = [arr]\n+ colors = ['gold']\n+ else:\n+ colors = ('red', 'green', 'blue', 'violet', 'gold', 'saddlebrown')\n+\n+ # If a rasterio.Band() is given make sure the proper index is displayed\n+ # in the legend.\n+ if isinstance(source, (tuple, rasterio.Band)):\n+ labels = [str(source[1])]\n+ else:\n+ labels = (str(i + 1) for i in range(len(arr)))\n+\n+ # This loop should add a single plot each band in the input array,\n+ # regardless of if the number of bands exceeds the number of colors.\n+ # The colors slicing ensures that the number of iterations always\n+ # matches the number of bands.\n+ # The goal is to provide a curated set of colors for working with\n+ # smaller datasets and let matplotlib define additional colors when\n+ # working with larger datasets.\n+ for bnd, color, label in zip_longest(arr, colors[:len(arr)], labels):\n+\n+ plt.hist(\n+ bnd.flatten(),\n+ bins=bins,\n+ alpha=0.5,\n+ color=color,\n+ label=label,\n+ range=rng\n+ )\n+\n+ plt.legend(loc=\"upper right\")\n+ plt.title(title, fontweight='bold')\n+ plt.grid(True)\n+ plt.xlabel('DN')\n+ plt.ylabel('Frequency')\n+ plt.show()\n+\n+\n def main(banner, dataset, alt_interpreter=None):\n \"\"\" Main entry point for use with python interpreter \"\"\"\n local = dict(funcs, src=dataset, np=numpy, rio=rasterio, plt=plt)\n", "issue": "Add a function to easily display a matotlib histogram in rio-insp\nSee #455 for background \n\nMight be useful to surface this both in `rio insp` and as `rasterio.show_histogram()`.\n\n", "before_files": [{"content": "\nimport code\nimport collections\nimport logging\n\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError:\n plt = None\n\nimport numpy\n\nimport rasterio\n\n\nlogger = logging.getLogger('rasterio')\n\nStats = collections.namedtuple('Stats', ['min', 'max', 'mean'])\n\n# Collect dictionary of functions for use in the interpreter in main()\nfuncs = locals()\n\n\ndef show(source, cmap='gray'):\n \"\"\"Show a raster using matplotlib.\n\n The raster may be either an ndarray or a (dataset, bidx)\n tuple.\n \"\"\"\n if isinstance(source, tuple):\n arr = source[0].read(source[1])\n else:\n arr = source\n if plt is not None:\n plt.imshow(arr, cmap=cmap)\n plt.show()\n else:\n raise 
ImportError(\"matplotlib could not be imported\")\n\n\ndef stats(source):\n \"\"\"Return a tuple with raster min, max, and mean.\n \"\"\"\n if isinstance(source, tuple):\n arr = source[0].read(source[1])\n else:\n arr = source\n return Stats(numpy.min(arr), numpy.max(arr), numpy.mean(arr))\n\n\ndef main(banner, dataset, alt_interpreter=None):\n \"\"\" Main entry point for use with python interpreter \"\"\"\n local = dict(funcs, src=dataset, np=numpy, rio=rasterio, plt=plt)\n if not alt_interpreter:\n code.interact(banner, local=local)\n elif alt_interpreter == 'ipython':\n import IPython\n IPython.InteractiveShell.banner1 = banner\n IPython.start_ipython(argv=[], user_ns=local)\n else:\n raise ValueError(\"Unsupported interpreter '%s'\" % alt_interpreter)\n\n return 0\n", "path": "rasterio/tool.py"}]} | 1,077 | 746 |
gh_patches_debug_36267 | rasdani/github-patches | git_diff | liberapay__liberapay.com-129 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix sign-in
We seem to have a problem with old cookies from a different account preventing log in.
Goal page is not accessible once connected
"405 Method Not Allowed"
on this page
https://liberapay.com/unisson/goal.html when I want to change my goal.
I'm logged in to my account.
</issue>
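The cookie symptom points to an ordering problem between the two authentication paths: if a stale session cookie short-circuits authentication, freshly POSTed credentials are never checked. In pseudocode terms the fix is to try cookie auth first but let form auth supersede it; the helper names below are hypothetical, not Liberapay's actual API:
```python
def authenticate(request, cookies):
    user = check_session_cookie(cookies)          # may resolve to a stale account
    if request.method == "POST":
        fresh = check_form_credentials(request)   # hypothetical helpers throughout
        if fresh is not None:
            if user is not None:
                sign_out(user, cookies)           # evict the old session first
            sign_in(fresh, cookies)
            user = fresh
    return user
```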
<code>
[start of liberapay/security/authentication.py]
1 """Defines website authentication helpers.
2 """
3 import binascii
4
5 from aspen import Response
6 from liberapay.constants import SESSION
7 from liberapay.models.participant import Participant
8
9
10 class _ANON(object):
11 ANON = True
12 is_admin = False
13 id = None
14 __bool__ = __nonzero__ = lambda *a: False
15 get_tip_to = lambda self, tippee: Participant._zero_tip_dict(tippee)
16 __repr__ = lambda self: '<ANON>'
17
18
19 ANON = _ANON()
20
21
22 def sign_in(request, state):
23 try:
24 body = request.body
25 except Response:
26 return
27
28 p = None
29
30 if body.get('log-in.username'):
31 p = Participant.authenticate(
32 'username', 'password',
33 body.pop('log-in.username'), body.pop('log-in.password')
34 )
35 if p and p.status == 'closed':
36 p.update_status('active')
37
38 elif body.get('sign-in.username'):
39 if body.pop('sign-in.terms') != 'agree':
40 raise Response(400, 'you have to agree to the terms')
41 kind = body.pop('sign-in.kind')
42 if kind not in ('individual', 'organization'):
43 raise Response(400, 'bad kind')
44 with state['website'].db.get_cursor() as c:
45 p = Participant.make_active(
46 body.pop('sign-in.username'), kind, body.pop('sign-in.password'),
47 cursor=c
48 )
49 p.add_email(body.pop('sign-in.email'), cursor=c)
50 p.authenticated = True
51
52 if p:
53 response = state.setdefault('response', Response())
54 p.sign_in(response.headers.cookie)
55 if body.pop('form.repost', None) != 'true':
56 response.redirect(request.line.uri)
57 state['user'] = p
58
59
60 def start_user_as_anon():
61 """Make sure we always have a user object, regardless of exceptions during authentication.
62 """
63 return {'user': ANON}
64
65
66 def authenticate_user_if_possible(request, state, user):
67 """This signs the user in.
68 """
69 if request.line.uri.startswith('/assets/'):
70 return
71 if 'Authorization' in request.headers:
72 header = request.headers['authorization']
73 if not header.startswith('Basic '):
74 raise Response(401, 'Unsupported authentication method')
75 try:
76 creds = binascii.a2b_base64(header[len('Basic '):]).split(':', 1)
77 except binascii.Error:
78 raise Response(400, 'Malformed "Authorization" header')
79 participant = Participant.authenticate('id', 'password', *creds)
80 if not participant:
81 raise Response(401)
82 return {'user': participant}
83 elif SESSION in request.headers.cookie:
84 creds = request.headers.cookie[SESSION].value.split(':', 1)
85 p = Participant.authenticate('id', 'session', *creds)
86 if p:
87 return {'user': p}
88 elif request.method == 'POST':
89 sign_in(request, state)
90
91
92 def add_auth_to_response(response, request=None, user=ANON):
93 if request is None:
94 return # early parsing must've failed
95 if request.line.uri.startswith('/assets/'):
96 return # assets never get auth headers
97
98 if SESSION in request.headers.cookie:
99 if not user.ANON:
100 user.keep_signed_in(response.headers.cookie)
101
[end of liberapay/security/authentication.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/liberapay/security/authentication.py b/liberapay/security/authentication.py
--- a/liberapay/security/authentication.py
+++ b/liberapay/security/authentication.py
@@ -19,12 +19,15 @@
ANON = _ANON()
-def sign_in(request, state):
+def sign_in_with_form_data(request, state):
try:
body = request.body
except Response:
return
+ if not isinstance(body, dict):
+ return
+
p = None
if body.get('log-in.username'):
@@ -49,12 +52,7 @@
p.add_email(body.pop('sign-in.email'), cursor=c)
p.authenticated = True
- if p:
- response = state.setdefault('response', Response())
- p.sign_in(response.headers.cookie)
- if body.pop('form.repost', None) != 'true':
- response.redirect(request.line.uri)
- state['user'] = p
+ return p
def start_user_as_anon():
@@ -68,6 +66,8 @@
"""
if request.line.uri.startswith('/assets/'):
return
+
+ # HTTP auth
if 'Authorization' in request.headers:
header = request.headers['authorization']
if not header.startswith('Basic '):
@@ -80,13 +80,26 @@
if not participant:
raise Response(401)
return {'user': participant}
- elif SESSION in request.headers.cookie:
+
+ # Cookie and form auth
+ # We want to try cookie auth first, but we want form auth to supersede it
+ p = None
+ response = state.setdefault('response', Response())
+ if SESSION in request.headers.cookie:
creds = request.headers.cookie[SESSION].value.split(':', 1)
p = Participant.authenticate('id', 'session', *creds)
if p:
- return {'user': p}
- elif request.method == 'POST':
- sign_in(request, state)
+ state['user'] = p
+ if request.method == 'POST':
+ old_p = p
+ p = sign_in_with_form_data(request, state)
+ if p:
+ if old_p:
+ old_p.sign_out(response.headers.cookie)
+ p.sign_in(response.headers.cookie)
+ state['user'] = p
+ if request.body.pop('form.repost', None) != 'true':
+ response.redirect(request.line.uri)
def add_auth_to_response(response, request=None, user=ANON):
| {"golden_diff": "diff --git a/liberapay/security/authentication.py b/liberapay/security/authentication.py\n--- a/liberapay/security/authentication.py\n+++ b/liberapay/security/authentication.py\n@@ -19,12 +19,15 @@\n ANON = _ANON()\n \n \n-def sign_in(request, state):\n+def sign_in_with_form_data(request, state):\n try:\n body = request.body\n except Response:\n return\n \n+ if not isinstance(body, dict):\n+ return\n+\n p = None\n \n if body.get('log-in.username'):\n@@ -49,12 +52,7 @@\n p.add_email(body.pop('sign-in.email'), cursor=c)\n p.authenticated = True\n \n- if p:\n- response = state.setdefault('response', Response())\n- p.sign_in(response.headers.cookie)\n- if body.pop('form.repost', None) != 'true':\n- response.redirect(request.line.uri)\n- state['user'] = p\n+ return p\n \n \n def start_user_as_anon():\n@@ -68,6 +66,8 @@\n \"\"\"\n if request.line.uri.startswith('/assets/'):\n return\n+\n+ # HTTP auth\n if 'Authorization' in request.headers:\n header = request.headers['authorization']\n if not header.startswith('Basic '):\n@@ -80,13 +80,26 @@\n if not participant:\n raise Response(401)\n return {'user': participant}\n- elif SESSION in request.headers.cookie:\n+\n+ # Cookie and form auth\n+ # We want to try cookie auth first, but we want form auth to supersede it\n+ p = None\n+ response = state.setdefault('response', Response())\n+ if SESSION in request.headers.cookie:\n creds = request.headers.cookie[SESSION].value.split(':', 1)\n p = Participant.authenticate('id', 'session', *creds)\n if p:\n- return {'user': p}\n- elif request.method == 'POST':\n- sign_in(request, state)\n+ state['user'] = p\n+ if request.method == 'POST':\n+ old_p = p\n+ p = sign_in_with_form_data(request, state)\n+ if p:\n+ if old_p:\n+ old_p.sign_out(response.headers.cookie)\n+ p.sign_in(response.headers.cookie)\n+ state['user'] = p\n+ if request.body.pop('form.repost', None) != 'true':\n+ response.redirect(request.line.uri)\n \n \n def add_auth_to_response(response, request=None, user=ANON):\n", "issue": "Fix sign-in\nWe seem to have a problem with old cookies from a different account preventing log in.\n\nGoal page is not accessible once connected\n\"405 Method Not Allowed\"\non this page \nhttps://liberapay.com/unisson/goal.html when i want to change my goal\nI'm connected on my account.\n\nFix sign-in\nWe seem to have a problem with old cookies from a different account preventing log in.\n\n", "before_files": [{"content": "\"\"\"Defines website authentication helpers.\n\"\"\"\nimport binascii\n\nfrom aspen import Response\nfrom liberapay.constants import SESSION\nfrom liberapay.models.participant import Participant\n\n\nclass _ANON(object):\n ANON = True\n is_admin = False\n id = None\n __bool__ = __nonzero__ = lambda *a: False\n get_tip_to = lambda self, tippee: Participant._zero_tip_dict(tippee)\n __repr__ = lambda self: '<ANON>'\n\n\nANON = _ANON()\n\n\ndef sign_in(request, state):\n try:\n body = request.body\n except Response:\n return\n\n p = None\n\n if body.get('log-in.username'):\n p = Participant.authenticate(\n 'username', 'password',\n body.pop('log-in.username'), body.pop('log-in.password')\n )\n if p and p.status == 'closed':\n p.update_status('active')\n\n elif body.get('sign-in.username'):\n if body.pop('sign-in.terms') != 'agree':\n raise Response(400, 'you have to agree to the terms')\n kind = body.pop('sign-in.kind')\n if kind not in ('individual', 'organization'):\n raise Response(400, 'bad kind')\n with state['website'].db.get_cursor() as c:\n p = Participant.make_active(\n 
body.pop('sign-in.username'), kind, body.pop('sign-in.password'),\n cursor=c\n )\n p.add_email(body.pop('sign-in.email'), cursor=c)\n p.authenticated = True\n\n if p:\n response = state.setdefault('response', Response())\n p.sign_in(response.headers.cookie)\n if body.pop('form.repost', None) != 'true':\n response.redirect(request.line.uri)\n state['user'] = p\n\n\ndef start_user_as_anon():\n \"\"\"Make sure we always have a user object, regardless of exceptions during authentication.\n \"\"\"\n return {'user': ANON}\n\n\ndef authenticate_user_if_possible(request, state, user):\n \"\"\"This signs the user in.\n \"\"\"\n if request.line.uri.startswith('/assets/'):\n return\n if 'Authorization' in request.headers:\n header = request.headers['authorization']\n if not header.startswith('Basic '):\n raise Response(401, 'Unsupported authentication method')\n try:\n creds = binascii.a2b_base64(header[len('Basic '):]).split(':', 1)\n except binascii.Error:\n raise Response(400, 'Malformed \"Authorization\" header')\n participant = Participant.authenticate('id', 'password', *creds)\n if not participant:\n raise Response(401)\n return {'user': participant}\n elif SESSION in request.headers.cookie:\n creds = request.headers.cookie[SESSION].value.split(':', 1)\n p = Participant.authenticate('id', 'session', *creds)\n if p:\n return {'user': p}\n elif request.method == 'POST':\n sign_in(request, state)\n\n\ndef add_auth_to_response(response, request=None, user=ANON):\n if request is None:\n return # early parsing must've failed\n if request.line.uri.startswith('/assets/'):\n return # assets never get auth headers\n\n if SESSION in request.headers.cookie:\n if not user.ANON:\n user.keep_signed_in(response.headers.cookie)\n", "path": "liberapay/security/authentication.py"}]} | 1,553 | 565 |
gh_patches_debug_18954 | rasdani/github-patches | git_diff | conan-io__conan-center-index-9784 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] imgui/any. "Docking" feature
### Package Details
* Package Name/Version: **imgui/any**
* Homepage: **https://github.com/ocornut/imgui**
There is a "Docking" feature in this library that has lived in a separate branch for a long time (since 2018) and is still regularly updated (the last merge from master was about 14 days ago). I'd like to have a package with this feature, maybe something like `imgui/1.84.2-docking`.
</issue>
<code>
[start of recipes/imgui/all/conanfile.py]
1 from conans import ConanFile, CMake, tools
2 import os
3
4 required_conan_version = ">=1.33.0"
5
6
7 class IMGUIConan(ConanFile):
8 name = "imgui"
9 url = "https://github.com/conan-io/conan-center-index"
10 homepage = "https://github.com/ocornut/imgui"
11 description = "Bloat-free Immediate Mode Graphical User interface for C++ with minimal dependencies"
12 topics = ("dear", "imgui", "gui", "graphical", "bloat-free", )
13 license = "MIT"
14
15 settings = "os", "arch", "compiler", "build_type"
16 options = {
17 "shared": [True, False],
18 "fPIC": [True, False],
19 }
20 default_options = {
21 "shared": False,
22 "fPIC": True,
23 }
24
25 exports_sources = "CMakeLists.txt"
26 generators = "cmake"
27 _cmake = None
28
29 @property
30 def _source_subfolder(self):
31 return "source_subfolder"
32
33 def config_options(self):
34 if self.settings.os == "Windows":
35 del self.options.fPIC
36
37 def configure(self):
38 if self.options.shared:
39 del self.options.fPIC
40
41 def source(self):
42 tools.get(**self.conan_data["sources"][self.version],
43 destination=self._source_subfolder, strip_root=True)
44
45 def _configure_cmake(self):
46 if self._cmake:
47 return self._cmake
48 self._cmake = CMake(self)
49 self._cmake.configure()
50 return self._cmake
51
52 def build(self):
53 cmake = self._configure_cmake()
54 cmake.build()
55
56 def package(self):
57 self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder)
58 backends_folder = os.path.join(
59 self._source_subfolder,
60 "backends" if tools.Version(self.version) >= "1.80" else "examples"
61 )
62 self.copy(pattern="imgui_impl_*",
63 dst=os.path.join("res", "bindings"),
64 src=backends_folder)
65 cmake = self._configure_cmake()
66 cmake.install()
67
68 def package_info(self):
69 self.cpp_info.libs = ["imgui"]
70 self.cpp_info.defines.append("IMGUI_USER_CONFIG=\"imgui_user_config.h\"")
71 if self.settings.os == "Linux":
72 self.cpp_info.system_libs.append("m")
73 self.cpp_info.srcdirs = [os.path.join("res", "bindings")]
74
75 bin_path = os.path.join(self.package_folder, "bin")
76 self.output.info("Appending PATH env var with : {}".format(bin_path))
77 self.env_info.PATH.append(bin_path)
78
[end of recipes/imgui/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/imgui/all/conanfile.py b/recipes/imgui/all/conanfile.py
--- a/recipes/imgui/all/conanfile.py
+++ b/recipes/imgui/all/conanfile.py
@@ -1,5 +1,6 @@
from conans import ConanFile, CMake, tools
import os
+import re
required_conan_version = ">=1.33.0"
@@ -55,9 +56,11 @@
def package(self):
self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder)
+ m = re.match(r'cci\.\d{8}\+(?P<version>\d+\.\d+)\.docking', str(self.version))
+ version = tools.Version(m.group('version')) if m else tools.Version(self.version)
backends_folder = os.path.join(
self._source_subfolder,
- "backends" if tools.Version(self.version) >= "1.80" else "examples"
+ "backends" if version >= "1.80" else "examples"
)
self.copy(pattern="imgui_impl_*",
dst=os.path.join("res", "bindings"),
| {"golden_diff": "diff --git a/recipes/imgui/all/conanfile.py b/recipes/imgui/all/conanfile.py\n--- a/recipes/imgui/all/conanfile.py\n+++ b/recipes/imgui/all/conanfile.py\n@@ -1,5 +1,6 @@\n from conans import ConanFile, CMake, tools\n import os\n+import re\n \n required_conan_version = \">=1.33.0\"\n \n@@ -55,9 +56,11 @@\n \n def package(self):\n self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\", src=self._source_subfolder)\n+ m = re.match(r'cci\\.\\d{8}\\+(?P<version>\\d+\\.\\d+)\\.docking', str(self.version))\n+ version = tools.Version(m.group('version')) if m else tools.Version(self.version)\n backends_folder = os.path.join(\n self._source_subfolder,\n- \"backends\" if tools.Version(self.version) >= \"1.80\" else \"examples\"\n+ \"backends\" if version >= \"1.80\" else \"examples\"\n )\n self.copy(pattern=\"imgui_impl_*\",\n dst=os.path.join(\"res\", \"bindings\"),\n", "issue": "[request] imgui/any. \"Docking\" feature\n### Package Details\r\n * Package Name/Version: **imgui/any**\r\n * Homepage: **https://github.com/ocornut/imgui**\r\n\r\nThere is a feature \"Docking\" in this library that lives in a separate branch a long time (from 2018) and is still regularly updated over time (last merge from master about 14 days ago). I'd wanted to have a package with this feature, maybe something like `imgui/1.84.2-docking`\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass IMGUIConan(ConanFile):\n name = \"imgui\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/ocornut/imgui\"\n description = \"Bloat-free Immediate Mode Graphical User interface for C++ with minimal dependencies\"\n topics = (\"dear\", \"imgui\", \"gui\", \"graphical\", \"bloat-free\", )\n license = \"MIT\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\"\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\", src=self._source_subfolder)\n backends_folder = os.path.join(\n self._source_subfolder,\n \"backends\" if tools.Version(self.version) >= \"1.80\" else \"examples\"\n )\n self.copy(pattern=\"imgui_impl_*\",\n dst=os.path.join(\"res\", \"bindings\"),\n src=backends_folder)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs = [\"imgui\"]\n self.cpp_info.defines.append(\"IMGUI_USER_CONFIG=\\\"imgui_user_config.h\\\"\")\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"m\")\n self.cpp_info.srcdirs = [os.path.join(\"res\", \"bindings\")]\n\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH env var with : {}\".format(bin_path))\n 
self.env_info.PATH.append(bin_path)\n", "path": "recipes/imgui/all/conanfile.py"}]} | 1,408 | 265 |
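
The imgui patch above hinges on one regex: docking snapshots are published under versions of the form `cci.<YYYYMMDD>+<major.minor>.docking`, so the recipe extracts the embedded `major.minor` before deciding whether the bindings live in `backends/` (1.80 and later) or `examples/`. A standalone sketch using only the stdlib `re` module; the real recipe compares with `conans.tools.Version`, a crude tuple compare stands in here, and the `cci.20211122+1.85.docking` string is just an illustration of the scheme:

```python
import re

DOCKING_RE = re.compile(r"cci\.\d{8}\+(?P<version>\d+\.\d+)\.docking")

def backends_folder(version_str: str) -> str:
    """Pick the folder that holds the platform bindings for a given version."""
    m = DOCKING_RE.match(version_str)
    base = m.group("version") if m else version_str
    major_minor = tuple(int(p) for p in base.split(".")[:2])
    return "backends" if major_minor >= (1, 80) else "examples"

assert backends_folder("1.79") == "examples"
assert backends_folder("1.84.2") == "backends"
assert backends_folder("cci.20211122+1.85.docking") == "backends"
```
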
gh_patches_debug_26771 | rasdani/github-patches | git_diff | searx__searx-3472 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
remove distutils usage
In Python 3.10 and 3.11, distutils has been formally marked as deprecated. Code that imports distutils will no longer work from Python 3.12.
`searx_extra/update/update_firefox_version.py` still uses distutils. Maybe its use of `distutils.version` can be replaced by the `packaging` module?
See https://peps.python.org/pep-0632/
</issue>
<code>
[start of searx_extra/update/update_firefox_version.py]
1 #!/usr/bin/env python
2
3 import json
4 import requests
5 import re
6 from os.path import dirname, join
7 from urllib.parse import urlparse, urljoin
8 from distutils.version import LooseVersion, StrictVersion
9 from lxml import html
10 from searx import searx_dir
11
12 URL = 'https://ftp.mozilla.org/pub/firefox/releases/'
13 RELEASE_PATH = '/pub/firefox/releases/'
14
15 NORMAL_REGEX = re.compile('^[0-9]+\.[0-9](\.[0-9])?$')
16 # BETA_REGEX = re.compile('.*[0-9]b([0-9\-a-z]+)$')
17 # ESR_REGEX = re.compile('^[0-9]+\.[0-9](\.[0-9])?esr$')
18
19 #
20 useragents = {
21 "versions": (),
22 "os": ('Windows NT 10.0; WOW64',
23 'X11; Linux x86_64'),
24 "ua": "Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}"
25 }
26
27
28 def fetch_firefox_versions():
29 resp = requests.get(URL, timeout=2.0)
30 if resp.status_code != 200:
31 raise Exception("Error fetching firefox versions, HTTP code " + resp.status_code)
32 else:
33 dom = html.fromstring(resp.text)
34 versions = []
35
36 for link in dom.xpath('//a/@href'):
37 url = urlparse(urljoin(URL, link))
38 path = url.path
39 if path.startswith(RELEASE_PATH):
40 version = path[len(RELEASE_PATH):-1]
41 if NORMAL_REGEX.match(version):
42 versions.append(LooseVersion(version))
43
44 list.sort(versions, reverse=True)
45 return versions
46
47
48 def fetch_firefox_last_versions():
49 versions = fetch_firefox_versions()
50
51 result = []
52 major_last = versions[0].version[0]
53 major_list = (major_last, major_last - 1)
54 for version in versions:
55 major_current = version.version[0]
56 if major_current in major_list:
57 result.append(version.vstring)
58
59 return result
60
61
62 def get_useragents_filename():
63 return join(join(searx_dir, "data"), "useragents.json")
64
65
66 useragents["versions"] = fetch_firefox_last_versions()
67 with open(get_useragents_filename(), "w") as f:
68 json.dump(useragents, f, indent=4, ensure_ascii=False)
69
[end of searx_extra/update/update_firefox_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx_extra/update/update_firefox_version.py b/searx_extra/update/update_firefox_version.py
--- a/searx_extra/update/update_firefox_version.py
+++ b/searx_extra/update/update_firefox_version.py
@@ -5,7 +5,7 @@
import re
from os.path import dirname, join
from urllib.parse import urlparse, urljoin
-from distutils.version import LooseVersion, StrictVersion
+from packaging.version import Version, parse
from lxml import html
from searx import searx_dir
@@ -39,7 +39,7 @@
if path.startswith(RELEASE_PATH):
version = path[len(RELEASE_PATH):-1]
if NORMAL_REGEX.match(version):
- versions.append(LooseVersion(version))
+ versions.append(Version(version))
list.sort(versions, reverse=True)
return versions
@@ -49,12 +49,12 @@
versions = fetch_firefox_versions()
result = []
- major_last = versions[0].version[0]
+ major_last = versions[0].major
major_list = (major_last, major_last - 1)
for version in versions:
- major_current = version.version[0]
+ major_current = version.major
if major_current in major_list:
- result.append(version.vstring)
+ result.append(str(version))
return result
| {"golden_diff": "diff --git a/searx_extra/update/update_firefox_version.py b/searx_extra/update/update_firefox_version.py\n--- a/searx_extra/update/update_firefox_version.py\n+++ b/searx_extra/update/update_firefox_version.py\n@@ -5,7 +5,7 @@\n import re\n from os.path import dirname, join\n from urllib.parse import urlparse, urljoin\n-from distutils.version import LooseVersion, StrictVersion\n+from packaging.version import Version, parse\n from lxml import html\n from searx import searx_dir\n \n@@ -39,7 +39,7 @@\n if path.startswith(RELEASE_PATH):\n version = path[len(RELEASE_PATH):-1]\n if NORMAL_REGEX.match(version):\n- versions.append(LooseVersion(version))\n+ versions.append(Version(version))\n \n list.sort(versions, reverse=True)\n return versions\n@@ -49,12 +49,12 @@\n versions = fetch_firefox_versions()\n \n result = []\n- major_last = versions[0].version[0]\n+ major_last = versions[0].major\n major_list = (major_last, major_last - 1)\n for version in versions:\n- major_current = version.version[0]\n+ major_current = version.major\n if major_current in major_list:\n- result.append(version.vstring)\n+ result.append(str(version))\n \n return result\n", "issue": "remove distutils usage\nIn Python 3.10 and 3.11, distutils has been formally marked as deprecated. Code that imports distutils will no longer work from Python 3.12.\r\n\r\n`searx_extra/update/update_firefox_version.py` still uses distutils. Maybe its use of `distutils.version` can be replaced by the `packaging` module?\r\n\r\nSee https://peps.python.org/pep-0632/\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport json\nimport requests\nimport re\nfrom os.path import dirname, join\nfrom urllib.parse import urlparse, urljoin\nfrom distutils.version import LooseVersion, StrictVersion\nfrom lxml import html\nfrom searx import searx_dir\n\nURL = 'https://ftp.mozilla.org/pub/firefox/releases/'\nRELEASE_PATH = '/pub/firefox/releases/'\n\nNORMAL_REGEX = re.compile('^[0-9]+\\.[0-9](\\.[0-9])?$')\n# BETA_REGEX = re.compile('.*[0-9]b([0-9\\-a-z]+)$')\n# ESR_REGEX = re.compile('^[0-9]+\\.[0-9](\\.[0-9])?esr$')\n\n# \nuseragents = {\n \"versions\": (),\n \"os\": ('Windows NT 10.0; WOW64',\n 'X11; Linux x86_64'),\n \"ua\": \"Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}\"\n}\n\n\ndef fetch_firefox_versions():\n resp = requests.get(URL, timeout=2.0)\n if resp.status_code != 200:\n raise Exception(\"Error fetching firefox versions, HTTP code \" + resp.status_code)\n else:\n dom = html.fromstring(resp.text)\n versions = []\n\n for link in dom.xpath('//a/@href'):\n url = urlparse(urljoin(URL, link))\n path = url.path\n if path.startswith(RELEASE_PATH):\n version = path[len(RELEASE_PATH):-1]\n if NORMAL_REGEX.match(version):\n versions.append(LooseVersion(version))\n\n list.sort(versions, reverse=True)\n return versions\n\n\ndef fetch_firefox_last_versions():\n versions = fetch_firefox_versions()\n\n result = []\n major_last = versions[0].version[0]\n major_list = (major_last, major_last - 1)\n for version in versions:\n major_current = version.version[0]\n if major_current in major_list:\n result.append(version.vstring)\n\n return result\n\n\ndef get_useragents_filename():\n return join(join(searx_dir, \"data\"), \"useragents.json\")\n\n\nuseragents[\"versions\"] = fetch_firefox_last_versions()\nwith open(get_useragents_filename(), \"w\") as f:\n json.dump(useragents, f, indent=4, ensure_ascii=False)\n", "path": "searx_extra/update/update_firefox_version.py"}]} | 1,303 | 305 |
gh_patches_debug_22803 | rasdani/github-patches | git_diff | python-poetry__poetry-6191 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include strtobool in utils.extras to speed up the shell command
# Pull Request Check List
<!-- This is just a reminder about the most common mistakes. Please make sure that you tick all *appropriate* boxes. But please read our [contribution guide](https://python-poetry.org/docs/contributing/) at least once, it will save you unnecessary review cycles! -->
- [x] Added **tests** for changed code.
- [ ] Updated **documentation** for changed code. (The change doesn't reflect in the current doc)
<!-- If you have *any* questions to *any* of the points above, just **submit and ask**! This checklist is here to *help* you, not to deter you from contributing! -->
# What does this PR do?
This line `from distutils.util import strtobool` in `console.commands.shell` makes the `poetry shell` command slow, since importing `distutils.util` also does a bunch of other things.

`strtobool` is a very simple function (~10 lines); if poetry included this function itself, for example by putting it in `utils.extras.py` and using `from poetry.utils.extras import strtobool`, `poetry shell` would run faster.
[Discord discussion link](https://discord.com/channels/487711540787675139/974839878669987840/988024065933594704)
</issue>
<code>
[start of src/poetry/console/commands/shell.py]
1 from __future__ import annotations
2
3 import sys
4
5 from distutils.util import strtobool
6 from os import environ
7 from typing import TYPE_CHECKING
8 from typing import cast
9
10 from poetry.console.commands.env_command import EnvCommand
11
12
13 if TYPE_CHECKING:
14 from poetry.utils.env import VirtualEnv
15
16
17 class ShellCommand(EnvCommand):
18 name = "shell"
19 description = "Spawns a shell within the virtual environment."
20
21 help = """The <info>shell</> command spawns a shell, according to the
22 <comment>$SHELL</> environment variable, within the virtual environment.
23 If one doesn't exist yet, it will be created.
24 """
25
26 def handle(self) -> int:
27 from poetry.utils.shell import Shell
28
29 # Check if it's already activated or doesn't exist and won't be created
30 venv_activated = strtobool(environ.get("POETRY_ACTIVE", "0")) or getattr(
31 sys, "real_prefix", sys.prefix
32 ) == str(self.env.path)
33 if venv_activated:
34 self.line(
35 f"Virtual environment already activated: <info>{self.env.path}</>"
36 )
37
38 return 0
39
40 self.line(f"Spawning shell within <info>{self.env.path}</>")
41
42 # Be sure that we have the right type of environment.
43 env = self.env
44 assert env.is_venv()
45 env = cast("VirtualEnv", env)
46
47 # Setting this to avoid spawning unnecessary nested shells
48 environ["POETRY_ACTIVE"] = "1"
49 shell = Shell.get()
50 shell.activate(env)
51 environ.pop("POETRY_ACTIVE")
52
53 return 0
54
[end of src/poetry/console/commands/shell.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/poetry/console/commands/shell.py b/src/poetry/console/commands/shell.py
--- a/src/poetry/console/commands/shell.py
+++ b/src/poetry/console/commands/shell.py
@@ -2,7 +2,6 @@
import sys
-from distutils.util import strtobool
from os import environ
from typing import TYPE_CHECKING
from typing import cast
@@ -27,10 +26,7 @@
from poetry.utils.shell import Shell
# Check if it's already activated or doesn't exist and won't be created
- venv_activated = strtobool(environ.get("POETRY_ACTIVE", "0")) or getattr(
- sys, "real_prefix", sys.prefix
- ) == str(self.env.path)
- if venv_activated:
+ if self._is_venv_activated():
self.line(
f"Virtual environment already activated: <info>{self.env.path}</>"
)
@@ -51,3 +47,8 @@
environ.pop("POETRY_ACTIVE")
return 0
+
+ def _is_venv_activated(self) -> bool:
+ return bool(environ.get("POETRY_ACTIVE")) or getattr(
+ sys, "real_prefix", sys.prefix
+ ) == str(self.env.path)
| {"golden_diff": "diff --git a/src/poetry/console/commands/shell.py b/src/poetry/console/commands/shell.py\n--- a/src/poetry/console/commands/shell.py\n+++ b/src/poetry/console/commands/shell.py\n@@ -2,7 +2,6 @@\n \n import sys\n \n-from distutils.util import strtobool\n from os import environ\n from typing import TYPE_CHECKING\n from typing import cast\n@@ -27,10 +26,7 @@\n from poetry.utils.shell import Shell\n \n # Check if it's already activated or doesn't exist and won't be created\n- venv_activated = strtobool(environ.get(\"POETRY_ACTIVE\", \"0\")) or getattr(\n- sys, \"real_prefix\", sys.prefix\n- ) == str(self.env.path)\n- if venv_activated:\n+ if self._is_venv_activated():\n self.line(\n f\"Virtual environment already activated: <info>{self.env.path}</>\"\n )\n@@ -51,3 +47,8 @@\n environ.pop(\"POETRY_ACTIVE\")\n \n return 0\n+\n+ def _is_venv_activated(self) -> bool:\n+ return bool(environ.get(\"POETRY_ACTIVE\")) or getattr(\n+ sys, \"real_prefix\", sys.prefix\n+ ) == str(self.env.path)\n", "issue": "Include strtobool in utils.extras to speed up the shell command\n# Pull Request Check List\r\n\r\n<!-- This is just a reminder about the most common mistakes. Please make sure that you tick all *appropriate* boxes. But please read our [contribution guide](https://python-poetry.org/docs/contributing/) at least once, it will save you unnecessary review cycles! -->\r\n\r\n- [x] Added **tests** for changed code.\r\n- [ ] Updated **documentation** for changed code. (The change doesn't reflect in the current doc)\r\n\r\n<!-- If you have *any* questions to *any* of the points above, just **submit and ask**! This checklist is here to *help* you, not to deter you from contributing! -->\r\n\r\n# What does this PR do?\r\n\r\nThis line `from distutils.util import strtobool` in `console.commands.shell` is slow and makes poetry shell command slow, since importing `distutils.util` also does a bunch of other things. 
\r\n\r\n`strtobool` is a very simple function (~10 lines), if poetry includes this function itself, for example, putting it in utils.extras.py and use from poetry.utils.extras import strtobool, poetry shell would run faster.\r\n\r\n[Discord discussion link](https://discord.com/channels/487711540787675139/974839878669987840/988024065933594704)\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\n\nfrom distutils.util import strtobool\nfrom os import environ\nfrom typing import TYPE_CHECKING\nfrom typing import cast\n\nfrom poetry.console.commands.env_command import EnvCommand\n\n\nif TYPE_CHECKING:\n from poetry.utils.env import VirtualEnv\n\n\nclass ShellCommand(EnvCommand):\n name = \"shell\"\n description = \"Spawns a shell within the virtual environment.\"\n\n help = \"\"\"The <info>shell</> command spawns a shell, according to the\n<comment>$SHELL</> environment variable, within the virtual environment.\nIf one doesn't exist yet, it will be created.\n\"\"\"\n\n def handle(self) -> int:\n from poetry.utils.shell import Shell\n\n # Check if it's already activated or doesn't exist and won't be created\n venv_activated = strtobool(environ.get(\"POETRY_ACTIVE\", \"0\")) or getattr(\n sys, \"real_prefix\", sys.prefix\n ) == str(self.env.path)\n if venv_activated:\n self.line(\n f\"Virtual environment already activated: <info>{self.env.path}</>\"\n )\n\n return 0\n\n self.line(f\"Spawning shell within <info>{self.env.path}</>\")\n\n # Be sure that we have the right type of environment.\n env = self.env\n assert env.is_venv()\n env = cast(\"VirtualEnv\", env)\n\n # Setting this to avoid spawning unnecessary nested shells\n environ[\"POETRY_ACTIVE\"] = \"1\"\n shell = Shell.get()\n shell.activate(env)\n environ.pop(\"POETRY_ACTIVE\")\n\n return 0\n", "path": "src/poetry/console/commands/shell.py"}]} | 1,334 | 298 |
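
Note how the poetry patch above sidesteps the request in the issue: instead of vendoring `strtobool`, it observes that `POETRY_ACTIVE` is only ever set to `"1"`, so a plain truthiness check on the environment variable suffices, and the `distutils` import disappears along with its import-time cost. A sketch of both options in pure stdlib Python; the second function is a bool-returning stand-in for `distutils.util.strtobool`, shown only because the issue mentions vendoring it:

```python
import os

def env_flag(name: str) -> bool:
    """The check the patch actually uses: any non-empty value counts."""
    return bool(os.environ.get(name))

_TRUTHY = {"y", "yes", "t", "true", "on", "1"}
_FALSY = {"n", "no", "f", "false", "off", "0"}

def strtobool(value: str) -> bool:
    """distutils-compatible parsing, if the strict form is still wanted."""
    value = value.lower()
    if value in _TRUTHY:
        return True
    if value in _FALSY:
        return False
    raise ValueError(f"invalid truth value {value!r}")
```
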
gh_patches_debug_59502 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using print_nan_grads in the Trainer results in an error
**Describe the bug**
When using
```
print_nan_grads=True
```
in the Trainer, I am getting the error below.
```
trainer.fit(lstm_model)
  File "/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 364, in fit
    self.run_pretrain_routine(model)
  File "/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 471, in run_pretrain_routine
    self.train()
  File "/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/train_loop_mixin.py", line 60, in train
    self.run_training_epoch()
  File "/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/train_loop_mixin.py", line 99, in run_training_epoch
    output = self.run_training_batch(batch, batch_nb)
  File "/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/train_loop_mixin.py", line 219, in run_training_batch
    self.print_nan_gradients()
  File "/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/training_tricks_mixin.py", line 16, in print_nan_gradients
    if torch.isnan(param.grad.float()).any():
AttributeError: 'NoneType' object has no attribute 'float'
```
**To Reproduce**
Steps to reproduce the behavior:
If some param object does not have **.grad**, then that object should not be checked for NaNs
</issue>
<code>
[start of pytorch_lightning/trainer/training_tricks_mixin.py]
1 import torch
2 import logging
3 from pytorch_lightning.callbacks import GradientAccumulationScheduler
4
5
6 class TrainerTrainingTricksMixin(object):
7
8 def clip_gradients(self):
9 if self.gradient_clip_val > 0:
10 model = self.get_model()
11 torch.nn.utils.clip_grad_norm_(model.parameters(), self.gradient_clip_val)
12
13 def print_nan_gradients(self):
14 model = self.get_model()
15 for param in model.parameters():
16 if torch.isnan(param.grad.float()).any():
17 logging.info(param, param.grad)
18
19 def configure_accumulated_gradients(self, accumulate_grad_batches):
20 self.accumulate_grad_batches = None
21
22 if isinstance(accumulate_grad_batches, dict):
23 self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches)
24 elif isinstance(accumulate_grad_batches, int):
25 schedule = {1: accumulate_grad_batches}
26 self.accumulation_scheduler = GradientAccumulationScheduler(schedule)
27 else:
28 raise TypeError("Gradient accumulation supports only int and dict types")
29
[end of pytorch_lightning/trainer/training_tricks_mixin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/trainer/training_tricks_mixin.py b/pytorch_lightning/trainer/training_tricks_mixin.py
--- a/pytorch_lightning/trainer/training_tricks_mixin.py
+++ b/pytorch_lightning/trainer/training_tricks_mixin.py
@@ -13,7 +13,7 @@
def print_nan_gradients(self):
model = self.get_model()
for param in model.parameters():
- if torch.isnan(param.grad.float()).any():
+ if (param.grad is not None) and torch.isnan(param.grad.float()).any():
logging.info(param, param.grad)
def configure_accumulated_gradients(self, accumulate_grad_batches):
| {"golden_diff": "diff --git a/pytorch_lightning/trainer/training_tricks_mixin.py b/pytorch_lightning/trainer/training_tricks_mixin.py\n--- a/pytorch_lightning/trainer/training_tricks_mixin.py\n+++ b/pytorch_lightning/trainer/training_tricks_mixin.py\n@@ -13,7 +13,7 @@\n def print_nan_gradients(self):\n model = self.get_model()\n for param in model.parameters():\n- if torch.isnan(param.grad.float()).any():\n+ if (param.grad is not None) and torch.isnan(param.grad.float()).any():\n logging.info(param, param.grad)\n \n def configure_accumulated_gradients(self, accumulate_grad_batches):\n", "issue": "Using print_nan_grads in the Trainer results in an error\n**Describe the bug**\r\nWhen using \r\n```\r\nprint_nan_grads=True\r\n```\r\nin the Trainer, I am getting the error below.\r\n\r\ntrainer.fit(lstm_model)\r\n File \"/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py\", line 364, in fit\r\n self.run_pretrain_routine(model)\r\n File \"/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py\", line 471, in run_pretrain_routine\r\n self.train()\r\n File \"/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/train_loop_mixin.py\", line 60, in train\r\n self.run_training_epoch()\r\n File \"/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/train_loop_mixin.py\", line 99, in run_training_epoch\r\n output = self.run_training_batch(batch, batch_nb)\r\n File \"/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/train_loop_mixin.py\", line 219, in run_training_batch\r\n self.print_nan_gradients()\r\n File \"/Users/anaconda3/envs/snorkel/lib/python3.6/site-packages/pytorch_lightning/trainer/training_tricks_mixin.py\", line 16, in print_nan_gradients\r\n if torch.isnan(param.grad.float()).any():\r\nAttributeError: 'NoneType' object has no attribute 'float'\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nIf some param object, does not have **.grad**, then that object should not be checked for nans\r\n\n", "before_files": [{"content": "import torch\nimport logging\nfrom pytorch_lightning.callbacks import GradientAccumulationScheduler\n\n\nclass TrainerTrainingTricksMixin(object):\n\n def clip_gradients(self):\n if self.gradient_clip_val > 0:\n model = self.get_model()\n torch.nn.utils.clip_grad_norm_(model.parameters(), self.gradient_clip_val)\n\n def print_nan_gradients(self):\n model = self.get_model()\n for param in model.parameters():\n if torch.isnan(param.grad.float()).any():\n logging.info(param, param.grad)\n\n def configure_accumulated_gradients(self, accumulate_grad_batches):\n self.accumulate_grad_batches = None\n\n if isinstance(accumulate_grad_batches, dict):\n self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches)\n elif isinstance(accumulate_grad_batches, int):\n schedule = {1: accumulate_grad_batches}\n self.accumulation_scheduler = GradientAccumulationScheduler(schedule)\n else:\n raise TypeError(\"Gradient accumulation supports only int and dict types\")\n", "path": "pytorch_lightning/trainer/training_tricks_mixin.py"}]} | 1,205 | 149 |
gh_patches_debug_33388 | rasdani/github-patches | git_diff | goauthentik__authentik-5153 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Celery CPU usage 100% on new install
**Describe the bug**
I'm using the attached docker-compose and env file (don't worry about the secrets in the env file).
[env.txt](https://github.com/goauthentik/authentik/files/10758594/env.txt)
[docker-compose.yml.txt](https://github.com/goauthentik/authentik/files/10758616/docker-compose.yml.txt)
**To Reproduce**
Run `docker-compose up` with the default configuration. Watch as celery uses 100% of a single CPU core. I've followed the setup guide from scratch twice to make sure I was getting as close as possible to a default install.
**Logs**
<details>
<summary>Stacktrace from authentik</summary>
```
Traceback (most recent call last):
django.db.utils.InterfaceError: connection already closed
```
</details>
**Version and Deployment (please complete the following information):**
- authentik version: 2023.2.2
- Deployment: docker compose
</issue>
<code>
[start of authentik/root/celery.py]
1 """authentik core celery"""
2 import os
3 from contextvars import ContextVar
4 from logging.config import dictConfig
5 from typing import Callable
6
7 from celery import Celery
8 from celery.signals import (
9 after_task_publish,
10 setup_logging,
11 task_failure,
12 task_internal_error,
13 task_postrun,
14 task_prerun,
15 worker_ready,
16 )
17 from django.conf import settings
18 from django.db import ProgrammingError
19 from structlog.contextvars import STRUCTLOG_KEY_PREFIX
20 from structlog.stdlib import get_logger
21
22 from authentik.lib.sentry import before_send
23 from authentik.lib.utils.errors import exception_to_string
24
25 # set the default Django settings module for the 'celery' program.
26 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "authentik.root.settings")
27
28 LOGGER = get_logger()
29 CELERY_APP = Celery("authentik")
30 CTX_TASK_ID = ContextVar(STRUCTLOG_KEY_PREFIX + "task_id", default=Ellipsis)
31
32
33 @setup_logging.connect
34 def config_loggers(*args, **kwargs):
35 """Apply logging settings from settings.py to celery"""
36 dictConfig(settings.LOGGING)
37
38
39 @after_task_publish.connect
40 def after_task_publish_hook(sender=None, headers=None, body=None, **kwargs):
41 """Log task_id after it was published"""
42 info = headers if "task" in headers else body
43 LOGGER.info("Task published", task_id=info.get("id", ""), task_name=info.get("task", ""))
44
45
46 @task_prerun.connect
47 def task_prerun_hook(task_id: str, task, *args, **kwargs):
48 """Log task_id on worker"""
49 request_id = "task-" + task_id.replace("-", "")
50 CTX_TASK_ID.set(request_id)
51 LOGGER.info("Task started", task_id=task_id, task_name=task.__name__)
52
53
54 @task_postrun.connect
55 def task_postrun_hook(task_id, task, *args, retval=None, state=None, **kwargs):
56 """Log task_id on worker"""
57 CTX_TASK_ID.set(...)
58 LOGGER.info("Task finished", task_id=task_id, task_name=task.__name__, state=state)
59
60
61 @task_failure.connect
62 @task_internal_error.connect
63 def task_error_hook(task_id, exception: Exception, traceback, *args, **kwargs):
64 """Create system event for failed task"""
65 from authentik.events.models import Event, EventAction
66
67 LOGGER.warning("Task failure", exc=exception)
68 CTX_TASK_ID.set(...)
69 if before_send({}, {"exc_info": (None, exception, None)}) is not None:
70 Event.new(EventAction.SYSTEM_EXCEPTION, message=exception_to_string(exception)).save()
71
72
73 def _get_startup_tasks() -> list[Callable]:
74 """Get all tasks to be run on startup"""
75 from authentik.admin.tasks import clear_update_notifications
76 from authentik.outposts.tasks import outpost_connection_discovery, outpost_controller_all
77 from authentik.providers.proxy.tasks import proxy_set_defaults
78
79 return [
80 clear_update_notifications,
81 outpost_connection_discovery,
82 outpost_controller_all,
83 proxy_set_defaults,
84 ]
85
86
87 @worker_ready.connect
88 def worker_ready_hook(*args, **kwargs):
89 """Run certain tasks on worker start"""
90
91 LOGGER.info("Dispatching startup tasks...")
92 for task in _get_startup_tasks():
93 try:
94 task.delay()
95 except ProgrammingError as exc:
96 LOGGER.warning("Startup task failed", task=task, exc=exc)
97 from authentik.blueprints.v1.tasks import start_blueprint_watcher
98
99 start_blueprint_watcher()
100
101
102 # Using a string here means the worker doesn't have to serialize
103 # the configuration object to child processes.
104 # - namespace='CELERY' means all celery-related configuration keys
105 # should have a `CELERY_` prefix.
106 CELERY_APP.config_from_object(settings, namespace="CELERY")
107
108 # Load task modules from all registered Django app configs.
109 CELERY_APP.autodiscover_tasks()
110
[end of authentik/root/celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/root/celery.py b/authentik/root/celery.py
--- a/authentik/root/celery.py
+++ b/authentik/root/celery.py
@@ -2,9 +2,12 @@
import os
from contextvars import ContextVar
from logging.config import dictConfig
+from pathlib import Path
+from tempfile import gettempdir
from typing import Callable
-from celery import Celery
+from celery import Celery, bootsteps
+from celery.apps.worker import Worker
from celery.signals import (
after_task_publish,
setup_logging,
@@ -28,6 +31,7 @@
LOGGER = get_logger()
CELERY_APP = Celery("authentik")
CTX_TASK_ID = ContextVar(STRUCTLOG_KEY_PREFIX + "task_id", default=Ellipsis)
+HEARTBEAT_FILE = Path(gettempdir() + "/authentik-worker")
@setup_logging.connect
@@ -99,6 +103,33 @@
start_blueprint_watcher()
+class LivenessProbe(bootsteps.StartStopStep):
+ """Add a timed task to touch a temporary file for healthchecking reasons"""
+
+ requires = {"celery.worker.components:Timer"}
+
+ def __init__(self, parent, **kwargs):
+ super().__init__(parent, **kwargs)
+ self.requests = []
+ self.tref = None
+
+ def start(self, parent: Worker):
+ self.tref = parent.timer.call_repeatedly(
+ 10.0,
+ self.update_heartbeat_file,
+ (parent,),
+ priority=10,
+ )
+ self.update_heartbeat_file(parent)
+
+ def stop(self, parent: Worker):
+ HEARTBEAT_FILE.unlink(missing_ok=True)
+
+ def update_heartbeat_file(self, worker: Worker):
+ """Touch heartbeat file"""
+ HEARTBEAT_FILE.touch()
+
+
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
@@ -107,3 +138,4 @@
# Load task modules from all registered Django app configs.
CELERY_APP.autodiscover_tasks()
+CELERY_APP.steps["worker"].add(LivenessProbe)
| {"golden_diff": "diff --git a/authentik/root/celery.py b/authentik/root/celery.py\n--- a/authentik/root/celery.py\n+++ b/authentik/root/celery.py\n@@ -2,9 +2,12 @@\n import os\n from contextvars import ContextVar\n from logging.config import dictConfig\n+from pathlib import Path\n+from tempfile import gettempdir\n from typing import Callable\n \n-from celery import Celery\n+from celery import Celery, bootsteps\n+from celery.apps.worker import Worker\n from celery.signals import (\n after_task_publish,\n setup_logging,\n@@ -28,6 +31,7 @@\n LOGGER = get_logger()\n CELERY_APP = Celery(\"authentik\")\n CTX_TASK_ID = ContextVar(STRUCTLOG_KEY_PREFIX + \"task_id\", default=Ellipsis)\n+HEARTBEAT_FILE = Path(gettempdir() + \"/authentik-worker\")\n \n \n @setup_logging.connect\n@@ -99,6 +103,33 @@\n start_blueprint_watcher()\n \n \n+class LivenessProbe(bootsteps.StartStopStep):\n+ \"\"\"Add a timed task to touch a temporary file for healthchecking reasons\"\"\"\n+\n+ requires = {\"celery.worker.components:Timer\"}\n+\n+ def __init__(self, parent, **kwargs):\n+ super().__init__(parent, **kwargs)\n+ self.requests = []\n+ self.tref = None\n+\n+ def start(self, parent: Worker):\n+ self.tref = parent.timer.call_repeatedly(\n+ 10.0,\n+ self.update_heartbeat_file,\n+ (parent,),\n+ priority=10,\n+ )\n+ self.update_heartbeat_file(parent)\n+\n+ def stop(self, parent: Worker):\n+ HEARTBEAT_FILE.unlink(missing_ok=True)\n+\n+ def update_heartbeat_file(self, worker: Worker):\n+ \"\"\"Touch heartbeat file\"\"\"\n+ HEARTBEAT_FILE.touch()\n+\n+\n # Using a string here means the worker doesn't have to serialize\n # the configuration object to child processes.\n # - namespace='CELERY' means all celery-related configuration keys\n@@ -107,3 +138,4 @@\n \n # Load task modules from all registered Django app configs.\n CELERY_APP.autodiscover_tasks()\n+CELERY_APP.steps[\"worker\"].add(LivenessProbe)\n", "issue": "Celery CPU usage 100% on new install\n**Describe the bug**\r\n\r\nI'm using the attached docker-compose and env file (don't worry about the secrets in the env file).\r\n\r\n[env.txt](https://github.com/goauthentik/authentik/files/10758594/env.txt)\r\n[docker-compose.yml.txt](https://github.com/goauthentik/authentik/files/10758616/docker-compose.yml.txt)\r\n\r\n\r\n**To Reproduce**\r\n\r\nRun `docker-compose up` with the default configuration. Watch as celery uses 100% of a single CPU core. 
I've followed the setup guide from scratch twice to make sure I was getting as close as possible to a default install.\r\n\r\n**Logs**\r\n<details>\r\n <summary>Stacktrace from authentik</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\ndjango.db.utils.InterfaceError: connection already closed\r\n```\r\n</details>\r\n\r\n\r\n**Version and Deployment (please complete the following information):**\r\n- authentik version: 2023.2.2\r\n- Deployment: docker compose\r\n\r\n\r\n \n", "before_files": [{"content": "\"\"\"authentik core celery\"\"\"\nimport os\nfrom contextvars import ContextVar\nfrom logging.config import dictConfig\nfrom typing import Callable\n\nfrom celery import Celery\nfrom celery.signals import (\n after_task_publish,\n setup_logging,\n task_failure,\n task_internal_error,\n task_postrun,\n task_prerun,\n worker_ready,\n)\nfrom django.conf import settings\nfrom django.db import ProgrammingError\nfrom structlog.contextvars import STRUCTLOG_KEY_PREFIX\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.sentry import before_send\nfrom authentik.lib.utils.errors import exception_to_string\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"authentik.root.settings\")\n\nLOGGER = get_logger()\nCELERY_APP = Celery(\"authentik\")\nCTX_TASK_ID = ContextVar(STRUCTLOG_KEY_PREFIX + \"task_id\", default=Ellipsis)\n\n\n@setup_logging.connect\ndef config_loggers(*args, **kwargs):\n \"\"\"Apply logging settings from settings.py to celery\"\"\"\n dictConfig(settings.LOGGING)\n\n\n@after_task_publish.connect\ndef after_task_publish_hook(sender=None, headers=None, body=None, **kwargs):\n \"\"\"Log task_id after it was published\"\"\"\n info = headers if \"task\" in headers else body\n LOGGER.info(\"Task published\", task_id=info.get(\"id\", \"\"), task_name=info.get(\"task\", \"\"))\n\n\n@task_prerun.connect\ndef task_prerun_hook(task_id: str, task, *args, **kwargs):\n \"\"\"Log task_id on worker\"\"\"\n request_id = \"task-\" + task_id.replace(\"-\", \"\")\n CTX_TASK_ID.set(request_id)\n LOGGER.info(\"Task started\", task_id=task_id, task_name=task.__name__)\n\n\n@task_postrun.connect\ndef task_postrun_hook(task_id, task, *args, retval=None, state=None, **kwargs):\n \"\"\"Log task_id on worker\"\"\"\n CTX_TASK_ID.set(...)\n LOGGER.info(\"Task finished\", task_id=task_id, task_name=task.__name__, state=state)\n\n\n@task_failure.connect\n@task_internal_error.connect\ndef task_error_hook(task_id, exception: Exception, traceback, *args, **kwargs):\n \"\"\"Create system event for failed task\"\"\"\n from authentik.events.models import Event, EventAction\n\n LOGGER.warning(\"Task failure\", exc=exception)\n CTX_TASK_ID.set(...)\n if before_send({}, {\"exc_info\": (None, exception, None)}) is not None:\n Event.new(EventAction.SYSTEM_EXCEPTION, message=exception_to_string(exception)).save()\n\n\ndef _get_startup_tasks() -> list[Callable]:\n \"\"\"Get all tasks to be run on startup\"\"\"\n from authentik.admin.tasks import clear_update_notifications\n from authentik.outposts.tasks import outpost_connection_discovery, outpost_controller_all\n from authentik.providers.proxy.tasks import proxy_set_defaults\n\n return [\n clear_update_notifications,\n outpost_connection_discovery,\n outpost_controller_all,\n proxy_set_defaults,\n ]\n\n\n@worker_ready.connect\ndef worker_ready_hook(*args, **kwargs):\n \"\"\"Run certain tasks on worker start\"\"\"\n\n LOGGER.info(\"Dispatching startup tasks...\")\n for task in 
_get_startup_tasks():\n try:\n task.delay()\n except ProgrammingError as exc:\n LOGGER.warning(\"Startup task failed\", task=task, exc=exc)\n from authentik.blueprints.v1.tasks import start_blueprint_watcher\n\n start_blueprint_watcher()\n\n\n# Using a string here means the worker doesn't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\nCELERY_APP.config_from_object(settings, namespace=\"CELERY\")\n\n# Load task modules from all registered Django app configs.\nCELERY_APP.autodiscover_tasks()\n", "path": "authentik/root/celery.py"}]} | 1,848 | 511 |
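
The authentik diff above wires a `LivenessProbe` bootstep into the Celery worker that touches a heartbeat file every 10 seconds via `parent.timer.call_repeatedly`, giving container healthchecks something cheap to poll instead of an expensive `celery inspect ping`. The diff only shows the producer side; a plausible consumer-side healthcheck script (stdlib only; the path mirrors the patch, while the 30-second threshold is an assumption) could look like:

```python
import sys
import time
from pathlib import Path
from tempfile import gettempdir

HEARTBEAT_FILE = Path(gettempdir()) / "authentik-worker"
MAX_AGE_SECONDS = 30  # the worker touches the file roughly every 10s

def worker_alive() -> bool:
    try:
        return time.time() - HEARTBEAT_FILE.stat().st_mtime < MAX_AGE_SECONDS
    except FileNotFoundError:
        return False  # worker not started, or the probe stopped and unlinked it

if __name__ == "__main__":
    sys.exit(0 if worker_alive() else 1)
```

A script like this can back a Docker `HEALTHCHECK` without opening a broker connection on every probe.
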
gh_patches_debug_17928 | rasdani/github-patches | git_diff | openvinotoolkit__datumaro-1109 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error installing via pip
Greetings.
Recently I tried to install datumaro via pip on Windows 10.
I tried it on two devices, and in both cases an error occurred during installation due to encoding issues.
```
...
  File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\encodings\cp1251.py", line 23, in decode
    return codecs.charmap_decode(input,self.errors,decoding_table)[0]
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
UnicodeDecodeError: 'charmap' codec can't decode byte 0x98 in position 3870: character maps to <undefined>
[end of output]
```
I tried to change encoding settings in PyCharm but it brought no results. I also tried to install different versions of datumaro (default, headless, versions), updated pip, and used pip3. The Python version is 3.11; however, I doubt it is involved.
Obviously it is a Windows encoding issue, but I failed to find any related issues concerning the problem. Has someone seen the same error?
</issue>
<code>
[start of setup.py]
1 # Copyright (C) 2019-2022 Intel Corporation
2 #
3 # SPDX-License-Identifier: MIT
4
5 # ruff: noqa: E501
6
7 import os
8 import os.path as osp
9 import re
10 from distutils.util import strtobool
11
12 import setuptools
13 from pybind11.setup_helpers import Pybind11Extension, build_ext
14
15
16 def find_version(project_dir=None):
17 if not project_dir:
18 project_dir = osp.dirname(osp.abspath(__file__))
19
20 file_path = osp.join(project_dir, "datumaro", "version.py")
21
22 with open(file_path, "r") as version_file:
23 version_text = version_file.read()
24
25 # PEP440:
26 # https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions
27 pep_regex = r"([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?"
28 version_regex = r"__version__\s*=\s*.(" + pep_regex + ")."
29 match = re.match(version_regex, version_text)
30 if not match:
31 raise RuntimeError("Failed to find version string in '%s'" % file_path)
32
33 version = version_text[match.start(1) : match.end(1)]
34 return version
35
36
37 CORE_REQUIREMENTS_FILE = "requirements-core.txt"
38 DEFAULT_REQUIREMENTS_FILE = "requirements-default.txt"
39
40
41 def parse_requirements(filename=CORE_REQUIREMENTS_FILE):
42 with open(filename) as fh:
43 return fh.readlines()
44
45
46 CORE_REQUIREMENTS = parse_requirements(CORE_REQUIREMENTS_FILE)
47 if strtobool(os.getenv("DATUMARO_HEADLESS", "0").lower()):
48 CORE_REQUIREMENTS.append("opencv-python-headless")
49 else:
50 CORE_REQUIREMENTS.append("opencv-python")
51
52 DEFAULT_REQUIREMENTS = parse_requirements(DEFAULT_REQUIREMENTS_FILE)
53
54 with open("README.md", "r") as fh:
55 long_description = fh.read()
56
57 ext_modules = [
58 Pybind11Extension(
59 "datumaro._capi",
60 ["src/datumaro/capi/pybind.cpp"],
61 define_macros=[("VERSION_INFO", find_version("./src"))],
62 extra_compile_args=["-O3"],
63 ),
64 ]
65
66 setuptools.setup(
67 name="datumaro",
68 version=find_version("./src"),
69 author="Intel",
70 author_email="[email protected]",
71 description="Dataset Management Framework (Datumaro)",
72 long_description=long_description,
73 long_description_content_type="text/markdown",
74 url="https://github.com/openvinotoolkit/datumaro",
75 package_dir={"": "src"},
76 packages=setuptools.find_packages(where="src", include=["datumaro*"]),
77 classifiers=[
78 "Programming Language :: Python :: 3",
79 "License :: OSI Approved :: MIT License",
80 "Operating System :: OS Independent",
81 ],
82 python_requires=">=3.8",
83 install_requires=CORE_REQUIREMENTS,
84 extras_require={
85 "tf": ["tensorflow"],
86 "tfds": ["tensorflow-datasets"],
87 "tf-gpu": ["tensorflow-gpu"],
88 "default": DEFAULT_REQUIREMENTS,
89 },
90 ext_modules=ext_modules,
91 entry_points={
92 "console_scripts": [
93 "datum=datumaro.cli.__main__:main",
94 ],
95 },
96 cmdclass={"build_ext": build_ext},
97 package_data={
98 "datumaro.plugins.synthetic_data": ["background_colors.txt"],
99 "datumaro.plugins.openvino_plugin.samples": ["coco.class", "imagenet.class"],
100 },
101 include_package_data=True,
102 )
103
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@
file_path = osp.join(project_dir, "datumaro", "version.py")
- with open(file_path, "r") as version_file:
+ with open(file_path, "r", encoding="utf-8") as version_file:
version_text = version_file.read()
# PEP440:
@@ -39,7 +39,7 @@
def parse_requirements(filename=CORE_REQUIREMENTS_FILE):
- with open(filename) as fh:
+ with open(filename, "r", encoding="utf-8") as fh:
return fh.readlines()
@@ -51,7 +51,7 @@
DEFAULT_REQUIREMENTS = parse_requirements(DEFAULT_REQUIREMENTS_FILE)
-with open("README.md", "r") as fh:
+with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
ext_modules = [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n \n file_path = osp.join(project_dir, \"datumaro\", \"version.py\")\n \n- with open(file_path, \"r\") as version_file:\n+ with open(file_path, \"r\", encoding=\"utf-8\") as version_file:\n version_text = version_file.read()\n \n # PEP440:\n@@ -39,7 +39,7 @@\n \n \n def parse_requirements(filename=CORE_REQUIREMENTS_FILE):\n- with open(filename) as fh:\n+ with open(filename, \"r\", encoding=\"utf-8\") as fh:\n return fh.readlines()\n \n \n@@ -51,7 +51,7 @@\n \n DEFAULT_REQUIREMENTS = parse_requirements(DEFAULT_REQUIREMENTS_FILE)\n \n-with open(\"README.md\", \"r\") as fh:\n+with open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n \n ext_modules = [\n", "issue": "Error installing via pip\nGreetings.\r\nrecently I tried to install datumaro via pip on Windows 10. \r\nI tried to do it on two devices and in both cases an error occured during installation due to encoding issues.\r\n\r\n` ...File \"C:\\Users\\User\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\encodings\\cp1251.py\", line 23, in decode\r\n return codecs.charmap_decode(input,self.errors,decoding_table)[0]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n UnicodeDecodeError: 'charmap' codec can't decode byte 0x98 in position 3870: character maps to <undefined>\r\n [end of output]`\r\n\r\nI tried to change encoding settings in PyCharm but it brought no results. I also tried to install different versions of datumaro (default, headless, versions), updated pip and used pip3. Python version is 3.11 hovewer I doubt it is involved.\r\nObviously it is Windows encoding issue, but I failed to find any related issues concerning the problem. Had someone seen the same error? 
\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (C) 2019-2022 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\n# ruff: noqa: E501\n\nimport os\nimport os.path as osp\nimport re\nfrom distutils.util import strtobool\n\nimport setuptools\nfrom pybind11.setup_helpers import Pybind11Extension, build_ext\n\n\ndef find_version(project_dir=None):\n if not project_dir:\n project_dir = osp.dirname(osp.abspath(__file__))\n\n file_path = osp.join(project_dir, \"datumaro\", \"version.py\")\n\n with open(file_path, \"r\") as version_file:\n version_text = version_file.read()\n\n # PEP440:\n # https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions\n pep_regex = r\"([1-9]\\d*!)?(0|[1-9]\\d*)(\\.(0|[1-9]\\d*))*((a|b|rc)(0|[1-9]\\d*))?(\\.post(0|[1-9]\\d*))?(\\.dev(0|[1-9]\\d*))?\"\n version_regex = r\"__version__\\s*=\\s*.(\" + pep_regex + \").\"\n match = re.match(version_regex, version_text)\n if not match:\n raise RuntimeError(\"Failed to find version string in '%s'\" % file_path)\n\n version = version_text[match.start(1) : match.end(1)]\n return version\n\n\nCORE_REQUIREMENTS_FILE = \"requirements-core.txt\"\nDEFAULT_REQUIREMENTS_FILE = \"requirements-default.txt\"\n\n\ndef parse_requirements(filename=CORE_REQUIREMENTS_FILE):\n with open(filename) as fh:\n return fh.readlines()\n\n\nCORE_REQUIREMENTS = parse_requirements(CORE_REQUIREMENTS_FILE)\nif strtobool(os.getenv(\"DATUMARO_HEADLESS\", \"0\").lower()):\n CORE_REQUIREMENTS.append(\"opencv-python-headless\")\nelse:\n CORE_REQUIREMENTS.append(\"opencv-python\")\n\nDEFAULT_REQUIREMENTS = parse_requirements(DEFAULT_REQUIREMENTS_FILE)\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\next_modules = [\n Pybind11Extension(\n \"datumaro._capi\",\n [\"src/datumaro/capi/pybind.cpp\"],\n define_macros=[(\"VERSION_INFO\", find_version(\"./src\"))],\n extra_compile_args=[\"-O3\"],\n ),\n]\n\nsetuptools.setup(\n name=\"datumaro\",\n version=find_version(\"./src\"),\n author=\"Intel\",\n author_email=\"[email protected]\",\n description=\"Dataset Management Framework (Datumaro)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/openvinotoolkit/datumaro\",\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\", include=[\"datumaro*\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.8\",\n install_requires=CORE_REQUIREMENTS,\n extras_require={\n \"tf\": [\"tensorflow\"],\n \"tfds\": [\"tensorflow-datasets\"],\n \"tf-gpu\": [\"tensorflow-gpu\"],\n \"default\": DEFAULT_REQUIREMENTS,\n },\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"datum=datumaro.cli.__main__:main\",\n ],\n },\n cmdclass={\"build_ext\": build_ext},\n package_data={\n \"datumaro.plugins.synthetic_data\": [\"background_colors.txt\"],\n \"datumaro.plugins.openvino_plugin.samples\": [\"coco.class\", \"imagenet.class\"],\n },\n include_package_data=True,\n)\n", "path": "setup.py"}]} | 1,801 | 227 |
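
The datumaro failure above is the classic implicit-encoding trap: `open()` without an `encoding` argument falls back to `locale.getpreferredencoding(False)`, which is `cp1251` on a Russian-locale Windows machine, while the repository files are UTF-8; byte `0x98` is precisely one cp1251 cannot map, matching the traceback. The patch pins `encoding="utf-8"` on every read. A self-contained demonstration, with the sample string chosen so that its UTF-8 form contains byte `0x98`:

```python
from pathlib import Path

path = Path("sample.txt")
path.write_text("it\u2018s UTF-8", encoding="utf-8")  # U+2018 encodes as e2 80 98

# Implicit encoding would use locale.getpreferredencoding(False); under a
# cp1251 locale the commented read below raises UnicodeDecodeError on 0x98:
#     text = path.read_text()
text = path.read_text(encoding="utf-8")  # deterministic on every platform
print(text)
```
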
gh_patches_debug_25967 | rasdani/github-patches | git_diff | google__turbinia-524 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Turbinia's setup.py seems to be broken with the latest version of pip
Turbinia can't be installed with the latest version of `pip` (20.1).
```
Collecting pip
Downloading pip-20.1-py2.py3-none-any.whl (1.5 MB)
Installing collected packages: pip
Attempting uninstall: pip
Found existing installation: pip 20.0.2
Uninstalling pip-20.0.2:
Successfully uninstalled pip-20.0.2
Successfully installed pip-20.1
[snip]
Collecting turbinia==20190819.6
Downloading turbinia-20190819.6.tar.gz (88 kB)
ERROR: Command errored out with exit status 1:
command: /opt/hostedtoolcache/Python/3.6.10/x64/bin/python -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-jz1lyg2d/turbinia/setup.py'"'"'; __file__='"'"'/tmp/pip-install-jz1lyg2d/turbinia/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' egg_info --egg-base /tmp/pip-pip-egg-info-rm1k5ext
cwd: /tmp/pip-install-jz1lyg2d/turbinia/
Complete output (7 lines):
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-install-jz1lyg2d/turbinia/setup.py", line 65, in <module>
'requirements.txt', session=False)
File "/tmp/pip-install-jz1lyg2d/turbinia/setup.py", line 64, in <listcomp>
install_requires=[str(req.req) for req in parse_requirements(
AttributeError: 'ParsedRequirement' object has no attribute 'req'
```
Works fine on pip 19.1.1.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2017 Google Inc.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 """This is the setup file for the project."""
18
19 # yapf: disable
20
21 from __future__ import unicode_literals
22
23 import sys
24
25 from setuptools import find_packages
26 from setuptools import setup
27
28 try: # for pip >= 10
29 from pip._internal.req import parse_requirements
30 except ImportError: # for pip <= 9.0.3
31 from pip.req import parse_requirements
32
33
34 # make sure turbinia is in path
35 sys.path.insert(0, '.')
36
37 import turbinia # pylint: disable=wrong-import-position
38
39 turbinia_description = (
40 'Turbinia is an open-source framework for deploying, managing, and running'
41 'forensic workloads on cloud platforms. It is intended to automate running '
42 'of common forensic processing tools (i.e. Plaso, TSK, strings, etc) to '
43 'help with processing evidence in the Cloud, scaling the processing of '
44 'large amounts of evidence, and decreasing response time by parallelizing'
45 'processing where possible.')
46
47 setup(
48 name='turbinia',
49 version=turbinia.__version__,
50 description='Automation and Scaling of Digital Forensics Tools',
51 long_description=turbinia_description,
52 license='Apache License, Version 2.0',
53 url='http://turbinia.plumbing/',
54 maintainer='Turbinia development team',
55 maintainer_email='[email protected]',
56 classifiers=[
57 'Development Status :: 4 - Beta',
58 'Environment :: Console',
59 'Operating System :: OS Independent',
60 'Programming Language :: Python',
61 ],
62 packages=find_packages(),
63 include_package_data=True,
64 zip_safe=False,
65 entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},
66 install_requires=[str(req.req) for req in parse_requirements(
67 'requirements.txt', session=False)
68 ],
69 extras_require={
70 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],
71 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],
72 'worker': ['plaso>=20171118', 'pyhindsight>=2.2.0']
73 }
74 )
75
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -25,11 +25,6 @@
from setuptools import find_packages
from setuptools import setup
-try: # for pip >= 10
- from pip._internal.req import parse_requirements
-except ImportError: # for pip <= 9.0.3
- from pip.req import parse_requirements
-
# make sure turbinia is in path
sys.path.insert(0, '.')
@@ -44,6 +39,9 @@
'large amounts of evidence, and decreasing response time by parallelizing'
'processing where possible.')
+requirements = []
+with open('requirements.txt','r') as f:
+ requirements = f.read().splitlines()
setup(
name='turbinia',
version=turbinia.__version__,
@@ -63,9 +61,7 @@
include_package_data=True,
zip_safe=False,
entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},
- install_requires=[str(req.req) for req in parse_requirements(
- 'requirements.txt', session=False)
- ],
+ install_requires=requirements,
extras_require={
'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],
'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],
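A short aside on the root cause, since the diff above removes pip internals entirely: pip 20.1 replaced the old parsed-requirement objects with `ParsedRequirement`, which exposes the text as `.requirement` rather than `.req` (attribute name per the traceback and pip's changelog; not re-verified against every 20.x release). Reading the file directly, as the patch does, avoids that moving target. A slightly more defensive sketch that also drops blank lines and comments, which a bare `splitlines()` keeps:

```python
def read_requirements(path="requirements.txt"):
    """Best-effort requirements parsing without importing pip internals."""
    with open(path, "r", encoding="utf-8") as fh:
        lines = (line.strip() for line in fh)
        # Real requirement files may also carry options (-r, -e, --hash)
        # that need richer handling than this sketch provides.
        return [ln for ln in lines if ln and not ln.startswith("#")]
```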
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,11 +25,6 @@\n from setuptools import find_packages\n from setuptools import setup\n \n-try: # for pip >= 10\n- from pip._internal.req import parse_requirements\n-except ImportError: # for pip <= 9.0.3\n- from pip.req import parse_requirements\n-\n \n # make sure turbinia is in path\n sys.path.insert(0, '.')\n@@ -44,6 +39,9 @@\n 'large amounts of evidence, and decreasing response time by parallelizing'\n 'processing where possible.')\n \n+requirements = []\n+with open('requirements.txt','r') as f:\n+ requirements = f.read().splitlines()\n setup(\n name='turbinia',\n version=turbinia.__version__,\n@@ -63,9 +61,7 @@\n include_package_data=True,\n zip_safe=False,\n entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},\n- install_requires=[str(req.req) for req in parse_requirements(\n- 'requirements.txt', session=False)\n- ],\n+ install_requires=requirements,\n extras_require={\n 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],\n 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],\n", "issue": "Turbinia's setup.py seems to be broken with the latest version of pip\nTurbinia can't be isntalled with the latest version of `pip`. (20.1)\r\n\r\n```\r\nCollecting pip\r\n Downloading pip-20.1-py2.py3-none-any.whl (1.5 MB)\r\nInstalling collected packages: pip\r\n Attempting uninstall: pip\r\n Found existing installation: pip 20.0.2\r\n Uninstalling pip-20.0.2:\r\n Successfully uninstalled pip-20.0.2\r\nSuccessfully installed pip-20.1\r\n[snip]\r\nCollecting turbinia==20190819.6\r\n Downloading turbinia-20190819.6.tar.gz (88 kB)\r\n ERROR: Command errored out with exit status 1:\r\n command: /opt/hostedtoolcache/Python/3.6.10/x64/bin/python -c 'import sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-jz1lyg2d/turbinia/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-jz1lyg2d/turbinia/setup.py'\"'\"';f=getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__);code=f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' egg_info --egg-base /tmp/pip-pip-egg-info-rm1k5ext\r\n cwd: /tmp/pip-install-jz1lyg2d/turbinia/\r\n Complete output (7 lines):\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/tmp/pip-install-jz1lyg2d/turbinia/setup.py\", line 65, in <module>\r\n 'requirements.txt', session=False)\r\n File \"/tmp/pip-install-jz1lyg2d/turbinia/setup.py\", line 64, in <listcomp>\r\n install_requires=[str(req.req) for req in parse_requirements(\r\n AttributeError: 'ParsedRequirement' object has no attribute 'req'\r\n```\r\n\r\nWorks fine on pip 19.1.1.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is the setup file for the project.\"\"\"\n\n# yapf: disable\n\nfrom __future__ import unicode_literals\n\nimport sys\n\nfrom setuptools import 
find_packages\nfrom setuptools import setup\n\ntry: # for pip >= 10\n from pip._internal.req import parse_requirements\nexcept ImportError: # for pip <= 9.0.3\n from pip.req import parse_requirements\n\n\n# make sure turbinia is in path\nsys.path.insert(0, '.')\n\nimport turbinia # pylint: disable=wrong-import-position\n\nturbinia_description = (\n 'Turbinia is an open-source framework for deploying, managing, and running'\n 'forensic workloads on cloud platforms. It is intended to automate running '\n 'of common forensic processing tools (i.e. Plaso, TSK, strings, etc) to '\n 'help with processing evidence in the Cloud, scaling the processing of '\n 'large amounts of evidence, and decreasing response time by parallelizing'\n 'processing where possible.')\n\nsetup(\n name='turbinia',\n version=turbinia.__version__,\n description='Automation and Scaling of Digital Forensics Tools',\n long_description=turbinia_description,\n license='Apache License, Version 2.0',\n url='http://turbinia.plumbing/',\n maintainer='Turbinia development team',\n maintainer_email='[email protected]',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},\n install_requires=[str(req.req) for req in parse_requirements(\n 'requirements.txt', session=False)\n ],\n extras_require={\n 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],\n 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],\n 'worker': ['plaso>=20171118', 'pyhindsight>=2.2.0']\n }\n)\n", "path": "setup.py"}]} | 1,834 | 332 |
gh_patches_debug_64426 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1619 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Disable search-memory-packet back only on broken GDB version
Tl;dr: Use the workaround from https://github.com/pwndbg/pwndbg/pull/322/files only for broken gdb versions
</issue>
<code>
[start of pwndbg/__init__.py]
1 import signal
2
3 import gdb
4
5 import pwndbg.color
6 import pwndbg.commands
7 import pwndbg.gdblib
8 from pwndbg.commands import load_commands
9 from pwndbg.gdblib import load_gdblib
10
11 load_commands()
12 load_gdblib()
13
14 # TODO: Convert these to gdblib modules and remove this
15 try:
16 import pwndbg.disasm
17 import pwndbg.disasm.arm
18 import pwndbg.disasm.jump
19 import pwndbg.disasm.mips
20 import pwndbg.disasm.ppc
21 import pwndbg.disasm.sparc
22 import pwndbg.disasm.x86
23 import pwndbg.heap
24 except ModuleNotFoundError:
25 pass
26
27 import pwndbg.exception
28 import pwndbg.lib.version
29 import pwndbg.ui
30
31 __version__ = pwndbg.lib.version.__version__
32 version = __version__
33
34 from pwndbg.gdblib import prompt
35
36 prompt.set_prompt()
37
38 pre_commands = """
39 set confirm off
40 set verbose off
41 set pagination off
42 set height 0
43 set history save on
44 set follow-fork-mode child
45 set backtrace past-main on
46 set step-mode on
47 set print pretty on
48 set width %i
49 handle SIGALRM nostop print nopass
50 handle SIGBUS stop print nopass
51 handle SIGPIPE nostop print nopass
52 handle SIGSEGV stop print nopass
53 """.strip() % (
54 pwndbg.ui.get_window_size()[1]
55 )
56
57 for line in pre_commands.strip().splitlines():
58 gdb.execute(line)
59
60 # This may throw an exception, see pwndbg/pwndbg#27
61 try:
62 gdb.execute("set disassembly-flavor intel")
63 except gdb.error:
64 pass
65
66 # handle resize event to align width and completion
67 signal.signal(
68 signal.SIGWINCH,
69 lambda signum, frame: gdb.execute("set width %i" % pwndbg.ui.get_window_size()[1]),
70 )
71
72 # Reading Comment file
73 from pwndbg.commands import comments
74
75 comments.init()
76
77 from pwndbg.gdblib import config_mod
78
79 config_mod.init_params()
80
[end of pwndbg/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/__init__.py b/pwndbg/__init__.py
--- a/pwndbg/__init__.py
+++ b/pwndbg/__init__.py
@@ -54,6 +54,10 @@
pwndbg.ui.get_window_size()[1]
)
+# See https://github.com/pwndbg/pwndbg/issues/808
+if int(getattr(gdb, "VERSION", "0.0").split(".")[0]) <= 9:
+ pre_commands += "\nset remote search-memory-packet off"
+
for line in pre_commands.strip().splitlines():
gdb.execute(line)
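The version gate above assumes `gdb.VERSION` is a dotted string such as `"9.2"`. The same predicate, lifted out for standalone testing (the fallback mirrors the `getattr` default in the patch, which treats an unknown version as needing the workaround):

```python
def needs_search_memory_workaround(version="0.0"):
    """True for GDB major versions <= 9, where the workaround applies."""
    try:
        return int(version.split(".")[0]) <= 9
    except ValueError:
        return True  # unparseable version string: keep the safe behaviour

assert needs_search_memory_workaround("9.2")
assert not needs_search_memory_workaround("12.1")
```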
| {"golden_diff": "diff --git a/pwndbg/__init__.py b/pwndbg/__init__.py\n--- a/pwndbg/__init__.py\n+++ b/pwndbg/__init__.py\n@@ -54,6 +54,10 @@\n pwndbg.ui.get_window_size()[1]\n )\n \n+# See https://github.com/pwndbg/pwndbg/issues/808\n+if int(getattr(gdb, \"VERSION\", \"0.0\").split(\".\")[0]) <= 9:\n+ pre_commands += \"\\nset remote search-memory-packet off\"\n+\n for line in pre_commands.strip().splitlines():\n gdb.execute(line)\n", "issue": "Disable search-memory-packet back only on broken GDB version\nTl;dr: Use the workaround from https://github.com/pwndbg/pwndbg/pull/322/files only for broken gdb versions\nDisable search-memory-packet back only on broken GDB version\nTl;dr: Use the workaround from https://github.com/pwndbg/pwndbg/pull/322/files only for broken gdb versions\n", "before_files": [{"content": "import signal\n\nimport gdb\n\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.gdblib\nfrom pwndbg.commands import load_commands\nfrom pwndbg.gdblib import load_gdblib\n\nload_commands()\nload_gdblib()\n\n# TODO: Convert these to gdblib modules and remove this\ntry:\n import pwndbg.disasm\n import pwndbg.disasm.arm\n import pwndbg.disasm.jump\n import pwndbg.disasm.mips\n import pwndbg.disasm.ppc\n import pwndbg.disasm.sparc\n import pwndbg.disasm.x86\n import pwndbg.heap\nexcept ModuleNotFoundError:\n pass\n\nimport pwndbg.exception\nimport pwndbg.lib.version\nimport pwndbg.ui\n\n__version__ = pwndbg.lib.version.__version__\nversion = __version__\n\nfrom pwndbg.gdblib import prompt\n\nprompt.set_prompt()\n\npre_commands = \"\"\"\nset confirm off\nset verbose off\nset pagination off\nset height 0\nset history save on\nset follow-fork-mode child\nset backtrace past-main on\nset step-mode on\nset print pretty on\nset width %i\nhandle SIGALRM nostop print nopass\nhandle SIGBUS stop print nopass\nhandle SIGPIPE nostop print nopass\nhandle SIGSEGV stop print nopass\n\"\"\".strip() % (\n pwndbg.ui.get_window_size()[1]\n)\n\nfor line in pre_commands.strip().splitlines():\n gdb.execute(line)\n\n# This may throw an exception, see pwndbg/pwndbg#27\ntry:\n gdb.execute(\"set disassembly-flavor intel\")\nexcept gdb.error:\n pass\n\n# handle resize event to align width and completion\nsignal.signal(\n signal.SIGWINCH,\n lambda signum, frame: gdb.execute(\"set width %i\" % pwndbg.ui.get_window_size()[1]),\n)\n\n# Reading Comment file\nfrom pwndbg.commands import comments\n\ncomments.init()\n\nfrom pwndbg.gdblib import config_mod\n\nconfig_mod.init_params()\n", "path": "pwndbg/__init__.py"}]} | 1,242 | 141 |
gh_patches_debug_8240 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-329 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Metadata should have language / language_version fields
Standardizing metadata between the various agents - put a fixed `python` string in `language`, and the version (`3.4.1` or whatever) in `language_version`
Keep the existing fields for now. Need to ensure that the CoreAgent handles them nicely when missing.
</issue>
<code>
[start of src/scout_apm/core/metadata.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import datetime as dt
5 import logging
6 import sys
7 from os import getpid
8
9 from scout_apm.core.commands import ApplicationEvent
10 from scout_apm.core.config import scout_config
11 from scout_apm.core.socket import CoreAgentSocket
12
13 logger = logging.getLogger(__name__)
14
15
16 class AppMetadata(object):
17 @classmethod
18 def report(cls):
19 event = ApplicationEvent(
20 event_type="scout.metadata",
21 event_value=cls.data(),
22 source="Pid: " + str(getpid()),
23 timestamp=dt.datetime.utcnow(),
24 )
25 CoreAgentSocket.instance().send(event)
26
27 @classmethod
28 def data(cls):
29 try:
30 data = {
31 "language": "python",
32 "version": "{}.{}.{}".format(*sys.version_info[:3]),
33 "server_time": dt.datetime.utcnow().isoformat() + "Z",
34 "framework": scout_config.value("framework"),
35 "framework_version": scout_config.value("framework_version"),
36 "environment": "",
37 "app_server": scout_config.value("app_server"),
38 "hostname": scout_config.value("hostname"),
39 "database_engine": "", # Detected
40 "database_adapter": "", # Raw
41 "application_name": "", # Environment.application_name,
42 "libraries": cls.get_python_packages_versions(),
43 "paas": "",
44 "application_root": scout_config.value("application_root"),
45 "scm_subdirectory": scout_config.value("scm_subdirectory"),
46 "git_sha": scout_config.value("revision_sha"),
47 }
48 except Exception as e:
49 logger.debug("Exception in AppMetadata: %r", e)
50 data = {}
51
52 return data
53
54 @classmethod
55 def get_python_packages_versions(cls):
56 try:
57 import pkg_resources
58 except ImportError:
59 return []
60
61 return list(
62 sorted(
63 (distribution.project_name, distribution.version)
64 for distribution in pkg_resources.working_set
65 )
66 )
67
[end of src/scout_apm/core/metadata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/core/metadata.py b/src/scout_apm/core/metadata.py
--- a/src/scout_apm/core/metadata.py
+++ b/src/scout_apm/core/metadata.py
@@ -29,6 +29,8 @@
try:
data = {
"language": "python",
+ "language_version": "{}.{}.{}".format(*sys.version_info[:3]),
+ # Deprecated: (see #327)
"version": "{}.{}.{}".format(*sys.version_info[:3]),
"server_time": dt.datetime.utcnow().isoformat() + "Z",
"framework": scout_config.value("framework"),
| {"golden_diff": "diff --git a/src/scout_apm/core/metadata.py b/src/scout_apm/core/metadata.py\n--- a/src/scout_apm/core/metadata.py\n+++ b/src/scout_apm/core/metadata.py\n@@ -29,6 +29,8 @@\n try:\n data = {\n \"language\": \"python\",\n+ \"language_version\": \"{}.{}.{}\".format(*sys.version_info[:3]),\n+ # Deprecated: (see #327)\n \"version\": \"{}.{}.{}\".format(*sys.version_info[:3]),\n \"server_time\": dt.datetime.utcnow().isoformat() + \"Z\",\n \"framework\": scout_config.value(\"framework\"),\n", "issue": "Metadata should have language / language_version fields\nStandardizing metadata between the various agents - put a fixed `python` string in `language`, and the version (`3.4.1` or whatever) in `language_version`\r\n\r\nKeep the existing fields for now. Need to ensure that the CoreAgent handles them nicely when missing.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport logging\nimport sys\nfrom os import getpid\n\nfrom scout_apm.core.commands import ApplicationEvent\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.socket import CoreAgentSocket\n\nlogger = logging.getLogger(__name__)\n\n\nclass AppMetadata(object):\n @classmethod\n def report(cls):\n event = ApplicationEvent(\n event_type=\"scout.metadata\",\n event_value=cls.data(),\n source=\"Pid: \" + str(getpid()),\n timestamp=dt.datetime.utcnow(),\n )\n CoreAgentSocket.instance().send(event)\n\n @classmethod\n def data(cls):\n try:\n data = {\n \"language\": \"python\",\n \"version\": \"{}.{}.{}\".format(*sys.version_info[:3]),\n \"server_time\": dt.datetime.utcnow().isoformat() + \"Z\",\n \"framework\": scout_config.value(\"framework\"),\n \"framework_version\": scout_config.value(\"framework_version\"),\n \"environment\": \"\",\n \"app_server\": scout_config.value(\"app_server\"),\n \"hostname\": scout_config.value(\"hostname\"),\n \"database_engine\": \"\", # Detected\n \"database_adapter\": \"\", # Raw\n \"application_name\": \"\", # Environment.application_name,\n \"libraries\": cls.get_python_packages_versions(),\n \"paas\": \"\",\n \"application_root\": scout_config.value(\"application_root\"),\n \"scm_subdirectory\": scout_config.value(\"scm_subdirectory\"),\n \"git_sha\": scout_config.value(\"revision_sha\"),\n }\n except Exception as e:\n logger.debug(\"Exception in AppMetadata: %r\", e)\n data = {}\n\n return data\n\n @classmethod\n def get_python_packages_versions(cls):\n try:\n import pkg_resources\n except ImportError:\n return []\n\n return list(\n sorted(\n (distribution.project_name, distribution.version)\n for distribution in pkg_resources.working_set\n )\n )\n", "path": "src/scout_apm/core/metadata.py"}]} | 1,165 | 147 |
gh_patches_debug_13083 | rasdani/github-patches | git_diff | NVIDIA__NeMo-5260 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix links to speaker identification notebook
# What does this PR do ?
Fixes #5258
**Collection**: [Note which collection this PR will affect]
# Changelog
- Add specific line by line info of high level changes in this PR.
# Usage
* You can potentially add a usage example below
```python
# Add a code snippet demonstrating how to use this
```
# Before your PR is "Ready for review"
**Pre checks**:
- [ ] Make sure you read and followed [Contributor guidelines](https://github.com/NVIDIA/NeMo/blob/main/CONTRIBUTING.md)
- [ ] Did you write any new necessary tests?
- [ ] Did you add or update any necessary documentation?
- [ ] Does the PR affect components that are optional to install? (Ex: Numba, Pynini, Apex etc)
- [ ] Reviewer: Does the PR have correct import guards for all optional libraries?
**PR Type**:
- [ ] New Feature
- [ ] Bugfix
- [ ] Documentation
If you haven't finished some of the above items you can still open "Draft" PR.
## Who can review?
Anyone in the community is free to review the PR once the checks have passed.
[Contributor guidelines](https://github.com/NVIDIA/NeMo/blob/main/CONTRIBUTING.md) contains specific people who can review PRs to various areas.
# Additional Information
* Related to # (issue)
</issue>
<code>
[start of examples/speaker_tasks/recognition/speaker_reco.py]
1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 import pytorch_lightning as pl
18 import torch
19 from omegaconf import OmegaConf
20 from pytorch_lightning import seed_everything
21
22 from nemo.collections.asr.models import EncDecSpeakerLabelModel
23 from nemo.core.config import hydra_runner
24 from nemo.utils import logging
25 from nemo.utils.exp_manager import exp_manager
26
27 """
28 Basic run (on GPU for 10 epochs for 2 class training):
29 EXP_NAME=sample_run
30 python ./speaker_reco.py --config-path='conf' --config-name='SpeakerNet_recognition_3x2x512.yaml' \
31 trainer.max_epochs=10 \
32 model.train_ds.batch_size=64 model.validation_ds.batch_size=64 \
33 model.train_ds.manifest_filepath="<train_manifest>" model.validation_ds.manifest_filepath="<dev_manifest>" \
34 model.test_ds.manifest_filepath="<test_manifest>" \
35 trainer.devices=1 \
36 model.decoder.params.num_classes=2 \
37 exp_manager.name=$EXP_NAME +exp_manager.use_datetime_version=False \
38 exp_manager.exp_dir='./speaker_exps'
39
40 See https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Recognition_Verification.ipynb for notebook tutorial
41
42 Optional: Use tarred dataset to speech up data loading.
43 Prepare ONE manifest that contains all training data you would like to include. Validation should use non-tarred dataset.
44 Note that it's possible that tarred datasets impacts validation scores because it drop values in order to have same amount of files per tarfile;
45 Scores might be off since some data is missing.
46
47 Use the `convert_to_tarred_audio_dataset.py` script under <NEMO_ROOT>/speech_recognition/scripts in order to prepare tarred audio dataset.
48 For details, please see TarredAudioToClassificationLabelDataset in <NEMO_ROOT>/nemo/collections/asr/data/audio_to_label.py
49 """
50
51 seed_everything(42)
52
53
54 @hydra_runner(config_path="conf", config_name="SpeakerNet_verification_3x2x256.yaml")
55 def main(cfg):
56
57 logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
58 trainer = pl.Trainer(**cfg.trainer)
59 log_dir = exp_manager(trainer, cfg.get("exp_manager", None))
60 speaker_model = EncDecSpeakerLabelModel(cfg=cfg.model, trainer=trainer)
61 trainer.fit(speaker_model)
62 if not trainer.fast_dev_run:
63 model_path = os.path.join(log_dir, '..', 'spkr.nemo')
64 speaker_model.save_to(model_path)
65
66 torch.distributed.destroy_process_group()
67 if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
68 if trainer.is_global_zero:
69 trainer = pl.Trainer(devices=1, accelerator=cfg.trainer.accelerator, strategy=cfg.trainer.strategy)
70 if speaker_model.prepare_test(trainer):
71 trainer.test(speaker_model)
72
73
74 if __name__ == '__main__':
75 main()
76
[end of examples/speaker_tasks/recognition/speaker_reco.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/speaker_tasks/recognition/speaker_reco.py b/examples/speaker_tasks/recognition/speaker_reco.py
--- a/examples/speaker_tasks/recognition/speaker_reco.py
+++ b/examples/speaker_tasks/recognition/speaker_reco.py
@@ -37,7 +37,7 @@
exp_manager.name=$EXP_NAME +exp_manager.use_datetime_version=False \
exp_manager.exp_dir='./speaker_exps'
-See https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Recognition_Verification.ipynb for notebook tutorial
+See https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb for notebook tutorial
Optional: Use tarred dataset to speech up data loading.
Prepare ONE manifest that contains all training data you would like to include. Validation should use non-tarred dataset.
| {"golden_diff": "diff --git a/examples/speaker_tasks/recognition/speaker_reco.py b/examples/speaker_tasks/recognition/speaker_reco.py\n--- a/examples/speaker_tasks/recognition/speaker_reco.py\n+++ b/examples/speaker_tasks/recognition/speaker_reco.py\n@@ -37,7 +37,7 @@\n exp_manager.name=$EXP_NAME +exp_manager.use_datetime_version=False \\\n exp_manager.exp_dir='./speaker_exps'\n \n-See https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Recognition_Verification.ipynb for notebook tutorial\n+See https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb for notebook tutorial\n \n Optional: Use tarred dataset to speech up data loading.\n Prepare ONE manifest that contains all training data you would like to include. Validation should use non-tarred dataset.\n", "issue": "Fix links to speaker identification notebook\n# What does this PR do ?\r\n\r\nFixes #5258\r\n\r\n**Collection**: [Note which collection this PR will affect]\r\n\r\n# Changelog \r\n- Add specific line by line info of high level changes in this PR.\r\n\r\n# Usage\r\n* You can potentially add a usage example below\r\n\r\n```python\r\n# Add a code snippet demonstrating how to use this \r\n```\r\n\r\n# Before your PR is \"Ready for review\"\r\n**Pre checks**:\r\n- [ ] Make sure you read and followed [Contributor guidelines](https://github.com/NVIDIA/NeMo/blob/main/CONTRIBUTING.md)\r\n- [ ] Did you write any new necessary tests?\r\n- [ ] Did you add or update any necessary documentation?\r\n- [ ] Does the PR affect components that are optional to install? (Ex: Numba, Pynini, Apex etc)\r\n - [ ] Reviewer: Does the PR have correct import guards for all optional libraries?\r\n \r\n**PR Type**:\r\n- [ ] New Feature\r\n- [ ] Bugfix\r\n- [ ] Documentation\r\n\r\nIf you haven't finished some of the above items you can still open \"Draft\" PR.\r\n\r\n\r\n## Who can review?\r\n\r\nAnyone in the community is free to review the PR once the checks have passed. \r\n[Contributor guidelines](https://github.com/NVIDIA/NeMo/blob/main/CONTRIBUTING.md) contains specific people who can review PRs to various areas.\r\n\r\n# Additional Information\r\n* Related to # (issue)\r\n\n", "before_files": [{"content": "# Copyright (c) 2020, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport pytorch_lightning as pl\nimport torch\nfrom omegaconf import OmegaConf\nfrom pytorch_lightning import seed_everything\n\nfrom nemo.collections.asr.models import EncDecSpeakerLabelModel\nfrom nemo.core.config import hydra_runner\nfrom nemo.utils import logging\nfrom nemo.utils.exp_manager import exp_manager\n\n\"\"\"\nBasic run (on GPU for 10 epochs for 2 class training):\nEXP_NAME=sample_run\npython ./speaker_reco.py --config-path='conf' --config-name='SpeakerNet_recognition_3x2x512.yaml' \\\n trainer.max_epochs=10 \\\n model.train_ds.batch_size=64 model.validation_ds.batch_size=64 \\\n model.train_ds.manifest_filepath=\"<train_manifest>\" model.validation_ds.manifest_filepath=\"<dev_manifest>\" \\\n model.test_ds.manifest_filepath=\"<test_manifest>\" \\\n trainer.devices=1 \\\n model.decoder.params.num_classes=2 \\\n exp_manager.name=$EXP_NAME +exp_manager.use_datetime_version=False \\\n exp_manager.exp_dir='./speaker_exps'\n\nSee https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Recognition_Verification.ipynb for notebook tutorial\n\nOptional: Use tarred dataset to speech up data loading.\n Prepare ONE manifest that contains all training data you would like to include. Validation should use non-tarred dataset.\n Note that it's possible that tarred datasets impacts validation scores because it drop values in order to have same amount of files per tarfile; \n Scores might be off since some data is missing. \n \n Use the `convert_to_tarred_audio_dataset.py` script under <NEMO_ROOT>/speech_recognition/scripts in order to prepare tarred audio dataset.\n For details, please see TarredAudioToClassificationLabelDataset in <NEMO_ROOT>/nemo/collections/asr/data/audio_to_label.py\n\"\"\"\n\nseed_everything(42)\n\n\n@hydra_runner(config_path=\"conf\", config_name=\"SpeakerNet_verification_3x2x256.yaml\")\ndef main(cfg):\n\n logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')\n trainer = pl.Trainer(**cfg.trainer)\n log_dir = exp_manager(trainer, cfg.get(\"exp_manager\", None))\n speaker_model = EncDecSpeakerLabelModel(cfg=cfg.model, trainer=trainer)\n trainer.fit(speaker_model)\n if not trainer.fast_dev_run:\n model_path = os.path.join(log_dir, '..', 'spkr.nemo')\n speaker_model.save_to(model_path)\n\n torch.distributed.destroy_process_group()\n if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:\n if trainer.is_global_zero:\n trainer = pl.Trainer(devices=1, accelerator=cfg.trainer.accelerator, strategy=cfg.trainer.strategy)\n if speaker_model.prepare_test(trainer):\n trainer.test(speaker_model)\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/speaker_tasks/recognition/speaker_reco.py"}]} | 1,788 | 196 |
gh_patches_debug_31535 | rasdani/github-patches | git_diff | liqd__adhocracy4-1155 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No Validation or Error when Phases in Dashboard are set in Illogical Order
URL: https://meinberlin-dev.liqd.net/projekte/module/burgerinnenhaushalt-2-phasen/?mode=list
user: Project initiator
expected behaviour: If I have more than one phase in a module, I would expect them to only be able to be set to occur in 1st proposal, 2nd rating, and 3rd voting time slots. If I make a mistake, I hope I am alerted and cannot publish this.
behaviour: Both 3-phase and 2-phase modules can be published with phases that make no sense in their time line. Users can set up and publish modules with voting phases that occur before rating and proposal phases. There is no validation or error.
important screensize: any
device & browser: any
Comment/Question: moved from mB: https://github.com/liqd/a4-meinberlin/issues/4029
Screenshot?
2-Phase module published with reverse phases:
<img width="600" alt="Bildschirmfoto 2021-12-08 um 12 41 47" src="https://user-images.githubusercontent.com/35491681/145202707-b0d39c50-e5a1-476b-9afa-542cc9a85687.png">
</issue>
<code>
[start of adhocracy4/phases/forms.py]
1 from django.forms.models import BaseInlineFormSet
2 from django.utils.translation import gettext_lazy as _
3
4
5 class PhaseInlineFormSet(BaseInlineFormSet):
6 def clean(self):
7 """
8 Make sure phases of the same module don't overlap.
9 """
10 super().clean()
11 phase_dates = []
12 for form in self.forms:
13 if 'start_date' in form.cleaned_data \
14 and 'end_date' in form.cleaned_data \
15 and form.cleaned_data['start_date'] is not None \
16 and form.cleaned_data['end_date'] is not None:
17 start_date = form.cleaned_data['start_date']
18 end_date = form.cleaned_data['end_date']
19 if phase_dates:
20 for phase_date in phase_dates:
21 if (start_date < phase_date[1]
22 and phase_date[0] < end_date):
23 msg = _('Phases cannot run at the same time '
24 'and must follow after each other.')
25 form.add_error('end_date', msg)
26 if start_date and end_date:
27 phase_dates.append((start_date, end_date))
28
[end of adhocracy4/phases/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/adhocracy4/phases/forms.py b/adhocracy4/phases/forms.py
--- a/adhocracy4/phases/forms.py
+++ b/adhocracy4/phases/forms.py
@@ -4,9 +4,7 @@
class PhaseInlineFormSet(BaseInlineFormSet):
def clean(self):
- """
- Make sure phases of the same module don't overlap.
- """
+ """Make sure phases of the same module don't overlap."""
super().clean()
phase_dates = []
for form in self.forms:
@@ -16,6 +14,7 @@
and form.cleaned_data['end_date'] is not None:
start_date = form.cleaned_data['start_date']
end_date = form.cleaned_data['end_date']
+ weight = form.instance.weight
if phase_dates:
for phase_date in phase_dates:
if (start_date < phase_date[1]
@@ -23,5 +22,12 @@
msg = _('Phases cannot run at the same time '
'and must follow after each other.')
form.add_error('end_date', msg)
+ if ((start_date < phase_date[0]
+ and weight > phase_date[2])
+ or (start_date > phase_date[0]
+ and weight < phase_date[2])):
+ msg = _('Phases need to be in same order '
+ 'as in form.')
+ form.add_error('start_date', msg)
if start_date and end_date:
- phase_dates.append((start_date, end_date))
+ phase_dates.append((start_date, end_date, weight))
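The new validation treats the model's `weight` as the canonical phase order and flags any start date that contradicts it. The rule, reduced to plain tuples so it can be exercised without Django (integer stand-ins for dates, for brevity):

```python
def order_errors(phases):
    """phases: iterable of (start, end, weight) tuples."""
    errors, seen = [], []
    for start, end, weight in phases:
        for s, e, w in seen:
            if start < e and s < end:
                errors.append("phases overlap")
            if (start < s and weight > w) or (start > s and weight < w):
                errors.append("phase dates contradict form order")
        seen.append((start, end, weight))
    return errors

# A voting phase dated before the proposal phase is now rejected:
assert order_errors([(3, 4, 1), (1, 2, 2)]) == ["phase dates contradict form order"]
```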
| {"golden_diff": "diff --git a/adhocracy4/phases/forms.py b/adhocracy4/phases/forms.py\n--- a/adhocracy4/phases/forms.py\n+++ b/adhocracy4/phases/forms.py\n@@ -4,9 +4,7 @@\n \n class PhaseInlineFormSet(BaseInlineFormSet):\n def clean(self):\n- \"\"\"\n- Make sure phases of the same module don't overlap.\n- \"\"\"\n+ \"\"\"Make sure phases of the same module don't overlap.\"\"\"\n super().clean()\n phase_dates = []\n for form in self.forms:\n@@ -16,6 +14,7 @@\n and form.cleaned_data['end_date'] is not None:\n start_date = form.cleaned_data['start_date']\n end_date = form.cleaned_data['end_date']\n+ weight = form.instance.weight\n if phase_dates:\n for phase_date in phase_dates:\n if (start_date < phase_date[1]\n@@ -23,5 +22,12 @@\n msg = _('Phases cannot run at the same time '\n 'and must follow after each other.')\n form.add_error('end_date', msg)\n+ if ((start_date < phase_date[0]\n+ and weight > phase_date[2])\n+ or (start_date > phase_date[0]\n+ and weight < phase_date[2])):\n+ msg = _('Phases need to be in same order '\n+ 'as in form.')\n+ form.add_error('start_date', msg)\n if start_date and end_date:\n- phase_dates.append((start_date, end_date))\n+ phase_dates.append((start_date, end_date, weight))\n", "issue": "No Validation or Error when Phases in Dashboard are set in Illogical Order\nURL: https://meinberlin-dev.liqd.net/projekte/module/burgerinnenhaushalt-2-phasen/?mode=list\r\nuser: Project initiator\r\nexpected behaviour: If I have more than one phase in a module, I would expect them to only be able to be set to occur in 1st proposal, 2nd rating, and 3rd voting time slots. If I make a mistake, I hope I am alerted and cannot publish this.\r\nbehaviour: Both 3-phase and 2-phase modules can be published with phases that make no sense in their time line. Users can set up and publish modules with voting phases that occur before rating and proposal phases. There is no validation or error.\r\nimportant screensize: any\r\ndevice & browser: any\r\nComment/Question: moved from mB: https://github.com/liqd/a4-meinberlin/issues/4029\r\n\r\nScreenshot?\r\n2-Phase module published with reverse phases:\r\n<img width=\"600\" alt=\"Bildschirmfoto 2021-12-08 um 12 41 47\" src=\"https://user-images.githubusercontent.com/35491681/145202707-b0d39c50-e5a1-476b-9afa-542cc9a85687.png\">\r\n\r\n\n", "before_files": [{"content": "from django.forms.models import BaseInlineFormSet\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass PhaseInlineFormSet(BaseInlineFormSet):\n def clean(self):\n \"\"\"\n Make sure phases of the same module don't overlap.\n \"\"\"\n super().clean()\n phase_dates = []\n for form in self.forms:\n if 'start_date' in form.cleaned_data \\\n and 'end_date' in form.cleaned_data \\\n and form.cleaned_data['start_date'] is not None \\\n and form.cleaned_data['end_date'] is not None:\n start_date = form.cleaned_data['start_date']\n end_date = form.cleaned_data['end_date']\n if phase_dates:\n for phase_date in phase_dates:\n if (start_date < phase_date[1]\n and phase_date[0] < end_date):\n msg = _('Phases cannot run at the same time '\n 'and must follow after each other.')\n form.add_error('end_date', msg)\n if start_date and end_date:\n phase_dates.append((start_date, end_date))\n", "path": "adhocracy4/phases/forms.py"}]} | 1,129 | 355 |
gh_patches_debug_2292 | rasdani/github-patches | git_diff | ipython__ipython-10213 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
remove usage of backports.shutil_get_terminal_size
This is for pre-3.3 Python.
Pretty easy; it should only require deleting lines.
Maybe a few need to be dedented.
</issue>
<code>
[start of IPython/utils/terminal.py]
1 # encoding: utf-8
2 """
3 Utilities for working with terminals.
4
5 Authors:
6
7 * Brian E. Granger
8 * Fernando Perez
9 * Alexander Belchenko (e-mail: bialix AT ukr.net)
10 """
11
12 # Copyright (c) IPython Development Team.
13 # Distributed under the terms of the Modified BSD License.
14
15 import os
16 import sys
17 import warnings
18 try:
19 from shutil import get_terminal_size as _get_terminal_size
20 except ImportError:
21 # use backport on Python 2
22 from backports.shutil_get_terminal_size import get_terminal_size as _get_terminal_size
23
24 from . import py3compat
25
26 #-----------------------------------------------------------------------------
27 # Code
28 #-----------------------------------------------------------------------------
29
30 # This variable is part of the expected API of the module:
31 ignore_termtitle = True
32
33
34
35 if os.name == 'posix':
36 def _term_clear():
37 os.system('clear')
38 elif sys.platform == 'win32':
39 def _term_clear():
40 os.system('cls')
41 else:
42 def _term_clear():
43 pass
44
45
46
47 def toggle_set_term_title(val):
48 """Control whether set_term_title is active or not.
49
50 set_term_title() allows writing to the console titlebar. In embedded
51 widgets this can cause problems, so this call can be used to toggle it on
52 or off as needed.
53
54 The default state of the module is for the function to be disabled.
55
56 Parameters
57 ----------
58 val : bool
59 If True, set_term_title() actually writes to the terminal (using the
60 appropriate platform-specific module). If False, it is a no-op.
61 """
62 global ignore_termtitle
63 ignore_termtitle = not(val)
64
65
66 def _set_term_title(*args,**kw):
67 """Dummy no-op."""
68 pass
69
70
71 def _set_term_title_xterm(title):
72 """ Change virtual terminal title in xterm-workalikes """
73 sys.stdout.write('\033]0;%s\007' % title)
74
75 if os.name == 'posix':
76 TERM = os.environ.get('TERM','')
77 if TERM.startswith('xterm'):
78 _set_term_title = _set_term_title_xterm
79 elif sys.platform == 'win32':
80 try:
81 import ctypes
82
83 SetConsoleTitleW = ctypes.windll.kernel32.SetConsoleTitleW
84 SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]
85
86 def _set_term_title(title):
87 """Set terminal title using ctypes to access the Win32 APIs."""
88 SetConsoleTitleW(title)
89 except ImportError:
90 def _set_term_title(title):
91 """Set terminal title using the 'title' command."""
92 global ignore_termtitle
93
94 try:
95 # Cannot be on network share when issuing system commands
96 curr = os.getcwd()
97 os.chdir("C:")
98 ret = os.system("title " + title)
99 finally:
100 os.chdir(curr)
101 if ret:
102 # non-zero return code signals error, don't try again
103 ignore_termtitle = True
104
105
106 def set_term_title(title):
107 """Set terminal title using the necessary platform-dependent calls."""
108 if ignore_termtitle:
109 return
110 _set_term_title(title)
111
112
113 def freeze_term_title():
114 warnings.warn("This function is deprecated, use toggle_set_term_title()")
115 global ignore_termtitle
116 ignore_termtitle = True
117
118
119 def get_terminal_size(defaultx=80, defaulty=25):
120 return _get_terminal_size((defaultx, defaulty))
121
[end of IPython/utils/terminal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/utils/terminal.py b/IPython/utils/terminal.py
--- a/IPython/utils/terminal.py
+++ b/IPython/utils/terminal.py
@@ -15,11 +15,7 @@
import os
import sys
import warnings
-try:
- from shutil import get_terminal_size as _get_terminal_size
-except ImportError:
- # use backport on Python 2
- from backports.shutil_get_terminal_size import get_terminal_size as _get_terminal_size
+from shutil import get_terminal_size as _get_terminal_size
from . import py3compat
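With the backport gone, everything rides on the stdlib function, which has shipped since Python 3.3 and already supports the fallback that the module-level wrapper forwards:

```python
from shutil import get_terminal_size

# fallback is returned when the size cannot be determined (e.g. no tty)
cols, rows = get_terminal_size(fallback=(80, 25))
print(cols, rows)
```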
| {"golden_diff": "diff --git a/IPython/utils/terminal.py b/IPython/utils/terminal.py\n--- a/IPython/utils/terminal.py\n+++ b/IPython/utils/terminal.py\n@@ -15,11 +15,7 @@\n import os\n import sys\n import warnings\n-try:\n- from shutil import get_terminal_size as _get_terminal_size\n-except ImportError:\n- # use backport on Python 2\n- from backports.shutil_get_terminal_size import get_terminal_size as _get_terminal_size\n+from shutil import get_terminal_size as _get_terminal_size\n \n from . import py3compat\n", "issue": "remove usage of backports.shutil_get_terminal_size\nThis is for pre-3.3 Python.\r\n\r\nPretty easy it should only require deleting lines. \r\nMaybe a few need to be dedented.\n", "before_files": [{"content": "# encoding: utf-8\n\"\"\"\nUtilities for working with terminals.\n\nAuthors:\n\n* Brian E. Granger\n* Fernando Perez\n* Alexander Belchenko (e-mail: bialix AT ukr.net)\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\nimport sys\nimport warnings\ntry:\n from shutil import get_terminal_size as _get_terminal_size\nexcept ImportError:\n # use backport on Python 2\n from backports.shutil_get_terminal_size import get_terminal_size as _get_terminal_size\n\nfrom . import py3compat\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\n# This variable is part of the expected API of the module:\nignore_termtitle = True\n\n\n\nif os.name == 'posix':\n def _term_clear():\n os.system('clear')\nelif sys.platform == 'win32':\n def _term_clear():\n os.system('cls')\nelse:\n def _term_clear():\n pass\n\n\n\ndef toggle_set_term_title(val):\n \"\"\"Control whether set_term_title is active or not.\n\n set_term_title() allows writing to the console titlebar. In embedded\n widgets this can cause problems, so this call can be used to toggle it on\n or off as needed.\n\n The default state of the module is for the function to be disabled.\n\n Parameters\n ----------\n val : bool\n If True, set_term_title() actually writes to the terminal (using the\n appropriate platform-specific module). 
If False, it is a no-op.\n \"\"\"\n global ignore_termtitle\n ignore_termtitle = not(val)\n\n\ndef _set_term_title(*args,**kw):\n \"\"\"Dummy no-op.\"\"\"\n pass\n\n\ndef _set_term_title_xterm(title):\n \"\"\" Change virtual terminal title in xterm-workalikes \"\"\"\n sys.stdout.write('\\033]0;%s\\007' % title)\n\nif os.name == 'posix':\n TERM = os.environ.get('TERM','')\n if TERM.startswith('xterm'):\n _set_term_title = _set_term_title_xterm\nelif sys.platform == 'win32':\n try:\n import ctypes\n\n SetConsoleTitleW = ctypes.windll.kernel32.SetConsoleTitleW\n SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]\n \n def _set_term_title(title):\n \"\"\"Set terminal title using ctypes to access the Win32 APIs.\"\"\"\n SetConsoleTitleW(title)\n except ImportError:\n def _set_term_title(title):\n \"\"\"Set terminal title using the 'title' command.\"\"\"\n global ignore_termtitle\n\n try:\n # Cannot be on network share when issuing system commands\n curr = os.getcwd()\n os.chdir(\"C:\")\n ret = os.system(\"title \" + title)\n finally:\n os.chdir(curr)\n if ret:\n # non-zero return code signals error, don't try again\n ignore_termtitle = True\n\n\ndef set_term_title(title):\n \"\"\"Set terminal title using the necessary platform-dependent calls.\"\"\"\n if ignore_termtitle:\n return\n _set_term_title(title)\n\n\ndef freeze_term_title():\n warnings.warn(\"This function is deprecated, use toggle_set_term_title()\")\n global ignore_termtitle\n ignore_termtitle = True\n\n\ndef get_terminal_size(defaultx=80, defaulty=25):\n return _get_terminal_size((defaultx, defaulty))\n", "path": "IPython/utils/terminal.py"}]} | 1,580 | 131 |
gh_patches_debug_879 | rasdani/github-patches | git_diff | getpelican__pelican-1507 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
abbr support doesn't work for multiline
Eg:
``` rst
this is an :abbr:`TLA (Three Letter
Abbreviation)`
```
will output
`<abbr>TLA (Three Letter Abbreviation)</abbr>`
instead of
`<abbr title="Three Letter Abbreviation">TLA</abbr>`
I believe this could be fixed by adding the `re.M` flag to the `re.compile` call on this line: https://github.com/getpelican/pelican/blob/636fd6cc380f2537924532a587c70e96a386e25c/pelican/rstdirectives.py#L101
This refs ticket #395
</issue>
<code>
[start of pelican/rstdirectives.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals, print_function
3
4 from docutils import nodes, utils
5 from docutils.parsers.rst import directives, roles, Directive
6 from pygments.formatters import HtmlFormatter
7 from pygments import highlight
8 from pygments.lexers import get_lexer_by_name, TextLexer
9 import re
10 import six
11 import pelican.settings as pys
12
13
14 class Pygments(Directive):
15 """ Source code syntax highlighting.
16 """
17 required_arguments = 1
18 optional_arguments = 0
19 final_argument_whitespace = True
20 option_spec = {
21 'anchorlinenos': directives.flag,
22 'classprefix': directives.unchanged,
23 'hl_lines': directives.unchanged,
24 'lineanchors': directives.unchanged,
25 'linenos': directives.unchanged,
26 'linenospecial': directives.nonnegative_int,
27 'linenostart': directives.nonnegative_int,
28 'linenostep': directives.nonnegative_int,
29 'lineseparator': directives.unchanged,
30 'linespans': directives.unchanged,
31 'nobackground': directives.flag,
32 'nowrap': directives.flag,
33 'tagsfile': directives.unchanged,
34 'tagurlformat': directives.unchanged,
35 }
36 has_content = True
37
38 def run(self):
39 self.assert_has_content()
40 try:
41 lexer = get_lexer_by_name(self.arguments[0])
42 except ValueError:
43 # no lexer found - use the text one instead of an exception
44 lexer = TextLexer()
45
46 # Fetch the defaults
47 if pys.PYGMENTS_RST_OPTIONS is not None:
48 for k, v in six.iteritems(pys.PYGMENTS_RST_OPTIONS):
49 # Locally set options overrides the defaults
50 if k not in self.options:
51 self.options[k] = v
52
53 if ('linenos' in self.options and
54 self.options['linenos'] not in ('table', 'inline')):
55 if self.options['linenos'] == 'none':
56 self.options.pop('linenos')
57 else:
58 self.options['linenos'] = 'table'
59
60 for flag in ('nowrap', 'nobackground', 'anchorlinenos'):
61 if flag in self.options:
62 self.options[flag] = True
63
64 # noclasses should already default to False, but just in case...
65 formatter = HtmlFormatter(noclasses=False, **self.options)
66 parsed = highlight('\n'.join(self.content), lexer, formatter)
67 return [nodes.raw('', parsed, format='html')]
68
69 directives.register_directive('code-block', Pygments)
70 directives.register_directive('sourcecode', Pygments)
71
72
73 _abbr_re = re.compile('\((.*)\)$')
74
75
76 class abbreviation(nodes.Inline, nodes.TextElement):
77 pass
78
79
80 def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
81 text = utils.unescape(text)
82 m = _abbr_re.search(text)
83 if m is None:
84 return [abbreviation(text, text)], []
85 abbr = text[:m.start()].strip()
86 expl = m.group(1)
87 return [abbreviation(abbr, abbr, explanation=expl)], []
88
89 roles.register_local_role('abbr', abbr_role)
90
[end of pelican/rstdirectives.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py
--- a/pelican/rstdirectives.py
+++ b/pelican/rstdirectives.py
@@ -70,7 +70,7 @@
directives.register_directive('sourcecode', Pygments)
-_abbr_re = re.compile('\((.*)\)$')
+_abbr_re = re.compile('\((.*)\)$', re.DOTALL)
class abbreviation(nodes.Inline, nodes.TextElement):
| {"golden_diff": "diff --git a/pelican/rstdirectives.py b/pelican/rstdirectives.py\n--- a/pelican/rstdirectives.py\n+++ b/pelican/rstdirectives.py\n@@ -70,7 +70,7 @@\n directives.register_directive('sourcecode', Pygments)\n \n \n-_abbr_re = re.compile('\\((.*)\\)$')\n+_abbr_re = re.compile('\\((.*)\\)$', re.DOTALL)\n \n \n class abbreviation(nodes.Inline, nodes.TextElement):\n", "issue": "abbr support doesn't work for multiline\nEg:\n\n``` rst\nthis is an :abbr:`TLA (Three Letter\nAbbreviation)`\n```\n\nwill output\n`<abbr>TLA (Three Letter Abbreviation)</abbr>`\n\ninstead of\n`<abbr title=\"Three Letter Abbreviation\">TLA</abbr>`\n\nI believe this could be fixed by adding the `re.M` flag to the `re.compile` call on this line: https://github.com/getpelican/pelican/blob/636fd6cc380f2537924532a587c70e96a386e25c/pelican/rstdirectives.py#L101\n\nThis refs ticket #395 \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\n\nfrom docutils import nodes, utils\nfrom docutils.parsers.rst import directives, roles, Directive\nfrom pygments.formatters import HtmlFormatter\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name, TextLexer\nimport re\nimport six\nimport pelican.settings as pys\n\n\nclass Pygments(Directive):\n \"\"\" Source code syntax highlighting.\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {\n 'anchorlinenos': directives.flag,\n 'classprefix': directives.unchanged,\n 'hl_lines': directives.unchanged,\n 'lineanchors': directives.unchanged,\n 'linenos': directives.unchanged,\n 'linenospecial': directives.nonnegative_int,\n 'linenostart': directives.nonnegative_int,\n 'linenostep': directives.nonnegative_int,\n 'lineseparator': directives.unchanged,\n 'linespans': directives.unchanged,\n 'nobackground': directives.flag,\n 'nowrap': directives.flag,\n 'tagsfile': directives.unchanged,\n 'tagurlformat': directives.unchanged,\n }\n has_content = True\n\n def run(self):\n self.assert_has_content()\n try:\n lexer = get_lexer_by_name(self.arguments[0])\n except ValueError:\n # no lexer found - use the text one instead of an exception\n lexer = TextLexer()\n\n # Fetch the defaults\n if pys.PYGMENTS_RST_OPTIONS is not None:\n for k, v in six.iteritems(pys.PYGMENTS_RST_OPTIONS):\n # Locally set options overrides the defaults\n if k not in self.options:\n self.options[k] = v\n\n if ('linenos' in self.options and\n self.options['linenos'] not in ('table', 'inline')):\n if self.options['linenos'] == 'none':\n self.options.pop('linenos')\n else:\n self.options['linenos'] = 'table'\n\n for flag in ('nowrap', 'nobackground', 'anchorlinenos'):\n if flag in self.options:\n self.options[flag] = True\n\n # noclasses should already default to False, but just in case...\n formatter = HtmlFormatter(noclasses=False, **self.options)\n parsed = highlight('\\n'.join(self.content), lexer, formatter)\n return [nodes.raw('', parsed, format='html')]\n\ndirectives.register_directive('code-block', Pygments)\ndirectives.register_directive('sourcecode', Pygments)\n\n\n_abbr_re = re.compile('\\((.*)\\)$')\n\n\nclass abbreviation(nodes.Inline, nodes.TextElement):\n pass\n\n\ndef abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):\n text = utils.unescape(text)\n m = _abbr_re.search(text)\n if m is None:\n return [abbreviation(text, text)], []\n abbr = text[:m.start()].strip()\n expl = m.group(1)\n return [abbreviation(abbr, abbr, explanation=expl)], 
[]\n\nroles.register_local_role('abbr', abbr_role)\n", "path": "pelican/rstdirectives.py"}]} | 1,579 | 109 |
gh_patches_debug_602 | rasdani/github-patches | git_diff | pex-tool__pex-1844 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.97
On the docket:
+ [x] Avoid ENOEXEC for Pex internal --venvs. #1843
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.96"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.96"
+__version__ = "2.1.97"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.96\"\n+__version__ = \"2.1.97\"\n", "issue": "Release 2.1.97\nOn the docket:\r\n+ [x] Avoid ENOEXEC for Pex internal --venvs. #1843\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.96\"\n", "path": "pex/version.py"}]} | 620 | 97 |
gh_patches_debug_1026 | rasdani/github-patches | git_diff | pytorch__ignite-1365 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MyPy: improve ignite.base module
## 🚀 Feature
Currently, mypy ignores all errors for all modules. We have to rework our typing such that mypy checks the code.
In this issue, let's improve https://github.com/pytorch/ignite/tree/master/ignite/base module such that mypy passes on it.
For Hacktoberfest contributors, feel free to ask questions for details if any and say that you would like to tackle the issue.
Please, take a look at CONTRIBUTING guide.
Improve typing for ignite.handlers module (1343)
Fixes #1343
Description:
Improves typing (when possible) for `ignite.handlers` module.
Check list:
* [x] New tests are added (if a new feature is added)
* [ ] New doc strings: description and/or example code are in RST format
* [ ] Documentation is updated (if required)
</issue>
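For context: annotating a class attribute for mypy can be done either with a trailing `# type:` comment (valid on any interpreter version, and the style this repository uses) or with a Python 3.6+ variable annotation. A small sketch of the two equivalent spellings (the class and attribute names here are invented for illustration):

```python
from typing import Tuple

class Checkpointable:
    # Portable form: mypy reads the trailing type comment.
    _required_keys = ()  # type: Tuple[str, ...]

    # Python 3.6+ equivalent using a variable annotation:
    _optional_keys: Tuple[str, ...] = ()
```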
<code>
[start of ignite/base/mixins.py]
1 from collections import OrderedDict
2 from collections.abc import Mapping
3
4
5 class Serializable:
6
7 _state_dict_all_req_keys = ()
8 _state_dict_one_of_opt_keys = ()
9
10 def state_dict(self) -> OrderedDict:
11 pass
12
13 def load_state_dict(self, state_dict: Mapping) -> None:
14 if not isinstance(state_dict, Mapping):
15 raise TypeError("Argument state_dict should be a dictionary, but given {}".format(type(state_dict)))
16
17 for k in self._state_dict_all_req_keys:
18 if k not in state_dict:
19 raise ValueError(
20 "Required state attribute '{}' is absent in provided state_dict '{}'".format(k, state_dict.keys())
21 )
22 opts = [k in state_dict for k in self._state_dict_one_of_opt_keys]
23 if len(opts) > 0 and ((not any(opts)) or (all(opts))):
24 raise ValueError("state_dict should contain only one of '{}' keys".format(self._state_dict_one_of_opt_keys))
25
[end of ignite/base/mixins.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/base/mixins.py b/ignite/base/mixins.py
--- a/ignite/base/mixins.py
+++ b/ignite/base/mixins.py
@@ -4,8 +4,8 @@
class Serializable:
- _state_dict_all_req_keys = ()
- _state_dict_one_of_opt_keys = ()
+ _state_dict_all_req_keys = () # type: tuple
+ _state_dict_one_of_opt_keys = () # type: tuple
def state_dict(self) -> OrderedDict:
pass
| {"golden_diff": "diff --git a/ignite/base/mixins.py b/ignite/base/mixins.py\n--- a/ignite/base/mixins.py\n+++ b/ignite/base/mixins.py\n@@ -4,8 +4,8 @@\n \n class Serializable:\n \n- _state_dict_all_req_keys = ()\n- _state_dict_one_of_opt_keys = ()\n+ _state_dict_all_req_keys = () # type: tuple\n+ _state_dict_one_of_opt_keys = () # type: tuple\n \n def state_dict(self) -> OrderedDict:\n pass\n", "issue": "MyPy: improve ignite.base module\n## \ud83d\ude80 Feature\r\n\r\nCurrently, mypy ignores all errors for all modules. We have to rework our typing such that mypy checks the code.\r\nIn this issue, let's improve https://github.com/pytorch/ignite/tree/master/ignite/base module such that mypy passes on it.\r\n\r\nFor Hacktoberfest contributors, feel free to ask questions for details if any and say that you would like to tackle the issue.\r\nPlease, take a look at CONTRIBUTING guide.\nImprove typing for ignite.handlers module (1343)\nFixes #1343 \r\n\r\nDescription:\r\n\r\nImproves typing (when possible) for `ignite.handlers` module.\r\n\r\nCheck list:\r\n* [x] New tests are added (if a new feature is added)\r\n* [ ] New doc strings: description and/or example code are in RST format\r\n* [ ] Documentation is updated (if required)\r\n\n", "before_files": [{"content": "from collections import OrderedDict\nfrom collections.abc import Mapping\n\n\nclass Serializable:\n\n _state_dict_all_req_keys = ()\n _state_dict_one_of_opt_keys = ()\n\n def state_dict(self) -> OrderedDict:\n pass\n\n def load_state_dict(self, state_dict: Mapping) -> None:\n if not isinstance(state_dict, Mapping):\n raise TypeError(\"Argument state_dict should be a dictionary, but given {}\".format(type(state_dict)))\n\n for k in self._state_dict_all_req_keys:\n if k not in state_dict:\n raise ValueError(\n \"Required state attribute '{}' is absent in provided state_dict '{}'\".format(k, state_dict.keys())\n )\n opts = [k in state_dict for k in self._state_dict_one_of_opt_keys]\n if len(opts) > 0 and ((not any(opts)) or (all(opts))):\n raise ValueError(\"state_dict should contain only one of '{}' keys\".format(self._state_dict_one_of_opt_keys))\n", "path": "ignite/base/mixins.py"}]} | 976 | 122 |
gh_patches_debug_19753 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-323 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change how works generating data by schema
The current design is bad for following reasons:
`Field` is an object which can represent any method of any provider and it's mean that is data provider which should return data and it's the only thing that we want by this object, but here how it works right now:
```
>>> from mimesis.schema import Field
>>> from mimesis.enums import Gender
_ = Field('en')
>>> _ = Field('en')
>>> app_schema = (
... lambda: {
... "id": _('uuid'),
... "name": _('word'),
... "version": _('version'),
... "owner": {
... "email": _('email'),
... "token": _('token'),
... "creator": _('full_name', gender=Gender.FEMALE),
... },
... }
... )
>>> _.fill(schema=app_schema, iterations=10)
```
It looks really annoying.
It should be done using another way because here every instance of `Field` contains method `fill`, but why? Good question... because this is disgusting API designed by me. And now I see my mistake and suggest that it be corrected.
I suggest this:
```
from mimesis.schema import Field, Schema
_ = Field('en')
app_data = Schema(lambda: {
"id": _('uuid'),
"name": _('word'),
"version": _('version'),
"owner": {
"email": _('email'),
"token": _('token'),
"creator": _('full_name', gender=Gender.FEMALE),
},
}).create(iterations=20)
```
I think that is much better because the code is much more readable and looks more logical.
</issue>
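To make the proposed API concrete, a minimal `Schema` along the lines sketched above could be as small as the following. This is an illustration of the suggested design only, not the implementation that eventually shipped in mimesis:

```python
class Schema(object):
    """Wrap a zero-argument callable that produces one record."""

    def __init__(self, schema):
        self.schema = schema

    def create(self, iterations=1):
        # Re-invoke the callable for each record so every one is generated
        # fresh; the lambda closes over the Field instance, not one result.
        return [self.schema() for _ in range(iterations)]
```

Used as `Schema(lambda: {...}).create(iterations=20)`, this keeps `Field` as a pure data provider, which is exactly the separation of concerns the issue argues for.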
<code>
[start of mimesis/providers/development.py]
1 from mimesis.data import (BACKEND, CONTAINER, FRONTEND, LICENSES, NOSQL, OS,
2 PROGRAMMING_LANGS, SQL)
3 from mimesis.providers.base import BaseProvider
4
5
6 class Development(BaseProvider):
7 """Class for getting fake data for Developers."""
8
9 def software_license(self) -> str:
10 """Get a random software license from list.
11
12 :return: License name.
13
14 :Example:
15 The BSD 3-Clause License.
16 """
17 return self.random.choice(LICENSES)
18
19 def version(self, pre_release: bool = False) -> str:
20 """Generate a random version information.
21
22 :param pre_release: Pre-release.
23 :return: The version of software.
24
25 :Example:
26 0.11.3-alpha.1
27 """
28 major, minor, patch = self.random.randints(3, 0, 10)
29 version = '{}.{}.{}'.format(major, minor, patch)
30
31 if pre_release:
32 suffixes = ('alpha', 'beta', 'rc')
33 suffix = self.random.choice(suffixes)
34 number = self.random.randint(1, 11)
35 return '{}-{}.{}'.format(version, suffix, number)
36
37 return version
38
39 def database(self, nosql: bool = False) -> str:
40 """Get a random database name.
41
42 :param bool nosql: only NoSQL databases.
43 :return: Database name.
44
45 :Example:
46 PostgreSQL.
47 """
48 if nosql:
49 return self.random.choice(NOSQL)
50 return self.random.choice(SQL)
51
52 def container(self) -> str:
53 """Get a random containerization system.
54
55 :return: Containerization system.
56
57 :Example:
58 Docker.
59 """
60 return self.random.choice(CONTAINER)
61
62 def version_control_system(self) -> str:
63 """Get a random version control system.
64
65 :return: Version control system
66
67 :Example:
68 Git
69 """
70 vcs = ('Git', 'Subversion')
71 return self.random.choice(vcs)
72
73 def programming_language(self) -> str:
74 """Get a random programming language from the list.
75
76 :return: Programming language.
77
78 :Example:
79 Erlang.
80 """
81 return self.random.choice(PROGRAMMING_LANGS)
82
83 def backend(self) -> str:
84 """Get a random backend stack.
85
86 :return: Stack.
87
88 :Example:
89 Elixir/Phoenix
90 """
91 return self.random.choice(BACKEND)
92
93 def frontend(self) -> str:
94 """Get a random front-end stack.
95
96 :return: Stack.
97
98 :Example:
99 JS/React.
100 """
101 return self.random.choice(FRONTEND)
102
103 def os(self) -> str:
104 """Get a random operating system or distributive name.
105
106 :return: The name of OS.
107
108 :Example:
109 Gentoo
110 """
111 return self.random.choice(OS)
112
113 def boolean(self) -> bool:
114 """Get a random boolean value.
115
116 :return: True of False.
117 """
118 values = (0, 1)
119 value = self.random.choice(values)
120 return bool(value)
121
[end of mimesis/providers/development.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mimesis/providers/development.py b/mimesis/providers/development.py
--- a/mimesis/providers/development.py
+++ b/mimesis/providers/development.py
@@ -16,16 +16,28 @@
"""
return self.random.choice(LICENSES)
- def version(self, pre_release: bool = False) -> str:
+ def version(self, calver: bool = False, pre_release: bool = False) -> str:
"""Generate a random version information.
+ :param calver: Use calendar versioning.
:param pre_release: Pre-release.
:return: The version of software.
-
+.
:Example:
0.11.3-alpha.1
"""
+ version = '{}.{}.{}'
major, minor, patch = self.random.randints(3, 0, 10)
+
+ if calver:
+ if minor == 0:
+ minor += 1
+
+ if patch == 0:
+ patch += 1
+ major = self.random.randint(2016, 2018)
+ return version.format(major, minor, patch)
+
version = '{}.{}.{}'.format(major, minor, patch)
if pre_release:
| {"golden_diff": "diff --git a/mimesis/providers/development.py b/mimesis/providers/development.py\n--- a/mimesis/providers/development.py\n+++ b/mimesis/providers/development.py\n@@ -16,16 +16,28 @@\n \"\"\"\n return self.random.choice(LICENSES)\n \n- def version(self, pre_release: bool = False) -> str:\n+ def version(self, calver: bool = False, pre_release: bool = False) -> str:\n \"\"\"Generate a random version information.\n \n+ :param calver: Use calendar versioning.\n :param pre_release: Pre-release.\n :return: The version of software.\n-\n+.\n :Example:\n 0.11.3-alpha.1\n \"\"\"\n+ version = '{}.{}.{}'\n major, minor, patch = self.random.randints(3, 0, 10)\n+\n+ if calver:\n+ if minor == 0:\n+ minor += 1\n+\n+ if patch == 0:\n+ patch += 1\n+ major = self.random.randint(2016, 2018)\n+ return version.format(major, minor, patch)\n+\n version = '{}.{}.{}'.format(major, minor, patch)\n \n if pre_release:\n", "issue": "Change how works generating data by schema\nThe current design is bad for following reasons:\r\n\r\n`Field` is an object which can represent any method of any provider and it's mean that is data provider which should return data and it's the only thing that we want by this object, but here how it works right now:\r\n\r\n```\r\n>>> from mimesis.schema import Field\r\n>>> from mimesis.enums import Gender\r\n_ = Field('en')\r\n>>> _ = Field('en')\r\n>>> app_schema = (\r\n... lambda: {\r\n... \"id\": _('uuid'),\r\n... \"name\": _('word'),\r\n... \"version\": _('version'),\r\n... \"owner\": {\r\n... \"email\": _('email'),\r\n... \"token\": _('token'),\r\n... \"creator\": _('full_name', gender=Gender.FEMALE),\r\n... },\r\n... }\r\n... )\r\n>>> _.fill(schema=app_schema, iterations=10)\r\n```\r\n\r\nIt looks really annoying.\r\n\r\nIt should be done using another way because here every instance of `Field` contains method `fill`, but why? Good question... because this is disgusting API designed by me. 
And now I see my mistake and suggest that it be corrected.\r\n\r\nI suggest this:\r\n\r\n```\r\nfrom mimesis.schema import Field, Schema\r\n_ = Field('en')\r\napp_data = Schema(lambda: {\r\n \"id\": _('uuid'),\r\n \"name\": _('word'),\r\n \"version\": _('version'),\r\n \"owner\": {\r\n \"email\": _('email'),\r\n \"token\": _('token'),\r\n \"creator\": _('full_name', gender=Gender.FEMALE),\r\n },\r\n}).create(iterations=20)\r\n```\r\n\r\nI think that is much better because the code is much more readable and looks more logical.\n", "before_files": [{"content": "from mimesis.data import (BACKEND, CONTAINER, FRONTEND, LICENSES, NOSQL, OS,\n PROGRAMMING_LANGS, SQL)\nfrom mimesis.providers.base import BaseProvider\n\n\nclass Development(BaseProvider):\n \"\"\"Class for getting fake data for Developers.\"\"\"\n\n def software_license(self) -> str:\n \"\"\"Get a random software license from list.\n\n :return: License name.\n\n :Example:\n The BSD 3-Clause License.\n \"\"\"\n return self.random.choice(LICENSES)\n\n def version(self, pre_release: bool = False) -> str:\n \"\"\"Generate a random version information.\n\n :param pre_release: Pre-release.\n :return: The version of software.\n\n :Example:\n 0.11.3-alpha.1\n \"\"\"\n major, minor, patch = self.random.randints(3, 0, 10)\n version = '{}.{}.{}'.format(major, minor, patch)\n\n if pre_release:\n suffixes = ('alpha', 'beta', 'rc')\n suffix = self.random.choice(suffixes)\n number = self.random.randint(1, 11)\n return '{}-{}.{}'.format(version, suffix, number)\n\n return version\n\n def database(self, nosql: bool = False) -> str:\n \"\"\"Get a random database name.\n\n :param bool nosql: only NoSQL databases.\n :return: Database name.\n\n :Example:\n PostgreSQL.\n \"\"\"\n if nosql:\n return self.random.choice(NOSQL)\n return self.random.choice(SQL)\n\n def container(self) -> str:\n \"\"\"Get a random containerization system.\n\n :return: Containerization system.\n\n :Example:\n Docker.\n \"\"\"\n return self.random.choice(CONTAINER)\n\n def version_control_system(self) -> str:\n \"\"\"Get a random version control system.\n\n :return: Version control system\n\n :Example:\n Git\n \"\"\"\n vcs = ('Git', 'Subversion')\n return self.random.choice(vcs)\n\n def programming_language(self) -> str:\n \"\"\"Get a random programming language from the list.\n\n :return: Programming language.\n\n :Example:\n Erlang.\n \"\"\"\n return self.random.choice(PROGRAMMING_LANGS)\n\n def backend(self) -> str:\n \"\"\"Get a random backend stack.\n\n :return: Stack.\n\n :Example:\n Elixir/Phoenix\n \"\"\"\n return self.random.choice(BACKEND)\n\n def frontend(self) -> str:\n \"\"\"Get a random front-end stack.\n\n :return: Stack.\n\n :Example:\n JS/React.\n \"\"\"\n return self.random.choice(FRONTEND)\n\n def os(self) -> str:\n \"\"\"Get a random operating system or distributive name.\n\n :return: The name of OS.\n\n :Example:\n Gentoo\n \"\"\"\n return self.random.choice(OS)\n\n def boolean(self) -> bool:\n \"\"\"Get a random boolean value.\n\n :return: True of False.\n \"\"\"\n values = (0, 1)\n value = self.random.choice(values)\n return bool(value)\n", "path": "mimesis/providers/development.py"}]} | 1,842 | 287 |
gh_patches_debug_60161 | rasdani/github-patches | git_diff | conan-io__conan-center-index-1706 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[odbc] CMake module name
odbc/2.3.7
According to CMake module, it should use capital letters: https://cmake.org/cmake/help/v3.12/module/FindODBC.html
</issue>
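In conan 1.x recipes the casing that the CMake generators emit is controlled from `package_info()` via `cpp_info.names`, so the repair amounts to pinning both generator names explicitly. A sketch of that change (this mirrors the conan 1.x API; consumers can then write `find_package(ODBC)` with CMake's own casing):

```python
def package_info(self):
    # Emit FindODBC.cmake / ODBCConfig.cmake rather than a lowercase
    # variant, matching CMake's built-in FindODBC module.
    self.cpp_info.names["cmake_find_package"] = "ODBC"
    self.cpp_info.names["cmake_find_package_multi"] = "ODBC"
```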
<code>
[start of recipes/odbc/all/conanfile.py]
1 import os
2 from conans import ConanFile, AutoToolsBuildEnvironment, tools
3 from conans.errors import ConanInvalidConfiguration
4
5
6 class OdbcConan(ConanFile):
7 name = 'odbc'
8 description = 'Package providing unixODBC'
9 url = 'https://github.com/conan-io/conan-center-index'
10 homepage = "http://www.unixodbc.org"
11 license = ('LGPL-2.1', 'GPL-2.1')
12
13 settings = 'os', 'compiler', 'build_type', 'arch'
14 options = {'shared': [True, False], 'fPIC': [True, False], 'with_libiconv': [True, False]}
15 default_options = {'shared': False, 'fPIC': True, 'with_libiconv': True}
16 topics = ('odbc', 'database', 'dbms', 'data-access')
17
18 _source_subfolder = 'source_subfolder'
19
20 def configure(self):
21 del self.settings.compiler.libcxx # Pure C
22 del self.settings.compiler.cppstd
23 if self.settings.os == "Windows":
24 raise ConanInvalidConfiguration("Windows not supported yet. Please, open an issue if you need such support")
25
26 def requirements(self):
27 if self.options.with_libiconv:
28 self.requires("libiconv/1.16")
29
30 def source(self):
31 tools.get(**self.conan_data["sources"][self.version])
32 extracted_dir = 'unixODBC-%s' % self.version
33 os.rename(extracted_dir, self._source_subfolder)
34
35 def build(self):
36 env_build = AutoToolsBuildEnvironment(self)
37 static_flag = 'no' if self.options.shared else 'yes'
38 shared_flag = 'yes' if self.options.shared else 'no'
39 libiconv_flag = 'yes' if self.options.with_libiconv else 'no'
40 args = ['--enable-static=%s' % static_flag,
41 '--enable-shared=%s' % shared_flag,
42 '--enable-ltdl-install',
43 '--enable-iconv=%s' % libiconv_flag]
44 if self.options.with_libiconv:
45 libiconv_prefix = self.deps_cpp_info["libiconv"].rootpath
46 args.append('--with-libiconv-prefix=%s' % libiconv_prefix)
47
48 env_build.configure(configure_dir=self._source_subfolder, args=args)
49 env_build.make()
50 env_build.install()
51 tools.rmdir(os.path.join(self.package_folder, "share"))
52 tools.rmdir(os.path.join(self.package_folder, "etc"))
53 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
54 os.remove(os.path.join(self.package_folder, "lib", "libodbc.la"))
55 os.remove(os.path.join(self.package_folder, "lib", "libodbccr.la"))
56 os.remove(os.path.join(self.package_folder, "lib", "libodbcinst.la"))
57 os.remove(os.path.join(self.package_folder, "lib", "libltdl.la"))
58
59 def package(self):
60 self.copy('COPYING', src=self._source_subfolder, dst="licenses")
61
62 def package_info(self):
63 self.env_info.path.append(os.path.join(self.package_folder, 'bin'))
64
65 self.cpp_info.libs = ['odbc', 'odbccr', 'odbcinst', 'ltdl']
66 if self.settings.os == 'Linux':
67 self.cpp_info.libs.append('dl')
68
[end of recipes/odbc/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/odbc/all/conanfile.py b/recipes/odbc/all/conanfile.py
--- a/recipes/odbc/all/conanfile.py
+++ b/recipes/odbc/all/conanfile.py
@@ -60,6 +60,9 @@
self.copy('COPYING', src=self._source_subfolder, dst="licenses")
def package_info(self):
+ self.cpp_info.names["cmake_find_package"] = "ODBC"
+ self.cpp_info.names["cmake_find_package_multi"] = "ODBC"
+
self.env_info.path.append(os.path.join(self.package_folder, 'bin'))
self.cpp_info.libs = ['odbc', 'odbccr', 'odbcinst', 'ltdl']
| {"golden_diff": "diff --git a/recipes/odbc/all/conanfile.py b/recipes/odbc/all/conanfile.py\n--- a/recipes/odbc/all/conanfile.py\n+++ b/recipes/odbc/all/conanfile.py\n@@ -60,6 +60,9 @@\n self.copy('COPYING', src=self._source_subfolder, dst=\"licenses\")\n \n def package_info(self):\n+ self.cpp_info.names[\"cmake_find_package\"] = \"ODBC\"\n+ self.cpp_info.names[\"cmake_find_package_multi\"] = \"ODBC\"\n+\n self.env_info.path.append(os.path.join(self.package_folder, 'bin'))\n \n self.cpp_info.libs = ['odbc', 'odbccr', 'odbcinst', 'ltdl']\n", "issue": "[odbc] CMake module name\nodbc/2.3.7\r\n\r\nAccording to CMake module, it should use capital letters: https://cmake.org/cmake/help/v3.12/module/FindODBC.html\n", "before_files": [{"content": "import os\nfrom conans import ConanFile, AutoToolsBuildEnvironment, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass OdbcConan(ConanFile):\n name = 'odbc'\n description = 'Package providing unixODBC'\n url = 'https://github.com/conan-io/conan-center-index'\n homepage = \"http://www.unixodbc.org\"\n license = ('LGPL-2.1', 'GPL-2.1')\n\n settings = 'os', 'compiler', 'build_type', 'arch'\n options = {'shared': [True, False], 'fPIC': [True, False], 'with_libiconv': [True, False]}\n default_options = {'shared': False, 'fPIC': True, 'with_libiconv': True}\n topics = ('odbc', 'database', 'dbms', 'data-access')\n\n _source_subfolder = 'source_subfolder'\n\n def configure(self):\n del self.settings.compiler.libcxx # Pure C\n del self.settings.compiler.cppstd\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"Windows not supported yet. Please, open an issue if you need such support\")\n\n def requirements(self):\n if self.options.with_libiconv:\n self.requires(\"libiconv/1.16\")\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = 'unixODBC-%s' % self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def build(self):\n env_build = AutoToolsBuildEnvironment(self)\n static_flag = 'no' if self.options.shared else 'yes'\n shared_flag = 'yes' if self.options.shared else 'no'\n libiconv_flag = 'yes' if self.options.with_libiconv else 'no'\n args = ['--enable-static=%s' % static_flag,\n '--enable-shared=%s' % shared_flag,\n '--enable-ltdl-install',\n '--enable-iconv=%s' % libiconv_flag]\n if self.options.with_libiconv:\n libiconv_prefix = self.deps_cpp_info[\"libiconv\"].rootpath\n args.append('--with-libiconv-prefix=%s' % libiconv_prefix)\n\n env_build.configure(configure_dir=self._source_subfolder, args=args)\n env_build.make()\n env_build.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.rmdir(os.path.join(self.package_folder, \"etc\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n os.remove(os.path.join(self.package_folder, \"lib\", \"libodbc.la\"))\n os.remove(os.path.join(self.package_folder, \"lib\", \"libodbccr.la\"))\n os.remove(os.path.join(self.package_folder, \"lib\", \"libodbcinst.la\"))\n os.remove(os.path.join(self.package_folder, \"lib\", \"libltdl.la\"))\n\n def package(self):\n self.copy('COPYING', src=self._source_subfolder, dst=\"licenses\")\n\n def package_info(self):\n self.env_info.path.append(os.path.join(self.package_folder, 'bin'))\n\n self.cpp_info.libs = ['odbc', 'odbccr', 'odbcinst', 'ltdl']\n if self.settings.os == 'Linux':\n self.cpp_info.libs.append('dl')\n", "path": "recipes/odbc/all/conanfile.py"}]} | 1,463 | 168 |
gh_patches_debug_32339 | rasdani/github-patches | git_diff | pypa__pip-6678 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip._internal.utils.glibc_version_string() can use os.confstr('CS_GNU_LIBC_VERSION') to avoid ctypes
Currently, the pip._internal.utils.glibc_version_string() function is implemented with ctypes to access gnu_get_libc_version() function. But os.confstr('CS_GNU_LIBC_VERSION') could be used instead to avoid ctypes.
I recently modified platform.libc_ver() on Python stdlib to use os.confstr('CS_GNU_LIBC_VERSION') is available:
* https://bugs.python.org/issue35389
* https://github.com/python/cpython/blob/d4efd917ac24940063a1ce80073fe3570c5f07f8/Lib/platform.py#L174-L183
```py
if executable is None:
try:
ver = os.confstr('CS_GNU_LIBC_VERSION')
# parse 'glibc 2.28' as ('glibc', '2.28')
parts = ver.split(maxsplit=1)
if len(parts) == 2:
return tuple(parts)
except (AttributeError, ValueError, OSError):
# os.confstr() or CS_GNU_LIBC_VERSION value not available
pass
```
Note: I noticed this issue when an user reported a traceback in pip when the ctypes is not available: https://mail.python.org/archives/list/[email protected]/thread/MTIRNYFAZTQQPHKAQXXREP33NYV2TW2J/
Handle ImportError and OSError when importing ctypes (#6543)
Non-dynamic executables can raise OSError when importing ctypes
because dlopen(NULL) is called on module import and dlopen()
won't work on non-dynamic executables.
This commit teaches the glibc version sniffing module to
handle a missing or not working ctypes module.
With this change applied, `pip install` works on non-dynamic / fully statically linked Python executables on Linux.
</issue>
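Putting the two reports above together, the resulting strategy is: try the cheap `os.confstr` route first, and fall back to ctypes only when that fails, guarding the ctypes import itself. A condensed standalone sketch, simplified relative to what pip actually ships:

```python
import os

def glibc_version():
    # Fast path: os.confstr avoids ctypes entirely, so it also works on
    # statically linked interpreters where dlopen(NULL) is unavailable.
    try:
        _, version = os.confstr("CS_GNU_LIBC_VERSION").split()
        return version
    except (AttributeError, OSError, ValueError):
        pass  # confstr missing, failed, or returned something unexpected

    try:
        # Importing ctypes can itself raise OSError on non-dynamic builds.
        import ctypes
    except (ImportError, OSError):
        return None
    try:
        gnu_get_libc_version = ctypes.CDLL(None).gnu_get_libc_version
    except AttributeError:
        return None  # process is not linked against glibc
    gnu_get_libc_version.restype = ctypes.c_char_p
    return gnu_get_libc_version().decode("ascii")
```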
<code>
[start of src/pip/_internal/utils/glibc.py]
1 from __future__ import absolute_import
2
3 import ctypes
4 import re
5 import warnings
6
7 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
8
9 if MYPY_CHECK_RUNNING:
10 from typing import Optional, Tuple
11
12
13 def glibc_version_string():
14 # type: () -> Optional[str]
15 "Returns glibc version string, or None if not using glibc."
16
17 # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
18 # manpage says, "If filename is NULL, then the returned handle is for the
19 # main program". This way we can let the linker do the work to figure out
20 # which libc our process is actually using.
21 process_namespace = ctypes.CDLL(None)
22 try:
23 gnu_get_libc_version = process_namespace.gnu_get_libc_version
24 except AttributeError:
25 # Symbol doesn't exist -> therefore, we are not linked to
26 # glibc.
27 return None
28
29 # Call gnu_get_libc_version, which returns a string like "2.5"
30 gnu_get_libc_version.restype = ctypes.c_char_p
31 version_str = gnu_get_libc_version()
32 # py2 / py3 compatibility:
33 if not isinstance(version_str, str):
34 version_str = version_str.decode("ascii")
35
36 return version_str
37
38
39 # Separated out from have_compatible_glibc for easier unit testing
40 def check_glibc_version(version_str, required_major, minimum_minor):
41 # type: (str, int, int) -> bool
42 # Parse string and check against requested version.
43 #
44 # We use a regexp instead of str.split because we want to discard any
45 # random junk that might come after the minor version -- this might happen
46 # in patched/forked versions of glibc (e.g. Linaro's version of glibc
47 # uses version strings like "2.20-2014.11"). See gh-3588.
48 m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
49 if not m:
50 warnings.warn("Expected glibc version with 2 components major.minor,"
51 " got: %s" % version_str, RuntimeWarning)
52 return False
53 return (int(m.group("major")) == required_major and
54 int(m.group("minor")) >= minimum_minor)
55
56
57 def have_compatible_glibc(required_major, minimum_minor):
58 # type: (int, int) -> bool
59 version_str = glibc_version_string() # type: Optional[str]
60 if version_str is None:
61 return False
62 return check_glibc_version(version_str, required_major, minimum_minor)
63
64
65 # platform.libc_ver regularly returns completely nonsensical glibc
66 # versions. E.g. on my computer, platform says:
67 #
68 # ~$ python2.7 -c 'import platform; print(platform.libc_ver())'
69 # ('glibc', '2.7')
70 # ~$ python3.5 -c 'import platform; print(platform.libc_ver())'
71 # ('glibc', '2.9')
72 #
73 # But the truth is:
74 #
75 # ~$ ldd --version
76 # ldd (Debian GLIBC 2.22-11) 2.22
77 #
78 # This is unfortunate, because it means that the linehaul data on libc
79 # versions that was generated by pip 8.1.2 and earlier is useless and
80 # misleading. Solution: instead of using platform, use our code that actually
81 # works.
82 def libc_ver():
83 # type: () -> Tuple[str, str]
84 """Try to determine the glibc version
85
86 Returns a tuple of strings (lib, version) which default to empty strings
87 in case the lookup fails.
88 """
89 glibc_version = glibc_version_string()
90 if glibc_version is None:
91 return ("", "")
92 else:
93 return ("glibc", glibc_version)
94
[end of src/pip/_internal/utils/glibc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/utils/glibc.py b/src/pip/_internal/utils/glibc.py
--- a/src/pip/_internal/utils/glibc.py
+++ b/src/pip/_internal/utils/glibc.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import
-import ctypes
+import os
import re
import warnings
@@ -13,6 +13,33 @@
def glibc_version_string():
# type: () -> Optional[str]
"Returns glibc version string, or None if not using glibc."
+ return glibc_version_string_confstr() or glibc_version_string_ctypes()
+
+
+def glibc_version_string_confstr():
+ # type: () -> Optional[str]
+ "Primary implementation of glibc_version_string using os.confstr."
+ # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
+ # to be broken or missing. This strategy is used in the standard library
+ # platform module:
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
+ try:
+ # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17":
+ _, version = os.confstr("CS_GNU_LIBC_VERSION").split()
+ except (AttributeError, OSError, ValueError):
+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+ return None
+ return version
+
+
+def glibc_version_string_ctypes():
+ # type: () -> Optional[str]
+ "Fallback implementation of glibc_version_string using ctypes."
+
+ try:
+ import ctypes
+ except ImportError:
+ return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
@@ -56,7 +83,7 @@
def have_compatible_glibc(required_major, minimum_minor):
# type: (int, int) -> bool
- version_str = glibc_version_string() # type: Optional[str]
+ version_str = glibc_version_string()
if version_str is None:
return False
return check_glibc_version(version_str, required_major, minimum_minor)
| {"golden_diff": "diff --git a/src/pip/_internal/utils/glibc.py b/src/pip/_internal/utils/glibc.py\n--- a/src/pip/_internal/utils/glibc.py\n+++ b/src/pip/_internal/utils/glibc.py\n@@ -1,6 +1,6 @@\n from __future__ import absolute_import\n \n-import ctypes\n+import os\n import re\n import warnings\n \n@@ -13,6 +13,33 @@\n def glibc_version_string():\n # type: () -> Optional[str]\n \"Returns glibc version string, or None if not using glibc.\"\n+ return glibc_version_string_confstr() or glibc_version_string_ctypes()\n+\n+\n+def glibc_version_string_confstr():\n+ # type: () -> Optional[str]\n+ \"Primary implementation of glibc_version_string using os.confstr.\"\n+ # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely\n+ # to be broken or missing. This strategy is used in the standard library\n+ # platform module:\n+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183\n+ try:\n+ # os.confstr(\"CS_GNU_LIBC_VERSION\") returns a string like \"glibc 2.17\":\n+ _, version = os.confstr(\"CS_GNU_LIBC_VERSION\").split()\n+ except (AttributeError, OSError, ValueError):\n+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...\n+ return None\n+ return version\n+\n+\n+def glibc_version_string_ctypes():\n+ # type: () -> Optional[str]\n+ \"Fallback implementation of glibc_version_string using ctypes.\"\n+\n+ try:\n+ import ctypes\n+ except ImportError:\n+ return None\n \n # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen\n # manpage says, \"If filename is NULL, then the returned handle is for the\n@@ -56,7 +83,7 @@\n \n def have_compatible_glibc(required_major, minimum_minor):\n # type: (int, int) -> bool\n- version_str = glibc_version_string() # type: Optional[str]\n+ version_str = glibc_version_string()\n if version_str is None:\n return False\n return check_glibc_version(version_str, required_major, minimum_minor)\n", "issue": "pip._internal.utils.glibc_version_string() can use os.confstr('CS_GNU_LIBC_VERSION') to avoid ctypes\nCurrently, the pip._internal.utils.glibc_version_string() function is implemented with ctypes to access gnu_get_libc_version() function. 
But os.confstr('CS_GNU_LIBC_VERSION') could be used instead to avoid ctypes.\r\n\r\nI recently modified platform.libc_ver() on Python stdlib to use os.confstr('CS_GNU_LIBC_VERSION') is available:\r\n* https://bugs.python.org/issue35389\r\n* https://github.com/python/cpython/blob/d4efd917ac24940063a1ce80073fe3570c5f07f8/Lib/platform.py#L174-L183\r\n\r\n```py\r\n if executable is None:\r\n try:\r\n ver = os.confstr('CS_GNU_LIBC_VERSION')\r\n # parse 'glibc 2.28' as ('glibc', '2.28')\r\n parts = ver.split(maxsplit=1)\r\n if len(parts) == 2:\r\n return tuple(parts)\r\n except (AttributeError, ValueError, OSError):\r\n # os.confstr() or CS_GNU_LIBC_VERSION value not available\r\n pass\r\n```\r\n\r\nNote: I noticed this issue when an user reported a traceback in pip when the ctypes is not available: https://mail.python.org/archives/list/[email protected]/thread/MTIRNYFAZTQQPHKAQXXREP33NYV2TW2J/\nHandle ImportError and OSError when importing ctypes (#6543)\nNon-dynamic executables can raise OSError when importing ctypes\r\nbecause dlopen(NULL) is called on module import and dlopen()\r\nwon't work on non-dynamic executables.\r\n\r\nThis commit teaches the glibc version sniffing module to\r\nhandle a missing or not working ctypes module.\r\n\r\nWith this change applied, `pip install` works on non-dynamic / fully statically linked Python executables on Linux.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport ctypes\nimport re\nimport warnings\n\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from typing import Optional, Tuple\n\n\ndef glibc_version_string():\n # type: () -> Optional[str]\n \"Returns glibc version string, or None if not using glibc.\"\n\n # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen\n # manpage says, \"If filename is NULL, then the returned handle is for the\n # main program\". This way we can let the linker do the work to figure out\n # which libc our process is actually using.\n process_namespace = ctypes.CDLL(None)\n try:\n gnu_get_libc_version = process_namespace.gnu_get_libc_version\n except AttributeError:\n # Symbol doesn't exist -> therefore, we are not linked to\n # glibc.\n return None\n\n # Call gnu_get_libc_version, which returns a string like \"2.5\"\n gnu_get_libc_version.restype = ctypes.c_char_p\n version_str = gnu_get_libc_version()\n # py2 / py3 compatibility:\n if not isinstance(version_str, str):\n version_str = version_str.decode(\"ascii\")\n\n return version_str\n\n\n# Separated out from have_compatible_glibc for easier unit testing\ndef check_glibc_version(version_str, required_major, minimum_minor):\n # type: (str, int, int) -> bool\n # Parse string and check against requested version.\n #\n # We use a regexp instead of str.split because we want to discard any\n # random junk that might come after the minor version -- this might happen\n # in patched/forked versions of glibc (e.g. Linaro's version of glibc\n # uses version strings like \"2.20-2014.11\"). 
See gh-3588.\n m = re.match(r\"(?P<major>[0-9]+)\\.(?P<minor>[0-9]+)\", version_str)\n if not m:\n warnings.warn(\"Expected glibc version with 2 components major.minor,\"\n \" got: %s\" % version_str, RuntimeWarning)\n return False\n return (int(m.group(\"major\")) == required_major and\n int(m.group(\"minor\")) >= minimum_minor)\n\n\ndef have_compatible_glibc(required_major, minimum_minor):\n # type: (int, int) -> bool\n version_str = glibc_version_string() # type: Optional[str]\n if version_str is None:\n return False\n return check_glibc_version(version_str, required_major, minimum_minor)\n\n\n# platform.libc_ver regularly returns completely nonsensical glibc\n# versions. E.g. on my computer, platform says:\n#\n# ~$ python2.7 -c 'import platform; print(platform.libc_ver())'\n# ('glibc', '2.7')\n# ~$ python3.5 -c 'import platform; print(platform.libc_ver())'\n# ('glibc', '2.9')\n#\n# But the truth is:\n#\n# ~$ ldd --version\n# ldd (Debian GLIBC 2.22-11) 2.22\n#\n# This is unfortunate, because it means that the linehaul data on libc\n# versions that was generated by pip 8.1.2 and earlier is useless and\n# misleading. Solution: instead of using platform, use our code that actually\n# works.\ndef libc_ver():\n # type: () -> Tuple[str, str]\n \"\"\"Try to determine the glibc version\n\n Returns a tuple of strings (lib, version) which default to empty strings\n in case the lookup fails.\n \"\"\"\n glibc_version = glibc_version_string()\n if glibc_version is None:\n return (\"\", \"\")\n else:\n return (\"glibc\", glibc_version)\n", "path": "src/pip/_internal/utils/glibc.py"}]} | 2,025 | 561 |
gh_patches_debug_5336 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1441 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MA: Where are the Republicans?
All state legislators in MA are in either the Democratic party or Other. It's a nice fantasy but it's not reality!
</issue>
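The symptom comes from an exact-string comparison in the scraper below: the site labels members with strings such as "D" or "Republican", which equal neither 'Democrat' nor 'R', so every Republican falls through to 'Other'. A tolerant normalization could look like this sketch (the accepted spellings listed are assumptions about what the site emits):

```python
PARTY_MAP = {
    'D': 'Democratic', 'Democrat': 'Democratic', 'Democratic': 'Democratic',
    'R': 'Republican', 'Republican': 'Republican',
}

def normalize_party(raw):
    # Anything unrecognized (independents, vacancies, ...) stays 'Other'.
    return PARTY_MAP.get(raw.strip(), 'Other')
```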
<code>
[start of openstates/ma/legislators.py]
1 import re
2
3 import lxml.html
4 from billy.scrape.legislators import LegislatorScraper, Legislator
5
6
7 def clean_district(district):
8 mappings = {
9 1: 'First',
10 2: 'Second',
11 3: 'Third',
12 4: 'Fourth',
13 5: 'Fifth',
14 6: 'Sixth',
15 7: 'Seventh',
16 8: 'Eighth',
17 9: 'Ninth',
18 10: 'Tenth',
19 11: 'Eleventh',
20 12: 'Twelfth',
21 13: 'Thirteenth',
22 14: 'Fourteenth',
23 15: 'Fifteenth',
24 16: 'Sixteenth',
25 17: 'Seventeenth',
26 18: 'Eighteenth',
27 19: 'Nineteenth',
28 20: 'Twentieth',
29 }
30 pieces = re.match('(\d+)\w\w\s(.+)', district)
31 if pieces:
32 ordinal, rest = pieces.groups()
33 ordinal = int(ordinal)
34 if ordinal <= 20:
35 ordinal = mappings[ordinal]
36 elif ordinal < 30:
37 ordinal = 'Twenty-' + mappings[ordinal-20]
38 elif ordinal == 30:
39 ordinal = 'Thirtieth'
40 elif ordinal < 40:
41 ordinal = 'Thirty-' + mappings[ordinal-30]
42 district = '{} {}'.format(ordinal, rest)
43
44 return district
45
46
47 class MALegislatorScraper(LegislatorScraper):
48 jurisdiction = 'ma'
49
50 def scrape(self, chamber, term):
51 self.validate_term(term, latest_only=True)
52
53 if chamber == 'upper':
54 chamber_type = 'Senate'
55 else:
56 chamber_type = 'House'
57
58 url = "https://malegislature.gov/People/%s" % chamber_type
59 page = self.get(url).text
60 doc = lxml.html.fromstring(page)
61 doc.make_links_absolute("https://malegislature.gov")
62
63 for member_url in doc.xpath('//td[@class="pictureCol"]/a/@href'):
64 self.scrape_member(chamber, term, member_url)
65
66 def scrape_member(self, chamber, term, member_url):
67 page = self.get(member_url).text
68 root = lxml.html.fromstring(page)
69 root.make_links_absolute(member_url)
70
71 photo_url = root.xpath('//div[@class="thumbPhoto"]/img/@src')[0]
72 full_name = root.xpath('//h1/span')[0].tail.strip()
73
74 email = root.xpath('//a[contains(@href, "mailto")]/@href')[0]
75 email = email.replace('mailto:', '')
76
77 party, district = root.xpath('//h1/span')[1].text.split('-')
78 party = party.strip()
79 district = clean_district(district.strip())
80
81 if party == 'Democrat':
82 party = 'Democratic'
83 elif party == 'R':
84 party = 'Republican'
85 else:
86 party = 'Other'
87
88 leg = Legislator(term, chamber, district, full_name, party=party,
89 photo_url=photo_url, url=member_url)
90 leg.add_source(member_url)
91
92 # offices
93
94 # this bool is so we only attach the email to one office
95 # and we make sure to create at least one office
96 email_stored = True
97 if email:
98 email_stored = False
99
100 for addr in root.xpath('//address/div[@class="contactGroup"]'):
101 office_name = addr.xpath('../preceding-sibling::h4/text()'
102 )[0].strip()
103 address = addr.xpath('a')[0].text_content()
104 address = re.sub('\s{2,}', '\n', address)
105
106 phone = fax = next = None
107 for phonerow in addr.xpath('./div/div'):
108 phonerow = phonerow.text_content().strip()
109 if phonerow == 'Phone:':
110 next = 'phone'
111 elif phonerow == 'Fax:':
112 next = 'fax'
113 elif next == 'phone':
114 phone = phonerow
115 next = None
116 elif next == 'fax':
117 fax = phonerow
118 next = None
119 else:
120 self.warning('unknown phonerow %s', phonerow)
121
122 # all pieces collected
123 if 'District' in office_name:
124 otype = 'district'
125 elif 'State' in office_name:
126 otype = 'capitol'
127
128 if not email_stored:
129 email_stored = True
130 leg.add_office(otype, office_name, phone=phone, fax=fax,
131 address=address, email=email)
132 else:
133 leg.add_office(otype, office_name, phone=phone, fax=fax,
134 address=address)
135
136 if not email_stored:
137 leg.add_office('capitol', 'Capitol Office', email=email)
138
139 self.save_legislator(leg)
140
[end of openstates/ma/legislators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/ma/legislators.py b/openstates/ma/legislators.py
--- a/openstates/ma/legislators.py
+++ b/openstates/ma/legislators.py
@@ -78,9 +78,9 @@
party = party.strip()
district = clean_district(district.strip())
- if party == 'Democrat':
+ if party in ('D', 'Democrat', 'Democratic'):
party = 'Democratic'
- elif party == 'R':
+ elif party in ('R', 'Republican'):
party = 'Republican'
else:
party = 'Other'
| {"golden_diff": "diff --git a/openstates/ma/legislators.py b/openstates/ma/legislators.py\n--- a/openstates/ma/legislators.py\n+++ b/openstates/ma/legislators.py\n@@ -78,9 +78,9 @@\n party = party.strip()\n district = clean_district(district.strip())\n \n- if party == 'Democrat':\n+ if party in ('D', 'Democrat', 'Democratic'):\n party = 'Democratic'\n- elif party == 'R':\n+ elif party in ('R', 'Republican'):\n party = 'Republican'\n else:\n party = 'Other'\n", "issue": "MA: Where are the Republicans?\nAll state legislators in MA are in either the Democratic party or Other. It's a nice fantasy but it's not reality!\r\n\n", "before_files": [{"content": "import re\n\nimport lxml.html\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\n\n\ndef clean_district(district):\n mappings = {\n 1: 'First',\n 2: 'Second',\n 3: 'Third',\n 4: 'Fourth',\n 5: 'Fifth',\n 6: 'Sixth',\n 7: 'Seventh',\n 8: 'Eighth',\n 9: 'Ninth',\n 10: 'Tenth',\n 11: 'Eleventh',\n 12: 'Twelfth',\n 13: 'Thirteenth',\n 14: 'Fourteenth',\n 15: 'Fifteenth',\n 16: 'Sixteenth',\n 17: 'Seventeenth',\n 18: 'Eighteenth',\n 19: 'Nineteenth',\n 20: 'Twentieth',\n }\n pieces = re.match('(\\d+)\\w\\w\\s(.+)', district)\n if pieces:\n ordinal, rest = pieces.groups()\n ordinal = int(ordinal)\n if ordinal <= 20:\n ordinal = mappings[ordinal]\n elif ordinal < 30:\n ordinal = 'Twenty-' + mappings[ordinal-20]\n elif ordinal == 30:\n ordinal = 'Thirtieth'\n elif ordinal < 40:\n ordinal = 'Thirty-' + mappings[ordinal-30]\n district = '{} {}'.format(ordinal, rest)\n\n return district\n\n\nclass MALegislatorScraper(LegislatorScraper):\n jurisdiction = 'ma'\n\n def scrape(self, chamber, term):\n self.validate_term(term, latest_only=True)\n\n if chamber == 'upper':\n chamber_type = 'Senate'\n else:\n chamber_type = 'House'\n\n url = \"https://malegislature.gov/People/%s\" % chamber_type\n page = self.get(url).text\n doc = lxml.html.fromstring(page)\n doc.make_links_absolute(\"https://malegislature.gov\")\n\n for member_url in doc.xpath('//td[@class=\"pictureCol\"]/a/@href'):\n self.scrape_member(chamber, term, member_url)\n\n def scrape_member(self, chamber, term, member_url):\n page = self.get(member_url).text\n root = lxml.html.fromstring(page)\n root.make_links_absolute(member_url)\n\n photo_url = root.xpath('//div[@class=\"thumbPhoto\"]/img/@src')[0]\n full_name = root.xpath('//h1/span')[0].tail.strip()\n\n email = root.xpath('//a[contains(@href, \"mailto\")]/@href')[0]\n email = email.replace('mailto:', '')\n\n party, district = root.xpath('//h1/span')[1].text.split('-')\n party = party.strip()\n district = clean_district(district.strip())\n\n if party == 'Democrat':\n party = 'Democratic'\n elif party == 'R':\n party = 'Republican'\n else:\n party = 'Other'\n\n leg = Legislator(term, chamber, district, full_name, party=party,\n photo_url=photo_url, url=member_url)\n leg.add_source(member_url)\n\n # offices\n\n # this bool is so we only attach the email to one office\n # and we make sure to create at least one office\n email_stored = True\n if email:\n email_stored = False\n\n for addr in root.xpath('//address/div[@class=\"contactGroup\"]'):\n office_name = addr.xpath('../preceding-sibling::h4/text()'\n )[0].strip()\n address = addr.xpath('a')[0].text_content()\n address = re.sub('\\s{2,}', '\\n', address)\n\n phone = fax = next = None\n for phonerow in addr.xpath('./div/div'):\n phonerow = phonerow.text_content().strip()\n if phonerow == 'Phone:':\n next = 'phone'\n elif phonerow == 'Fax:':\n next = 'fax'\n elif next == 
'phone':\n phone = phonerow\n next = None\n elif next == 'fax':\n fax = phonerow\n next = None\n else:\n self.warning('unknown phonerow %s', phonerow)\n\n # all pieces collected\n if 'District' in office_name:\n otype = 'district'\n elif 'State' in office_name:\n otype = 'capitol'\n\n if not email_stored:\n email_stored = True\n leg.add_office(otype, office_name, phone=phone, fax=fax,\n address=address, email=email)\n else:\n leg.add_office(otype, office_name, phone=phone, fax=fax,\n address=address)\n\n if not email_stored:\n leg.add_office('capitol', 'Capitol Office', email=email)\n\n self.save_legislator(leg)\n", "path": "openstates/ma/legislators.py"}]} | 1,988 | 141 |
gh_patches_debug_34695 | rasdani/github-patches | git_diff | numpy__numpy-7952 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Building Numpy package fails with TypeError: __init__() takes from 3 to 4 positional arguments but 13 were given
I'm trying in build Numpy on Ubuntu Server Xenial (Armbian 5.14). I have installed `libexpat1-dev`, `libpython3-dev`, `libpython3.5-dev`, `python3-dev`, `python3.5-dev`, `build-essential`, `gcc`, `gfortran`, `gfortran-5`, `libgfortran-5-dev`, `libgfortran3`, `libblas-common`, `libblas-dev`, `libblas3`, `libopenblas-base`, `libopenblas-dev`, `cython`, `libpng-dev`. Cloned git repo to a dir and ran `python3 setup.py build`. Here's the log:
`$ python3 setup.py build`
`Running from numpy source directory.`
`Cythonizing sources`
`numpy/random/mtrand/mtrand.pyx has not changed`
`Traceback (most recent call last):`
`File "setup.py", line 390, in <module>`
`setup_package()`
`File "setup.py", line 382, in setup_package`
`setup(**metadata)`
`File "/home/odroid/downloads/numpy/numpy/distutils/core.py", line 135, in setup
config = configuration()`
`File "setup.py", line 165, in configuration`
`config.add_subpackage('numpy')`
`File "/home/odroid/downloads/numpy/numpy/distutils/misc_util.py", line 1001, in add_subpackage`
`caller_level = 2)`
`File "/home/odroid/downloads/numpy/numpy/distutils/misc_util.py", line 970, in get_subpackage`
`caller_level = caller_level + 1)`
`File "/home/odroid/downloads/numpy/numpy/distutils/misc_util.py", line 907, in _get_configuration_from_setup_py`
`config = setup_module.configuration(*args)`
`File "numpy/setup.py", line 10, in configuration`
`config.add_subpackage('core')`
`File "/home/odroid/downloads/numpy/numpy/distutils/misc_util.py", line 1001, in add_subpackage`
`caller_level = 2)`
`File "/home/odroid/downloads/numpy/numpy/distutils/misc_util.py", line 970, in get_subpackage`
`caller_level = caller_level + 1)`
`File "/home/odroid/downloads/numpy/numpy/distutils/misc_util.py", line 907, in _get_configuration_from_setup_py`
`config = setup_module.configuration(*args)`
`File "numpy/core/setup.py", line 638, in configuration
generate_numpy_api]`
`File "/home/odroid/downloads/numpy/numpy/distutils/misc_util.py", line 1483, in add_extension`
`ext = Extension(**ext_args)`
`File "/home/odroid/downloads/numpy/numpy/distutils/extension.py", line 52, in __init__
export_symbols)`
`TypeError: __init__() takes from 3 to 4 positional arguments but 13 were given`
How can it be fixed?
</issue>
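What the trace shows: `Extension.__init__` in numpy's distutils forwards ten option values positionally into the parent class, while the `Extension` shipped with this interpreter accepts only `name`, `sources`, and at most one more argument positionally. A minimal sketch of the mismatch and of the keyword-argument form that sidesteps it (stock CPython `distutils` assumed; the exact positional limit varies with the local Python):

```python
from distutils.extension import Extension

# Mirrors the failing numpy call shape: many option values passed
# positionally. On interpreters whose Extension.__init__ exposes only a
# few positional parameters (as in the traceback above) this raises
# "TypeError: __init__() takes from 3 to 4 positional arguments but 13
# were given":
#   Extension("demo", [], None, None, None, None, None, None, None, None, None)

# Keyword arguments do not depend on how many parameters the local
# signature accepts positionally, so this form works everywhere:
ext = Extension(
    "demo",
    [],
    include_dirs=None,
    define_macros=None,
    undef_macros=None,
    library_dirs=None,
    libraries=None,
)
print(ext.name)
```

The same keyword-only style is what the accepted patch applies inside `numpy/distutils/extension.py` below.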
<code>
[start of numpy/distutils/extension.py]
1 """distutils.extension
2
3 Provides the Extension class, used to describe C/C++ extension
4 modules in setup scripts.
5
6 Overridden to support f2py.
7
8 """
9 from __future__ import division, absolute_import, print_function
10
11 import sys
12 import re
13 from distutils.extension import Extension as old_Extension
14
15 if sys.version_info[0] >= 3:
16 basestring = str
17
18
19 cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
20 fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
21
22 class Extension(old_Extension):
23 def __init__ (self, name, sources,
24 include_dirs=None,
25 define_macros=None,
26 undef_macros=None,
27 library_dirs=None,
28 libraries=None,
29 runtime_library_dirs=None,
30 extra_objects=None,
31 extra_compile_args=None,
32 extra_link_args=None,
33 export_symbols=None,
34 swig_opts=None,
35 depends=None,
36 language=None,
37 f2py_options=None,
38 module_dirs=None,
39 extra_f77_compile_args=None,
40 extra_f90_compile_args=None,
41 ):
42 old_Extension.__init__(self, name, [],
43 include_dirs,
44 define_macros,
45 undef_macros,
46 library_dirs,
47 libraries,
48 runtime_library_dirs,
49 extra_objects,
50 extra_compile_args,
51 extra_link_args,
52 export_symbols)
53 # Avoid assert statements checking that sources contains strings:
54 self.sources = sources
55
56 # Python 2.4 distutils new features
57 self.swig_opts = swig_opts or []
58 # swig_opts is assumed to be a list. Here we handle the case where it
59 # is specified as a string instead.
60 if isinstance(self.swig_opts, basestring):
61 import warnings
62 msg = "swig_opts is specified as a string instead of a list"
63 warnings.warn(msg, SyntaxWarning)
64 self.swig_opts = self.swig_opts.split()
65
66 # Python 2.3 distutils new features
67 self.depends = depends or []
68 self.language = language
69
70 # numpy_distutils features
71 self.f2py_options = f2py_options or []
72 self.module_dirs = module_dirs or []
73 self.extra_f77_compile_args = extra_f77_compile_args or []
74 self.extra_f90_compile_args = extra_f90_compile_args or []
75
76 return
77
78 def has_cxx_sources(self):
79 for source in self.sources:
80 if cxx_ext_re(str(source)):
81 return True
82 return False
83
84 def has_f2py_sources(self):
85 for source in self.sources:
86 if fortran_pyf_ext_re(source):
87 return True
88 return False
89
90 # class Extension
91
[end of numpy/distutils/extension.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py
--- a/numpy/distutils/extension.py
+++ b/numpy/distutils/extension.py
@@ -20,36 +20,39 @@
fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
class Extension(old_Extension):
- def __init__ (self, name, sources,
- include_dirs=None,
- define_macros=None,
- undef_macros=None,
- library_dirs=None,
- libraries=None,
- runtime_library_dirs=None,
- extra_objects=None,
- extra_compile_args=None,
- extra_link_args=None,
- export_symbols=None,
- swig_opts=None,
- depends=None,
- language=None,
- f2py_options=None,
- module_dirs=None,
- extra_f77_compile_args=None,
- extra_f90_compile_args=None,
- ):
- old_Extension.__init__(self, name, [],
- include_dirs,
- define_macros,
- undef_macros,
- library_dirs,
- libraries,
- runtime_library_dirs,
- extra_objects,
- extra_compile_args,
- extra_link_args,
- export_symbols)
+ def __init__ (
+ self, name, sources,
+ include_dirs=None,
+ define_macros=None,
+ undef_macros=None,
+ library_dirs=None,
+ libraries=None,
+ runtime_library_dirs=None,
+ extra_objects=None,
+ extra_compile_args=None,
+ extra_link_args=None,
+ export_symbols=None,
+ swig_opts=None,
+ depends=None,
+ language=None,
+ f2py_options=None,
+ module_dirs=None,
+ extra_f77_compile_args=None,
+ extra_f90_compile_args=None,):
+
+ old_Extension.__init__(
+ self, name, [],
+ include_dirs=include_dirs,
+ define_macros=define_macros,
+ undef_macros=undef_macros,
+ library_dirs=library_dirs,
+ libraries=libraries,
+ runtime_library_dirs=runtime_library_dirs,
+ extra_objects=extra_objects,
+ extra_compile_args=extra_compile_args,
+ extra_link_args=extra_link_args,
+ export_symbols=export_symbols)
+
# Avoid assert statements checking that sources contains strings:
self.sources = sources
| {"golden_diff": "diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py\n--- a/numpy/distutils/extension.py\n+++ b/numpy/distutils/extension.py\n@@ -20,36 +20,39 @@\n fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\\Z', re.I).match\n \n class Extension(old_Extension):\n- def __init__ (self, name, sources,\n- include_dirs=None,\n- define_macros=None,\n- undef_macros=None,\n- library_dirs=None,\n- libraries=None,\n- runtime_library_dirs=None,\n- extra_objects=None,\n- extra_compile_args=None,\n- extra_link_args=None,\n- export_symbols=None,\n- swig_opts=None,\n- depends=None,\n- language=None,\n- f2py_options=None,\n- module_dirs=None,\n- extra_f77_compile_args=None,\n- extra_f90_compile_args=None,\n- ):\n- old_Extension.__init__(self, name, [],\n- include_dirs,\n- define_macros,\n- undef_macros,\n- library_dirs,\n- libraries,\n- runtime_library_dirs,\n- extra_objects,\n- extra_compile_args,\n- extra_link_args,\n- export_symbols)\n+ def __init__ (\n+ self, name, sources,\n+ include_dirs=None,\n+ define_macros=None,\n+ undef_macros=None,\n+ library_dirs=None,\n+ libraries=None,\n+ runtime_library_dirs=None,\n+ extra_objects=None,\n+ extra_compile_args=None,\n+ extra_link_args=None,\n+ export_symbols=None,\n+ swig_opts=None,\n+ depends=None,\n+ language=None,\n+ f2py_options=None,\n+ module_dirs=None,\n+ extra_f77_compile_args=None,\n+ extra_f90_compile_args=None,):\n+\n+ old_Extension.__init__(\n+ self, name, [],\n+ include_dirs=include_dirs,\n+ define_macros=define_macros,\n+ undef_macros=undef_macros,\n+ library_dirs=library_dirs,\n+ libraries=libraries,\n+ runtime_library_dirs=runtime_library_dirs,\n+ extra_objects=extra_objects,\n+ extra_compile_args=extra_compile_args,\n+ extra_link_args=extra_link_args,\n+ export_symbols=export_symbols)\n+\n # Avoid assert statements checking that sources contains strings:\n self.sources = sources\n", "issue": "Building Numpy package fails with TypeError: __init__() takes from 3 to 4 positional arguments but 13 were given\nI'm trying in build Numpy on Ubuntu Server Xenial (Armbian 5.14). I have installed `libexpat1-dev`, `libpython3-dev`, `libpython3.5-dev`, `python3-dev`, `python3.5-dev`, `build-essential`, `gcc`, `gfortran`, `gfortran-5`, `libgfortran-5-dev`, `libgfortran3`, `libblas-common`, `libblas-dev`, `libblas3`, `libopenblas-base`, `libopenblas-dev`, `cython`, `libpng-dev`. Cloned git repo to a dir and ran `python3 setup.py build`. 
Here's the log:\n\n`$ python3 setup.py build`\n\n`Running from numpy source directory.`\n`Cythonizing sources`\n`numpy/random/mtrand/mtrand.pyx has not changed`\n`Traceback (most recent call last):`\n`File \"setup.py\", line 390, in <module>`\n`setup_package()`\n`File \"setup.py\", line 382, in setup_package`\n`setup(**metadata)`\n`File \"/home/odroid/downloads/numpy/numpy/distutils/core.py\", line 135, in setup\n config = configuration()`\n`File \"setup.py\", line 165, in configuration`\n`config.add_subpackage('numpy')`\n`File \"/home/odroid/downloads/numpy/numpy/distutils/misc_util.py\", line 1001, in add_subpackage`\n`caller_level = 2)`\n`File \"/home/odroid/downloads/numpy/numpy/distutils/misc_util.py\", line 970, in get_subpackage`\n`caller_level = caller_level + 1)`\n`File \"/home/odroid/downloads/numpy/numpy/distutils/misc_util.py\", line 907, in _get_configuration_from_setup_py`\n`config = setup_module.configuration(*args)`\n`File \"numpy/setup.py\", line 10, in configuration`\n`config.add_subpackage('core')`\n`File \"/home/odroid/downloads/numpy/numpy/distutils/misc_util.py\", line 1001, in add_subpackage`\n`caller_level = 2)`\n`File \"/home/odroid/downloads/numpy/numpy/distutils/misc_util.py\", line 970, in get_subpackage`\n`caller_level = caller_level + 1)`\n`File \"/home/odroid/downloads/numpy/numpy/distutils/misc_util.py\", line 907, in _get_configuration_from_setup_py`\n`config = setup_module.configuration(*args)`\n`File \"numpy/core/setup.py\", line 638, in configuration\n generate_numpy_api]`\n`File \"/home/odroid/downloads/numpy/numpy/distutils/misc_util.py\", line 1483, in add_extension`\n`ext = Extension(**ext_args)`\n`File \"/home/odroid/downloads/numpy/numpy/distutils/extension.py\", line 52, in __init__\n export_symbols)`\n`TypeError: __init__() takes from 3 to 4 positional arguments but 13 were given`\n\nHow can it be fixed?\n\n", "before_files": [{"content": "\"\"\"distutils.extension\n\nProvides the Extension class, used to describe C/C++ extension\nmodules in setup scripts.\n\nOverridden to support f2py.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport sys\nimport re\nfrom distutils.extension import Extension as old_Extension\n\nif sys.version_info[0] >= 3:\n basestring = str\n\n\ncxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\\Z', re.I).match\nfortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\\Z', re.I).match\n\nclass Extension(old_Extension):\n def __init__ (self, name, sources,\n include_dirs=None,\n define_macros=None,\n undef_macros=None,\n library_dirs=None,\n libraries=None,\n runtime_library_dirs=None,\n extra_objects=None,\n extra_compile_args=None,\n extra_link_args=None,\n export_symbols=None,\n swig_opts=None,\n depends=None,\n language=None,\n f2py_options=None,\n module_dirs=None,\n extra_f77_compile_args=None,\n extra_f90_compile_args=None,\n ):\n old_Extension.__init__(self, name, [],\n include_dirs,\n define_macros,\n undef_macros,\n library_dirs,\n libraries,\n runtime_library_dirs,\n extra_objects,\n extra_compile_args,\n extra_link_args,\n export_symbols)\n # Avoid assert statements checking that sources contains strings:\n self.sources = sources\n\n # Python 2.4 distutils new features\n self.swig_opts = swig_opts or []\n # swig_opts is assumed to be a list. 
Here we handle the case where it\n # is specified as a string instead.\n if isinstance(self.swig_opts, basestring):\n import warnings\n msg = \"swig_opts is specified as a string instead of a list\"\n warnings.warn(msg, SyntaxWarning)\n self.swig_opts = self.swig_opts.split()\n\n # Python 2.3 distutils new features\n self.depends = depends or []\n self.language = language\n\n # numpy_distutils features\n self.f2py_options = f2py_options or []\n self.module_dirs = module_dirs or []\n self.extra_f77_compile_args = extra_f77_compile_args or []\n self.extra_f90_compile_args = extra_f90_compile_args or []\n\n return\n\n def has_cxx_sources(self):\n for source in self.sources:\n if cxx_ext_re(str(source)):\n return True\n return False\n\n def has_f2py_sources(self):\n for source in self.sources:\n if fortran_pyf_ext_re(source):\n return True\n return False\n\n# class Extension\n", "path": "numpy/distutils/extension.py"}]} | 2,024 | 545 |
gh_patches_debug_24249 | rasdani/github-patches | git_diff | lutris__lutris-1232 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
latest version 0.4.21 thinks i don't have a vulkan loader
Got the new message pop up to say I don't have a Vulkan loader installed. This is on an Ubuntu 18.10 fresh upgrade.
I do:
> liam@liam-main:~$ sudo apt install libvulkan1 libvulkan1:i386
[sudo] password for liam:
Reading package lists... Done
Building dependency tree
Reading state information... Done
libvulkan1 is already the newest version (1.1.82.0-0ubuntu1).
libvulkan1:i386 is already the newest version (1.1.82.0-0ubuntu1).
0 to upgrade, 0 to newly install, 0 to remove and 5 not to upgrade.
Need more details? Let me know.
</issue>
<code>
[start of lutris/util/vulkan.py]
1 """Vulkan helper module"""
2 import os
3 import re
4 from enum import Enum
5
6 class vulkan_available(Enum):
7 NONE = 0
8 THIRTY_TWO = 1
9 SIXTY_FOUR = 2
10 ALL = 3
11
12 def search_for_file(directory):
13 if os.path.isdir(directory):
14 pattern = re.compile(r'^libvulkan\.so')
15 files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
16 files = [os.path.join(directory, f) for f in files if pattern.search(f)]
17 if files:
18 return True
19 return False
20
21 def vulkan_check():
22 vulkan_lib = search_for_file("/usr/lib")
23 vulkan_lib32 = search_for_file("/usr/lib32")
24 vulkan_lib_multi = search_for_file("/usr/lib/x86_64-linux-gnu")
25 vulkan_lib32_multi = search_for_file("/usr/lib32/i386-linux-gnu")
26 has_32_bit = vulkan_lib32 or vulkan_lib32_multi
27 has_64_bit = vulkan_lib or vulkan_lib_multi
28
29 if not (has_64_bit or has_32_bit):
30 return vulkan_available.NONE
31 if has_64_bit and not has_32_bit:
32 return vulkan_available.SIXTY_FOUR
33 if not has_64_bit and has_32_bit:
34 return vulkan_available.THIRTY_TWO
35 return vulkan_available.ALL
36
[end of lutris/util/vulkan.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/util/vulkan.py b/lutris/util/vulkan.py
--- a/lutris/util/vulkan.py
+++ b/lutris/util/vulkan.py
@@ -1,6 +1,7 @@
"""Vulkan helper module"""
import os
import re
+import subprocess
from enum import Enum
class vulkan_available(Enum):
@@ -9,22 +10,16 @@
SIXTY_FOUR = 2
ALL = 3
-def search_for_file(directory):
- if os.path.isdir(directory):
- pattern = re.compile(r'^libvulkan\.so')
- files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
- files = [os.path.join(directory, f) for f in files if pattern.search(f)]
- if files:
- return True
- return False
-
def vulkan_check():
- vulkan_lib = search_for_file("/usr/lib")
- vulkan_lib32 = search_for_file("/usr/lib32")
- vulkan_lib_multi = search_for_file("/usr/lib/x86_64-linux-gnu")
- vulkan_lib32_multi = search_for_file("/usr/lib32/i386-linux-gnu")
- has_32_bit = vulkan_lib32 or vulkan_lib32_multi
- has_64_bit = vulkan_lib or vulkan_lib_multi
+ has_64_bit = False
+ has_32_bit = False
+ for line in subprocess.check_output(["ldconfig", "-p"]).splitlines():
+ line = str(line)
+ if 'libvulkan' in line:
+ if 'x86-64' in line:
+ has_64_bit = True
+ else:
+ has_32_bit = True
if not (has_64_bit or has_32_bit):
return vulkan_available.NONE
| {"golden_diff": "diff --git a/lutris/util/vulkan.py b/lutris/util/vulkan.py\n--- a/lutris/util/vulkan.py\n+++ b/lutris/util/vulkan.py\n@@ -1,6 +1,7 @@\n \"\"\"Vulkan helper module\"\"\"\n import os\n import re\n+import subprocess\n from enum import Enum\n \n class vulkan_available(Enum):\n@@ -9,22 +10,16 @@\n SIXTY_FOUR = 2\n ALL = 3\n \n-def search_for_file(directory):\n- if os.path.isdir(directory):\n- pattern = re.compile(r'^libvulkan\\.so')\n- files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]\n- files = [os.path.join(directory, f) for f in files if pattern.search(f)]\n- if files:\n- return True\n- return False\n-\n def vulkan_check():\n- vulkan_lib = search_for_file(\"/usr/lib\")\n- vulkan_lib32 = search_for_file(\"/usr/lib32\")\n- vulkan_lib_multi = search_for_file(\"/usr/lib/x86_64-linux-gnu\")\n- vulkan_lib32_multi = search_for_file(\"/usr/lib32/i386-linux-gnu\")\n- has_32_bit = vulkan_lib32 or vulkan_lib32_multi\n- has_64_bit = vulkan_lib or vulkan_lib_multi\n+ has_64_bit = False\n+ has_32_bit = False\n+ for line in subprocess.check_output([\"ldconfig\", \"-p\"]).splitlines():\n+ line = str(line)\n+ if 'libvulkan' in line:\n+ if 'x86-64' in line:\n+ has_64_bit = True\n+ else:\n+ has_32_bit = True\n \n if not (has_64_bit or has_32_bit):\n return vulkan_available.NONE\n", "issue": "latest version 0.4.21 thinks i don't have a vulkan loader\nGot the new message pop up to say I don't have a Vulkan loader installed. This is on an Ubuntu 18.10 fresh upgrade.\r\n\r\nI do:\r\n\r\n> liam@liam-main:~$ sudo apt install libvulkan1 libvulkan1:i386 \r\n[sudo] password for liam: \r\nReading package lists... Done\r\nBuilding dependency tree \r\nReading state information... Done\r\nlibvulkan1 is already the newest version (1.1.82.0-0ubuntu1).\r\nlibvulkan1:i386 is already the newest version (1.1.82.0-0ubuntu1).\r\n0 to upgrade, 0 to newly install, 0 to remove and 5 not to upgrade.\r\n\r\nNeed more details? Let me know.\n", "before_files": [{"content": "\"\"\"Vulkan helper module\"\"\"\nimport os\nimport re\nfrom enum import Enum\n\nclass vulkan_available(Enum):\n NONE = 0\n THIRTY_TWO = 1\n SIXTY_FOUR = 2\n ALL = 3\n\ndef search_for_file(directory):\n if os.path.isdir(directory):\n pattern = re.compile(r'^libvulkan\\.so')\n files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]\n files = [os.path.join(directory, f) for f in files if pattern.search(f)]\n if files:\n return True\n return False\n\ndef vulkan_check():\n vulkan_lib = search_for_file(\"/usr/lib\")\n vulkan_lib32 = search_for_file(\"/usr/lib32\")\n vulkan_lib_multi = search_for_file(\"/usr/lib/x86_64-linux-gnu\")\n vulkan_lib32_multi = search_for_file(\"/usr/lib32/i386-linux-gnu\")\n has_32_bit = vulkan_lib32 or vulkan_lib32_multi\n has_64_bit = vulkan_lib or vulkan_lib_multi\n\n if not (has_64_bit or has_32_bit):\n return vulkan_available.NONE\n if has_64_bit and not has_32_bit:\n return vulkan_available.SIXTY_FOUR\n if not has_64_bit and has_32_bit:\n return vulkan_available.THIRTY_TWO\n return vulkan_available.ALL\n", "path": "lutris/util/vulkan.py"}]} | 1,117 | 432 |
gh_patches_debug_2132 | rasdani/github-patches | git_diff | marshmallow-code__webargs-414 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Schema factory only variable fail - can't pass list type
Looking at the [schema factory docs](https://webargs.readthedocs.io/en/latest/advanced.html#schema-factories), I'm interested in trying the
```
# Filter based on 'fields' query parameter
only = request.args.get("fields", None)
```
part.
However, when I try appending something like `?fields=some_field` to my HTTP request, I get the following error:
```
File "edited/marshmallow/schema.py", line 349, in __init__
raise StringNotCollectionError('"only" should be a list of strings')
```
As far as I can see, webargs always passes the query string parameters as strings. I tried wrapping it in square brackets, but I think I'm barking up the wrong tree. Have I misunderstood something, or is this a bug?
</issue>
<code>
[start of examples/schema_example.py]
1 """Example implementation of using a marshmallow Schema for both request input
2 and output with a `use_schema` decorator.
3 Run the app:
4
5 $ python examples/schema_example.py
6
7 Try the following with httpie (a cURL-like utility, http://httpie.org):
8
9 $ pip install httpie
10 $ http GET :5001/users/
11 $ http GET :5001/users/42
12     $ http POST :5001/users/ username=brian first_name=Brian last_name=May
13 $ http PATCH :5001/users/42 username=freddie
14 $ http GET :5001/users/ limit==1
15 """
16 import functools
17 from flask import Flask, request, jsonify
18 import random
19
20 from marshmallow import Schema, fields, post_dump
21 from webargs.flaskparser import parser, use_kwargs
22
23 app = Flask(__name__)
24
25 ##### Fake database and models #####
26
27
28 class Model:
29 def __init__(self, **kwargs):
30 self.__dict__.update(kwargs)
31
32 def update(self, **kwargs):
33 self.__dict__.update(kwargs)
34
35 @classmethod
36 def insert(cls, db, **kwargs):
37 collection = db[cls.collection]
38 new_id = None
39 if "id" in kwargs: # for setting up fixtures
40 new_id = kwargs.pop("id")
41 else: # find a new id
42 found_id = False
43 while not found_id:
44 new_id = random.randint(1, 9999)
45 if new_id not in collection:
46 found_id = True
47 new_record = cls(id=new_id, **kwargs)
48 collection[new_id] = new_record
49 return new_record
50
51
52 class User(Model):
53 collection = "users"
54
55
56 db = {"users": {}}
57
58
59 ##### use_schema #####
60
61
62 def use_schema(schema, list_view=False, locations=None):
63 """View decorator for using a marshmallow schema to
64 (1) parse a request's input and
65 (2) serializing the view's output to a JSON response.
66 """
67
68 def decorator(func):
69 @functools.wraps(func)
70 def wrapped(*args, **kwargs):
71 use_args_wrapper = parser.use_args(schema, locations=locations)
72 # Function wrapped with use_args
73 func_with_args = use_args_wrapper(func)
74 ret = func_with_args(*args, **kwargs)
75 # Serialize and jsonify the return value
76 return jsonify(schema.dump(ret, many=list_view).data)
77
78 return wrapped
79
80 return decorator
81
82
83 ##### Schemas #####
84
85
86 class UserSchema(Schema):
87 id = fields.Int(dump_only=True)
88 username = fields.Str()
89 first_name = fields.Str()
90 last_name = fields.Str()
91
92 class Meta:
93 strict = True
94
95 @post_dump(pass_many=True)
96 def wrap_with_envelope(self, data, many, **kwargs):
97 return {"data": data}
98
99
100 ##### Routes #####
101
102
103 @app.route("/users/<int:user_id>", methods=["GET", "PATCH"])
104 @use_schema(UserSchema())
105 def user_detail(reqargs, user_id):
106 user = db["users"].get(user_id)
107 if not user:
108 return jsonify({"message": "User not found"}), 404
109 if request.method == "PATCH" and reqargs:
110 user.update(**reqargs)
111 return user
112
113
114 # You can add additional arguments with use_kwargs
115 @app.route("/users/", methods=["GET", "POST"])
116 @use_kwargs({"limit": fields.Int(missing=10, location="query")})
117 @use_schema(UserSchema(), list_view=True)
118 def user_list(reqargs, limit):
119 users = db["users"].values()
120 if request.method == "POST":
121 User.insert(db=db, **reqargs)
122 return list(users)[:limit]
123
124
125 # Return validation errors as JSON
126 @app.errorhandler(422)
127 @app.errorhandler(400)
128 def handle_validation_error(err):
129 exc = getattr(err, "exc", None)
130 if exc:
131 headers = err.data["headers"]
132 messages = exc.messages
133 else:
134 headers = None
135 messages = ["Invalid request."]
136 if headers:
137 return jsonify({"errors": messages}), err.code, headers
138 else:
139 return jsonify({"errors": messages}), err.code
140
141
142 if __name__ == "__main__":
143 User.insert(
144 db=db, id=42, username="fred", first_name="Freddie", last_name="Mercury"
145 )
146 app.run(port=5001, debug=True)
147
[end of examples/schema_example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/schema_example.py b/examples/schema_example.py
--- a/examples/schema_example.py
+++ b/examples/schema_example.py
@@ -89,9 +89,6 @@
first_name = fields.Str()
last_name = fields.Str()
- class Meta:
- strict = True
-
@post_dump(pass_many=True)
def wrap_with_envelope(self, data, many, **kwargs):
return {"data": data}
| {"golden_diff": "diff --git a/examples/schema_example.py b/examples/schema_example.py\n--- a/examples/schema_example.py\n+++ b/examples/schema_example.py\n@@ -89,9 +89,6 @@\n first_name = fields.Str()\n last_name = fields.Str()\n \n- class Meta:\n- strict = True\n-\n @post_dump(pass_many=True)\n def wrap_with_envelope(self, data, many, **kwargs):\n return {\"data\": data}\n", "issue": "Schema factory only variable fail - can't pass list type\nLooking at the [schema factory docs](https://webargs.readthedocs.io/en/latest/advanced.html#schema-factories), I'm interested in trying the\r\n```\r\n# Filter based on 'fields' query parameter\r\nonly = request.args.get(\"fields\", None)\r\n```\r\npart.\r\n\r\nHowever, when I try appending something like `?fields=some_field` to my HTTP request, I get the following error:\r\n```\r\nFile \"edited/marshmallow/schema.py\", line 349, in __init__\r\n raise StringNotCollectionError('\"only\" should be a list of strings')\r\n```\r\n\r\nAs far as I can see, webargs always passes the query string parameters as strings. I tried wrapping it in square brackets, but I think I'm barking up the wrong tree. Have I misunderstood something, or is this a bug?\n", "before_files": [{"content": "\"\"\"Example implementation of using a marshmallow Schema for both request input\nand output with a `use_schema` decorator.\nRun the app:\n\n $ python examples/schema_example.py\n\nTry the following with httpie (a cURL-like utility, http://httpie.org):\n\n $ pip install httpie\n $ http GET :5001/users/\n $ http GET :5001/users/42\n $ http POST :5001/users/ usename=brian first_name=Brian last_name=May\n $ http PATCH :5001/users/42 username=freddie\n $ http GET :5001/users/ limit==1\n\"\"\"\nimport functools\nfrom flask import Flask, request, jsonify\nimport random\n\nfrom marshmallow import Schema, fields, post_dump\nfrom webargs.flaskparser import parser, use_kwargs\n\napp = Flask(__name__)\n\n##### Fake database and models #####\n\n\nclass Model:\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n\n def update(self, **kwargs):\n self.__dict__.update(kwargs)\n\n @classmethod\n def insert(cls, db, **kwargs):\n collection = db[cls.collection]\n new_id = None\n if \"id\" in kwargs: # for setting up fixtures\n new_id = kwargs.pop(\"id\")\n else: # find a new id\n found_id = False\n while not found_id:\n new_id = random.randint(1, 9999)\n if new_id not in collection:\n found_id = True\n new_record = cls(id=new_id, **kwargs)\n collection[new_id] = new_record\n return new_record\n\n\nclass User(Model):\n collection = \"users\"\n\n\ndb = {\"users\": {}}\n\n\n##### use_schema #####\n\n\ndef use_schema(schema, list_view=False, locations=None):\n \"\"\"View decorator for using a marshmallow schema to\n (1) parse a request's input and\n (2) serializing the view's output to a JSON response.\n \"\"\"\n\n def decorator(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n use_args_wrapper = parser.use_args(schema, locations=locations)\n # Function wrapped with use_args\n func_with_args = use_args_wrapper(func)\n ret = func_with_args(*args, **kwargs)\n # Serialize and jsonify the return value\n return jsonify(schema.dump(ret, many=list_view).data)\n\n return wrapped\n\n return decorator\n\n\n##### Schemas #####\n\n\nclass UserSchema(Schema):\n id = fields.Int(dump_only=True)\n username = fields.Str()\n first_name = fields.Str()\n last_name = fields.Str()\n\n class Meta:\n strict = True\n\n @post_dump(pass_many=True)\n def wrap_with_envelope(self, data, many, **kwargs):\n 
return {\"data\": data}\n\n\n##### Routes #####\n\n\[email protected](\"/users/<int:user_id>\", methods=[\"GET\", \"PATCH\"])\n@use_schema(UserSchema())\ndef user_detail(reqargs, user_id):\n user = db[\"users\"].get(user_id)\n if not user:\n return jsonify({\"message\": \"User not found\"}), 404\n if request.method == \"PATCH\" and reqargs:\n user.update(**reqargs)\n return user\n\n\n# You can add additional arguments with use_kwargs\[email protected](\"/users/\", methods=[\"GET\", \"POST\"])\n@use_kwargs({\"limit\": fields.Int(missing=10, location=\"query\")})\n@use_schema(UserSchema(), list_view=True)\ndef user_list(reqargs, limit):\n users = db[\"users\"].values()\n if request.method == \"POST\":\n User.insert(db=db, **reqargs)\n return list(users)[:limit]\n\n\n# Return validation errors as JSON\[email protected](422)\[email protected](400)\ndef handle_validation_error(err):\n exc = getattr(err, \"exc\", None)\n if exc:\n headers = err.data[\"headers\"]\n messages = exc.messages\n else:\n headers = None\n messages = [\"Invalid request.\"]\n if headers:\n return jsonify({\"errors\": messages}), err.code, headers\n else:\n return jsonify({\"errors\": messages}), err.code\n\n\nif __name__ == \"__main__\":\n User.insert(\n db=db, id=42, username=\"fred\", first_name=\"Freddie\", last_name=\"Mercury\"\n )\n app.run(port=5001, debug=True)\n", "path": "examples/schema_example.py"}]} | 2,047 | 98 |
gh_patches_debug_793 | rasdani/github-patches | git_diff | scrapy__scrapy-742 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sort spider names on 'scrapy list' command
Hey there,
i think the spider names on the `scrapy list` command should be order by name in the console output.
</issue>
<code>
[start of scrapy/commands/list.py]
1 from __future__ import print_function
2 from scrapy.command import ScrapyCommand
3
4 class Command(ScrapyCommand):
5
6 requires_project = True
7 default_settings = {'LOG_ENABLED': False}
8
9 def short_desc(self):
10 return "List available spiders"
11
12 def run(self, args, opts):
13 crawler = self.crawler_process.create_crawler()
14 for s in crawler.spiders.list():
15 print(s)
16
[end of scrapy/commands/list.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/commands/list.py b/scrapy/commands/list.py
--- a/scrapy/commands/list.py
+++ b/scrapy/commands/list.py
@@ -11,5 +11,5 @@
def run(self, args, opts):
crawler = self.crawler_process.create_crawler()
- for s in crawler.spiders.list():
+ for s in sorted(crawler.spiders.list()):
print(s)
| {"golden_diff": "diff --git a/scrapy/commands/list.py b/scrapy/commands/list.py\n--- a/scrapy/commands/list.py\n+++ b/scrapy/commands/list.py\n@@ -11,5 +11,5 @@\n \n def run(self, args, opts):\n crawler = self.crawler_process.create_crawler()\n- for s in crawler.spiders.list():\n+ for s in sorted(crawler.spiders.list()):\n print(s)\n", "issue": "Sort spider names on 'scrapy list' command\nHey there, \n\ni think the spider names on the `scrapy list` command should be order by name in the console output. \n\n", "before_files": [{"content": "from __future__ import print_function\nfrom scrapy.command import ScrapyCommand\n\nclass Command(ScrapyCommand):\n\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def short_desc(self):\n return \"List available spiders\"\n\n def run(self, args, opts):\n crawler = self.crawler_process.create_crawler()\n for s in crawler.spiders.list():\n print(s)\n", "path": "scrapy/commands/list.py"}]} | 687 | 96 |
gh_patches_debug_12216 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-563 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Making messages for translation has system dependent output
When calling `make makemessages` the output of the generated "djangojs.po" is dependent on the system configuration.
For example in my case it inserts messages from:
"adhocracy4/node_modules/ajv/dist/regenerator.min.js" and
"adhocracy4/node_modules/js-yaml/dist/js-yaml.min.js"
</issue>
<code>
[start of apps/contrib/management/commands/makemessages.py]
1 from os import path
2
3 from django.conf import settings
4 from django.core.management.commands import makemessages
5
6
7 def get_module_dir(name):
8 module = __import__(name)
9 return path.dirname(module.__file__)
10
11
12 class Command(makemessages.Command):
13 msgmerge_options = (
14 makemessages.Command.msgmerge_options + ['--no-fuzzy-matching']
15 )
16
17 def handle(self, *args, **options):
18 if options['domain'] == 'djangojs':
19 if options['extensions'] is None:
20 options['extensions'] = ['js', 'jsx']
21 return super().handle(*args, **options)
22
23 def find_files(self, root):
24 a4js_paths = super().find_files(
25 path.join(settings.BASE_DIR, 'node_modules', 'adhocracy4')
26 )
27 a4_paths = super().find_files(get_module_dir('adhocracy4'))
28 apps_paths = super().find_files(path.relpath(get_module_dir('apps')))
29 meinberlin_paths = super().find_files(
30 path.relpath(get_module_dir('meinberlin'))
31 )
32
33 return a4js_paths + a4_paths + apps_paths + meinberlin_paths
34
[end of apps/contrib/management/commands/makemessages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/contrib/management/commands/makemessages.py b/apps/contrib/management/commands/makemessages.py
--- a/apps/contrib/management/commands/makemessages.py
+++ b/apps/contrib/management/commands/makemessages.py
@@ -21,9 +21,9 @@
return super().handle(*args, **options)
def find_files(self, root):
- a4js_paths = super().find_files(
- path.join(settings.BASE_DIR, 'node_modules', 'adhocracy4')
- )
+ a4js_paths = super().find_files(path.join(
+ settings.BASE_DIR, 'node_modules', 'adhocracy4', 'adhocracy4'
+ ))
a4_paths = super().find_files(get_module_dir('adhocracy4'))
apps_paths = super().find_files(path.relpath(get_module_dir('apps')))
meinberlin_paths = super().find_files(
| {"golden_diff": "diff --git a/apps/contrib/management/commands/makemessages.py b/apps/contrib/management/commands/makemessages.py\n--- a/apps/contrib/management/commands/makemessages.py\n+++ b/apps/contrib/management/commands/makemessages.py\n@@ -21,9 +21,9 @@\n return super().handle(*args, **options)\n \n def find_files(self, root):\n- a4js_paths = super().find_files(\n- path.join(settings.BASE_DIR, 'node_modules', 'adhocracy4')\n- )\n+ a4js_paths = super().find_files(path.join(\n+ settings.BASE_DIR, 'node_modules', 'adhocracy4', 'adhocracy4'\n+ ))\n a4_paths = super().find_files(get_module_dir('adhocracy4'))\n apps_paths = super().find_files(path.relpath(get_module_dir('apps')))\n meinberlin_paths = super().find_files(\n", "issue": "Making messages for translation has system dependent output\nWhen calling `make makemessages` the output of the generated \"djangojs.po\" is dependent on the system configuration. \r\nFor example in my case it inserts messages from:\r\n\"adhocracy4/node_modules/ajv/dist/regenerator.min.js\" and\r\n\"adhocracy4/node_modules/js-yaml/dist/js-yaml.min.js\"\n", "before_files": [{"content": "from os import path\n\nfrom django.conf import settings\nfrom django.core.management.commands import makemessages\n\n\ndef get_module_dir(name):\n module = __import__(name)\n return path.dirname(module.__file__)\n\n\nclass Command(makemessages.Command):\n msgmerge_options = (\n makemessages.Command.msgmerge_options + ['--no-fuzzy-matching']\n )\n\n def handle(self, *args, **options):\n if options['domain'] == 'djangojs':\n if options['extensions'] is None:\n options['extensions'] = ['js', 'jsx']\n return super().handle(*args, **options)\n\n def find_files(self, root):\n a4js_paths = super().find_files(\n path.join(settings.BASE_DIR, 'node_modules', 'adhocracy4')\n )\n a4_paths = super().find_files(get_module_dir('adhocracy4'))\n apps_paths = super().find_files(path.relpath(get_module_dir('apps')))\n meinberlin_paths = super().find_files(\n path.relpath(get_module_dir('meinberlin'))\n )\n\n return a4js_paths + a4_paths + apps_paths + meinberlin_paths\n", "path": "apps/contrib/management/commands/makemessages.py"}]} | 942 | 207 |
gh_patches_debug_11453 | rasdani/github-patches | git_diff | saleor__saleor-3535 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Attribute filters are not available in subcategories
### What I'm trying to achieve
I'm trying to filter products in subcategories by attributes of this products
### Steps to reproduce the problem
1. Create category and then create 2 or more subcategory
2. Add product to last subcategory
3. Select category, than subcategory and try to filter products by attributes
### What I expected to happen
Attribute filters are not available in subcategories! Filters by attributes available only in last subcategory. For example, i have category "Phones" with subcategories by companies names -> phone model. If i select last subcategory "Iphone 8", i got all filters, but if i select subcategory "Apple", i got filter only by price range.
So, how to enable these filters? Can i modify some template or python code by myself, or you guys will do it some later?
### Screenshots


</issue>
<code>
[start of saleor/product/filters.py]
1 from collections import OrderedDict
2
3 from django.db.models import Q
4 from django.forms import CheckboxSelectMultiple
5 from django.utils.translation import pgettext_lazy
6 from django_filters import MultipleChoiceFilter, OrderingFilter, RangeFilter
7
8 from ..core.filters import SortedFilterSet
9 from .models import Attribute, Product
10
11 SORT_BY_FIELDS = OrderedDict([
12 ('name', pgettext_lazy('Product list sorting option', 'name')),
13 ('price', pgettext_lazy('Product list sorting option', 'price')),
14 ('updated_at', pgettext_lazy(
15 'Product list sorting option', 'last updated'))])
16
17
18 class ProductFilter(SortedFilterSet):
19 sort_by = OrderingFilter(
20 label=pgettext_lazy('Product list sorting form', 'Sort by'),
21 fields=SORT_BY_FIELDS.keys(),
22 field_labels=SORT_BY_FIELDS)
23 price = RangeFilter(
24 label=pgettext_lazy('Currency amount', 'Price'))
25
26 class Meta:
27 model = Product
28 fields = []
29
30 def __init__(self, *args, **kwargs):
31 super().__init__(*args, **kwargs)
32 self.product_attributes, self.variant_attributes = (
33 self._get_attributes())
34 self.filters.update(self._get_product_attributes_filters())
35 self.filters.update(self._get_product_variants_attributes_filters())
36 self.filters = OrderedDict(sorted(self.filters.items()))
37
38 def _get_attributes(self):
39 q_product_attributes = self._get_product_attributes_lookup()
40 q_variant_attributes = self._get_variant_attributes_lookup()
41 product_attributes = (
42 Attribute.objects.all()
43 .prefetch_related('translations', 'values__translations')
44 .filter(q_product_attributes)
45 .distinct())
46 variant_attributes = (
47 Attribute.objects.all()
48 .prefetch_related('translations', 'values__translations')
49 .filter(q_variant_attributes)
50 .distinct())
51 return product_attributes, variant_attributes
52
53 def _get_product_attributes_lookup(self):
54 raise NotImplementedError()
55
56 def _get_variant_attributes_lookup(self):
57 raise NotImplementedError()
58
59 def _get_product_attributes_filters(self):
60 filters = {}
61 for attribute in self.product_attributes:
62 filters[attribute.slug] = MultipleChoiceFilter(
63 field_name='attributes__%s' % attribute.pk,
64 label=attribute.translated.name,
65 widget=CheckboxSelectMultiple,
66 choices=self._get_attribute_choices(attribute))
67 return filters
68
69 def _get_product_variants_attributes_filters(self):
70 filters = {}
71 for attribute in self.variant_attributes:
72 filters[attribute.slug] = MultipleChoiceFilter(
73 field_name='variants__attributes__%s' % attribute.pk,
74 label=attribute.translated.name,
75 widget=CheckboxSelectMultiple,
76 choices=self._get_attribute_choices(attribute))
77 return filters
78
79 def _get_attribute_choices(self, attribute):
80 return [
81 (choice.pk, choice.translated.name)
82 for choice in attribute.values.all()]
83
84
85 class ProductCategoryFilter(ProductFilter):
86 def __init__(self, *args, **kwargs):
87 self.category = kwargs.pop('category')
88 super().__init__(*args, **kwargs)
89
90 def _get_product_attributes_lookup(self):
91 return Q(product_type__products__category=self.category)
92
93 def _get_variant_attributes_lookup(self):
94 return Q(product_variant_type__products__category=self.category)
95
96
97 class ProductCollectionFilter(ProductFilter):
98 def __init__(self, *args, **kwargs):
99 self.collection = kwargs.pop('collection')
100 super().__init__(*args, **kwargs)
101
102 def _get_product_attributes_lookup(self):
103 return Q(product_type__products__collections=self.collection)
104
105 def _get_variant_attributes_lookup(self):
106 return Q(product_variant_type__products__collections=self.collection)
107
[end of saleor/product/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/product/filters.py b/saleor/product/filters.py
--- a/saleor/product/filters.py
+++ b/saleor/product/filters.py
@@ -88,10 +88,12 @@
super().__init__(*args, **kwargs)
def _get_product_attributes_lookup(self):
- return Q(product_type__products__category=self.category)
+ categories = self.category.get_descendants(include_self=True)
+ return Q(product_type__products__category__in=categories)
def _get_variant_attributes_lookup(self):
- return Q(product_variant_type__products__category=self.category)
+ categories = self.category.get_descendants(include_self=True)
+ return Q(product_variant_type__products__category__in=categories)
class ProductCollectionFilter(ProductFilter):
| {"golden_diff": "diff --git a/saleor/product/filters.py b/saleor/product/filters.py\n--- a/saleor/product/filters.py\n+++ b/saleor/product/filters.py\n@@ -88,10 +88,12 @@\n super().__init__(*args, **kwargs)\n \n def _get_product_attributes_lookup(self):\n- return Q(product_type__products__category=self.category)\n+ categories = self.category.get_descendants(include_self=True)\n+ return Q(product_type__products__category__in=categories)\n \n def _get_variant_attributes_lookup(self):\n- return Q(product_variant_type__products__category=self.category)\n+ categories = self.category.get_descendants(include_self=True)\n+ return Q(product_variant_type__products__category__in=categories)\n \n \n class ProductCollectionFilter(ProductFilter):\n", "issue": "Attribute filters are not available in subcategories\n### What I'm trying to achieve\r\n\r\nI'm trying to filter products in subcategories by attributes of this products\r\n\r\n### Steps to reproduce the problem\r\n1. Create category and then create 2 or more subcategory\r\n2. Add product to last subcategory\r\n3. Select category, than subcategory and try to filter products by attributes\r\n\r\n### What I expected to happen\r\n\r\nAttribute filters are not available in subcategories! Filters by attributes available only in last subcategory. For example, i have category \"Phones\" with subcategories by companies names -> phone model. If i select last subcategory \"Iphone 8\", i got all filters, but if i select subcategory \"Apple\", i got filter only by price range.\r\n\r\nSo, how to enable these filters? Can i modify some template or python code by myself, or you guys will do it some later?\r\n\r\n### Screenshots\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django.db.models import Q\nfrom django.forms import CheckboxSelectMultiple\nfrom django.utils.translation import pgettext_lazy\nfrom django_filters import MultipleChoiceFilter, OrderingFilter, RangeFilter\n\nfrom ..core.filters import SortedFilterSet\nfrom .models import Attribute, Product\n\nSORT_BY_FIELDS = OrderedDict([\n ('name', pgettext_lazy('Product list sorting option', 'name')),\n ('price', pgettext_lazy('Product list sorting option', 'price')),\n ('updated_at', pgettext_lazy(\n 'Product list sorting option', 'last updated'))])\n\n\nclass ProductFilter(SortedFilterSet):\n sort_by = OrderingFilter(\n label=pgettext_lazy('Product list sorting form', 'Sort by'),\n fields=SORT_BY_FIELDS.keys(),\n field_labels=SORT_BY_FIELDS)\n price = RangeFilter(\n label=pgettext_lazy('Currency amount', 'Price'))\n\n class Meta:\n model = Product\n fields = []\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.product_attributes, self.variant_attributes = (\n self._get_attributes())\n self.filters.update(self._get_product_attributes_filters())\n self.filters.update(self._get_product_variants_attributes_filters())\n self.filters = OrderedDict(sorted(self.filters.items()))\n\n def _get_attributes(self):\n q_product_attributes = self._get_product_attributes_lookup()\n q_variant_attributes = self._get_variant_attributes_lookup()\n product_attributes = (\n Attribute.objects.all()\n .prefetch_related('translations', 'values__translations')\n .filter(q_product_attributes)\n .distinct())\n variant_attributes = (\n Attribute.objects.all()\n .prefetch_related('translations', 'values__translations')\n .filter(q_variant_attributes)\n .distinct())\n return product_attributes, variant_attributes\n\n def 
_get_product_attributes_lookup(self):\n raise NotImplementedError()\n\n def _get_variant_attributes_lookup(self):\n raise NotImplementedError()\n\n def _get_product_attributes_filters(self):\n filters = {}\n for attribute in self.product_attributes:\n filters[attribute.slug] = MultipleChoiceFilter(\n field_name='attributes__%s' % attribute.pk,\n label=attribute.translated.name,\n widget=CheckboxSelectMultiple,\n choices=self._get_attribute_choices(attribute))\n return filters\n\n def _get_product_variants_attributes_filters(self):\n filters = {}\n for attribute in self.variant_attributes:\n filters[attribute.slug] = MultipleChoiceFilter(\n field_name='variants__attributes__%s' % attribute.pk,\n label=attribute.translated.name,\n widget=CheckboxSelectMultiple,\n choices=self._get_attribute_choices(attribute))\n return filters\n\n def _get_attribute_choices(self, attribute):\n return [\n (choice.pk, choice.translated.name)\n for choice in attribute.values.all()]\n\n\nclass ProductCategoryFilter(ProductFilter):\n def __init__(self, *args, **kwargs):\n self.category = kwargs.pop('category')\n super().__init__(*args, **kwargs)\n\n def _get_product_attributes_lookup(self):\n return Q(product_type__products__category=self.category)\n\n def _get_variant_attributes_lookup(self):\n return Q(product_variant_type__products__category=self.category)\n\n\nclass ProductCollectionFilter(ProductFilter):\n def __init__(self, *args, **kwargs):\n self.collection = kwargs.pop('collection')\n super().__init__(*args, **kwargs)\n\n def _get_product_attributes_lookup(self):\n return Q(product_type__products__collections=self.collection)\n\n def _get_variant_attributes_lookup(self):\n return Q(product_variant_type__products__collections=self.collection)\n", "path": "saleor/product/filters.py"}]} | 1,845 | 177 |
gh_patches_debug_35909 | rasdani/github-patches | git_diff | conan-io__conan-center-index-2521 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] pcre/8.44
### Package Details
* Package Name/Version: **pcre/8.44**
* Changelog: **http://www.pcre.org/original/changelog.txt**
The above mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version.
</issue>
<code>
[start of recipes/pcre/all/conanfile.py]
1 from conans import ConanFile, CMake, tools
2 import os
3
4
5 class PCREConan(ConanFile):
6 name = "pcre"
7 url = "https://github.com/conan-io/conan-center-index"
8 homepage = "https://www.pcre.org"
9 description = "Perl Compatible Regular Expressions"
10 topics = ("regex", "regexp", "PCRE")
11 license = "BSD-3-Clause"
12 exports_sources = ["CMakeLists.txt"]
13 generators = "cmake"
14 settings = "os", "arch", "compiler", "build_type"
15 options = {
16 "shared": [True, False],
17 "fPIC": [True, False],
18 "with_bzip2": [True, False],
19 "with_zlib": [True, False],
20 "with_jit": [True, False],
21 "build_pcrecpp": [True, False],
22 "build_pcregrep": [True, False],
23 "with_utf": [True, False],
24 "with_unicode_properties": [True, False]
25 }
26 default_options = {'shared': False, 'fPIC': True, 'with_bzip2': True, 'with_zlib': True, 'with_jit': False, 'build_pcrecpp': False, 'build_pcregrep': False, 'with_utf': False, 'with_unicode_properties': False}
27 _source_subfolder = "source_subfolder"
28 _build_subfolder = "build_subfolder"
29
30 def config_options(self):
31 if self.settings.os == "Windows":
32 del self.options.fPIC
33
34 def configure(self):
35 if not self.options.build_pcrecpp:
36 del self.settings.compiler.libcxx
37 del self.settings.compiler.cppstd
38 if self.options.with_unicode_properties:
39 self.options.with_utf = True
40
41 def patch_cmake(self):
42 """Patch CMake file to avoid man and share during install stage
43 """
44 cmake_file = os.path.join(self._source_subfolder, "CMakeLists.txt")
45 tools.replace_in_file(cmake_file, "INSTALL(FILES ${man1} DESTINATION man/man1)", "")
46 tools.replace_in_file(cmake_file, "INSTALL(FILES ${man3} DESTINATION man/man3)", "")
47 tools.replace_in_file(cmake_file, "INSTALL(FILES ${html} DESTINATION share/doc/pcre/html)", "")
48
49 def source(self):
50 tools.get(**self.conan_data["sources"][self.version])
51 extracted_dir = self.name + "-" + self.version
52 os.rename(extracted_dir, self._source_subfolder)
53 self.patch_cmake()
54
55 def requirements(self):
56 if self.options.with_bzip2:
57 self.requires("bzip2/1.0.8")
58 if self.options.with_zlib:
59 self.requires("zlib/1.2.11")
60
61 def _configure_cmake(self):
62 cmake = CMake(self)
63 cmake.definitions["PCRE_BUILD_TESTS"] = False
64 cmake.definitions["PCRE_BUILD_PCREGREP"] = self.options.build_pcregrep
65 cmake.definitions["PCRE_BUILD_PCRECPP"] = self.options.build_pcrecpp
66 cmake.definitions["PCRE_SUPPORT_LIBZ"] = self.options.with_zlib
67 cmake.definitions["PCRE_SUPPORT_LIBBZ2"] = self.options.with_bzip2
68 cmake.definitions["PCRE_SUPPORT_JIT"] = self.options.with_jit
69 cmake.definitions["PCRE_SUPPORT_UTF"] = self.options.with_utf
70 cmake.definitions["PCRE_SUPPORT_UNICODE_PROPERTIES"] = self.options.with_unicode_properties
71 cmake.definitions["PCRE_SUPPORT_LIBREADLINE"] = False
72 cmake.definitions["PCRE_SUPPORT_LIBEDIT"] = False
73 if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio":
74 cmake.definitions["PCRE_STATIC_RUNTIME"] = not self.options.shared and "MT" in self.settings.compiler.runtime
75 cmake.configure(build_folder=self._build_subfolder)
76 return cmake
77
78 def build(self):
79 cmake = self._configure_cmake()
80 cmake.build()
81
82 def package(self):
83 self.copy(pattern="LICENCE", dst="licenses", src=self._source_subfolder)
84 cmake = self._configure_cmake()
85 cmake.install()
86
87 def package_info(self):
88 if self.settings.os == "Windows" and self.settings.build_type == 'Debug':
89 self.cpp_info.libs = ['pcreposixd', 'pcred']
90 else:
91 self.cpp_info.libs = ['pcreposix', 'pcre']
92 if not self.options.shared:
93 self.cpp_info.defines.append("PCRE_STATIC=1")
94 self.cpp_info.names['pkg_config'] = 'libpcre'
95
96 self.cpp_info.names["cmake_find_package"] = "PCRE"
97 self.cpp_info.names["cmake_find_package_multi"] = "PCRE"
98
[end of recipes/pcre/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/pcre/all/conanfile.py b/recipes/pcre/all/conanfile.py
--- a/recipes/pcre/all/conanfile.py
+++ b/recipes/pcre/all/conanfile.py
@@ -23,9 +23,25 @@
"with_utf": [True, False],
"with_unicode_properties": [True, False]
}
- default_options = {'shared': False, 'fPIC': True, 'with_bzip2': True, 'with_zlib': True, 'with_jit': False, 'build_pcrecpp': False, 'build_pcregrep': False, 'with_utf': False, 'with_unicode_properties': False}
- _source_subfolder = "source_subfolder"
- _build_subfolder = "build_subfolder"
+ default_options = {
+ 'shared': False,
+ 'fPIC': True,
+ 'with_bzip2': True,
+ 'with_zlib': True,
+ 'with_jit': False,
+ 'build_pcrecpp': False,
+ 'build_pcregrep': False,
+ 'with_utf': False,
+ 'with_unicode_properties': False
+ }
+
+ @property
+ def _source_subfolder(self):
+ return "source_subfolder"
+
+ @property
+ def _build_subfolder(self):
+ return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
@@ -42,15 +58,17 @@
"""Patch CMake file to avoid man and share during install stage
"""
cmake_file = os.path.join(self._source_subfolder, "CMakeLists.txt")
- tools.replace_in_file(cmake_file, "INSTALL(FILES ${man1} DESTINATION man/man1)", "")
- tools.replace_in_file(cmake_file, "INSTALL(FILES ${man3} DESTINATION man/man3)", "")
- tools.replace_in_file(cmake_file, "INSTALL(FILES ${html} DESTINATION share/doc/pcre/html)", "")
+ tools.replace_in_file(
+ cmake_file, "INSTALL(FILES ${man1} DESTINATION man/man1)", "")
+ tools.replace_in_file(
+ cmake_file, "INSTALL(FILES ${man3} DESTINATION man/man3)", "")
+ tools.replace_in_file(
+ cmake_file, "INSTALL(FILES ${html} DESTINATION share/doc/pcre/html)", "")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
- self.patch_cmake()
def requirements(self):
if self.options.with_bzip2:
@@ -76,6 +94,7 @@
return cmake
def build(self):
+ self.patch_cmake()
cmake = self._configure_cmake()
cmake.build()
| {"golden_diff": "diff --git a/recipes/pcre/all/conanfile.py b/recipes/pcre/all/conanfile.py\n--- a/recipes/pcre/all/conanfile.py\n+++ b/recipes/pcre/all/conanfile.py\n@@ -23,9 +23,25 @@\n \"with_utf\": [True, False],\n \"with_unicode_properties\": [True, False]\n }\n- default_options = {'shared': False, 'fPIC': True, 'with_bzip2': True, 'with_zlib': True, 'with_jit': False, 'build_pcrecpp': False, 'build_pcregrep': False, 'with_utf': False, 'with_unicode_properties': False}\n- _source_subfolder = \"source_subfolder\"\n- _build_subfolder = \"build_subfolder\"\n+ default_options = {\n+ 'shared': False,\n+ 'fPIC': True,\n+ 'with_bzip2': True,\n+ 'with_zlib': True,\n+ 'with_jit': False,\n+ 'build_pcrecpp': False,\n+ 'build_pcregrep': False,\n+ 'with_utf': False,\n+ 'with_unicode_properties': False\n+ }\n+\n+ @property\n+ def _source_subfolder(self):\n+ return \"source_subfolder\"\n+\n+ @property\n+ def _build_subfolder(self):\n+ return \"build_subfolder\"\n \n def config_options(self):\n if self.settings.os == \"Windows\":\n@@ -42,15 +58,17 @@\n \"\"\"Patch CMake file to avoid man and share during install stage\n \"\"\"\n cmake_file = os.path.join(self._source_subfolder, \"CMakeLists.txt\")\n- tools.replace_in_file(cmake_file, \"INSTALL(FILES ${man1} DESTINATION man/man1)\", \"\")\n- tools.replace_in_file(cmake_file, \"INSTALL(FILES ${man3} DESTINATION man/man3)\", \"\")\n- tools.replace_in_file(cmake_file, \"INSTALL(FILES ${html} DESTINATION share/doc/pcre/html)\", \"\")\n+ tools.replace_in_file(\n+ cmake_file, \"INSTALL(FILES ${man1} DESTINATION man/man1)\", \"\")\n+ tools.replace_in_file(\n+ cmake_file, \"INSTALL(FILES ${man3} DESTINATION man/man3)\", \"\")\n+ tools.replace_in_file(\n+ cmake_file, \"INSTALL(FILES ${html} DESTINATION share/doc/pcre/html)\", \"\")\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n- self.patch_cmake()\n \n def requirements(self):\n if self.options.with_bzip2:\n@@ -76,6 +94,7 @@\n return cmake\n \n def build(self):\n+ self.patch_cmake()\n cmake = self._configure_cmake()\n cmake.build()\n", "issue": "[request] pcre/8.44\n### Package Details\r\n * Package Name/Version: **pcre/8.44**\r\n * Changelog: **http://www.pcre.org/original/changelog.txt**\r\n\r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. 
Please add this version.\r\n\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nimport os\n\n\nclass PCREConan(ConanFile):\n name = \"pcre\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.pcre.org\"\n description = \"Perl Compatible Regular Expressions\"\n topics = (\"regex\", \"regexp\", \"PCRE\")\n license = \"BSD-3-Clause\"\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_bzip2\": [True, False],\n \"with_zlib\": [True, False],\n \"with_jit\": [True, False],\n \"build_pcrecpp\": [True, False],\n \"build_pcregrep\": [True, False],\n \"with_utf\": [True, False],\n \"with_unicode_properties\": [True, False]\n }\n default_options = {'shared': False, 'fPIC': True, 'with_bzip2': True, 'with_zlib': True, 'with_jit': False, 'build_pcrecpp': False, 'build_pcregrep': False, 'with_utf': False, 'with_unicode_properties': False}\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if not self.options.build_pcrecpp:\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n if self.options.with_unicode_properties:\n self.options.with_utf = True\n\n def patch_cmake(self):\n \"\"\"Patch CMake file to avoid man and share during install stage\n \"\"\"\n cmake_file = os.path.join(self._source_subfolder, \"CMakeLists.txt\")\n tools.replace_in_file(cmake_file, \"INSTALL(FILES ${man1} DESTINATION man/man1)\", \"\")\n tools.replace_in_file(cmake_file, \"INSTALL(FILES ${man3} DESTINATION man/man3)\", \"\")\n tools.replace_in_file(cmake_file, \"INSTALL(FILES ${html} DESTINATION share/doc/pcre/html)\", \"\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n self.patch_cmake()\n\n def requirements(self):\n if self.options.with_bzip2:\n self.requires(\"bzip2/1.0.8\")\n if self.options.with_zlib:\n self.requires(\"zlib/1.2.11\")\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions[\"PCRE_BUILD_TESTS\"] = False\n cmake.definitions[\"PCRE_BUILD_PCREGREP\"] = self.options.build_pcregrep\n cmake.definitions[\"PCRE_BUILD_PCRECPP\"] = self.options.build_pcrecpp\n cmake.definitions[\"PCRE_SUPPORT_LIBZ\"] = self.options.with_zlib\n cmake.definitions[\"PCRE_SUPPORT_LIBBZ2\"] = self.options.with_bzip2\n cmake.definitions[\"PCRE_SUPPORT_JIT\"] = self.options.with_jit\n cmake.definitions[\"PCRE_SUPPORT_UTF\"] = self.options.with_utf\n cmake.definitions[\"PCRE_SUPPORT_UNICODE_PROPERTIES\"] = self.options.with_unicode_properties\n cmake.definitions[\"PCRE_SUPPORT_LIBREADLINE\"] = False\n cmake.definitions[\"PCRE_SUPPORT_LIBEDIT\"] = False\n if self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\":\n cmake.definitions[\"PCRE_STATIC_RUNTIME\"] = not self.options.shared and \"MT\" in self.settings.compiler.runtime\n cmake.configure(build_folder=self._build_subfolder)\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENCE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n if self.settings.os == \"Windows\" and 
self.settings.build_type == 'Debug':\n self.cpp_info.libs = ['pcreposixd', 'pcred']\n else:\n self.cpp_info.libs = ['pcreposix', 'pcre']\n if not self.options.shared:\n self.cpp_info.defines.append(\"PCRE_STATIC=1\")\n self.cpp_info.names['pkg_config'] = 'libpcre'\n\n self.cpp_info.names[\"cmake_find_package\"] = \"PCRE\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"PCRE\"\n", "path": "recipes/pcre/all/conanfile.py"}]} | 1,847 | 643 |
gh_patches_debug_37341 | rasdani/github-patches | git_diff | python__mypy-440 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor overloads away in 'random' stubs
It seems that all the `@overload` decorators in `stubs/3.2/random.py` could be represented without overloading, such as by using union types.
</issue>
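For illustration, here is a minimal sketch of the conversion the issue suggests, using `randrange` as the example (written in the same `pass`-body stub style as the file; at stub level, `Union[int, None]` is the same as `Optional[int]`):

```python
from typing import Union

# Before: two @overload signatures, one for randrange(stop) and one for
# randrange(start, stop, step).
# After: a single signature whose second argument is an optional union,
# covering both call forms without @overload.
def randrange(start: int, stop: Union[int, None] = None, step: int = 1) -> int: pass
```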
<code>
[start of stubs/3.2/random.py]
1 # Stubs for random
2 # Ron Murawski <[email protected]>
3 # Updated by Jukka Lehtosalo
4
5 # based on http://docs.python.org/3.2/library/random.html
6
7 # ----- random classes -----
8
9 import _random
10 from typing import (
11 Any, overload, typevar, Sequence, List, Function, AbstractSet
12 )
13
14 t = typevar('t')
15
16 class Random(_random.Random):
17 def __init__(self, x: Any = None) -> None: pass
18 def seed(self, a: Any = None, version: int = 2) -> None: pass
19 def getstate(self) -> tuple: pass
20 def setstate(self, state: tuple) -> None: pass
21 def getrandbits(self, k: int) -> int: pass
22
23 @overload
24 def randrange(self, stop: int) -> int: pass
25 @overload
26 def randrange(self, start: int, stop: int, step: int = 1) -> int: pass
27
28 def randint(self, a: int, b: int) -> int: pass
29 def choice(self, seq: Sequence[t]) -> t: pass
30
31 @overload
32 def shuffle(self, x: List[Any]) -> None: pass
33 @overload
34 def shuffle(self, x: List[Any], random: Function[[], float]) -> None: pass
35
36 @overload
37 def sample(self, population: Sequence[t], k: int) -> List[t]: pass
38 @overload
39 def sample(self, population: AbstractSet[t], k: int) -> List[t]: pass
40
41 def random(self) -> float: pass
42 def uniform(self, a: float, b: float) -> float: pass
43 def triangular(self, low: float = 0.0, high: float = 1.0,
44 mode: float = None) -> float: pass
45 def betavariate(self, alpha: float, beta: float) -> float: pass
46 def expovariate(self, lambd: float) -> float: pass
47 def gammavariate(self, alpha: float, beta: float) -> float: pass
48 def gauss(self, mu: float, sigma: float) -> float: pass
49 def lognormvariate(self, mu: float, sigma: float) -> float: pass
50 def normalvariate(self, mu: float, sigma: float) -> float: pass
51 def vonmisesvariate(self, mu: float, kappa: float) -> float: pass
52 def paretovariate(self, alpha: float) -> float: pass
53 def weibullvariate(self, alpha: float, beta: float) -> float: pass
54
55 # SystemRandom is not implemented for all OS's; good on Windows & Linux
56 class SystemRandom:
57 def __init__(self, randseed: object = None) -> None: pass
58 def random(self) -> float: pass
59 def getrandbits(self, k: int) -> int: pass
60 def seed(self, arg: object) -> None: pass
61
62 # ----- random function stubs -----
63 def seed(a: Any = None, version: int = 2) -> None: pass
64 def getstate() -> object: pass
65 def setstate(state: object) -> None: pass
66 def getrandbits(k: int) -> int: pass
67
68 @overload
69 def randrange(stop: int) -> int: pass
70 @overload
71 def randrange(start: int, stop: int, step: int = 1) -> int: pass
72
73 def randint(a: int, b: int) -> int: pass
74 def choice(seq: Sequence[t]) -> t: pass
75
76 @overload
77 def shuffle(x: List[Any]) -> None: pass
78 @overload
79 def shuffle(x: List[Any], random: Function[[], float]) -> None: pass
80
81 @overload
82 def sample(population: Sequence[t], k: int) -> List[t]: pass
83 @overload
84 def sample(population: AbstractSet[t], k: int) -> List[t]: pass
85
86 def random() -> float: pass
87 def uniform(a: float, b: float) -> float: pass
88 def triangular(low: float = 0.0, high: float = 1.0,
89 mode: float = None) -> float: pass
90 def betavariate(alpha: float, beta: float) -> float: pass
91 def expovariate(lambd: float) -> float: pass
92 def gammavariate(alpha: float, beta: float) -> float: pass
93 def gauss(mu: float, sigma: float) -> float: pass
94 def lognormvariate(mu: float, sigma: float) -> float: pass
95 def normalvariate(mu: float, sigma: float) -> float: pass
96 def vonmisesvariate(mu: float, kappa: float) -> float: pass
97 def paretovariate(alpha: float) -> float: pass
98 def weibullvariate(alpha: float, beta: float) -> float: pass
99
[end of stubs/3.2/random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/stubs/3.2/random.py b/stubs/3.2/random.py
--- a/stubs/3.2/random.py
+++ b/stubs/3.2/random.py
@@ -8,7 +8,7 @@
import _random
from typing import (
- Any, overload, typevar, Sequence, List, Function, AbstractSet
+ Any, typevar, Sequence, List, Function, AbstractSet, Union
)
t = typevar('t')
@@ -19,25 +19,11 @@
def getstate(self) -> tuple: pass
def setstate(self, state: tuple) -> None: pass
def getrandbits(self, k: int) -> int: pass
-
- @overload
- def randrange(self, stop: int) -> int: pass
- @overload
- def randrange(self, start: int, stop: int, step: int = 1) -> int: pass
-
+ def randrange(self, start: int, stop: Union[int, None] = None, step: int = 1) -> int: pass
def randint(self, a: int, b: int) -> int: pass
def choice(self, seq: Sequence[t]) -> t: pass
-
- @overload
- def shuffle(self, x: List[Any]) -> None: pass
- @overload
- def shuffle(self, x: List[Any], random: Function[[], float]) -> None: pass
-
- @overload
- def sample(self, population: Sequence[t], k: int) -> List[t]: pass
- @overload
- def sample(self, population: AbstractSet[t], k: int) -> List[t]: pass
-
+ def shuffle(self, x: List[Any], random: Union[Function[[], float], None] = None) -> None: pass
+ def sample(self, population: Union[Sequence[t], AbstractSet[t]], k: int) -> List[t]: pass
def random(self) -> float: pass
def uniform(self, a: float, b: float) -> float: pass
def triangular(self, low: float = 0.0, high: float = 1.0,
@@ -64,25 +50,11 @@
def getstate() -> object: pass
def setstate(state: object) -> None: pass
def getrandbits(k: int) -> int: pass
-
-@overload
-def randrange(stop: int) -> int: pass
-@overload
-def randrange(start: int, stop: int, step: int = 1) -> int: pass
-
+def randrange(start: int, stop: Union[None, int] = None, step: int = 1) -> int: pass
def randint(a: int, b: int) -> int: pass
def choice(seq: Sequence[t]) -> t: pass
-
-@overload
-def shuffle(x: List[Any]) -> None: pass
-@overload
-def shuffle(x: List[Any], random: Function[[], float]) -> None: pass
-
-@overload
-def sample(population: Sequence[t], k: int) -> List[t]: pass
-@overload
-def sample(population: AbstractSet[t], k: int) -> List[t]: pass
-
+def shuffle(x: List[Any], random: Union[Function[[], float], None] = None) -> None: pass
+def sample(population: Union[Sequence[t], AbstractSet[t]], k: int) -> List[t]: pass
def random() -> float: pass
def uniform(a: float, b: float) -> float: pass
def triangular(low: float = 0.0, high: float = 1.0,
| {"golden_diff": "diff --git a/stubs/3.2/random.py b/stubs/3.2/random.py\n--- a/stubs/3.2/random.py\n+++ b/stubs/3.2/random.py\n@@ -8,7 +8,7 @@\n \n import _random\n from typing import (\n- Any, overload, typevar, Sequence, List, Function, AbstractSet\n+ Any, typevar, Sequence, List, Function, AbstractSet, Union\n )\n \n t = typevar('t')\n@@ -19,25 +19,11 @@\n def getstate(self) -> tuple: pass\n def setstate(self, state: tuple) -> None: pass\n def getrandbits(self, k: int) -> int: pass\n-\n- @overload\n- def randrange(self, stop: int) -> int: pass\n- @overload\n- def randrange(self, start: int, stop: int, step: int = 1) -> int: pass\n-\n+ def randrange(self, start: int, stop: Union[int, None] = None, step: int = 1) -> int: pass\n def randint(self, a: int, b: int) -> int: pass\n def choice(self, seq: Sequence[t]) -> t: pass\n-\n- @overload\n- def shuffle(self, x: List[Any]) -> None: pass\n- @overload\n- def shuffle(self, x: List[Any], random: Function[[], float]) -> None: pass\n-\n- @overload\n- def sample(self, population: Sequence[t], k: int) -> List[t]: pass\n- @overload\n- def sample(self, population: AbstractSet[t], k: int) -> List[t]: pass\n-\n+ def shuffle(self, x: List[Any], random: Union[Function[[], float], None] = None) -> None: pass\n+ def sample(self, population: Union[Sequence[t], AbstractSet[t]], k: int) -> List[t]: pass\n def random(self) -> float: pass\n def uniform(self, a: float, b: float) -> float: pass\n def triangular(self, low: float = 0.0, high: float = 1.0,\n@@ -64,25 +50,11 @@\n def getstate() -> object: pass\n def setstate(state: object) -> None: pass\n def getrandbits(k: int) -> int: pass\n-\n-@overload\n-def randrange(stop: int) -> int: pass\n-@overload\n-def randrange(start: int, stop: int, step: int = 1) -> int: pass\n-\n+def randrange(start: int, stop: Union[None, int] = None, step: int = 1) -> int: pass\n def randint(a: int, b: int) -> int: pass\n def choice(seq: Sequence[t]) -> t: pass\n-\n-@overload\n-def shuffle(x: List[Any]) -> None: pass\n-@overload\n-def shuffle(x: List[Any], random: Function[[], float]) -> None: pass\n-\n-@overload\n-def sample(population: Sequence[t], k: int) -> List[t]: pass\n-@overload\n-def sample(population: AbstractSet[t], k: int) -> List[t]: pass\n-\n+def shuffle(x: List[Any], random: Union[Function[[], float], None] = None) -> None: pass\n+def sample(population: Union[Sequence[t], AbstractSet[t]], k: int) -> List[t]: pass\n def random() -> float: pass\n def uniform(a: float, b: float) -> float: pass\n def triangular(low: float = 0.0, high: float = 1.0,\n", "issue": "Refactor overloads away in 'random' stubs\nIt seems that all the `@overload` decorators in `stubs/3.2/random.py` could be represented without overloading, such as by using union types.\n\n", "before_files": [{"content": "# Stubs for random\n# Ron Murawski <[email protected]>\n# Updated by Jukka Lehtosalo\n\n# based on http://docs.python.org/3.2/library/random.html\n\n# ----- random classes -----\n\nimport _random\nfrom typing import (\n Any, overload, typevar, Sequence, List, Function, AbstractSet\n)\n\nt = typevar('t')\n\nclass Random(_random.Random):\n def __init__(self, x: Any = None) -> None: pass\n def seed(self, a: Any = None, version: int = 2) -> None: pass\n def getstate(self) -> tuple: pass\n def setstate(self, state: tuple) -> None: pass\n def getrandbits(self, k: int) -> int: pass\n\n @overload\n def randrange(self, stop: int) -> int: pass\n @overload\n def randrange(self, start: int, stop: int, step: int = 1) -> int: pass\n\n def randint(self, a: int, 
b: int) -> int: pass\n def choice(self, seq: Sequence[t]) -> t: pass\n\n @overload\n def shuffle(self, x: List[Any]) -> None: pass\n @overload\n def shuffle(self, x: List[Any], random: Function[[], float]) -> None: pass\n\n @overload\n def sample(self, population: Sequence[t], k: int) -> List[t]: pass\n @overload\n def sample(self, population: AbstractSet[t], k: int) -> List[t]: pass\n\n def random(self) -> float: pass\n def uniform(self, a: float, b: float) -> float: pass\n def triangular(self, low: float = 0.0, high: float = 1.0,\n mode: float = None) -> float: pass\n def betavariate(self, alpha: float, beta: float) -> float: pass\n def expovariate(self, lambd: float) -> float: pass\n def gammavariate(self, alpha: float, beta: float) -> float: pass\n def gauss(self, mu: float, sigma: float) -> float: pass\n def lognormvariate(self, mu: float, sigma: float) -> float: pass\n def normalvariate(self, mu: float, sigma: float) -> float: pass\n def vonmisesvariate(self, mu: float, kappa: float) -> float: pass\n def paretovariate(self, alpha: float) -> float: pass\n def weibullvariate(self, alpha: float, beta: float) -> float: pass\n\n# SystemRandom is not implemented for all OS's; good on Windows & Linux\nclass SystemRandom:\n def __init__(self, randseed: object = None) -> None: pass\n def random(self) -> float: pass\n def getrandbits(self, k: int) -> int: pass\n def seed(self, arg: object) -> None: pass\n\n# ----- random function stubs -----\ndef seed(a: Any = None, version: int = 2) -> None: pass\ndef getstate() -> object: pass\ndef setstate(state: object) -> None: pass\ndef getrandbits(k: int) -> int: pass\n\n@overload\ndef randrange(stop: int) -> int: pass\n@overload\ndef randrange(start: int, stop: int, step: int = 1) -> int: pass\n\ndef randint(a: int, b: int) -> int: pass\ndef choice(seq: Sequence[t]) -> t: pass\n\n@overload\ndef shuffle(x: List[Any]) -> None: pass\n@overload\ndef shuffle(x: List[Any], random: Function[[], float]) -> None: pass\n\n@overload\ndef sample(population: Sequence[t], k: int) -> List[t]: pass\n@overload\ndef sample(population: AbstractSet[t], k: int) -> List[t]: pass\n\ndef random() -> float: pass\ndef uniform(a: float, b: float) -> float: pass\ndef triangular(low: float = 0.0, high: float = 1.0,\n mode: float = None) -> float: pass\ndef betavariate(alpha: float, beta: float) -> float: pass\ndef expovariate(lambd: float) -> float: pass\ndef gammavariate(alpha: float, beta: float) -> float: pass\ndef gauss(mu: float, sigma: float) -> float: pass\ndef lognormvariate(mu: float, sigma: float) -> float: pass\ndef normalvariate(mu: float, sigma: float) -> float: pass\ndef vonmisesvariate(mu: float, kappa: float) -> float: pass\ndef paretovariate(alpha: float) -> float: pass\ndef weibullvariate(alpha: float, beta: float) -> float: pass\n", "path": "stubs/3.2/random.py"}]} | 1,893 | 844 |
gh_patches_debug_5826 | rasdani/github-patches | git_diff | voxel51__fiftyone-2588 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Sorting by similarity does not work on develop
On `develop`, nothing happens when clicking `Apply` below to sort by similarity in the App:
```py
import fiftyone as fo
import fiftyone.brain as fob
import fiftyone.zoo as foz
dataset = foz.load_zoo_dataset("quickstart")
model = foz.load_zoo_model("clip-vit-base32-torch")
fob.compute_similarity(dataset, model=model, brain_key="clip")
session = fo.launch_app(dataset)
```
<img width="628" alt="Screen Shot 2023-01-30 at 11 32 57 AM" src="https://user-images.githubusercontent.com/25985824/215537611-86a2385a-9279-410d-ac36-4ec5c7537551.png">
</issue>
<code>
[start of fiftyone/server/routes/sort.py]
1 """
2 FiftyOne Server /sort route
3
4 | Copyright 2017-2023, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 from starlette.endpoints import HTTPEndpoint
9 from starlette.requests import Request
10
11 import fiftyone.core.dataset as fod
12 import fiftyone.core.fields as fof
13 import fiftyone.core.view as fov
14
15 from fiftyone.server.decorators import route
16 import fiftyone.server.events as fose
17 from fiftyone.server.query import serialize_dataset
18 import fiftyone.server.view as fosv
19
20
21 class Sort(HTTPEndpoint):
22 @route
23 async def post(self, request: Request, data: dict):
24 dataset_name = data.get("dataset", None)
25 filters = data.get("filters", {})
26 stages = data.get("view", None)
27 extended = data.get("extended", None)
28 dist_field = data.get("dist_field", None)
29
30 dataset = fod.load_dataset(dataset_name)
31
32 changed = False
33 if dist_field and not dataset.get_field(dist_field):
34 dataset.add_sample_field(dist_field, fof.FloatField)
35 changed = True
36
37 fosv.get_view(dataset_name, stages=stages, filters=filters)
38
39 state = fose.get_state().copy()
40 view = fosv.get_view(dataset_name, stages=stages, filters=filters)
41 state.dataset = view._dataset
42
43 if isinstance(view, fov.DatasetView):
44 state.view = view
45 else:
46 view = None
47
48 return {
49 "dataset": await serialize_dataset(
50 dataset_name=dataset_name,
51 serialized_view=stages,
52 view_name=view.name,
53 )
54 if changed
55 else None,
56 "state": state.serialize(),
57 }
58
[end of fiftyone/server/routes/sort.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fiftyone/server/routes/sort.py b/fiftyone/server/routes/sort.py
--- a/fiftyone/server/routes/sort.py
+++ b/fiftyone/server/routes/sort.py
@@ -24,9 +24,7 @@
dataset_name = data.get("dataset", None)
filters = data.get("filters", {})
stages = data.get("view", None)
- extended = data.get("extended", None)
dist_field = data.get("dist_field", None)
-
dataset = fod.load_dataset(dataset_name)
changed = False
| {"golden_diff": "diff --git a/fiftyone/server/routes/sort.py b/fiftyone/server/routes/sort.py\n--- a/fiftyone/server/routes/sort.py\n+++ b/fiftyone/server/routes/sort.py\n@@ -24,9 +24,7 @@\n dataset_name = data.get(\"dataset\", None)\n filters = data.get(\"filters\", {})\n stages = data.get(\"view\", None)\n- extended = data.get(\"extended\", None)\n dist_field = data.get(\"dist_field\", None)\n-\n dataset = fod.load_dataset(dataset_name)\n \n changed = False\n", "issue": "[BUG] Sorting by similarity does not work on develop\nOn `develop`, nothing happens when clicking `Apply` below to sort by similarity in the App:\r\n\r\n```py\r\nimport fiftyone as fo\r\nimport fiftyone.brain as fob\r\nimport fiftyone.zoo as foz\r\n\r\ndataset = foz.load_zoo_dataset(\"quickstart\")\r\n\r\nmodel = foz.load_zoo_model(\"clip-vit-base32-torch\")\r\nfob.compute_similarity(dataset, model=model, brain_key=\"clip\")\r\n\r\nsession = fo.launch_app(dataset)\r\n```\r\n\r\n<img width=\"628\" alt=\"Screen Shot 2023-01-30 at 11 32 57 AM\" src=\"https://user-images.githubusercontent.com/25985824/215537611-86a2385a-9279-410d-ac36-4ec5c7537551.png\">\r\n\n", "before_files": [{"content": "\"\"\"\nFiftyOne Server /sort route\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom starlette.endpoints import HTTPEndpoint\nfrom starlette.requests import Request\n\nimport fiftyone.core.dataset as fod\nimport fiftyone.core.fields as fof\nimport fiftyone.core.view as fov\n\nfrom fiftyone.server.decorators import route\nimport fiftyone.server.events as fose\nfrom fiftyone.server.query import serialize_dataset\nimport fiftyone.server.view as fosv\n\n\nclass Sort(HTTPEndpoint):\n @route\n async def post(self, request: Request, data: dict):\n dataset_name = data.get(\"dataset\", None)\n filters = data.get(\"filters\", {})\n stages = data.get(\"view\", None)\n extended = data.get(\"extended\", None)\n dist_field = data.get(\"dist_field\", None)\n\n dataset = fod.load_dataset(dataset_name)\n\n changed = False\n if dist_field and not dataset.get_field(dist_field):\n dataset.add_sample_field(dist_field, fof.FloatField)\n changed = True\n\n fosv.get_view(dataset_name, stages=stages, filters=filters)\n\n state = fose.get_state().copy()\n view = fosv.get_view(dataset_name, stages=stages, filters=filters)\n state.dataset = view._dataset\n\n if isinstance(view, fov.DatasetView):\n state.view = view\n else:\n view = None\n\n return {\n \"dataset\": await serialize_dataset(\n dataset_name=dataset_name,\n serialized_view=stages,\n view_name=view.name,\n )\n if changed\n else None,\n \"state\": state.serialize(),\n }\n", "path": "fiftyone/server/routes/sort.py"}]} | 1,243 | 126 |
gh_patches_debug_61235 | rasdani/github-patches | git_diff | facebookresearch__CompilerGym-548 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve examples documentation to make it clear that they are standalone
## 🚀 Feature
Tangentially to #532, I think it would be good to add a "Usage" section to examples/README.md that makes it clear that these example scripts can be used through pip-installed CompilerGym, and possibly split the examples rules out of the top level makefile into an examples/Makefile file for standalone usage.
## Motivation
It is not clear whether the included examples require building from source (they don't) or can be used on their own (they can).
</issue>
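One packaging detail follows from the standalone goal: an examples package that is meant to run on its own must declare every requirement itself. As a rough sketch (the relative path to the tests requirements file is an assumption about this repository's layout), a setup.py can aggregate requirements from more than one file using the same comment-stripping idiom the file already uses:

```python
# Sketch: collect install requirements from several requirements files,
# stripping inline "#" comments from each line.
requirements = []
for path in ("requirements.txt", "../tests/requirements.txt"):
    with open(path) as f:
        requirements += [ln.split("#")[0].rstrip() for ln in f.readlines()]
```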
<code>
[start of examples/setup.py]
1 #!/usr/bin/env python3
2 #
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 #
5 # This source code is licensed under the MIT license found in the
6 # LICENSE file in the root directory of this source tree.
7
8 import distutils.util
9
10 import setuptools
11
12 with open("../VERSION") as f:
13 version = f.read().strip()
14 with open("requirements.txt") as f:
15 requirements = [ln.split("#")[0].rstrip() for ln in f.readlines()]
16
17 setuptools.setup(
18 name="compiler_gym_examples",
19 version=version,
20 description="Example code for CompilerGym",
21 author="Facebook AI Research",
22 url="https://github.com/facebookresearch/CompilerGym",
23 license="MIT",
24 install_requires=requirements,
25 packages=[
26 "llvm_autotuning",
27 "llvm_autotuning.autotuners",
28 "llvm_rl",
29 "llvm_rl.model",
30 ],
31 python_requires=">=3.8",
32 platforms=[distutils.util.get_platform()],
33 zip_safe=False,
34 )
35
[end of examples/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/setup.py b/examples/setup.py
--- a/examples/setup.py
+++ b/examples/setup.py
@@ -13,6 +13,8 @@
version = f.read().strip()
with open("requirements.txt") as f:
requirements = [ln.split("#")[0].rstrip() for ln in f.readlines()]
+with open("../tests/requirements.txt") as f:
+ requirements += [ln.split("#")[0].rstrip() for ln in f.readlines()]
setuptools.setup(
name="compiler_gym_examples",
| {"golden_diff": "diff --git a/examples/setup.py b/examples/setup.py\n--- a/examples/setup.py\n+++ b/examples/setup.py\n@@ -13,6 +13,8 @@\n version = f.read().strip()\n with open(\"requirements.txt\") as f:\n requirements = [ln.split(\"#\")[0].rstrip() for ln in f.readlines()]\n+with open(\"../tests/requirements.txt\") as f:\n+ requirements += [ln.split(\"#\")[0].rstrip() for ln in f.readlines()]\n \n setuptools.setup(\n name=\"compiler_gym_examples\",\n", "issue": "Improve examples documentation to make it clear that they are standalone\n## \ud83d\ude80 Feature\r\n\r\nTangentially to #532, I think it would be good to add a \"Usage\" section to examples/README.md that makes it clear that these example scripts can be used through pip-installed CompilerGym, and possibly split the examples rules out of the top level makefile into an examples/Makefile file for standalone usage.\r\n\r\n## Motivation\r\n\r\nIt is not clear whether the included examples require building from source (they don't) or can be used on their own (they can).\n", "before_files": [{"content": "#!/usr/bin/env python3\n#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport distutils.util\n\nimport setuptools\n\nwith open(\"../VERSION\") as f:\n version = f.read().strip()\nwith open(\"requirements.txt\") as f:\n requirements = [ln.split(\"#\")[0].rstrip() for ln in f.readlines()]\n\nsetuptools.setup(\n name=\"compiler_gym_examples\",\n version=version,\n description=\"Example code for CompilerGym\",\n author=\"Facebook AI Research\",\n url=\"https://github.com/facebookresearch/CompilerGym\",\n license=\"MIT\",\n install_requires=requirements,\n packages=[\n \"llvm_autotuning\",\n \"llvm_autotuning.autotuners\",\n \"llvm_rl\",\n \"llvm_rl.model\",\n ],\n python_requires=\">=3.8\",\n platforms=[distutils.util.get_platform()],\n zip_safe=False,\n)\n", "path": "examples/setup.py"}]} | 934 | 115 |
gh_patches_debug_23256 | rasdani/github-patches | git_diff | deepset-ai__haystack-3901 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Better model security with new PyTorch version
When loading PyTorch models from the modelhub, arbitrary code can be executed. See [here.](https://huggingface.co/docs/hub/security-pickle#pickle-scanning)
Seems like PyTorch already solved this:
- Issue on the PyTorch Repo: https://github.com/pytorch/pytorch/issues/52596
- By default, this is not enabled in torch==1.13; TORCH_FORCE_WEIGHTS_ONLY_LOAD needs to be set to true, which is a global override that forces safe, weights-only model loading.
Test
Run the Haystack test suite with the new flag set.
**Solution:**
- Bump up PyTorch version to 1.13
**Behaviour:**
- Secure by default: TORCH_FORCE_WEIGHTS_ONLY_LOAD is always set to true when Haystack loads any model
</issue>
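As a rough sketch of that opt-in mechanism (the helper name below is hypothetical, not Haystack's actual API), a library can set the flag once at import time while respecting any value the user has already exported; PyTorch treats "1", "y", "yes", and "true" as enabling values:

```python
import os

def enable_safe_torch_loading(flag_val: str = "1") -> None:
    # Respect an explicit user setting; otherwise default to safe,
    # weights-only loading for every subsequent torch.load call.
    if os.getenv("TORCH_FORCE_WEIGHTS_ONLY_LOAD") is None:
        os.environ["TORCH_FORCE_WEIGHTS_ONLY_LOAD"] = flag_val

enable_safe_torch_loading()  # must run before any model weights are loaded
```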
<code>
[start of haystack/__init__.py]
1 # pylint: disable=wrong-import-position,wrong-import-order
2
3 from typing import Union
4 from types import ModuleType
5
6 try:
7 from importlib import metadata
8 except (ModuleNotFoundError, ImportError):
9 # Python <= 3.7
10 import importlib_metadata as metadata # type: ignore
11
12 __version__: str = str(metadata.version("farm-haystack"))
13
14
15 # Logging is not configured here on purpose, see https://github.com/deepset-ai/haystack/issues/2485
16 import logging
17
18 import pandas as pd
19
20 from haystack.schema import Document, Answer, Label, MultiLabel, Span, EvaluationResult
21 from haystack.nodes.base import BaseComponent
22 from haystack.pipelines.base import Pipeline
23
24
25 pd.options.display.max_colwidth = 80
26
[end of haystack/__init__.py]
[start of haystack/environment.py]
1 import os
2 import platform
3 import sys
4 from typing import Any, Dict
5 import torch
6 import transformers
7
8 from haystack import __version__
9
10
11 HAYSTACK_EXECUTION_CONTEXT = "HAYSTACK_EXECUTION_CONTEXT"
12 HAYSTACK_DOCKER_CONTAINER = "HAYSTACK_DOCKER_CONTAINER"
13
14 # Any remote API (OpenAI, Cohere etc.)
15 HAYSTACK_REMOTE_API_BACKOFF_SEC = "HAYSTACK_REMOTE_API_BACKOFF_SEC"
16 HAYSTACK_REMOTE_API_MAX_RETRIES = "HAYSTACK_REMOTE_API_MAX_RETRIES"
17
18 env_meta_data: Dict[str, Any] = {}
19
20
21 def get_or_create_env_meta_data() -> Dict[str, Any]:
22 """
23 Collects meta data about the setup that is used with Haystack, such as: operating system, python version, Haystack version, transformers version, pytorch version, number of GPUs, execution environment, and the value stored in the env variable HAYSTACK_EXECUTION_CONTEXT.
24 """
25 global env_meta_data # pylint: disable=global-statement
26 if not env_meta_data:
27 env_meta_data = {
28 "os_version": platform.release(),
29 "os_family": platform.system(),
30 "os_machine": platform.machine(),
31 "python_version": platform.python_version(),
32 "haystack_version": __version__,
33 "transformers_version": transformers.__version__,
34 "torch_version": torch.__version__,
35 "torch_cuda_version": torch.version.cuda if torch.cuda.is_available() else 0,
36 "n_gpu": torch.cuda.device_count() if torch.cuda.is_available() else 0,
37 "n_cpu": os.cpu_count(),
38 "context": os.environ.get(HAYSTACK_EXECUTION_CONTEXT),
39 "execution_env": _get_execution_environment(),
40 }
41 return env_meta_data
42
43
44 def _get_execution_environment():
45 """
46 Identifies the execution environment that Haystack is running in.
47 Options are: colab notebook, kubernetes, CPU/GPU docker container, test environment, jupyter notebook, python script
48 """
49 if os.environ.get("CI", "False").lower() == "true":
50 execution_env = "ci"
51 elif "google.colab" in sys.modules:
52 execution_env = "colab"
53 elif "KUBERNETES_SERVICE_HOST" in os.environ:
54 execution_env = "kubernetes"
55 elif HAYSTACK_DOCKER_CONTAINER in os.environ:
56 execution_env = os.environ.get(HAYSTACK_DOCKER_CONTAINER)
57 # check if pytest is imported
58 elif "pytest" in sys.modules:
59 execution_env = "test"
60 else:
61 try:
62 execution_env = get_ipython().__class__.__name__ # pylint: disable=undefined-variable
63 except NameError:
64 execution_env = "script"
65 return execution_env
66
[end of haystack/environment.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/__init__.py b/haystack/__init__.py
--- a/haystack/__init__.py
+++ b/haystack/__init__.py
@@ -20,6 +20,8 @@
from haystack.schema import Document, Answer, Label, MultiLabel, Span, EvaluationResult
from haystack.nodes.base import BaseComponent
from haystack.pipelines.base import Pipeline
+from haystack.environment import set_pytorch_secure_model_loading
pd.options.display.max_colwidth = 80
+set_pytorch_secure_model_loading()
diff --git a/haystack/environment.py b/haystack/environment.py
--- a/haystack/environment.py
+++ b/haystack/environment.py
@@ -1,3 +1,4 @@
+import logging
import os
import platform
import sys
@@ -17,6 +18,18 @@
env_meta_data: Dict[str, Any] = {}
+logger = logging.getLogger(__name__)
+
+
+def set_pytorch_secure_model_loading(flag_val="1"):
+ # To load secure only model pytorch requires value of
+ # TORCH_FORCE_WEIGHTS_ONLY_LOAD to be ["1", "y", "yes", "true"]
+ os_flag_val = os.getenv("TORCH_FORCE_WEIGHTS_ONLY_LOAD")
+ if os_flag_val is None:
+ os.environ["TORCH_FORCE_WEIGHTS_ONLY_LOAD"] = flag_val
+ else:
+ logger.info("TORCH_FORCE_WEIGHTS_ONLY_LOAD is already set to %s, Haystack will use the same.", os_flag_val)
+
def get_or_create_env_meta_data() -> Dict[str, Any]:
"""
| {"golden_diff": "diff --git a/haystack/__init__.py b/haystack/__init__.py\n--- a/haystack/__init__.py\n+++ b/haystack/__init__.py\n@@ -20,6 +20,8 @@\n from haystack.schema import Document, Answer, Label, MultiLabel, Span, EvaluationResult\n from haystack.nodes.base import BaseComponent\n from haystack.pipelines.base import Pipeline\n+from haystack.environment import set_pytorch_secure_model_loading\n \n \n pd.options.display.max_colwidth = 80\n+set_pytorch_secure_model_loading()\ndiff --git a/haystack/environment.py b/haystack/environment.py\n--- a/haystack/environment.py\n+++ b/haystack/environment.py\n@@ -1,3 +1,4 @@\n+import logging\n import os\n import platform\n import sys\n@@ -17,6 +18,18 @@\n \n env_meta_data: Dict[str, Any] = {}\n \n+logger = logging.getLogger(__name__)\n+\n+\n+def set_pytorch_secure_model_loading(flag_val=\"1\"):\n+ # To load secure only model pytorch requires value of\n+ # TORCH_FORCE_WEIGHTS_ONLY_LOAD to be [\"1\", \"y\", \"yes\", \"true\"]\n+ os_flag_val = os.getenv(\"TORCH_FORCE_WEIGHTS_ONLY_LOAD\")\n+ if os_flag_val is None:\n+ os.environ[\"TORCH_FORCE_WEIGHTS_ONLY_LOAD\"] = flag_val\n+ else:\n+ logger.info(\"TORCH_FORCE_WEIGHTS_ONLY_LOAD is already set to %s, Haystack will use the same.\", os_flag_val)\n+\n \n def get_or_create_env_meta_data() -> Dict[str, Any]:\n \"\"\"\n", "issue": "Better model security with new PyTorch version\nWhen loading PyTorch models from the modelhub, arbitrary code can be executed. See [here.](https://huggingface.co/docs/hub/security-pickle#pickle-scanning)\r\n\r\nSeems like PyTorch already solved this:\r\n- Issue on the PyTorch Repo: https://github.com/pytorch/pytorch/issues/52596\r\n- By default, this is not available in torch==1.13. TORCH_FORCE_WEIGHTS_ONLY_LOAD needs to be set to True which allows global override to safe only model loading via.\r\n\r\nTest\r\nTest haystack tests with new flags\r\n\r\n**Solution:**\r\n- Bump up PyTorch version to 1.13\r\n\r\n**Behaviour**\r\n- Secure by default TORCH_FORCE_WEIGHTS_ONLY_LOAD always set to true when Haystack loads any models\r\n\r\n\n", "before_files": [{"content": "# pylint: disable=wrong-import-position,wrong-import-order\n\nfrom typing import Union\nfrom types import ModuleType\n\ntry:\n from importlib import metadata\nexcept (ModuleNotFoundError, ImportError):\n # Python <= 3.7\n import importlib_metadata as metadata # type: ignore\n\n__version__: str = str(metadata.version(\"farm-haystack\"))\n\n\n# Logging is not configured here on purpose, see https://github.com/deepset-ai/haystack/issues/2485\nimport logging\n\nimport pandas as pd\n\nfrom haystack.schema import Document, Answer, Label, MultiLabel, Span, EvaluationResult\nfrom haystack.nodes.base import BaseComponent\nfrom haystack.pipelines.base import Pipeline\n\n\npd.options.display.max_colwidth = 80\n", "path": "haystack/__init__.py"}, {"content": "import os\nimport platform\nimport sys\nfrom typing import Any, Dict\nimport torch\nimport transformers\n\nfrom haystack import __version__\n\n\nHAYSTACK_EXECUTION_CONTEXT = \"HAYSTACK_EXECUTION_CONTEXT\"\nHAYSTACK_DOCKER_CONTAINER = \"HAYSTACK_DOCKER_CONTAINER\"\n\n# Any remote API (OpenAI, Cohere etc.)\nHAYSTACK_REMOTE_API_BACKOFF_SEC = \"HAYSTACK_REMOTE_API_BACKOFF_SEC\"\nHAYSTACK_REMOTE_API_MAX_RETRIES = \"HAYSTACK_REMOTE_API_MAX_RETRIES\"\n\nenv_meta_data: Dict[str, Any] = {}\n\n\ndef get_or_create_env_meta_data() -> Dict[str, Any]:\n \"\"\"\n Collects meta data about the setup that is used with Haystack, such as: operating system, python version, Haystack version, 
transformers version, pytorch version, number of GPUs, execution environment, and the value stored in the env variable HAYSTACK_EXECUTION_CONTEXT.\n \"\"\"\n global env_meta_data # pylint: disable=global-statement\n if not env_meta_data:\n env_meta_data = {\n \"os_version\": platform.release(),\n \"os_family\": platform.system(),\n \"os_machine\": platform.machine(),\n \"python_version\": platform.python_version(),\n \"haystack_version\": __version__,\n \"transformers_version\": transformers.__version__,\n \"torch_version\": torch.__version__,\n \"torch_cuda_version\": torch.version.cuda if torch.cuda.is_available() else 0,\n \"n_gpu\": torch.cuda.device_count() if torch.cuda.is_available() else 0,\n \"n_cpu\": os.cpu_count(),\n \"context\": os.environ.get(HAYSTACK_EXECUTION_CONTEXT),\n \"execution_env\": _get_execution_environment(),\n }\n return env_meta_data\n\n\ndef _get_execution_environment():\n \"\"\"\n Identifies the execution environment that Haystack is running in.\n Options are: colab notebook, kubernetes, CPU/GPU docker container, test environment, jupyter notebook, python script\n \"\"\"\n if os.environ.get(\"CI\", \"False\").lower() == \"true\":\n execution_env = \"ci\"\n elif \"google.colab\" in sys.modules:\n execution_env = \"colab\"\n elif \"KUBERNETES_SERVICE_HOST\" in os.environ:\n execution_env = \"kubernetes\"\n elif HAYSTACK_DOCKER_CONTAINER in os.environ:\n execution_env = os.environ.get(HAYSTACK_DOCKER_CONTAINER)\n # check if pytest is imported\n elif \"pytest\" in sys.modules:\n execution_env = \"test\"\n else:\n try:\n execution_env = get_ipython().__class__.__name__ # pylint: disable=undefined-variable\n except NameError:\n execution_env = \"script\"\n return execution_env\n", "path": "haystack/environment.py"}]} | 1,647 | 356 |
gh_patches_debug_1621 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1707 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Explorations should not auto-save
New Explorations are currently persistent, any change made immediately saves the exploration. This behaviour is not preferred since we'd like the user to be able to run and discard queries.
[Mail thread containing related discussion](https://groups.google.com/a/mathesar.org/g/mathesar-developers/c/RQJSiDQu1Tg/m/uLHj30yFAgAJ).
New behaviour proposed:
* New Exploration: Auto-save is not preferred
- User opens Data Explorer
- User joins tables, does any number of operations
- This should not get saved automatically
- It should get saved when user manually clicks Save button
* Editing existing Exploration: ~~Auto-save is preferred~~ Auto save is not preferred (Refer https://github.com/centerofci/mathesar/issues/1590#issuecomment-1238204655)
- Users edits an existing exploration in the Data Explorer
- User makes changes to it
- ~~The changes are auto-saved~~ User has to click the Save button or Ctrl+s to save the changes
- We have undo-redo to improve the user's editing experience
Implement Exploration Page functionality
This is a placeholder issue to implement a page to view a single exploration.
</issue>
<code>
[start of mathesar/views.py]
1 from django.shortcuts import render, redirect, get_object_or_404
2
3 from mathesar.models.base import Database, Schema, Table
4 from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer
5 from mathesar.api.serializers.schemas import SchemaSerializer
6 from mathesar.api.serializers.tables import TableSerializer
7 from mathesar.api.serializers.queries import QuerySerializer
8 from mathesar.database.types import UIType
9 from mathesar.models.query import UIQuery
10
11
12 def get_schema_list(request, database):
13 schema_serializer = SchemaSerializer(
14 Schema.objects.filter(database=database),
15 many=True,
16 context={'request': request}
17 )
18 return schema_serializer.data
19
20
21 def get_database_list(request):
22 database_serializer = DatabaseSerializer(
23 Database.objects.all(),
24 many=True,
25 context={'request': request}
26 )
27 return database_serializer.data
28
29
30 def get_table_list(request, schema):
31 if schema is None:
32 return []
33 table_serializer = TableSerializer(
34 Table.objects.filter(schema=schema),
35 many=True,
36 context={'request': request}
37 )
38 return table_serializer.data
39
40
41 def get_queries_list(request, schema):
42 if schema is None:
43 return []
44 query_serializer = QuerySerializer(
45 UIQuery.objects.all(),
46 many=True,
47 context={'request': request}
48 )
49 return query_serializer.data
50
51
52 def get_ui_type_list(request, database):
53 if database is None:
54 return []
55 type_serializer = TypeSerializer(
56 UIType,
57 many=True,
58 context={'request': request}
59 )
60 return type_serializer.data
61
62
63 def get_common_data(request, database, schema=None):
64 return {
65 'current_db': database.name if database else None,
66 'current_schema': schema.id if schema else None,
67 'schemas': get_schema_list(request, database),
68 'databases': get_database_list(request),
69 'tables': get_table_list(request, schema),
70 'queries': get_queries_list(request, schema),
71 'abstract_types': get_ui_type_list(request, database)
72 }
73
74
75 def get_current_database(request, db_name):
76 # if there's a DB name passed in, try to retrieve the database, or return a 404 error.
77 if db_name is not None:
78 return get_object_or_404(Database, name=db_name)
79 else:
80 try:
81 # Try to get the first database available
82 return Database.objects.order_by('id').first()
83 except Database.DoesNotExist:
84 return None
85
86
87 def get_current_schema(request, schema_id, database):
88 # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.
89 if schema_id is not None:
90 return get_object_or_404(Schema, id=schema_id)
91 else:
92 try:
93 # Try to get the first schema in the DB
94 return Schema.objects.filter(database=database).order_by('id').first()
95 except Schema.DoesNotExist:
96 return None
97
98
99 def render_schema(request, database, schema):
100 # if there's no schema available, redirect to the schemas page.
101 if not schema:
102 return redirect('schemas', db_name=database.name)
103 else:
104 # We are redirecting so that the correct URL is passed to the frontend.
105 return redirect('schema_home', db_name=database.name, schema_id=schema.id)
106
107
108 def home(request):
109 database = get_current_database(request, None)
110 return redirect('schemas', db_name=database.name)
111
112
113 def schema_home(request, db_name, schema_id, **kwargs):
114 database = get_current_database(request, db_name)
115 schema = get_current_schema(request, schema_id, database)
116 return render(request, 'mathesar/index.html', {
117 'common_data': get_common_data(request, database, schema)
118 })
119
120
121 def schemas(request, db_name):
122 database = get_current_database(request, db_name)
123 return render(request, 'mathesar/index.html', {
124 'common_data': get_common_data(request, database, None)
125 })
126
[end of mathesar/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/views.py b/mathesar/views.py
--- a/mathesar/views.py
+++ b/mathesar/views.py
@@ -42,7 +42,7 @@
if schema is None:
return []
query_serializer = QuerySerializer(
- UIQuery.objects.all(),
+ UIQuery.objects.filter(base_table__schema=schema),
many=True,
context={'request': request}
)
| {"golden_diff": "diff --git a/mathesar/views.py b/mathesar/views.py\n--- a/mathesar/views.py\n+++ b/mathesar/views.py\n@@ -42,7 +42,7 @@\n if schema is None:\n return []\n query_serializer = QuerySerializer(\n- UIQuery.objects.all(),\n+ UIQuery.objects.filter(base_table__schema=schema),\n many=True,\n context={'request': request}\n )\n", "issue": "Explorations should not auto-save\nNew Explorations are currently persistent, any change made immediately saves the exploration. This behaviour is not preferred since we'd like the user to be able to run and discard queries.\r\n\r\n[Mail thread containing related discussion](https://groups.google.com/a/mathesar.org/g/mathesar-developers/c/RQJSiDQu1Tg/m/uLHj30yFAgAJ).\r\n\r\nNew behaviour proposed:\r\n\r\n* New Exploration: Auto-save is not preferred\r\n - User opens Data Explorer\r\n - User joins tables, does any number of operations\r\n - This should not get saved automatically\r\n - It should get saved when user manually clicks Save button\r\n\r\n* Editing existing Exploration: ~~Auto-save is preferred~~ Auto save is not preferred (Refer https://github.com/centerofci/mathesar/issues/1590#issuecomment-1238204655)\r\n - Users edits an existing exploration in the Data Explorer\r\n - User makes changes to it\r\n - ~~The changes are auto-saved~~ User has to click the Save button or Ctrl+s to save the changes\r\n - We have undo-redo to improve the user's editing experience\nImplement Exploration Page functionality\nThis is a placeholder issue to implement a page to view a single exploration.\n", "before_files": [{"content": "from django.shortcuts import render, redirect, get_object_or_404\n\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\nfrom mathesar.api.serializers.queries import QuerySerializer\nfrom mathesar.database.types import UIType\nfrom mathesar.models.query import UIQuery\n\n\ndef get_schema_list(request, database):\n schema_serializer = SchemaSerializer(\n Schema.objects.filter(database=database),\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef get_database_list(request):\n database_serializer = DatabaseSerializer(\n Database.objects.all(),\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n table_serializer = TableSerializer(\n Table.objects.filter(schema=schema),\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_queries_list(request, schema):\n if schema is None:\n return []\n query_serializer = QuerySerializer(\n UIQuery.objects.all(),\n many=True,\n context={'request': request}\n )\n return query_serializer.data\n\n\ndef get_ui_type_list(request, database):\n if database is None:\n return []\n type_serializer = TypeSerializer(\n UIType,\n many=True,\n context={'request': request}\n )\n return type_serializer.data\n\n\ndef get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database)\n }\n\n\ndef 
get_current_database(request, db_name):\n # if there's a DB name passed in, try to retrieve the database, or return a 404 error.\n if db_name is not None:\n return get_object_or_404(Database, name=db_name)\n else:\n try:\n # Try to get the first database available\n return Database.objects.order_by('id').first()\n except Database.DoesNotExist:\n return None\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n return get_object_or_404(Schema, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\ndef home(request):\n database = get_current_database(request, None)\n return redirect('schemas', db_name=database.name)\n\n\ndef schema_home(request, db_name, schema_id, **kwargs):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, schema_id, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n\n\ndef schemas(request, db_name):\n database = get_current_database(request, db_name)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, None)\n })\n", "path": "mathesar/views.py"}]} | 1,937 | 90 |
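The one-line fix recorded above scopes `get_queries_list` to the schema being viewed instead of returning every saved exploration via `UIQuery.objects.all()`. A minimal sketch of the scoped helper, reusing the model and serializer names visible in the patch (a restatement for illustration, not verified against the Mathesar repository):

```python
# Hypothetical restatement of the patched view helper from the diff above.
def get_queries_list(request, schema):
    if schema is None:
        return []
    # Scope to the current schema rather than leaking every exploration.
    queryset = UIQuery.objects.filter(base_table__schema=schema)
    return QuerySerializer(
        queryset, many=True, context={"request": request}
    ).data
```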
gh_patches_debug_19645 | rasdani/github-patches | git_diff | Nitrate__Nitrate-607 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Abstract jQuery.ajax calls
The JavaScript code contains many jQuery.ajax calls that repeat the same structure and share part of the same request arguments.
</issue>
<code>
[start of src/tcms/comments/views.py]
1 # -*- coding: utf-8 -*-
2
3 import logging
4
5 from django.conf import settings
6 from django.contrib.auth.mixins import PermissionRequiredMixin
7 from django.http import JsonResponse
8 from django.shortcuts import render
9 from django.views import generic
10 from django.views.decorators.http import require_POST
11
12 import django_comments.signals
13 from django_comments.views.moderation import perform_delete
14
15 from tcms.comments.models import post_comment
16 from tcms.comments.exceptions import InvalidCommentPostRequest
17 from tcms.core.responses import JsonResponseBadRequest
18
19 log = logging.getLogger(__name__)
20
21
22 @require_POST
23 def post(request, template_name='comments/comments.html'):
24 """Post a comment"""
25 data = request.POST.copy()
26 try:
27 target, _ = post_comment(
28 data, request.user, request.META.get('REMOTE_ADDR'))
29 except InvalidCommentPostRequest as e:
30 target = e.target
31 return render(request, template_name, context={'object': target})
32
33
34 class DeleteCommentView(PermissionRequiredMixin, generic.View):
35 """Delete comment from given objects"""
36
37 permission_required = 'django_comments.can_moderate'
38
39 def post(self, request):
40 comments = django_comments.get_model().objects.filter(
41 pk__in=request.POST.getlist('comment_id'),
42 site__pk=settings.SITE_ID,
43 is_removed=False,
44 user_id=request.user.id
45 )
46
47 if not comments:
48 return JsonResponseBadRequest({
49 'message': 'No incoming comment id exists.'
50 })
51
52 # Flag the comment as deleted instead of actually deleting it.
53 for comment in comments:
54 perform_delete(request, comment)
55
56 return JsonResponse({})
57
[end of src/tcms/comments/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/tcms/comments/views.py b/src/tcms/comments/views.py
--- a/src/tcms/comments/views.py
+++ b/src/tcms/comments/views.py
@@ -5,7 +5,6 @@
from django.conf import settings
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.http import JsonResponse
-from django.shortcuts import render
from django.views import generic
from django.views.decorators.http import require_POST
@@ -27,8 +26,10 @@
target, _ = post_comment(
data, request.user, request.META.get('REMOTE_ADDR'))
except InvalidCommentPostRequest as e:
- target = e.target
- return render(request, template_name, context={'object': target})
+ msg = f'Fail to add comment to object {e.target}'
+ log.exception(msg)
+ return JsonResponseBadRequest({'message': msg})
+ return JsonResponse({})
class DeleteCommentView(PermissionRequiredMixin, generic.View):
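After this patch the comment endpoint speaks JSON in both branches — an empty object with status 200 on success, and a `{'message': ...}` body with status 400 on failure — which is exactly the uniform contract a shared front-end request helper needs. A rough check of that contract with Django's test client (the route and form fields below are placeholders, not taken from Nitrate):

```python
from django.test import Client

client = Client()
# Hypothetical URL and payload; the real route and required fields come
# from Nitrate's comment form, which is not shown in this record.
resp = client.post("/comments/post/", {"comment": "LGTM"})
if resp.status_code == 200:
    assert resp.json() == {}          # success: empty JSON body
else:
    assert "message" in resp.json()   # failure: JSON error message
```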
| {"golden_diff": "diff --git a/src/tcms/comments/views.py b/src/tcms/comments/views.py\n--- a/src/tcms/comments/views.py\n+++ b/src/tcms/comments/views.py\n@@ -5,7 +5,6 @@\n from django.conf import settings\n from django.contrib.auth.mixins import PermissionRequiredMixin\n from django.http import JsonResponse\n-from django.shortcuts import render\n from django.views import generic\n from django.views.decorators.http import require_POST\n \n@@ -27,8 +26,10 @@\n target, _ = post_comment(\n data, request.user, request.META.get('REMOTE_ADDR'))\n except InvalidCommentPostRequest as e:\n- target = e.target\n- return render(request, template_name, context={'object': target})\n+ msg = f'Fail to add comment to object {e.target}'\n+ log.exception(msg)\n+ return JsonResponseBadRequest({'message': msg})\n+ return JsonResponse({})\n \n \n class DeleteCommentView(PermissionRequiredMixin, generic.View):\n", "issue": "Abstract jQuery.ajax calls\nLots of jQuery.ajax calls in JavaScript code, that repeat same structure and part of the same request arguments.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.views import generic\nfrom django.views.decorators.http import require_POST\n\nimport django_comments.signals\nfrom django_comments.views.moderation import perform_delete\n\nfrom tcms.comments.models import post_comment\nfrom tcms.comments.exceptions import InvalidCommentPostRequest\nfrom tcms.core.responses import JsonResponseBadRequest\n\nlog = logging.getLogger(__name__)\n\n\n@require_POST\ndef post(request, template_name='comments/comments.html'):\n \"\"\"Post a comment\"\"\"\n data = request.POST.copy()\n try:\n target, _ = post_comment(\n data, request.user, request.META.get('REMOTE_ADDR'))\n except InvalidCommentPostRequest as e:\n target = e.target\n return render(request, template_name, context={'object': target})\n\n\nclass DeleteCommentView(PermissionRequiredMixin, generic.View):\n \"\"\"Delete comment from given objects\"\"\"\n\n permission_required = 'django_comments.can_moderate'\n\n def post(self, request):\n comments = django_comments.get_model().objects.filter(\n pk__in=request.POST.getlist('comment_id'),\n site__pk=settings.SITE_ID,\n is_removed=False,\n user_id=request.user.id\n )\n\n if not comments:\n return JsonResponseBadRequest({\n 'message': 'No incoming comment id exists.'\n })\n\n # Flag the comment as deleted instead of actually deleting it.\n for comment in comments:\n perform_delete(request, comment)\n\n return JsonResponse({})\n", "path": "src/tcms/comments/views.py"}]} | 1,008 | 205 |
gh_patches_debug_30601 | rasdani/github-patches | git_diff | wagtail__wagtail-1888 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Errors from embed chooser are not reported back to the user
It seems that the embed functionality of Wagtail's hallo.js editor is not working. I tried to embed a link: the modal comes up and everything looks fine, but when I press "Insert" nothing is inserted and the browser displays this error:
(Firefox) TypeError: a is undefined rangy-core.js:33:479
(Chrome) rangy-core.js:33 Uncaught TypeError: Cannot read property 'nodeType' of undefined
But the network data says everything is OK:
POST .../admin/embeds/chooser/upload/ 200 OK
After closing the modal window, the embed link is not present.
</issue>
<code>
[start of wagtail/wagtailembeds/views/chooser.py]
1 from django.forms.utils import ErrorList
2 from django.utils.translation import ugettext as _
3
4 from wagtail.wagtailadmin.modal_workflow import render_modal_workflow
5 from wagtail.wagtailembeds.forms import EmbedForm
6 from wagtail.wagtailembeds.format import embed_to_editor_html
7
8 from wagtail.wagtailembeds.embeds import EmbedNotFoundException, EmbedlyException, AccessDeniedEmbedlyException
9
10
11
12 def chooser(request):
13 form = EmbedForm()
14
15 return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {
16 'form': form,
17 })
18
19
20 def chooser_upload(request):
21 if request.POST:
22 form = EmbedForm(request.POST, request.FILES)
23
24 if form.is_valid():
25 error = None
26 try:
27 embed_html = embed_to_editor_html(form.cleaned_data['url'])
28 return render_modal_workflow(
29 request, None, 'wagtailembeds/chooser/embed_chosen.js',
30 {'embed_html': embed_html}
31 )
32 except AccessDeniedEmbedlyException:
33 error = _("There seems to be a problem with your embedly API key. Please check your settings.")
34 except EmbedNotFoundException:
35 error = _("Cannot find an embed for this URL.")
36 except EmbedlyException:
37 error = _("There seems to be an error with Embedly while trying to embed this URL. Please try again later.")
38
39 if error:
40 errors = form._errors.setdefault('url', ErrorList())
41 errors.append(error)
42 return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {
43 'form': form,
44 })
45 else:
46 form = EmbedForm()
47
48 return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {
49 'form': form,
50 })
51
[end of wagtail/wagtailembeds/views/chooser.py]
[start of wagtail/wagtailembeds/rich_text.py]
1 from wagtail.wagtailembeds import format
2
3
4 class MediaEmbedHandler(object):
5 """
6 MediaEmbedHandler will be invoked whenever we encounter an element in HTML content
7 with an attribute of data-embedtype="media". The resulting element in the database
8 representation will be:
9 <embed embedtype="media" url="http://vimeo.com/XXXXX">
10 """
11 @staticmethod
12 def get_db_attributes(tag):
13 """
14 Given a tag that we've identified as a media embed (because it has a
15 data-embedtype="media" attribute), return a dict of the attributes we should
16 have on the resulting <embed> element.
17 """
18 return {
19 'url': tag['data-url'],
20 }
21
22 @staticmethod
23 def expand_db_attributes(attrs, for_editor):
24 """
25 Given a dict of attributes from the <embed> tag, return the real HTML
26 representation.
27 """
28 if for_editor:
29 return format.embed_to_editor_html(attrs['url'])
30 else:
31 return format.embed_to_frontend_html(attrs['url'])
32
[end of wagtail/wagtailembeds/rich_text.py]
[start of wagtail/wagtailembeds/format.py]
1 from __future__ import division # Use true division
2
3 from django.template.loader import render_to_string
4
5 from wagtail.wagtailembeds import embeds
6
7
8 def embed_to_frontend_html(url):
9 try:
10 embed = embeds.get_embed(url)
11
12 # Work out ratio
13 if embed.width and embed.height:
14 ratio = str(embed.height / embed.width * 100) + "%"
15 else:
16 ratio = "0"
17
18 # Render template
19 return render_to_string('wagtailembeds/embed_frontend.html', {
20 'embed': embed,
21 'ratio': ratio,
22 })
23 except embeds.EmbedException:
24 return ''
25
26
27 def embed_to_editor_html(url):
28 try:
29 embed = embeds.get_embed(url)
30
31 # Render template
32 return render_to_string('wagtailembeds/embed_editor.html', {
33 'embed': embed,
34 })
35 except embeds.EmbedException:
36 # Could be replaced with a nice error message
37 return ''
38
[end of wagtail/wagtailembeds/format.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/wagtailembeds/format.py b/wagtail/wagtailembeds/format.py
--- a/wagtail/wagtailembeds/format.py
+++ b/wagtail/wagtailembeds/format.py
@@ -21,17 +21,15 @@
'ratio': ratio,
})
except embeds.EmbedException:
+ # silently ignore failed embeds, rather than letting them crash the page
return ''
def embed_to_editor_html(url):
- try:
- embed = embeds.get_embed(url)
+ embed = embeds.get_embed(url)
+ # catching EmbedException is the responsibility of the caller
- # Render template
- return render_to_string('wagtailembeds/embed_editor.html', {
- 'embed': embed,
- })
- except embeds.EmbedException:
- # Could be replaced with a nice error message
- return ''
+ # Render template
+ return render_to_string('wagtailembeds/embed_editor.html', {
+ 'embed': embed,
+ })
diff --git a/wagtail/wagtailembeds/rich_text.py b/wagtail/wagtailembeds/rich_text.py
--- a/wagtail/wagtailembeds/rich_text.py
+++ b/wagtail/wagtailembeds/rich_text.py
@@ -1,4 +1,4 @@
-from wagtail.wagtailembeds import format
+from wagtail.wagtailembeds import format, embeds
class MediaEmbedHandler(object):
@@ -26,6 +26,10 @@
representation.
"""
if for_editor:
- return format.embed_to_editor_html(attrs['url'])
+ try:
+ return format.embed_to_editor_html(attrs['url'])
+ except embeds.EmbedException:
+ # Could be replaced with a nice error message
+ return ''
else:
return format.embed_to_frontend_html(attrs['url'])
diff --git a/wagtail/wagtailembeds/views/chooser.py b/wagtail/wagtailembeds/views/chooser.py
--- a/wagtail/wagtailembeds/views/chooser.py
+++ b/wagtail/wagtailembeds/views/chooser.py
@@ -8,7 +8,6 @@
from wagtail.wagtailembeds.embeds import EmbedNotFoundException, EmbedlyException, AccessDeniedEmbedlyException
-
def chooser(request):
form = EmbedForm()
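The essence of the fix: `embed_to_editor_html` now lets `embeds.EmbedException` propagate, and each caller decides how to report it — the chooser view turns it into a form error shown in the modal, while rich-text expansion falls back to an empty string. The division of responsibility, sketched with the names from the diff:

```python
# Sketch of the pattern the patch establishes (simplified from the diff).
def embed_to_editor_html(url):
    embed = embeds.get_embed(url)  # may raise EmbedException
    return render_to_string("wagtailembeds/embed_editor.html", {"embed": embed})

def expand_db_attributes(attrs):
    try:
        return embed_to_editor_html(attrs["url"])
    except embeds.EmbedException:
        return ""  # silent fallback is acceptable when rendering rich text

# The chooser view, by contrast, catches the specific embed exceptions and
# appends the message to form._errors['url'], so the user sees the failure.
```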
| {"golden_diff": "diff --git a/wagtail/wagtailembeds/format.py b/wagtail/wagtailembeds/format.py\n--- a/wagtail/wagtailembeds/format.py\n+++ b/wagtail/wagtailembeds/format.py\n@@ -21,17 +21,15 @@\n 'ratio': ratio,\n })\n except embeds.EmbedException:\n+ # silently ignore failed embeds, rather than letting them crash the page\n return ''\n \n \n def embed_to_editor_html(url):\n- try:\n- embed = embeds.get_embed(url)\n+ embed = embeds.get_embed(url)\n+ # catching EmbedException is the responsibility of the caller\n \n- # Render template\n- return render_to_string('wagtailembeds/embed_editor.html', {\n- 'embed': embed,\n- })\n- except embeds.EmbedException:\n- # Could be replaced with a nice error message\n- return ''\n+ # Render template\n+ return render_to_string('wagtailembeds/embed_editor.html', {\n+ 'embed': embed,\n+ })\ndiff --git a/wagtail/wagtailembeds/rich_text.py b/wagtail/wagtailembeds/rich_text.py\n--- a/wagtail/wagtailembeds/rich_text.py\n+++ b/wagtail/wagtailembeds/rich_text.py\n@@ -1,4 +1,4 @@\n-from wagtail.wagtailembeds import format\n+from wagtail.wagtailembeds import format, embeds\n \n \n class MediaEmbedHandler(object):\n@@ -26,6 +26,10 @@\n representation.\n \"\"\"\n if for_editor:\n- return format.embed_to_editor_html(attrs['url'])\n+ try:\n+ return format.embed_to_editor_html(attrs['url'])\n+ except embeds.EmbedException:\n+ # Could be replaced with a nice error message\n+ return ''\n else:\n return format.embed_to_frontend_html(attrs['url'])\ndiff --git a/wagtail/wagtailembeds/views/chooser.py b/wagtail/wagtailembeds/views/chooser.py\n--- a/wagtail/wagtailembeds/views/chooser.py\n+++ b/wagtail/wagtailembeds/views/chooser.py\n@@ -8,7 +8,6 @@\n from wagtail.wagtailembeds.embeds import EmbedNotFoundException, EmbedlyException, AccessDeniedEmbedlyException\n \n \n-\n def chooser(request):\n form = EmbedForm()\n", "issue": "Errors from embed chooser are not reported back to the user\nIt seems that embed functionality of wagtail hello.js editor is not working. I tried to embed a link, modal comes up, everything is fine but when I press \"Insert\" I get nothing and browser displays this error:\n\n(Firefox) TypeError: a is undefined rangy-core.js:33:479\n(Chrome) rangy-core.js:33 Uncaught TypeError: Cannot read property 'nodeType' of undefined\n\nBut network data says everything is OK:\nPOST .../admin/embeds/chooser/upload/ 200 OK\nAfter closing the modal window, embed link is not present.\n\nErrors from embed chooser are not reported back to the user\nIt seems that embed functionality of wagtail hello.js editor is not working. 
I tried to embed a link, modal comes up, everything is fine but when I press \"Insert\" I get nothing and browser displays this error:\n\n(Firefox) TypeError: a is undefined rangy-core.js:33:479\n(Chrome) rangy-core.js:33 Uncaught TypeError: Cannot read property 'nodeType' of undefined\n\nBut network data says everything is OK:\nPOST .../admin/embeds/chooser/upload/ 200 OK\nAfter closing the modal window, embed link is not present.\n\n", "before_files": [{"content": "from django.forms.utils import ErrorList\nfrom django.utils.translation import ugettext as _\n\nfrom wagtail.wagtailadmin.modal_workflow import render_modal_workflow\nfrom wagtail.wagtailembeds.forms import EmbedForm\nfrom wagtail.wagtailembeds.format import embed_to_editor_html\n\nfrom wagtail.wagtailembeds.embeds import EmbedNotFoundException, EmbedlyException, AccessDeniedEmbedlyException\n\n\n\ndef chooser(request):\n form = EmbedForm()\n\n return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {\n 'form': form,\n })\n\n\ndef chooser_upload(request):\n if request.POST:\n form = EmbedForm(request.POST, request.FILES)\n\n if form.is_valid():\n error = None\n try:\n embed_html = embed_to_editor_html(form.cleaned_data['url'])\n return render_modal_workflow(\n request, None, 'wagtailembeds/chooser/embed_chosen.js',\n {'embed_html': embed_html}\n )\n except AccessDeniedEmbedlyException:\n error = _(\"There seems to be a problem with your embedly API key. Please check your settings.\")\n except EmbedNotFoundException:\n error = _(\"Cannot find an embed for this URL.\")\n except EmbedlyException:\n error = _(\"There seems to be an error with Embedly while trying to embed this URL. Please try again later.\")\n\n if error:\n errors = form._errors.setdefault('url', ErrorList())\n errors.append(error)\n return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {\n 'form': form,\n })\n else:\n form = EmbedForm()\n\n return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {\n 'form': form,\n })\n", "path": "wagtail/wagtailembeds/views/chooser.py"}, {"content": "from wagtail.wagtailembeds import format\n\n\nclass MediaEmbedHandler(object):\n \"\"\"\n MediaEmbedHandler will be invoked whenever we encounter an element in HTML content\n with an attribute of data-embedtype=\"media\". 
The resulting element in the database\n representation will be:\n <embed embedtype=\"media\" url=\"http://vimeo.com/XXXXX\">\n \"\"\"\n @staticmethod\n def get_db_attributes(tag):\n \"\"\"\n Given a tag that we've identified as a media embed (because it has a\n data-embedtype=\"media\" attribute), return a dict of the attributes we should\n have on the resulting <embed> element.\n \"\"\"\n return {\n 'url': tag['data-url'],\n }\n\n @staticmethod\n def expand_db_attributes(attrs, for_editor):\n \"\"\"\n Given a dict of attributes from the <embed> tag, return the real HTML\n representation.\n \"\"\"\n if for_editor:\n return format.embed_to_editor_html(attrs['url'])\n else:\n return format.embed_to_frontend_html(attrs['url'])\n", "path": "wagtail/wagtailembeds/rich_text.py"}, {"content": "from __future__ import division # Use true division\n\nfrom django.template.loader import render_to_string\n\nfrom wagtail.wagtailembeds import embeds\n\n\ndef embed_to_frontend_html(url):\n try:\n embed = embeds.get_embed(url)\n\n # Work out ratio\n if embed.width and embed.height:\n ratio = str(embed.height / embed.width * 100) + \"%\"\n else:\n ratio = \"0\"\n\n # Render template\n return render_to_string('wagtailembeds/embed_frontend.html', {\n 'embed': embed,\n 'ratio': ratio,\n })\n except embeds.EmbedException:\n return ''\n\n\ndef embed_to_editor_html(url):\n try:\n embed = embeds.get_embed(url)\n\n # Render template\n return render_to_string('wagtailembeds/embed_editor.html', {\n 'embed': embed,\n })\n except embeds.EmbedException:\n # Could be replaced with a nice error message\n return ''\n", "path": "wagtail/wagtailembeds/format.py"}]} | 1,993 | 569 |
gh_patches_debug_16372 | rasdani/github-patches | git_diff | sanic-org__sanic-2595 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Auto reloader consumes full CPU core
**Describe the bug**
Enabling the auto reloader (`auto_reload=True` or `--auto-reload`) will consume a full CPU core, because it is looping over all files in all loaded modules and `stat`-ing every file. 100% usage on a single core is observed on macOS and Linux.
The problematic loop is here:
https://github.com/sanic-org/sanic/blob/f891995b487f01ff1207afcd241ae359725a8e3c/sanic/worker/reloader.py#L46-L64
**Code snippet**
Run the following minimal example with `python3 test.py` or `sanic test:app --auto-reload` and watch CPU usage
```python
from sanic import Sanic, text
app = Sanic("test")
@app.route("/")
def hello_world(request):
return text("hello, world")
if __name__ == "__main__":
app.run(auto_reload=True)
```
**Expected behavior**
The reloader should not consume a full CPU core, but should watch the file system for changes more intelligently, using platform-specific methods (inotify, FSEvents, kqueue, etc). See the [watchdog](https://github.com/gorakhargosh/watchdog/) project for inspiration. Maybe `watchdog` could be included as an optional dependency and used if available?
For instance, [another popular framework has implemented two reloader loops](https://github.com/pallets/werkzeug/blob/2.2.2/src/werkzeug/_reloader.py) and will select the `watchdog` loop if the package is available.
Alternatively, simply add a short sleep step to the reloader loop. I think a one-second delay in reloading is acceptable. (A sketch of the watchdog-based alternative follows this issue.)
**Environment (please complete the following information):**
- OS: macOS 12.6, Ubuntu Linux 22.04 (likely also Windows, but not tested)
- Sanic Version: 22.9.0
</issue>
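For reference, the event-driven alternative proposed in the report would look roughly like this with the `watchdog` package — a sketch assuming all the reloader needs is a changed-path callback; this is not code from Sanic:

```python
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

class _Changed(FileSystemEventHandler):
    def __init__(self, on_change):
        self.on_change = on_change

    def on_modified(self, event):
        if not event.is_directory:
            self.on_change(event.src_path)  # push-based: no stat() polling

observer = Observer()  # inotify/FSEvents/kqueue, depending on platform
observer.schedule(_Changed(print), path=".", recursive=True)
observer.start()
```

The merged fix below takes the simpler route and throttles the existing polling loop instead.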
<code>
[start of sanic/worker/reloader.py]
1 from __future__ import annotations
2
3 import os
4 import sys
5
6 from asyncio import new_event_loop
7 from itertools import chain
8 from multiprocessing.connection import Connection
9 from pathlib import Path
10 from signal import SIGINT, SIGTERM
11 from signal import signal as signal_func
12 from typing import Dict, Set
13
14 from sanic.server.events import trigger_events
15 from sanic.worker.loader import AppLoader
16
17
18 class Reloader:
19 def __init__(
20 self,
21 publisher: Connection,
22 interval: float,
23 reload_dirs: Set[Path],
24 app_loader: AppLoader,
25 ):
26 self._publisher = publisher
27 self.interval = interval
28 self.reload_dirs = reload_dirs
29 self.run = True
30 self.app_loader = app_loader
31
32 def __call__(self) -> None:
33 app = self.app_loader.load()
34 signal_func(SIGINT, self.stop)
35 signal_func(SIGTERM, self.stop)
36 mtimes: Dict[str, float] = {}
37
38 reloader_start = app.listeners.get("reload_process_start")
39 reloader_stop = app.listeners.get("reload_process_stop")
40 before_trigger = app.listeners.get("before_reload_trigger")
41 after_trigger = app.listeners.get("after_reload_trigger")
42 loop = new_event_loop()
43 if reloader_start:
44 trigger_events(reloader_start, loop, app)
45
46 while self.run:
47 changed = set()
48 for filename in self.files():
49 try:
50 if self.check_file(filename, mtimes):
51 path = (
52 filename
53 if isinstance(filename, str)
54 else filename.resolve()
55 )
56 changed.add(str(path))
57 except OSError:
58 continue
59 if changed:
60 if before_trigger:
61 trigger_events(before_trigger, loop, app)
62 self.reload(",".join(changed) if changed else "unknown")
63 if after_trigger:
64 trigger_events(after_trigger, loop, app)
65 else:
66 if reloader_stop:
67 trigger_events(reloader_stop, loop, app)
68
69 def stop(self, *_):
70 self.run = False
71
72 def reload(self, reloaded_files):
73 message = f"__ALL_PROCESSES__:{reloaded_files}"
74 self._publisher.send(message)
75
76 def files(self):
77 return chain(
78 self.python_files(),
79 *(d.glob("**/*") for d in self.reload_dirs),
80 )
81
82 def python_files(self): # no cov
83 """This iterates over all relevant Python files.
84
85 It goes through all
86 loaded files from modules, all files in folders of already loaded
87 modules as well as all files reachable through a package.
88 """
89 # The list call is necessary on Python 3 in case the module
90 # dictionary modifies during iteration.
91 for module in list(sys.modules.values()):
92 if module is None:
93 continue
94 filename = getattr(module, "__file__", None)
95 if filename:
96 old = None
97 while not os.path.isfile(filename):
98 old = filename
99 filename = os.path.dirname(filename)
100 if filename == old:
101 break
102 else:
103 if filename[-4:] in (".pyc", ".pyo"):
104 filename = filename[:-1]
105 yield filename
106
107 @staticmethod
108 def check_file(filename, mtimes) -> bool:
109 need_reload = False
110
111 mtime = os.stat(filename).st_mtime
112 old_time = mtimes.get(filename)
113 if old_time is None:
114 mtimes[filename] = mtime
115 elif mtime > old_time:
116 mtimes[filename] = mtime
117 need_reload = True
118
119 return need_reload
120
[end of sanic/worker/reloader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/worker/reloader.py b/sanic/worker/reloader.py
--- a/sanic/worker/reloader.py
+++ b/sanic/worker/reloader.py
@@ -9,6 +9,7 @@
from pathlib import Path
from signal import SIGINT, SIGTERM
from signal import signal as signal_func
+from time import sleep
from typing import Dict, Set
from sanic.server.events import trigger_events
@@ -62,6 +63,7 @@
self.reload(",".join(changed) if changed else "unknown")
if after_trigger:
trigger_events(after_trigger, loop, app)
+ sleep(self.interval)
else:
if reloader_stop:
trigger_events(reloader_stop, loop, app)
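The patch trades up to one `interval` of reload latency for near-zero idle CPU: the loop still polls with `os.stat`, but once per interval instead of continuously. A stripped-down sketch of the throttled loop, assuming the same `files()`/`check_file()` helpers as the listing above:

```python
from time import sleep

def watch(reloader, mtimes):
    # Minimal sketch of the patched behaviour: one stat() pass per
    # interval rather than a tight busy-loop over every module's files.
    while reloader.run:
        changed = [f for f in reloader.files()
                   if reloader.check_file(f, mtimes)]
        if changed:
            reloader.reload(",".join(str(c) for c in changed))
        sleep(reloader.interval)  # idle between passes; caps CPU usage
```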
| {"golden_diff": "diff --git a/sanic/worker/reloader.py b/sanic/worker/reloader.py\n--- a/sanic/worker/reloader.py\n+++ b/sanic/worker/reloader.py\n@@ -9,6 +9,7 @@\n from pathlib import Path\n from signal import SIGINT, SIGTERM\n from signal import signal as signal_func\n+from time import sleep\n from typing import Dict, Set\n \n from sanic.server.events import trigger_events\n@@ -62,6 +63,7 @@\n self.reload(\",\".join(changed) if changed else \"unknown\")\n if after_trigger:\n trigger_events(after_trigger, loop, app)\n+ sleep(self.interval)\n else:\n if reloader_stop:\n trigger_events(reloader_stop, loop, app)\n", "issue": "Auto reloader consumes full CPU core\n**Describe the bug**\r\n\r\nEnabling the auto reloader (`auto_reload=True` or `--auto-reload`) will consume a full CPU core, because it is looping over all files in all loaded modules and `stat`-ing every file. 100% usage on a single core is observed on macOS and Linux.\r\n\r\nThe problematic loop is here:\r\n\r\nhttps://github.com/sanic-org/sanic/blob/f891995b487f01ff1207afcd241ae359725a8e3c/sanic/worker/reloader.py#L46-L64\r\n\r\n**Code snippet**\r\n\r\nRun the following minimal example with `python3 test.py` or `sanic test:app --auto-reload` and watch CPU usage\r\n\r\n```python\r\nfrom sanic import Sanic, text\r\n\r\napp = Sanic(\"test\")\r\n\r\n\r\[email protected](\"/\")\r\ndef hello_world(request):\r\n return text(\"hello, world\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(auto_reload=True)\r\n```\r\n\r\n\r\n**Expected behavior**\r\n\r\nThe reloader should not consume a full CPU core, but should watch the file system for changes more intelligently, using platform-specific methods (inotify, FSEvents, kqueue, etc). See the [watchdog](https://github.com/gorakhargosh/watchdog/) project for inspiration. Maybe `watchdog` could be included as an optional dependency and used if available?\r\n\r\nFor instance, [another popular framework has implemented two reloader loops](https://github.com/pallets/werkzeug/blob/2.2.2/src/werkzeug/_reloader.py) and will select the `watchdog` loop if the package is available.\r\n\r\nAlternatively, simply add a short sleep step to the reloader loop. 
I think a one second delay in reloading is acceptable.\r\n\r\n**Environment (please complete the following information):**\r\n - OS: macOS 12.6, Ubuntu Linux 22.04 (likely also Windows, but not tested)\r\n - Sanic Version: 22.9.0\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport sys\n\nfrom asyncio import new_event_loop\nfrom itertools import chain\nfrom multiprocessing.connection import Connection\nfrom pathlib import Path\nfrom signal import SIGINT, SIGTERM\nfrom signal import signal as signal_func\nfrom typing import Dict, Set\n\nfrom sanic.server.events import trigger_events\nfrom sanic.worker.loader import AppLoader\n\n\nclass Reloader:\n def __init__(\n self,\n publisher: Connection,\n interval: float,\n reload_dirs: Set[Path],\n app_loader: AppLoader,\n ):\n self._publisher = publisher\n self.interval = interval\n self.reload_dirs = reload_dirs\n self.run = True\n self.app_loader = app_loader\n\n def __call__(self) -> None:\n app = self.app_loader.load()\n signal_func(SIGINT, self.stop)\n signal_func(SIGTERM, self.stop)\n mtimes: Dict[str, float] = {}\n\n reloader_start = app.listeners.get(\"reload_process_start\")\n reloader_stop = app.listeners.get(\"reload_process_stop\")\n before_trigger = app.listeners.get(\"before_reload_trigger\")\n after_trigger = app.listeners.get(\"after_reload_trigger\")\n loop = new_event_loop()\n if reloader_start:\n trigger_events(reloader_start, loop, app)\n\n while self.run:\n changed = set()\n for filename in self.files():\n try:\n if self.check_file(filename, mtimes):\n path = (\n filename\n if isinstance(filename, str)\n else filename.resolve()\n )\n changed.add(str(path))\n except OSError:\n continue\n if changed:\n if before_trigger:\n trigger_events(before_trigger, loop, app)\n self.reload(\",\".join(changed) if changed else \"unknown\")\n if after_trigger:\n trigger_events(after_trigger, loop, app)\n else:\n if reloader_stop:\n trigger_events(reloader_stop, loop, app)\n\n def stop(self, *_):\n self.run = False\n\n def reload(self, reloaded_files):\n message = f\"__ALL_PROCESSES__:{reloaded_files}\"\n self._publisher.send(message)\n\n def files(self):\n return chain(\n self.python_files(),\n *(d.glob(\"**/*\") for d in self.reload_dirs),\n )\n\n def python_files(self): # no cov\n \"\"\"This iterates over all relevant Python files.\n\n It goes through all\n loaded files from modules, all files in folders of already loaded\n modules as well as all files reachable through a package.\n \"\"\"\n # The list call is necessary on Python 3 in case the module\n # dictionary modifies during iteration.\n for module in list(sys.modules.values()):\n if module is None:\n continue\n filename = getattr(module, \"__file__\", None)\n if filename:\n old = None\n while not os.path.isfile(filename):\n old = filename\n filename = os.path.dirname(filename)\n if filename == old:\n break\n else:\n if filename[-4:] in (\".pyc\", \".pyo\"):\n filename = filename[:-1]\n yield filename\n\n @staticmethod\n def check_file(filename, mtimes) -> bool:\n need_reload = False\n\n mtime = os.stat(filename).st_mtime\n old_time = mtimes.get(filename)\n if old_time is None:\n mtimes[filename] = mtime\n elif mtime > old_time:\n mtimes[filename] = mtime\n need_reload = True\n\n return need_reload\n", "path": "sanic/worker/reloader.py"}]} | 2,004 | 163 |