Dataset Viewer
Auto-converted to Parquet
problem_id
stringlengths
18
22
source
stringclasses
1 value
task_type
stringclasses
1 value
in_source_id
stringlengths
13
58
prompt
stringlengths
1.71k
9.01k
golden_diff
stringlengths
151
4.94k
verification_info
stringlengths
465
11.3k
num_tokens_prompt
int64
557
2.05k
num_tokens_diff
int64
48
1.02k
gh_patches_debug_32737
rasdani/github-patches
git_diff
dask__dask-586
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Additional improvement for da.image.imread I think following 2 improvements make the function better. #### 1. Allow custom `imread` function. Some prefer `opencv` which reads color in BGR order, otherwise `skimage` reads in RGB order. Adding `dialect` option (like `dialect='skimage'` or `dialect='cv'`) or accept different file read function may better. #### 2. Allow custom preprocessing function. In image processing, input images may have different sizes. In these case, preprocessing is needed between image read and array creation. This preprocessing function must return the same size of array on user's responsibility. </issue> <code> [start of dask/array/image.py] 1 from glob import glob 2 import os 3 4 try: 5 from skimage.io import imread as sk_imread 6 except ImportError: 7 pass 8 9 from .core import Array 10 from ..base import tokenize 11 12 def add_leading_dimension(x): 13 return x[None, ...] 14 15 16 def imread(filename): 17 """ Read a stack of images into a dask array 18 19 Parameters 20 ---------- 21 22 filename: string 23 A globstring like 'myfile.*.png' 24 25 Example 26 ------- 27 28 >>> from dask.array.image import imread 29 >>> im = imread('2015-*-*.png') # doctest: +SKIP 30 >>> im.shape # doctest: +SKIP 31 (365, 1000, 1000, 3) 32 33 Returns 34 ------- 35 36 Dask array of all images stacked along the first dimension. All images 37 will be treated as individual chunks 38 """ 39 filenames = sorted(glob(filename)) 40 if not filenames: 41 raise ValueError("No files found under name %s" % filename) 42 43 name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames)) 44 45 sample = sk_imread(filenames[0]) 46 47 dsk = dict(((name, i) + (0,) * len(sample.shape), 48 (add_leading_dimension, (sk_imread, filename))) 49 for i, filename in enumerate(filenames)) 50 51 chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape) 52 53 return Array(dsk, name, chunks, sample.dtype) 54 [end of dask/array/image.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dask/array/image.py b/dask/array/image.py --- a/dask/array/image.py +++ b/dask/array/image.py @@ -13,7 +13,7 @@ return x[None, ...] -def imread(filename): +def imread(filename, imread=None, preprocess=None): """ Read a stack of images into a dask array Parameters @@ -21,6 +21,13 @@ filename: string A globstring like 'myfile.*.png' + imread: function (optional) + Optionally provide custom imread function. + Function should expect a filename and produce a numpy array. + Defaults to ``skimage.io.imread``. + preprocess: function (optional) + Optionally provide custom function to preprocess the image. + Function should expect a numpy array for a single image. Example ------- @@ -36,17 +43,25 @@ Dask array of all images stacked along the first dimension. All images will be treated as individual chunks """ + imread = imread or sk_imread filenames = sorted(glob(filename)) if not filenames: raise ValueError("No files found under name %s" % filename) name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames)) - sample = sk_imread(filenames[0]) - - dsk = dict(((name, i) + (0,) * len(sample.shape), - (add_leading_dimension, (sk_imread, filename))) - for i, filename in enumerate(filenames)) + sample = imread(filenames[0]) + if preprocess: + sample = preprocess(sample) + + keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))] + if preprocess: + values = [(add_leading_dimension, (preprocess, (imread, filename))) + for filename in filenames] + else: + values = [(add_leading_dimension, (imread, filename)) + for filename in filenames] + dsk = dict(zip(keys, values)) chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)
{"golden_diff": "diff --git a/dask/array/image.py b/dask/array/image.py\n--- a/dask/array/image.py\n+++ b/dask/array/image.py\n@@ -13,7 +13,7 @@\n return x[None, ...]\n \n \n-def imread(filename):\n+def imread(filename, imread=None, preprocess=None):\n \"\"\" Read a stack of images into a dask array\n \n Parameters\n@@ -21,6 +21,13 @@\n \n filename: string\n A globstring like 'myfile.*.png'\n+ imread: function (optional)\n+ Optionally provide custom imread function.\n+ Function should expect a filename and produce a numpy array.\n+ Defaults to ``skimage.io.imread``.\n+ preprocess: function (optional)\n+ Optionally provide custom function to preprocess the image.\n+ Function should expect a numpy array for a single image.\n \n Example\n -------\n@@ -36,17 +43,25 @@\n Dask array of all images stacked along the first dimension. All images\n will be treated as individual chunks\n \"\"\"\n+ imread = imread or sk_imread\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n \n name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))\n \n- sample = sk_imread(filenames[0])\n-\n- dsk = dict(((name, i) + (0,) * len(sample.shape),\n- (add_leading_dimension, (sk_imread, filename)))\n- for i, filename in enumerate(filenames))\n+ sample = imread(filenames[0])\n+ if preprocess:\n+ sample = preprocess(sample)\n+\n+ keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]\n+ if preprocess:\n+ values = [(add_leading_dimension, (preprocess, (imread, filename)))\n+ for filename in filenames]\n+ else:\n+ values = [(add_leading_dimension, (imread, filename))\n+ for filename in filenames]\n+ dsk = dict(zip(keys, values))\n \n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n", "issue": "Additional improvement for da.image.imread\nI think following 2 improvements make the function better.\n#### 1. Allow custom `imread` function.\n\nSome prefer `opencv` which reads color in BGR order, otherwise `skimage` reads in RGB order. Adding `dialect` option (like `dialect='skimage'` or `dialect='cv'`) or accept different file read function may better.\n#### 2. Allow custom preprocessing function.\n\nIn image processing, input images may have different sizes. In these case, preprocessing is needed between image read and array creation. This preprocessing function must return the same size of array on user's responsibility.\n\n", "before_files": [{"content": "from glob import glob\nimport os\n\ntry:\n from skimage.io import imread as sk_imread\nexcept ImportError:\n pass\n\nfrom .core import Array\nfrom ..base import tokenize\n\ndef add_leading_dimension(x):\n return x[None, ...]\n\n\ndef imread(filename):\n \"\"\" Read a stack of images into a dask array\n\n Parameters\n ----------\n\n filename: string\n A globstring like 'myfile.*.png'\n\n Example\n -------\n\n >>> from dask.array.image import imread\n >>> im = imread('2015-*-*.png') # doctest: +SKIP\n >>> im.shape # doctest: +SKIP\n (365, 1000, 1000, 3)\n\n Returns\n -------\n\n Dask array of all images stacked along the first dimension. All images\n will be treated as individual chunks\n \"\"\"\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n\n name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))\n\n sample = sk_imread(filenames[0])\n\n dsk = dict(((name, i) + (0,) * len(sample.shape),\n (add_leading_dimension, (sk_imread, filename)))\n for i, filename in enumerate(filenames))\n\n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n\n return Array(dsk, name, chunks, sample.dtype)\n", "path": "dask/array/image.py"}]}
1,120
498
gh_patches_debug_3876
rasdani/github-patches
git_diff
xorbitsai__inference-299
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> FEAT: Disable Gradio Telemetry Pull requests are disabled but see here: https://github.com/arch-btw/inference/pull/1 </issue> <code> [start of examples/gradio_chatinterface.py] 1 from typing import Dict, List 2 3 import gradio as gr 4 5 from xinference.client import Client 6 7 if __name__ == "__main__": 8 import argparse 9 import textwrap 10 11 parser = argparse.ArgumentParser( 12 formatter_class=argparse.RawDescriptionHelpFormatter, 13 epilog=textwrap.dedent( 14 """\ 15 instructions to run: 16 1. Install Xinference and Llama-cpp-python 17 2. Run 'xinference --host "localhost" --port 9997' in terminal 18 3. Run this python file in new terminal window 19 20 e.g. (feel free to copy) 21 python gradio_chatinterface.py \\ 22 --endpoint http://localhost:9997 \\ 23 --model_name vicuna-v1.3 \\ 24 --model_size_in_billions 7 \\ 25 --model_format ggmlv3 \\ 26 --quantization q2_K 27 28 If you decide to change the port number in step 2, 29 please also change the endpoint in the arguments 30 """ 31 ), 32 ) 33 34 parser.add_argument( 35 "--endpoint", type=str, required=True, help="Xinference endpoint, required" 36 ) 37 parser.add_argument( 38 "--model_name", type=str, required=True, help="Name of the model, required" 39 ) 40 parser.add_argument( 41 "--model_size_in_billions", 42 type=int, 43 required=False, 44 help="Size of the model in billions", 45 ) 46 parser.add_argument( 47 "--model_format", 48 type=str, 49 required=False, 50 help="Format of the model", 51 ) 52 parser.add_argument( 53 "--quantization", type=str, required=False, help="Quantization of the model" 54 ) 55 56 args = parser.parse_args() 57 58 endpoint = args.endpoint 59 model_name = args.model_name 60 model_size_in_billions = args.model_size_in_billions 61 model_format = args.model_format 62 quantization = args.quantization 63 64 print(f"Xinference endpoint: {endpoint}") 65 print(f"Model Name: {model_name}") 66 print(f"Model Size (in billions): {model_size_in_billions}") 67 print(f"Model Format: {model_format}") 68 print(f"Quantization: {quantization}") 69 70 client = Client(endpoint) 71 model_uid = client.launch_model( 72 model_name, 73 model_size_in_billions=model_size_in_billions, 74 model_format=model_format, 75 quantization=quantization, 76 n_ctx=2048, 77 ) 78 model = client.get_model(model_uid) 79 80 def flatten(matrix: List[List[str]]) -> List[str]: 81 flat_list = [] 82 for row in matrix: 83 flat_list += row 84 return flat_list 85 86 def to_chat(lst: List[str]) -> List[Dict[str, str]]: 87 res = [] 88 for i in range(len(lst)): 89 role = "assistant" if i % 2 == 1 else "user" 90 res.append( 91 { 92 "role": role, 93 "content": lst[i], 94 } 95 ) 96 return res 97 98 def generate_wrapper(message: str, history: List[List[str]]) -> str: 99 output = model.chat( 100 prompt=message, 101 chat_history=to_chat(flatten(history)), 102 generate_config={"max_tokens": 512, "stream": False}, 103 ) 104 return output["choices"][0]["message"]["content"] 105 106 demo = gr.ChatInterface( 107 fn=generate_wrapper, 108 examples=[ 109 "Show me a two sentence horror story with a plot twist", 110 "Generate a Haiku poem using trignometry as the central theme", 111 "Write three sentences of scholarly description regarding a supernatural beast", 112 "Prove there does not exist a largest integer", 113 ], 114 title="Xinference Chat Bot", 115 ) 116 demo.launch() 117 [end of examples/gradio_chatinterface.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/gradio_chatinterface.py b/examples/gradio_chatinterface.py --- a/examples/gradio_chatinterface.py +++ b/examples/gradio_chatinterface.py @@ -105,6 +105,7 @@ demo = gr.ChatInterface( fn=generate_wrapper, + analytics_enabled=False, examples=[ "Show me a two sentence horror story with a plot twist", "Generate a Haiku poem using trignometry as the central theme",
{"golden_diff": "diff --git a/examples/gradio_chatinterface.py b/examples/gradio_chatinterface.py\n--- a/examples/gradio_chatinterface.py\n+++ b/examples/gradio_chatinterface.py\n@@ -105,6 +105,7 @@\n \n demo = gr.ChatInterface(\n fn=generate_wrapper,\n+ analytics_enabled=False,\n examples=[\n \"Show me a two sentence horror story with a plot twist\",\n \"Generate a Haiku poem using trignometry as the central theme\",\n", "issue": "FEAT: Disable Gradio Telemetry\nPull requests are disabled but see here:\r\n\r\nhttps://github.com/arch-btw/inference/pull/1\n", "before_files": [{"content": "from typing import Dict, List\n\nimport gradio as gr\n\nfrom xinference.client import Client\n\nif __name__ == \"__main__\":\n import argparse\n import textwrap\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=textwrap.dedent(\n \"\"\"\\\n instructions to run:\n 1. Install Xinference and Llama-cpp-python\n 2. Run 'xinference --host \"localhost\" --port 9997' in terminal\n 3. Run this python file in new terminal window\n\n e.g. (feel free to copy)\n python gradio_chatinterface.py \\\\\n --endpoint http://localhost:9997 \\\\\n --model_name vicuna-v1.3 \\\\\n --model_size_in_billions 7 \\\\\n --model_format ggmlv3 \\\\\n --quantization q2_K\n\n If you decide to change the port number in step 2,\n please also change the endpoint in the arguments\n \"\"\"\n ),\n )\n\n parser.add_argument(\n \"--endpoint\", type=str, required=True, help=\"Xinference endpoint, required\"\n )\n parser.add_argument(\n \"--model_name\", type=str, required=True, help=\"Name of the model, required\"\n )\n parser.add_argument(\n \"--model_size_in_billions\",\n type=int,\n required=False,\n help=\"Size of the model in billions\",\n )\n parser.add_argument(\n \"--model_format\",\n type=str,\n required=False,\n help=\"Format of the model\",\n )\n parser.add_argument(\n \"--quantization\", type=str, required=False, help=\"Quantization of the model\"\n )\n\n args = parser.parse_args()\n\n endpoint = args.endpoint\n model_name = args.model_name\n model_size_in_billions = args.model_size_in_billions\n model_format = args.model_format\n quantization = args.quantization\n\n print(f\"Xinference endpoint: {endpoint}\")\n print(f\"Model Name: {model_name}\")\n print(f\"Model Size (in billions): {model_size_in_billions}\")\n print(f\"Model Format: {model_format}\")\n print(f\"Quantization: {quantization}\")\n\n client = Client(endpoint)\n model_uid = client.launch_model(\n model_name,\n model_size_in_billions=model_size_in_billions,\n model_format=model_format,\n quantization=quantization,\n n_ctx=2048,\n )\n model = client.get_model(model_uid)\n\n def flatten(matrix: List[List[str]]) -> List[str]:\n flat_list = []\n for row in matrix:\n flat_list += row\n return flat_list\n\n def to_chat(lst: List[str]) -> List[Dict[str, str]]:\n res = []\n for i in range(len(lst)):\n role = \"assistant\" if i % 2 == 1 else \"user\"\n res.append(\n {\n \"role\": role,\n \"content\": lst[i],\n }\n )\n return res\n\n def generate_wrapper(message: str, history: List[List[str]]) -> str:\n output = model.chat(\n prompt=message,\n chat_history=to_chat(flatten(history)),\n generate_config={\"max_tokens\": 512, \"stream\": False},\n )\n return output[\"choices\"][0][\"message\"][\"content\"]\n\n demo = gr.ChatInterface(\n fn=generate_wrapper,\n examples=[\n \"Show me a two sentence horror story with a plot twist\",\n \"Generate a Haiku poem using trignometry as the central theme\",\n \"Write three sentences of scholarly description regarding a supernatural beast\",\n \"Prove there does not exist a largest integer\",\n ],\n title=\"Xinference Chat Bot\",\n )\n demo.launch()\n", "path": "examples/gradio_chatinterface.py"}]}
1,629
104
gh_patches_debug_4863
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-1210
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PDF Export URL pattern ### Describe the Bug The web app calls `/REGION/LANG/wp-json/ig-mpdf/v1/pdf` to export a PDF which returns a 404. Our API currently uses `REGION/LANG/pdf`. The normal mapping does not work, as we ### Steps to Reproduce ```shell curl 'https://malte-test.tuerantuer.org/joerdenstorf/de/wp-json/ig-mpdf/v1/pdf' ``` ### Expected Behavior Map old URL pattern to new endpoint. ### Actual Behavior 404 </issue> <code> [start of integreat_cms/api/urls.py] 1 """ 2 Expansion of API-Endpoints for the CMS 3 """ 4 from django.urls import include, path, re_path 5 6 from .v3.events import events 7 from .v3.feedback import ( 8 page_feedback, 9 search_result_feedback, 10 region_feedback, 11 offer_feedback, 12 offer_list_feedback, 13 event_list_feedback, 14 event_feedback, 15 poi_feedback, 16 map_feedback, 17 imprint_page_feedback, 18 legacy_feedback_endpoint, 19 ) 20 from .v3.imprint import imprint 21 from .v3.languages import languages 22 from .v3.locations import locations 23 from .v3.pages import pages, children, parents, single_page 24 from .v3.pdf_export import pdf_export 25 from .v3.push_notifications import sent_push_notifications 26 from .v3.regions import regions, liveregions, hiddenregions 27 from .v3.offers import offers 28 29 30 #: The namespace for this URL config (see :attr:`django.urls.ResolverMatch.app_name`) 31 app_name = "api" 32 33 content_api_urlpatterns = [ 34 path("pages/", pages, name="pages"), 35 path("locations/", locations, name="locations"), 36 path("events/", events, name="events"), 37 path("page/", single_page, name="single_page"), 38 path("post/", single_page, name="single_page"), 39 path("children/", children, name="children"), 40 path("parents/", parents, name="parents"), 41 path("pdf/", pdf_export, name="pdf_export"), 42 path( 43 "sent_push_notifications/", 44 sent_push_notifications, 45 name="sent_push_notifications", 46 ), 47 path("imprint/", imprint, name="imprint"), 48 path("disclaimer/", imprint, name="imprint"), 49 path("offers/", offers, name="offers"), 50 path("extras/", offers, name="offers"), 51 re_path( 52 r"^feedback/?$", 53 legacy_feedback_endpoint.legacy_feedback_endpoint, 54 name="legacy_feedback_endpoint", 55 ), 56 path( 57 "feedback/", 58 include( 59 [ 60 re_path( 61 r"^categories/?$", 62 region_feedback.region_feedback, 63 name="region_feedback", 64 ), 65 re_path(r"^page/?$", page_feedback.page_feedback, name="page_feedback"), 66 re_path(r"^poi/?$", poi_feedback.poi_feedback, name="poi_feedback"), 67 re_path( 68 r"^event/?$", event_feedback.event_feedback, name="event_feedback" 69 ), 70 re_path( 71 r"^events/?$", 72 event_list_feedback.event_list_feedback, 73 name="event_list_feedback", 74 ), 75 re_path( 76 r"^imprint-page/?$", 77 imprint_page_feedback.imprint_page_feedback, 78 name="imprint_page_feedbacks", 79 ), 80 re_path(r"^map/?$", map_feedback.map_feedback, name="map_feedback"), 81 re_path( 82 r"^search/?$", 83 search_result_feedback.search_result_feedback, 84 name="search_result_feedback", 85 ), 86 re_path( 87 r"^offers/?$", 88 offer_list_feedback.offer_list_feedback, 89 name="offer_list_feedback", 90 ), 91 re_path( 92 r"^extras/?$", 93 offer_list_feedback.offer_list_feedback, 94 name="offer_list_feedback", 95 ), 96 re_path( 97 r"^offer/?$", offer_feedback.offer_feedback, name="offer_feedback" 98 ), 99 re_path( 100 r"^extra/?$", offer_feedback.offer_feedback, name="offer_feedback" 101 ), 102 ] 103 ), 104 ), 105 ] 106 107 region_api_urlpatterns = [ 108 path("", regions, name="regions"), 109 path("live/", liveregions, name="regions_live"), 110 path("hidden/", hiddenregions, name="regions_hidden"), 111 ] 112 113 #: The url patterns of this module (see :doc:`topics/http/urls`) 114 urlpatterns = [ 115 path("api/regions/", include(region_api_urlpatterns)), 116 path("wp-json/extensions/v3/sites/", include(region_api_urlpatterns)), 117 path( 118 "api/<slug:region_slug>/", 119 include( 120 [ 121 path("languages/", languages, name="languages"), 122 path("offers/", offers, name="offers"), 123 path("extras/", offers, name="offers"), 124 path("<slug:language_slug>/", include(content_api_urlpatterns)), 125 ] 126 ), 127 ), 128 path( 129 "<slug:region_slug>/", 130 include( 131 [ 132 path( 133 "de/wp-json/extensions/v3/languages/", languages, name="languages" 134 ), 135 path( 136 "<slug:language_slug>/wp-json/extensions/v3/", 137 include(content_api_urlpatterns), 138 ), 139 ] 140 ), 141 ), 142 ] 143 [end of integreat_cms/api/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/integreat_cms/api/urls.py b/integreat_cms/api/urls.py --- a/integreat_cms/api/urls.py +++ b/integreat_cms/api/urls.py @@ -136,6 +136,11 @@ "<slug:language_slug>/wp-json/extensions/v3/", include(content_api_urlpatterns), ), + path( + "<slug:language_slug>/wp-json/ig-mpdf/v1/pdf/", + pdf_export, + name="pdf_export", + ), ] ), ),
{"golden_diff": "diff --git a/integreat_cms/api/urls.py b/integreat_cms/api/urls.py\n--- a/integreat_cms/api/urls.py\n+++ b/integreat_cms/api/urls.py\n@@ -136,6 +136,11 @@\n \"<slug:language_slug>/wp-json/extensions/v3/\",\n include(content_api_urlpatterns),\n ),\n+ path(\n+ \"<slug:language_slug>/wp-json/ig-mpdf/v1/pdf/\",\n+ pdf_export,\n+ name=\"pdf_export\",\n+ ),\n ]\n ),\n ),\n", "issue": "PDF Export URL pattern\n### Describe the Bug\r\nThe web app calls `/REGION/LANG/wp-json/ig-mpdf/v1/pdf` to export a PDF which returns a 404. Our API currently uses `REGION/LANG/pdf`.\r\n\r\nThe normal mapping does not work, as we\r\n\r\n### Steps to Reproduce\r\n\r\n```shell\r\ncurl 'https://malte-test.tuerantuer.org/joerdenstorf/de/wp-json/ig-mpdf/v1/pdf'\r\n```\r\n\r\n### Expected Behavior\r\nMap old URL pattern to new endpoint.\r\n\r\n\r\n### Actual Behavior\r\n404\n", "before_files": [{"content": "\"\"\"\nExpansion of API-Endpoints for the CMS\n\"\"\"\nfrom django.urls import include, path, re_path\n\nfrom .v3.events import events\nfrom .v3.feedback import (\n page_feedback,\n search_result_feedback,\n region_feedback,\n offer_feedback,\n offer_list_feedback,\n event_list_feedback,\n event_feedback,\n poi_feedback,\n map_feedback,\n imprint_page_feedback,\n legacy_feedback_endpoint,\n)\nfrom .v3.imprint import imprint\nfrom .v3.languages import languages\nfrom .v3.locations import locations\nfrom .v3.pages import pages, children, parents, single_page\nfrom .v3.pdf_export import pdf_export\nfrom .v3.push_notifications import sent_push_notifications\nfrom .v3.regions import regions, liveregions, hiddenregions\nfrom .v3.offers import offers\n\n\n#: The namespace for this URL config (see :attr:`django.urls.ResolverMatch.app_name`)\napp_name = \"api\"\n\ncontent_api_urlpatterns = [\n path(\"pages/\", pages, name=\"pages\"),\n path(\"locations/\", locations, name=\"locations\"),\n path(\"events/\", events, name=\"events\"),\n path(\"page/\", single_page, name=\"single_page\"),\n path(\"post/\", single_page, name=\"single_page\"),\n path(\"children/\", children, name=\"children\"),\n path(\"parents/\", parents, name=\"parents\"),\n path(\"pdf/\", pdf_export, name=\"pdf_export\"),\n path(\n \"sent_push_notifications/\",\n sent_push_notifications,\n name=\"sent_push_notifications\",\n ),\n path(\"imprint/\", imprint, name=\"imprint\"),\n path(\"disclaimer/\", imprint, name=\"imprint\"),\n path(\"offers/\", offers, name=\"offers\"),\n path(\"extras/\", offers, name=\"offers\"),\n re_path(\n r\"^feedback/?$\",\n legacy_feedback_endpoint.legacy_feedback_endpoint,\n name=\"legacy_feedback_endpoint\",\n ),\n path(\n \"feedback/\",\n include(\n [\n re_path(\n r\"^categories/?$\",\n region_feedback.region_feedback,\n name=\"region_feedback\",\n ),\n re_path(r\"^page/?$\", page_feedback.page_feedback, name=\"page_feedback\"),\n re_path(r\"^poi/?$\", poi_feedback.poi_feedback, name=\"poi_feedback\"),\n re_path(\n r\"^event/?$\", event_feedback.event_feedback, name=\"event_feedback\"\n ),\n re_path(\n r\"^events/?$\",\n event_list_feedback.event_list_feedback,\n name=\"event_list_feedback\",\n ),\n re_path(\n r\"^imprint-page/?$\",\n imprint_page_feedback.imprint_page_feedback,\n name=\"imprint_page_feedbacks\",\n ),\n re_path(r\"^map/?$\", map_feedback.map_feedback, name=\"map_feedback\"),\n re_path(\n r\"^search/?$\",\n search_result_feedback.search_result_feedback,\n name=\"search_result_feedback\",\n ),\n re_path(\n r\"^offers/?$\",\n offer_list_feedback.offer_list_feedback,\n name=\"offer_list_feedback\",\n ),\n re_path(\n r\"^extras/?$\",\n offer_list_feedback.offer_list_feedback,\n name=\"offer_list_feedback\",\n ),\n re_path(\n r\"^offer/?$\", offer_feedback.offer_feedback, name=\"offer_feedback\"\n ),\n re_path(\n r\"^extra/?$\", offer_feedback.offer_feedback, name=\"offer_feedback\"\n ),\n ]\n ),\n ),\n]\n\nregion_api_urlpatterns = [\n path(\"\", regions, name=\"regions\"),\n path(\"live/\", liveregions, name=\"regions_live\"),\n path(\"hidden/\", hiddenregions, name=\"regions_hidden\"),\n]\n\n#: The url patterns of this module (see :doc:`topics/http/urls`)\nurlpatterns = [\n path(\"api/regions/\", include(region_api_urlpatterns)),\n path(\"wp-json/extensions/v3/sites/\", include(region_api_urlpatterns)),\n path(\n \"api/<slug:region_slug>/\",\n include(\n [\n path(\"languages/\", languages, name=\"languages\"),\n path(\"offers/\", offers, name=\"offers\"),\n path(\"extras/\", offers, name=\"offers\"),\n path(\"<slug:language_slug>/\", include(content_api_urlpatterns)),\n ]\n ),\n ),\n path(\n \"<slug:region_slug>/\",\n include(\n [\n path(\n \"de/wp-json/extensions/v3/languages/\", languages, name=\"languages\"\n ),\n path(\n \"<slug:language_slug>/wp-json/extensions/v3/\",\n include(content_api_urlpatterns),\n ),\n ]\n ),\n ),\n]\n", "path": "integreat_cms/api/urls.py"}]}
1,935
130
gh_patches_debug_29434
rasdani/github-patches
git_diff
plone__Products.CMFPlone-1515
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Resources from third party add-ons are not being included in compiled plone-legacy bundle Seems JS resources registered in Plone 5 using old approach (`jsregistry.xml`) are not included in the final compilation: I installed an add-on and, even as I can see the JS resources listed in `default.js`, the source code is not present. If I enable development mode, then I can see the source code included in `plone-legacy-compiled.js` and it's executed normally. </issue> <code> [start of Products/CMFPlone/resources/browser/combine.py] 1 from zExceptions import NotFound 2 from Acquisition import aq_base 3 from datetime import datetime 4 from plone.registry.interfaces import IRegistry 5 from plone.resource.file import FilesystemFile 6 from plone.resource.interfaces import IResourceDirectory 7 from Products.CMFPlone.interfaces import IBundleRegistry 8 from Products.CMFPlone.interfaces.resources import ( 9 OVERRIDE_RESOURCE_DIRECTORY_NAME, 10 ) 11 from StringIO import StringIO 12 from zope.component import getUtility 13 from zope.component import queryUtility 14 15 PRODUCTION_RESOURCE_DIRECTORY = "production" 16 17 18 def get_production_resource_directory(): 19 persistent_directory = queryUtility(IResourceDirectory, name="persistent") 20 if persistent_directory is None: 21 return '' 22 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME] 23 try: 24 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY] 25 except NotFound: 26 return "%s/++unique++1" % PRODUCTION_RESOURCE_DIRECTORY 27 timestamp = production_folder.readFile('timestamp.txt') 28 return "%s/++unique++%s" % ( 29 PRODUCTION_RESOURCE_DIRECTORY, timestamp) 30 31 32 def get_resource(context, path): 33 resource = context.unrestrictedTraverse(path) 34 if isinstance(resource, FilesystemFile): 35 (directory, sep, filename) = path.rpartition('/') 36 return context.unrestrictedTraverse(directory).readFile(filename) 37 else: 38 if hasattr(aq_base(resource), 'GET'): 39 # for FileResource 40 return resource.GET() 41 else: 42 # any BrowserView 43 return resource() 44 45 46 def write_js(context, folder, meta_bundle): 47 registry = getUtility(IRegistry) 48 resources = [] 49 50 # default resources 51 if meta_bundle == 'default' and registry.records.get( 52 'plone.resources/jquery.js' 53 ): 54 resources.append(get_resource(context, 55 registry.records['plone.resources/jquery.js'].value)) 56 resources.append(get_resource(context, 57 registry.records['plone.resources.requirejs'].value)) 58 resources.append(get_resource(context, 59 registry.records['plone.resources.configjs'].value)) 60 61 # bundles 62 bundles = registry.collectionOfInterface( 63 IBundleRegistry, prefix="plone.bundles", check=False) 64 for bundle in bundles.values(): 65 if bundle.merge_with == meta_bundle: 66 resources.append(get_resource(context, bundle.jscompilation)) 67 68 fi = StringIO() 69 for script in resources: 70 fi.write(script + '\n') 71 folder.writeFile(meta_bundle + ".js", fi) 72 73 74 def write_css(context, folder, meta_bundle): 75 registry = getUtility(IRegistry) 76 resources = [] 77 78 bundles = registry.collectionOfInterface( 79 IBundleRegistry, prefix="plone.bundles", check=False) 80 for bundle in bundles.values(): 81 if bundle.merge_with == meta_bundle: 82 resources.append(get_resource(context, bundle.csscompilation)) 83 84 fi = StringIO() 85 for script in resources: 86 fi.write(script + '\n') 87 folder.writeFile(meta_bundle + ".css", fi) 88 89 90 def combine_bundles(context): 91 persistent_directory = queryUtility(IResourceDirectory, name="persistent") 92 if persistent_directory is None: 93 return 94 if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory: 95 persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME) 96 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME] 97 if PRODUCTION_RESOURCE_DIRECTORY not in container: 98 container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY) 99 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY] 100 101 # store timestamp 102 fi = StringIO() 103 fi.write(datetime.now().isoformat()) 104 production_folder.writeFile("timestamp.txt", fi) 105 106 # generate new combined bundles 107 write_js(context, production_folder, 'default') 108 write_js(context, production_folder, 'logged-in') 109 write_css(context, production_folder, 'default') 110 write_css(context, production_folder, 'logged-in') 111 [end of Products/CMFPlone/resources/browser/combine.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py --- a/Products/CMFPlone/resources/browser/combine.py +++ b/Products/CMFPlone/resources/browser/combine.py @@ -30,6 +30,14 @@ def get_resource(context, path): + if path.startswith('++plone++'): + # ++plone++ resources can be customized, we return their override + # value if any + overrides = get_override_directory(context) + filepath = path[9:] + if overrides.isFile(filepath): + return overrides.readFile(filepath) + resource = context.unrestrictedTraverse(path) if isinstance(resource, FilesystemFile): (directory, sep, filename) = path.rpartition('/') @@ -87,13 +95,17 @@ folder.writeFile(meta_bundle + ".css", fi) -def combine_bundles(context): +def get_override_directory(context): persistent_directory = queryUtility(IResourceDirectory, name="persistent") if persistent_directory is None: return if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory: persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME) - container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME] + return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME] + + +def combine_bundles(context): + container = get_override_directory(context) if PRODUCTION_RESOURCE_DIRECTORY not in container: container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY) production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
{"golden_diff": "diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py\n--- a/Products/CMFPlone/resources/browser/combine.py\n+++ b/Products/CMFPlone/resources/browser/combine.py\n@@ -30,6 +30,14 @@\n \n \n def get_resource(context, path):\n+ if path.startswith('++plone++'):\n+ # ++plone++ resources can be customized, we return their override\n+ # value if any\n+ overrides = get_override_directory(context)\n+ filepath = path[9:]\n+ if overrides.isFile(filepath):\n+ return overrides.readFile(filepath)\n+\n resource = context.unrestrictedTraverse(path)\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n@@ -87,13 +95,17 @@\n folder.writeFile(meta_bundle + \".css\", fi)\n \n \n-def combine_bundles(context):\n+def get_override_directory(context):\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n- container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n+ return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n+\n+\n+def combine_bundles(context):\n+ container = get_override_directory(context)\n if PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n", "issue": "Resources from third party add-ons are not being included in compiled plone-legacy bundle\nSeems JS resources registered in Plone 5 using old approach (`jsregistry.xml`) are not included in the final compilation: I installed an add-on and, even as I can see the JS resources listed in `default.js`, the source code is not present.\n\nIf I enable development mode, then I can see the source code included in `plone-legacy-compiled.js` and it's executed normally.\n\n", "before_files": [{"content": "from zExceptions import NotFound\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import (\n OVERRIDE_RESOURCE_DIRECTORY_NAME,\n)\nfrom StringIO import StringIO\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nPRODUCTION_RESOURCE_DIRECTORY = \"production\"\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return \"%s/++unique++%s\" % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n resource = context.unrestrictedTraverse(path)\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n else:\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n return resource.GET()\n else:\n # any BrowserView\n return resource()\n\n\ndef write_js(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n # default resources\n if meta_bundle == 'default' and registry.records.get(\n 'plone.resources/jquery.js'\n ):\n resources.append(get_resource(context,\n registry.records['plone.resources/jquery.js'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.requirejs'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.configjs'].value))\n\n # bundles\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle:\n resources.append(get_resource(context, bundle.jscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".js\", fi)\n\n\ndef write_css(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle:\n resources.append(get_resource(context, bundle.csscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".css\", fi)\n\n\ndef combine_bundles(context):\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n if PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile(\"timestamp.txt\", fi)\n\n # generate new combined bundles\n write_js(context, production_folder, 'default')\n write_js(context, production_folder, 'logged-in')\n write_css(context, production_folder, 'default')\n write_css(context, production_folder, 'logged-in')\n", "path": "Products/CMFPlone/resources/browser/combine.py"}]}
1,666
339
gh_patches_debug_22011
rasdani/github-patches
git_diff
docker__docker-py-1330
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add docker network IPAM options parameter IPAM driver missing options supports an options field in the IPAM config It introduced in API v1.22. ``` POST /networks/create Now supports an options field in the IPAM config that provides options for custom IPAM plugins. ``` </issue> <code> [start of docker/types/networks.py] 1 from .. import errors 2 from ..utils import normalize_links, version_lt 3 4 5 class EndpointConfig(dict): 6 def __init__(self, version, aliases=None, links=None, ipv4_address=None, 7 ipv6_address=None, link_local_ips=None): 8 if version_lt(version, '1.22'): 9 raise errors.InvalidVersion( 10 'Endpoint config is not supported for API version < 1.22' 11 ) 12 13 if aliases: 14 self["Aliases"] = aliases 15 16 if links: 17 self["Links"] = normalize_links(links) 18 19 ipam_config = {} 20 if ipv4_address: 21 ipam_config['IPv4Address'] = ipv4_address 22 23 if ipv6_address: 24 ipam_config['IPv6Address'] = ipv6_address 25 26 if link_local_ips is not None: 27 if version_lt(version, '1.24'): 28 raise errors.InvalidVersion( 29 'link_local_ips is not supported for API version < 1.24' 30 ) 31 ipam_config['LinkLocalIPs'] = link_local_ips 32 33 if ipam_config: 34 self['IPAMConfig'] = ipam_config 35 36 37 class NetworkingConfig(dict): 38 def __init__(self, endpoints_config=None): 39 if endpoints_config: 40 self["EndpointsConfig"] = endpoints_config 41 42 43 class IPAMConfig(dict): 44 """ 45 Create an IPAM (IP Address Management) config dictionary to be used with 46 :py:meth:`~docker.api.network.NetworkApiMixin.create_network`. 47 48 Args: 49 50 driver (str): The IPAM driver to use. Defaults to ``default``. 51 pool_configs (list): A list of pool configurations 52 (:py:class:`~docker.types.IPAMPool`). Defaults to empty list. 53 54 Example: 55 56 >>> ipam_config = docker.types.IPAMConfig(driver='default') 57 >>> network = client.create_network('network1', ipam=ipam_config) 58 59 """ 60 def __init__(self, driver='default', pool_configs=None): 61 self.update({ 62 'Driver': driver, 63 'Config': pool_configs or [] 64 }) 65 66 67 class IPAMPool(dict): 68 """ 69 Create an IPAM pool config dictionary to be added to the 70 ``pool_configs`` parameter of 71 :py:class:`~docker.types.IPAMConfig`. 72 73 Args: 74 75 subnet (str): Custom subnet for this IPAM pool using the CIDR 76 notation. Defaults to ``None``. 77 iprange (str): Custom IP range for endpoints in this IPAM pool using 78 the CIDR notation. Defaults to ``None``. 79 gateway (str): Custom IP address for the pool's gateway. 80 aux_addresses (dict): A dictionary of ``key -> ip_address`` 81 relationships specifying auxiliary addresses that need to be 82 allocated by the IPAM driver. 83 84 Example: 85 86 >>> ipam_pool = docker.types.IPAMPool( 87 subnet='124.42.0.0/16', 88 iprange='124.42.0.0/24', 89 gateway='124.42.0.254', 90 aux_addresses={ 91 'reserved1': '124.42.1.1' 92 } 93 ) 94 >>> ipam_config = docker.types.IPAMConfig( 95 pool_configs=[ipam_pool]) 96 """ 97 def __init__(self, subnet=None, iprange=None, gateway=None, 98 aux_addresses=None): 99 self.update({ 100 'Subnet': subnet, 101 'IPRange': iprange, 102 'Gateway': gateway, 103 'AuxiliaryAddresses': aux_addresses 104 }) 105 [end of docker/types/networks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/types/networks.py b/docker/types/networks.py --- a/docker/types/networks.py +++ b/docker/types/networks.py @@ -50,6 +50,8 @@ driver (str): The IPAM driver to use. Defaults to ``default``. pool_configs (list): A list of pool configurations (:py:class:`~docker.types.IPAMPool`). Defaults to empty list. + options (dict): Driver options as a key-value dictionary. + Defaults to `None`. Example: @@ -57,12 +59,17 @@ >>> network = client.create_network('network1', ipam=ipam_config) """ - def __init__(self, driver='default', pool_configs=None): + def __init__(self, driver='default', pool_configs=None, options=None): self.update({ 'Driver': driver, 'Config': pool_configs or [] }) + if options: + if not isinstance(options, dict): + raise TypeError('IPAMConfig options must be a dictionary') + self['Options'] = options + class IPAMPool(dict): """
{"golden_diff": "diff --git a/docker/types/networks.py b/docker/types/networks.py\n--- a/docker/types/networks.py\n+++ b/docker/types/networks.py\n@@ -50,6 +50,8 @@\n driver (str): The IPAM driver to use. Defaults to ``default``.\n pool_configs (list): A list of pool configurations\n (:py:class:`~docker.types.IPAMPool`). Defaults to empty list.\n+ options (dict): Driver options as a key-value dictionary.\n+ Defaults to `None`.\n \n Example:\n \n@@ -57,12 +59,17 @@\n >>> network = client.create_network('network1', ipam=ipam_config)\n \n \"\"\"\n- def __init__(self, driver='default', pool_configs=None):\n+ def __init__(self, driver='default', pool_configs=None, options=None):\n self.update({\n 'Driver': driver,\n 'Config': pool_configs or []\n })\n \n+ if options:\n+ if not isinstance(options, dict):\n+ raise TypeError('IPAMConfig options must be a dictionary')\n+ self['Options'] = options\n+\n \n class IPAMPool(dict):\n \"\"\"\n", "issue": "Add docker network IPAM options parameter\nIPAM driver missing options\n\nsupports an options field in the IPAM config \nIt introduced in API v1.22.\n\n```\nPOST /networks/create Now supports an options field in the IPAM config that provides options for custom IPAM plugins.\n```\n\n", "before_files": [{"content": "from .. import errors\nfrom ..utils import normalize_links, version_lt\n\n\nclass EndpointConfig(dict):\n def __init__(self, version, aliases=None, links=None, ipv4_address=None,\n ipv6_address=None, link_local_ips=None):\n if version_lt(version, '1.22'):\n raise errors.InvalidVersion(\n 'Endpoint config is not supported for API version < 1.22'\n )\n\n if aliases:\n self[\"Aliases\"] = aliases\n\n if links:\n self[\"Links\"] = normalize_links(links)\n\n ipam_config = {}\n if ipv4_address:\n ipam_config['IPv4Address'] = ipv4_address\n\n if ipv6_address:\n ipam_config['IPv6Address'] = ipv6_address\n\n if link_local_ips is not None:\n if version_lt(version, '1.24'):\n raise errors.InvalidVersion(\n 'link_local_ips is not supported for API version < 1.24'\n )\n ipam_config['LinkLocalIPs'] = link_local_ips\n\n if ipam_config:\n self['IPAMConfig'] = ipam_config\n\n\nclass NetworkingConfig(dict):\n def __init__(self, endpoints_config=None):\n if endpoints_config:\n self[\"EndpointsConfig\"] = endpoints_config\n\n\nclass IPAMConfig(dict):\n \"\"\"\n Create an IPAM (IP Address Management) config dictionary to be used with\n :py:meth:`~docker.api.network.NetworkApiMixin.create_network`.\n\n Args:\n\n driver (str): The IPAM driver to use. Defaults to ``default``.\n pool_configs (list): A list of pool configurations\n (:py:class:`~docker.types.IPAMPool`). Defaults to empty list.\n\n Example:\n\n >>> ipam_config = docker.types.IPAMConfig(driver='default')\n >>> network = client.create_network('network1', ipam=ipam_config)\n\n \"\"\"\n def __init__(self, driver='default', pool_configs=None):\n self.update({\n 'Driver': driver,\n 'Config': pool_configs or []\n })\n\n\nclass IPAMPool(dict):\n \"\"\"\n Create an IPAM pool config dictionary to be added to the\n ``pool_configs`` parameter of\n :py:class:`~docker.types.IPAMConfig`.\n\n Args:\n\n subnet (str): Custom subnet for this IPAM pool using the CIDR\n notation. Defaults to ``None``.\n iprange (str): Custom IP range for endpoints in this IPAM pool using\n the CIDR notation. Defaults to ``None``.\n gateway (str): Custom IP address for the pool's gateway.\n aux_addresses (dict): A dictionary of ``key -> ip_address``\n relationships specifying auxiliary addresses that need to be\n allocated by the IPAM driver.\n\n Example:\n\n >>> ipam_pool = docker.types.IPAMPool(\n subnet='124.42.0.0/16',\n iprange='124.42.0.0/24',\n gateway='124.42.0.254',\n aux_addresses={\n 'reserved1': '124.42.1.1'\n }\n )\n >>> ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool])\n \"\"\"\n def __init__(self, subnet=None, iprange=None, gateway=None,\n aux_addresses=None):\n self.update({\n 'Subnet': subnet,\n 'IPRange': iprange,\n 'Gateway': gateway,\n 'AuxiliaryAddresses': aux_addresses\n })\n", "path": "docker/types/networks.py"}]}
1,579
255
gh_patches_debug_2452
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-2225
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> missing hidden import for skimage When packaging an application that imports skimage.feature (and nothing else), the app would not run due to an ImportError on the "transform" module. This can be fixed by adding one item to the hiddenimports in hook-skimage.transform.py file (bolded below): > hiddenimports = ['skimage.draw.draw', > 'skimage._shared.geometry', > 'skimage.filters.rank.core_cy', > **'skimage._shared.transform'**] > > datas = collect_data_files('skimage') PyInstaller 3.2, Windows 7 64 bit, Python 2.7.12, Anaconda 4.1.1 distribution. </issue> <code> [start of PyInstaller/hooks/hook-skimage.transform.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2014-2016, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 from PyInstaller.utils.hooks import collect_data_files 10 11 # Hook tested with scikit-image (skimage) 0.9.3 on Mac OS 10.9 and Windows 7 12 # 64-bit 13 hiddenimports = ['skimage.draw.draw', 14 'skimage._shared.geometry', 15 'skimage.filters.rank.core_cy'] 16 17 datas = collect_data_files('skimage') 18 [end of PyInstaller/hooks/hook-skimage.transform.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/hook-skimage.transform.py b/PyInstaller/hooks/hook-skimage.transform.py --- a/PyInstaller/hooks/hook-skimage.transform.py +++ b/PyInstaller/hooks/hook-skimage.transform.py @@ -12,6 +12,7 @@ # 64-bit hiddenimports = ['skimage.draw.draw', 'skimage._shared.geometry', + 'skimage._shared.transform', 'skimage.filters.rank.core_cy'] datas = collect_data_files('skimage')
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-skimage.transform.py b/PyInstaller/hooks/hook-skimage.transform.py\n--- a/PyInstaller/hooks/hook-skimage.transform.py\n+++ b/PyInstaller/hooks/hook-skimage.transform.py\n@@ -12,6 +12,7 @@\n # 64-bit\n hiddenimports = ['skimage.draw.draw',\n 'skimage._shared.geometry',\n+ 'skimage._shared.transform',\n 'skimage.filters.rank.core_cy']\n \n datas = collect_data_files('skimage')\n", "issue": "missing hidden import for skimage\nWhen packaging an application that imports skimage.feature (and nothing else), the app would not run due to an ImportError on the \"transform\" module. This can be fixed by adding one item to the hiddenimports in hook-skimage.transform.py file (bolded below):\n\n> hiddenimports = ['skimage.draw.draw',\n> 'skimage._shared.geometry',\n> 'skimage.filters.rank.core_cy',\n> **'skimage._shared.transform'**] \n> \n> datas = collect_data_files('skimage')\n\nPyInstaller 3.2, Windows 7 64 bit, Python 2.7.12, Anaconda 4.1.1 distribution.\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2014-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import collect_data_files\n\n# Hook tested with scikit-image (skimage) 0.9.3 on Mac OS 10.9 and Windows 7\n# 64-bit\nhiddenimports = ['skimage.draw.draw',\n 'skimage._shared.geometry',\n 'skimage.filters.rank.core_cy']\n\ndatas = collect_data_files('skimage')\n", "path": "PyInstaller/hooks/hook-skimage.transform.py"}]}
869
117
gh_patches_debug_7034
rasdani/github-patches
git_diff
aws__aws-cli-5019
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add support for PyYAML 5.3 Closes: https://github.com/aws/aws-cli/issues/4828 Signed-off-by: Igor Raits <[email protected]> *Issue #, if available:* *Description of changes:* By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import codecs 3 import os.path 4 import re 5 import sys 6 7 from setuptools import setup, find_packages 8 9 10 here = os.path.abspath(os.path.dirname(__file__)) 11 12 13 def read(*parts): 14 return codecs.open(os.path.join(here, *parts), 'r').read() 15 16 17 def find_version(*file_paths): 18 version_file = read(*file_paths) 19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", 20 version_file, re.M) 21 if version_match: 22 return version_match.group(1) 23 raise RuntimeError("Unable to find version string.") 24 25 26 install_requires = [ 27 'botocore==1.15.10', 28 'docutils>=0.10,<0.16', 29 'rsa>=3.1.2,<=3.5.0', 30 's3transfer>=0.3.0,<0.4.0', 31 'PyYAML>=3.10,<5.3', 32 ] 33 34 35 if sys.version_info[:2] == (3, 4): 36 install_requires.append('colorama>=0.2.5,<0.4.2') 37 else: 38 install_requires.append('colorama>=0.2.5,<0.4.4') 39 40 41 setup_options = dict( 42 name='awscli', 43 version=find_version("awscli", "__init__.py"), 44 description='Universal Command Line Environment for AWS.', 45 long_description=read('README.rst'), 46 author='Amazon Web Services', 47 url='http://aws.amazon.com/cli/', 48 scripts=['bin/aws', 'bin/aws.cmd', 49 'bin/aws_completer', 'bin/aws_zsh_completer.sh', 50 'bin/aws_bash_completer'], 51 packages=find_packages(exclude=['tests*']), 52 package_data={'awscli': ['data/*.json', 'examples/*/*.rst', 53 'examples/*/*.txt', 'examples/*/*/*.txt', 54 'examples/*/*/*.rst', 'topics/*.rst', 55 'topics/*.json']}, 56 install_requires=install_requires, 57 extras_require={}, 58 license="Apache License 2.0", 59 classifiers=[ 60 'Development Status :: 5 - Production/Stable', 61 'Intended Audience :: Developers', 62 'Intended Audience :: System Administrators', 63 'Natural Language :: English', 64 'License :: OSI Approved :: Apache Software License', 65 'Programming Language :: Python', 66 'Programming Language :: Python :: 2', 67 'Programming Language :: Python :: 2.7', 68 'Programming Language :: Python :: 3', 69 'Programming Language :: Python :: 3.4', 70 'Programming Language :: Python :: 3.5', 71 'Programming Language :: Python :: 3.6', 72 'Programming Language :: Python :: 3.7', 73 'Programming Language :: Python :: 3.8', 74 ], 75 ) 76 77 78 if 'py2exe' in sys.argv: 79 # This will actually give us a py2exe command. 80 import py2exe 81 # And we have some py2exe specific options. 82 setup_options['options'] = { 83 'py2exe': { 84 'optimize': 0, 85 'skip_archive': True, 86 'dll_excludes': ['crypt32.dll'], 87 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser', 88 'awscli', 'ConfigParser', 'xml.etree', 'pipes'], 89 } 90 } 91 setup_options['console'] = ['bin/aws'] 92 93 94 setup(**setup_options) 95 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -28,13 +28,14 @@ 'docutils>=0.10,<0.16', 'rsa>=3.1.2,<=3.5.0', 's3transfer>=0.3.0,<0.4.0', - 'PyYAML>=3.10,<5.3', ] if sys.version_info[:2] == (3, 4): + install_requires.append('PyYAML>=3.10,<5.3') install_requires.append('colorama>=0.2.5,<0.4.2') else: + install_requires.append('PyYAML>=3.10,<5.4') install_requires.append('colorama>=0.2.5,<0.4.4')
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,13 +28,14 @@\n 'docutils>=0.10,<0.16',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.3.0,<0.4.0',\n- 'PyYAML>=3.10,<5.3',\n ]\n \n \n if sys.version_info[:2] == (3, 4):\n+ install_requires.append('PyYAML>=3.10,<5.3')\n install_requires.append('colorama>=0.2.5,<0.4.2')\n else:\n+ install_requires.append('PyYAML>=3.10,<5.4')\n install_requires.append('colorama>=0.2.5,<0.4.4')\n", "issue": "Add support for PyYAML 5.3\nCloses: https://github.com/aws/aws-cli/issues/4828\r\nSigned-off-by: Igor Raits <[email protected]>\r\n\r\n*Issue #, if available:*\r\n\r\n*Description of changes:*\r\n\r\n\r\nBy submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\ninstall_requires = [\n 'botocore==1.15.10',\n 'docutils>=0.10,<0.16',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.3.0,<0.4.0',\n 'PyYAML>=3.10,<5.3',\n]\n\n\nif sys.version_info[:2] == (3, 4):\n install_requires.append('colorama>=0.2.5,<0.4.2')\nelse:\n install_requires.append('colorama>=0.2.5,<0.4.4')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=read('README.rst'),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*.txt', 'examples/*/*/*.txt',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=install_requires,\n extras_require={},\n license=\"Apache License 2.0\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]}
1,562
197
gh_patches_debug_19776
rasdani/github-patches
git_diff
azavea__raster-vision-1484
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Local runner should write makefile to temporary dir Instead, it writes it to the `root_uri` which might be an S3 URI, and `make`, which is used by the local runner cannot handle that. Makefile error when `root_uri` is an S3 path ## 🐛 Bug When running training command and having `root_uri` set to an S3 folder, this error shows up: ``` make: s3://<random_bucket_name>/predictions/Makefile: No such file or directory make: *** No rule to make target 's3://<random_bucket_name>/predictions/Makefile'. Stop. ``` This error disappears when `root_uri` is a local path. AWS config is right as it is able to read and write the files. ## To Reproduce Steps to reproduce the behavior: 1. I ran the following command inside the container: `python -m rastervision.pipeline.cli run local code/local_exp.py -a raw_uri s3://<random_bucket_name>/datafortesting/data/ -a root_uri s3://<random_bucket_name>/predictions -a test False` <!-- Please provide the command executed, source of the get_config() function, error messages, and/or full stack traces if at all possible --> ## Expected behavior It should run normally like it is running when `root_uri` is a local path. ## Environment Running with docker. **Image**: quay.io/azavea/raster-vision:pytorch-v0.13.1 ## Additional context This might be a relevant issue: #991 </issue> <code> [start of rastervision_pipeline/rastervision/pipeline/runner/local_runner.py] 1 import sys 2 from os.path import dirname, join 3 from subprocess import Popen 4 5 from rastervision.pipeline.file_system import str_to_file 6 from rastervision.pipeline.runner.runner import Runner 7 from rastervision.pipeline.utils import terminate_at_exit 8 9 LOCAL = 'local' 10 11 12 class LocalRunner(Runner): 13 """Runs each command locally using different processes for each command/split. 14 15 This is implemented by generating a Makefile and then running it using make. 16 """ 17 18 def run(self, 19 cfg_json_uri, 20 pipeline, 21 commands, 22 num_splits=1, 23 pipeline_run_name: str = 'raster-vision'): 24 num_commands = 0 25 for command in commands: 26 if command in pipeline.split_commands and num_splits > 1: 27 num_commands += num_splits 28 else: 29 num_commands += 1 30 31 makefile = '.PHONY: ' 32 makefile += ' '.join([str(ci) for ci in range(num_commands)]) 33 makefile += '\n\n' 34 35 makefile += 'all: ' 36 makefile += ' '.join([str(ci) for ci in range(num_commands)]) 37 makefile += '\n\n' 38 39 prev_command_inds = [] 40 curr_command_ind = 0 41 for command in commands: 42 43 curr_command_inds = [] 44 if command in pipeline.split_commands and num_splits > 1: 45 for split_ind in range(num_splits): 46 makefile += '{}: '.format(curr_command_ind) 47 makefile += ' '.join([str(ci) for ci in prev_command_inds]) 48 makefile += '\n' 49 invocation = ( 50 'python -m rastervision.pipeline.cli run_command ' 51 '{} {} --split-ind {} --num-splits {}').format( 52 cfg_json_uri, command, split_ind, num_splits) 53 makefile += '\t{}\n\n'.format(invocation) 54 curr_command_inds.append(curr_command_ind) 55 curr_command_ind += 1 56 else: 57 makefile += '{}: '.format(curr_command_ind) 58 makefile += ' '.join([str(ci) for ci in prev_command_inds]) 59 makefile += '\n' 60 invocation = ( 61 'python -m rastervision.pipeline.cli run_command ' 62 '{} {}'.format(cfg_json_uri, command)) 63 makefile += '\t{}\n\n'.format(invocation) 64 curr_command_inds.append(curr_command_ind) 65 curr_command_ind += 1 66 67 prev_command_inds = curr_command_inds 68 69 makefile_path = join(dirname(cfg_json_uri), 'Makefile') 70 str_to_file(makefile, makefile_path) 71 process = Popen(['make', '-j', '-f', makefile_path]) 72 terminate_at_exit(process) 73 exitcode = process.wait() 74 if exitcode != 0: 75 sys.exit(exitcode) 76 else: 77 return 0 78 [end of rastervision_pipeline/rastervision/pipeline/runner/local_runner.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py b/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py --- a/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py +++ b/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py @@ -2,7 +2,7 @@ from os.path import dirname, join from subprocess import Popen -from rastervision.pipeline.file_system import str_to_file +from rastervision.pipeline.file_system import str_to_file, download_if_needed from rastervision.pipeline.runner.runner import Runner from rastervision.pipeline.utils import terminate_at_exit @@ -68,7 +68,8 @@ makefile_path = join(dirname(cfg_json_uri), 'Makefile') str_to_file(makefile, makefile_path) - process = Popen(['make', '-j', '-f', makefile_path]) + makefile_path_local = download_if_needed(makefile_path) + process = Popen(['make', '-j', '-f', makefile_path_local]) terminate_at_exit(process) exitcode = process.wait() if exitcode != 0:
{"golden_diff": "diff --git a/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py b/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py\n--- a/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py\n+++ b/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py\n@@ -2,7 +2,7 @@\n from os.path import dirname, join\n from subprocess import Popen\n \n-from rastervision.pipeline.file_system import str_to_file\n+from rastervision.pipeline.file_system import str_to_file, download_if_needed\n from rastervision.pipeline.runner.runner import Runner\n from rastervision.pipeline.utils import terminate_at_exit\n \n@@ -68,7 +68,8 @@\n \n makefile_path = join(dirname(cfg_json_uri), 'Makefile')\n str_to_file(makefile, makefile_path)\n- process = Popen(['make', '-j', '-f', makefile_path])\n+ makefile_path_local = download_if_needed(makefile_path)\n+ process = Popen(['make', '-j', '-f', makefile_path_local])\n terminate_at_exit(process)\n exitcode = process.wait()\n if exitcode != 0:\n", "issue": "Local runner should write makefile to temporary dir\nInstead, it writes it to the `root_uri` which might be an S3 URI, and `make`, which is used by the local runner cannot handle that.\nMakefile error when `root_uri` is an S3 path\n## \ud83d\udc1b Bug\r\nWhen running training command and having `root_uri` set to an S3 folder, this error shows up:\r\n```\r\nmake: s3://<random_bucket_name>/predictions/Makefile: No such file or directory\r\nmake: *** No rule to make target 's3://<random_bucket_name>/predictions/Makefile'. Stop.\r\n```\r\n\r\nThis error disappears when `root_uri` is a local path. AWS config is right as it is able to read and write the files.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. I ran the following command inside the container:\r\n`python -m rastervision.pipeline.cli run local code/local_exp.py -a raw_uri s3://<random_bucket_name>/datafortesting/data/ -a root_uri s3://<random_bucket_name>/predictions -a test False`\r\n\r\n<!-- Please provide the command executed, source of the get_config() function, error messages, and/or full stack traces if at all possible -->\r\n\r\n## Expected behavior\r\n\r\nIt should run normally like it is running when `root_uri` is a local path.\r\n\r\n## Environment\r\n\r\nRunning with docker. **Image**: quay.io/azavea/raster-vision:pytorch-v0.13.1\r\n\r\n## Additional context\r\n\r\nThis might be a relevant issue: #991 \r\n\n", "before_files": [{"content": "import sys\nfrom os.path import dirname, join\nfrom subprocess import Popen\n\nfrom rastervision.pipeline.file_system import str_to_file\nfrom rastervision.pipeline.runner.runner import Runner\nfrom rastervision.pipeline.utils import terminate_at_exit\n\nLOCAL = 'local'\n\n\nclass LocalRunner(Runner):\n \"\"\"Runs each command locally using different processes for each command/split.\n\n This is implemented by generating a Makefile and then running it using make.\n \"\"\"\n\n def run(self,\n cfg_json_uri,\n pipeline,\n commands,\n num_splits=1,\n pipeline_run_name: str = 'raster-vision'):\n num_commands = 0\n for command in commands:\n if command in pipeline.split_commands and num_splits > 1:\n num_commands += num_splits\n else:\n num_commands += 1\n\n makefile = '.PHONY: '\n makefile += ' '.join([str(ci) for ci in range(num_commands)])\n makefile += '\\n\\n'\n\n makefile += 'all: '\n makefile += ' '.join([str(ci) for ci in range(num_commands)])\n makefile += '\\n\\n'\n\n prev_command_inds = []\n curr_command_ind = 0\n for command in commands:\n\n curr_command_inds = []\n if command in pipeline.split_commands and num_splits > 1:\n for split_ind in range(num_splits):\n makefile += '{}: '.format(curr_command_ind)\n makefile += ' '.join([str(ci) for ci in prev_command_inds])\n makefile += '\\n'\n invocation = (\n 'python -m rastervision.pipeline.cli run_command '\n '{} {} --split-ind {} --num-splits {}').format(\n cfg_json_uri, command, split_ind, num_splits)\n makefile += '\\t{}\\n\\n'.format(invocation)\n curr_command_inds.append(curr_command_ind)\n curr_command_ind += 1\n else:\n makefile += '{}: '.format(curr_command_ind)\n makefile += ' '.join([str(ci) for ci in prev_command_inds])\n makefile += '\\n'\n invocation = (\n 'python -m rastervision.pipeline.cli run_command '\n '{} {}'.format(cfg_json_uri, command))\n makefile += '\\t{}\\n\\n'.format(invocation)\n curr_command_inds.append(curr_command_ind)\n curr_command_ind += 1\n\n prev_command_inds = curr_command_inds\n\n makefile_path = join(dirname(cfg_json_uri), 'Makefile')\n str_to_file(makefile, makefile_path)\n process = Popen(['make', '-j', '-f', makefile_path])\n terminate_at_exit(process)\n exitcode = process.wait()\n if exitcode != 0:\n sys.exit(exitcode)\n else:\n return 0\n", "path": "rastervision_pipeline/rastervision/pipeline/runner/local_runner.py"}]}
1,651
268
gh_patches_debug_26415
rasdani/github-patches
git_diff
bokeh__bokeh-9163
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update dataset for parallel coords plot examples The example currently does this: ``` df = pd.read_csv("https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv") ``` Which is not really great. We should add a data set to `sampledata` and use that. </issue> <code> [start of examples/custom/parallel_plot/parallel_plot.py] 1 import numpy as np 2 import pandas as pd 3 4 from bokeh.plotting import figure 5 from bokeh.layouts import column 6 from bokeh.models import (Range1d, ColumnDataSource, Div, LinearAxis, 7 LinearColorMapper, MultiLine, 8 FixedTicker, BasicTickFormatter, FuncTickFormatter) 9 10 from parallel_selection_tool import ParallelSelectionTool 11 from parallel_reset import ParallelResetTool 12 13 14 def parallel_plot(df, color=None, palette=None): 15 """From a dataframe create a parallel coordinate plot 16 """ 17 npts = df.shape[0] 18 ndims = len(df.columns) 19 20 if color is None: 21 color = np.ones(npts) 22 if palette is None: 23 palette = ['#ff0000'] 24 25 cmap = LinearColorMapper(high=color.min(), 26 low=color.max(), 27 palette=palette) 28 29 data_source = ColumnDataSource(dict( 30 xs=np.arange(ndims)[None, :].repeat(npts, axis=0).tolist(), 31 ys=np.array((df-df.min())/(df.max()-df.min())).tolist(), 32 color=color)) 33 34 p = figure(x_range=(-1, ndims), 35 y_range=(0, 1), 36 width=1000, 37 tools="pan, box_zoom") 38 39 # Create x axis ticks from columns contained in dataframe 40 fixed_x_ticks = FixedTicker( 41 ticks=np.arange(ndims), minor_ticks=[]) 42 formatter_x_ticks = FuncTickFormatter( 43 code="return columns[index]", args={"columns": df.columns}) 44 p.xaxis.ticker = fixed_x_ticks 45 p.xaxis.formatter = formatter_x_ticks 46 47 p.yaxis.visible = False 48 p.y_range.start = 0 49 p.y_range.end = 1 50 p.y_range.bounds = (-0.1, 1.1) # add a little padding around y axis 51 p.xgrid.visible = False 52 p.ygrid.visible = False 53 54 # Create extra y axis for each dataframe column 55 tickformatter = BasicTickFormatter(precision=1) 56 for index, col in enumerate(df.columns): 57 start = df[col].min() 58 end = df[col].max() 59 bound_min = start + abs(end-start) * (p.y_range.bounds[0] - p.y_range.start) 60 bound_max = end + abs(end-start) * (p.y_range.bounds[1] - p.y_range.end) 61 p.extra_y_ranges.update( 62 {col: Range1d(start=bound_min, end=bound_max, bounds=(bound_min, bound_max))}) 63 64 fixedticks = FixedTicker( 65 ticks=np.linspace(start, end, 8), minor_ticks=[]) 66 67 p.add_layout(LinearAxis(fixed_location=index, y_range_name=col, 68 ticker=fixedticks, formatter=tickformatter), 'right') 69 70 # create the data renderer ( MultiLine ) 71 # specify selected and non selected style 72 non_selected_line_style = dict(line_color='grey', line_width=0.1, line_alpha=0.5) 73 74 selected_line_style = dict(line_color={'field': 'color', 'transform': cmap}, line_width=1) 75 76 parallel_renderer = p.multi_line( 77 xs="xs", ys="ys", source=data_source, **non_selected_line_style) 78 79 # Specify selection style 80 selected_lines = MultiLine(**selected_line_style) 81 82 # Specify non selection style 83 nonselected_lines = MultiLine(**non_selected_line_style) 84 85 parallel_renderer.selection_glyph = selected_lines 86 parallel_renderer.nonselection_glyph = nonselected_lines 87 p.y_range.start = p.y_range.bounds[0] 88 p.y_range.end = p.y_range.bounds[1] 89 90 rect_source = ColumnDataSource({ 91 'x': [], 'y': [], 'width': [], 'height': [] 92 }) 93 94 # add rectangle selections 95 selection_renderer = p.rect(x='x', y='y', width='width', height='height', 96 source=rect_source, 97 fill_alpha=0.7, fill_color='#009933') 98 selection_tool = ParallelSelectionTool( 99 renderer_select=selection_renderer, renderer_data=parallel_renderer, 100 box_width=10) 101 # custom resets (reset only axes not selections) 102 reset_axes = ParallelResetTool() 103 104 # add tools and activate selection ones 105 p.add_tools(selection_tool, reset_axes) 106 p.toolbar.active_drag = selection_tool 107 return p 108 109 if __name__ == '__main__': 110 from bokeh.palettes import Viridis256 111 from bokeh.io import show 112 df = pd.read_csv("https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv") 113 p = parallel_plot(df=df, color=df[df.columns[0]], palette=Viridis256) 114 div = Div(text="Select up and down column grid lines to define filters. Double click a filter to reset it.") 115 show(column(div, p)) 116 [end of examples/custom/parallel_plot/parallel_plot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/custom/parallel_plot/parallel_plot.py b/examples/custom/parallel_plot/parallel_plot.py --- a/examples/custom/parallel_plot/parallel_plot.py +++ b/examples/custom/parallel_plot/parallel_plot.py @@ -1,11 +1,11 @@ import numpy as np -import pandas as pd from bokeh.plotting import figure from bokeh.layouts import column from bokeh.models import (Range1d, ColumnDataSource, Div, LinearAxis, LinearColorMapper, MultiLine, FixedTicker, BasicTickFormatter, FuncTickFormatter) +from bokeh.sampledata.autompg import autompg_clean as df from parallel_selection_tool import ParallelSelectionTool from parallel_reset import ParallelResetTool @@ -109,7 +109,9 @@ if __name__ == '__main__': from bokeh.palettes import Viridis256 from bokeh.io import show - df = pd.read_csv("https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv") + del df['origin'] + del df['mfr'] + del df['name'] p = parallel_plot(df=df, color=df[df.columns[0]], palette=Viridis256) div = Div(text="Select up and down column grid lines to define filters. Double click a filter to reset it.") show(column(div, p))
{"golden_diff": "diff --git a/examples/custom/parallel_plot/parallel_plot.py b/examples/custom/parallel_plot/parallel_plot.py\n--- a/examples/custom/parallel_plot/parallel_plot.py\n+++ b/examples/custom/parallel_plot/parallel_plot.py\n@@ -1,11 +1,11 @@\n import numpy as np\n-import pandas as pd\n \n from bokeh.plotting import figure\n from bokeh.layouts import column\n from bokeh.models import (Range1d, ColumnDataSource, Div, LinearAxis,\n LinearColorMapper, MultiLine,\n FixedTicker, BasicTickFormatter, FuncTickFormatter)\n+from bokeh.sampledata.autompg import autompg_clean as df\n \n from parallel_selection_tool import ParallelSelectionTool\n from parallel_reset import ParallelResetTool\n@@ -109,7 +109,9 @@\n if __name__ == '__main__':\n from bokeh.palettes import Viridis256\n from bokeh.io import show\n- df = pd.read_csv(\"https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv\")\n+ del df['origin']\n+ del df['mfr']\n+ del df['name']\n p = parallel_plot(df=df, color=df[df.columns[0]], palette=Viridis256)\n div = Div(text=\"Select up and down column grid lines to define filters. Double click a filter to reset it.\")\n show(column(div, p))\n", "issue": "Update dataset for parallel coords plot examples\nThe example currently does this:\r\n```\r\ndf = pd.read_csv(\"https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv\")\r\n```\r\n\r\nWhich is not really great. We should add a data set to `sampledata` and use that. \r\n\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\n\nfrom bokeh.plotting import figure\nfrom bokeh.layouts import column\nfrom bokeh.models import (Range1d, ColumnDataSource, Div, LinearAxis,\n LinearColorMapper, MultiLine,\n FixedTicker, BasicTickFormatter, FuncTickFormatter)\n\nfrom parallel_selection_tool import ParallelSelectionTool\nfrom parallel_reset import ParallelResetTool\n\n\ndef parallel_plot(df, color=None, palette=None):\n \"\"\"From a dataframe create a parallel coordinate plot\n \"\"\"\n npts = df.shape[0]\n ndims = len(df.columns)\n\n if color is None:\n color = np.ones(npts)\n if palette is None:\n palette = ['#ff0000']\n\n cmap = LinearColorMapper(high=color.min(),\n low=color.max(),\n palette=palette)\n\n data_source = ColumnDataSource(dict(\n xs=np.arange(ndims)[None, :].repeat(npts, axis=0).tolist(),\n ys=np.array((df-df.min())/(df.max()-df.min())).tolist(),\n color=color))\n\n p = figure(x_range=(-1, ndims),\n y_range=(0, 1),\n width=1000,\n tools=\"pan, box_zoom\")\n\n # Create x axis ticks from columns contained in dataframe\n fixed_x_ticks = FixedTicker(\n ticks=np.arange(ndims), minor_ticks=[])\n formatter_x_ticks = FuncTickFormatter(\n code=\"return columns[index]\", args={\"columns\": df.columns})\n p.xaxis.ticker = fixed_x_ticks\n p.xaxis.formatter = formatter_x_ticks\n\n p.yaxis.visible = False\n p.y_range.start = 0\n p.y_range.end = 1\n p.y_range.bounds = (-0.1, 1.1) # add a little padding around y axis\n p.xgrid.visible = False\n p.ygrid.visible = False\n\n # Create extra y axis for each dataframe column\n tickformatter = BasicTickFormatter(precision=1)\n for index, col in enumerate(df.columns):\n start = df[col].min()\n end = df[col].max()\n bound_min = start + abs(end-start) * (p.y_range.bounds[0] - p.y_range.start)\n bound_max = end + abs(end-start) * (p.y_range.bounds[1] - p.y_range.end)\n p.extra_y_ranges.update(\n {col: Range1d(start=bound_min, end=bound_max, bounds=(bound_min, bound_max))})\n\n fixedticks = FixedTicker(\n ticks=np.linspace(start, end, 8), minor_ticks=[])\n\n p.add_layout(LinearAxis(fixed_location=index, y_range_name=col,\n ticker=fixedticks, formatter=tickformatter), 'right')\n\n # create the data renderer ( MultiLine )\n # specify selected and non selected style\n non_selected_line_style = dict(line_color='grey', line_width=0.1, line_alpha=0.5)\n\n selected_line_style = dict(line_color={'field': 'color', 'transform': cmap}, line_width=1)\n\n parallel_renderer = p.multi_line(\n xs=\"xs\", ys=\"ys\", source=data_source, **non_selected_line_style)\n\n # Specify selection style\n selected_lines = MultiLine(**selected_line_style)\n\n # Specify non selection style\n nonselected_lines = MultiLine(**non_selected_line_style)\n\n parallel_renderer.selection_glyph = selected_lines\n parallel_renderer.nonselection_glyph = nonselected_lines\n p.y_range.start = p.y_range.bounds[0]\n p.y_range.end = p.y_range.bounds[1]\n\n rect_source = ColumnDataSource({\n 'x': [], 'y': [], 'width': [], 'height': []\n })\n\n # add rectangle selections\n selection_renderer = p.rect(x='x', y='y', width='width', height='height',\n source=rect_source,\n fill_alpha=0.7, fill_color='#009933')\n selection_tool = ParallelSelectionTool(\n renderer_select=selection_renderer, renderer_data=parallel_renderer,\n box_width=10)\n # custom resets (reset only axes not selections)\n reset_axes = ParallelResetTool()\n\n # add tools and activate selection ones\n p.add_tools(selection_tool, reset_axes)\n p.toolbar.active_drag = selection_tool\n return p\n\nif __name__ == '__main__':\n from bokeh.palettes import Viridis256\n from bokeh.io import show\n df = pd.read_csv(\"https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv\")\n p = parallel_plot(df=df, color=df[df.columns[0]], palette=Viridis256)\n div = Div(text=\"Select up and down column grid lines to define filters. Double click a filter to reset it.\")\n show(column(div, p))\n", "path": "examples/custom/parallel_plot/parallel_plot.py"}]}
1,905
303
gh_patches_debug_5990
rasdani/github-patches
git_diff
googleapis__python-bigquery-672
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError in `resource_name_to_date()` samples fixture See this samples [test run](https://source.cloud.google.com/results/invocations/e5c424d5-84a6-4505-ae44-3bc70fa94e44/targets/cloud-devrel%2Fclient-libraries%2Fpython%2Fgoogleapis%2Fpython-bigquery%2Fsamples%2Fpython3.6%2Fpresubmit/log). A bug in [this fixture](https://github.com/googleapis/python-bigquery/blob/bd7dbdae5c972b16bafc53c67911eeaa3255a880/samples/snippets/conftest.py#L33-L36) made all of them fail. > AttributeError: module 'datetime' has no attribute 'strptime' I wonder how this got in in the first place? </issue> <code> [start of samples/snippets/conftest.py] 1 # Copyright 2020 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import datetime 16 import random 17 18 from google.cloud import bigquery 19 import pytest 20 21 22 RESOURCE_PREFIX = "python_bigquery_samples_snippets" 23 RESOURCE_DATE_FORMAT = "%Y%m%d_%H%M%S" 24 RESOURCE_DATE_LENGTH = 4 + 2 + 2 + 1 + 2 + 2 + 2 25 26 27 def resource_prefix() -> str: 28 timestamp = datetime.datetime.utcnow().strftime(RESOURCE_DATE_FORMAT) 29 random_string = hex(random.randrange(1000000))[2:] 30 return f"{RESOURCE_PREFIX}_{timestamp}_{random_string}" 31 32 33 def resource_name_to_date(resource_name: str): 34 start_date = len(RESOURCE_PREFIX) + 1 35 date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH] 36 return datetime.strptime(date_string, RESOURCE_DATE_FORMAT) 37 38 39 @pytest.fixture(scope="session", autouse=True) 40 def cleanup_datasets(bigquery_client: bigquery.Client): 41 yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1) 42 for dataset in bigquery_client.list_datasets(): 43 if ( 44 dataset.dataset_id.startswith(RESOURCE_PREFIX) 45 and resource_name_to_date(dataset.dataset_id) < yesterday 46 ): 47 bigquery_client.delete_dataset( 48 dataset, delete_contents=True, not_found_ok=True 49 ) 50 51 52 @pytest.fixture(scope="session") 53 def bigquery_client(): 54 bigquery_client = bigquery.Client() 55 return bigquery_client 56 57 58 @pytest.fixture(scope="session") 59 def project_id(bigquery_client): 60 return bigquery_client.project 61 62 63 @pytest.fixture(scope="session") 64 def dataset_id(bigquery_client: bigquery.Client, project_id: str): 65 dataset_id = resource_prefix() 66 full_dataset_id = f"{project_id}.{dataset_id}" 67 dataset = bigquery.Dataset(full_dataset_id) 68 bigquery_client.create_dataset(dataset) 69 yield dataset_id 70 bigquery_client.delete_dataset(dataset, delete_contents=True, not_found_ok=True) 71 72 73 @pytest.fixture 74 def bigquery_client_patch(monkeypatch, bigquery_client): 75 monkeypatch.setattr(bigquery, "Client", lambda: bigquery_client) 76 [end of samples/snippets/conftest.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/samples/snippets/conftest.py b/samples/snippets/conftest.py --- a/samples/snippets/conftest.py +++ b/samples/snippets/conftest.py @@ -33,7 +33,7 @@ def resource_name_to_date(resource_name: str): start_date = len(RESOURCE_PREFIX) + 1 date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH] - return datetime.strptime(date_string, RESOURCE_DATE_FORMAT) + return datetime.datetime.strptime(date_string, RESOURCE_DATE_FORMAT) @pytest.fixture(scope="session", autouse=True)
{"golden_diff": "diff --git a/samples/snippets/conftest.py b/samples/snippets/conftest.py\n--- a/samples/snippets/conftest.py\n+++ b/samples/snippets/conftest.py\n@@ -33,7 +33,7 @@\n def resource_name_to_date(resource_name: str):\n start_date = len(RESOURCE_PREFIX) + 1\n date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH]\n- return datetime.strptime(date_string, RESOURCE_DATE_FORMAT)\n+ return datetime.datetime.strptime(date_string, RESOURCE_DATE_FORMAT)\n \n \n @pytest.fixture(scope=\"session\", autouse=True)\n", "issue": "AttributeError in `resource_name_to_date()` samples fixture\nSee this samples [test run](https://source.cloud.google.com/results/invocations/e5c424d5-84a6-4505-ae44-3bc70fa94e44/targets/cloud-devrel%2Fclient-libraries%2Fpython%2Fgoogleapis%2Fpython-bigquery%2Fsamples%2Fpython3.6%2Fpresubmit/log). A bug in [this fixture](https://github.com/googleapis/python-bigquery/blob/bd7dbdae5c972b16bafc53c67911eeaa3255a880/samples/snippets/conftest.py#L33-L36) made all of them fail.\r\n\r\n> AttributeError: module 'datetime' has no attribute 'strptime'\r\n\r\nI wonder how this got in in the first place?\r\n\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport random\n\nfrom google.cloud import bigquery\nimport pytest\n\n\nRESOURCE_PREFIX = \"python_bigquery_samples_snippets\"\nRESOURCE_DATE_FORMAT = \"%Y%m%d_%H%M%S\"\nRESOURCE_DATE_LENGTH = 4 + 2 + 2 + 1 + 2 + 2 + 2\n\n\ndef resource_prefix() -> str:\n timestamp = datetime.datetime.utcnow().strftime(RESOURCE_DATE_FORMAT)\n random_string = hex(random.randrange(1000000))[2:]\n return f\"{RESOURCE_PREFIX}_{timestamp}_{random_string}\"\n\n\ndef resource_name_to_date(resource_name: str):\n start_date = len(RESOURCE_PREFIX) + 1\n date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH]\n return datetime.strptime(date_string, RESOURCE_DATE_FORMAT)\n\n\[email protected](scope=\"session\", autouse=True)\ndef cleanup_datasets(bigquery_client: bigquery.Client):\n yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)\n for dataset in bigquery_client.list_datasets():\n if (\n dataset.dataset_id.startswith(RESOURCE_PREFIX)\n and resource_name_to_date(dataset.dataset_id) < yesterday\n ):\n bigquery_client.delete_dataset(\n dataset, delete_contents=True, not_found_ok=True\n )\n\n\[email protected](scope=\"session\")\ndef bigquery_client():\n bigquery_client = bigquery.Client()\n return bigquery_client\n\n\[email protected](scope=\"session\")\ndef project_id(bigquery_client):\n return bigquery_client.project\n\n\[email protected](scope=\"session\")\ndef dataset_id(bigquery_client: bigquery.Client, project_id: str):\n dataset_id = resource_prefix()\n full_dataset_id = f\"{project_id}.{dataset_id}\"\n dataset = bigquery.Dataset(full_dataset_id)\n bigquery_client.create_dataset(dataset)\n yield dataset_id\n bigquery_client.delete_dataset(dataset, delete_contents=True, not_found_ok=True)\n\n\[email protected]\ndef bigquery_client_patch(monkeypatch, bigquery_client):\n monkeypatch.setattr(bigquery, \"Client\", lambda: bigquery_client)\n", "path": "samples/snippets/conftest.py"}]}
1,461
133
gh_patches_debug_60681
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-1830
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Organization view pages result in 500 error Only on stag. I tested several different orgs. ![image](https://cloud.githubusercontent.com/assets/1654485/5165739/e9046902-73e8-11e4-9358-19cd8652c0c8.png) </issue> <code> [start of ckanext-hdx_search/ckanext/hdx_search/plugin.py] 1 import logging, re 2 import ckan.plugins as plugins 3 import ckan.plugins.toolkit as tk 4 import ckan.lib.plugins as lib_plugins 5 6 def convert_country(q): 7 for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}): 8 if re.findall(c['display_name'].lower(),q.lower()): 9 q += ' '+c['name'] 10 return q 11 12 class HDXSearchPlugin(plugins.SingletonPlugin): 13 plugins.implements(plugins.IConfigurer, inherit=False) 14 plugins.implements(plugins.IRoutes, inherit=True) 15 plugins.implements(plugins.ITemplateHelpers, inherit=False) 16 plugins.implements(plugins.IPackageController, inherit=True) 17 18 def update_config(self, config): 19 tk.add_template_directory(config, 'templates') 20 21 def get_helpers(self): 22 return {} 23 24 def before_map(self, map): 25 map.connect('search', '/search', 26 controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search') 27 map.connect('simple_search', 28 '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search') 29 return map 30 31 def after_map(self, map): 32 map.connect('search', '/search', 33 controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search') 34 map.connect('simple_search', 35 '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search') 36 return map 37 38 def before_search(self, search_params): 39 search_params['q'] = convert_country(search_params['q']) 40 if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']: 41 search_params['facet.field'].append('vocab_Topics') 42 43 # If indicator flag is set, search only that type 44 if 'ext_indicator' in search_params['extras']: 45 if int(search_params['extras']['ext_indicator']) == 1: 46 search_params['fq'] = search_params['fq'] + ' +extras_indicator:1' 47 elif int(search_params['extras']['ext_indicator']) == 0: 48 search_params['fq'] = search_params[ 49 'fq'] + ' -extras_indicator:1' 50 return search_params 51 52 def after_search(self, search_results, search_params): 53 return search_results 54 55 def before_view(self, pkg_dict): 56 return pkg_dict 57 [end of ckanext-hdx_search/ckanext/hdx_search/plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckanext-hdx_search/ckanext/hdx_search/plugin.py b/ckanext-hdx_search/ckanext/hdx_search/plugin.py --- a/ckanext-hdx_search/ckanext/hdx_search/plugin.py +++ b/ckanext-hdx_search/ckanext/hdx_search/plugin.py @@ -36,7 +36,7 @@ return map def before_search(self, search_params): - search_params['q'] = convert_country(search_params['q']) + #search_params['q'] = convert_country(search_params['q']) if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']: search_params['facet.field'].append('vocab_Topics')
{"golden_diff": "diff --git a/ckanext-hdx_search/ckanext/hdx_search/plugin.py b/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n--- a/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n+++ b/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n@@ -36,7 +36,7 @@\n return map\n \n def before_search(self, search_params):\n- search_params['q'] = convert_country(search_params['q'])\n+ #search_params['q'] = convert_country(search_params['q'])\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n", "issue": "Organization view pages result in 500 error\nOnly on stag. I tested several different orgs. \n\n![image](https://cloud.githubusercontent.com/assets/1654485/5165739/e9046902-73e8-11e4-9358-19cd8652c0c8.png)\n\n", "before_files": [{"content": "import logging, re\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nimport ckan.lib.plugins as lib_plugins\n\ndef convert_country(q):\n for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}):\n if re.findall(c['display_name'].lower(),q.lower()):\n q += ' '+c['name']\n return q\n\nclass HDXSearchPlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers, inherit=False)\n plugins.implements(plugins.IPackageController, inherit=True)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def get_helpers(self):\n return {}\n\n def before_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def after_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def before_search(self, search_params):\n search_params['q'] = convert_country(search_params['q'])\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n\n # If indicator flag is set, search only that type\n if 'ext_indicator' in search_params['extras']:\n if int(search_params['extras']['ext_indicator']) == 1:\n search_params['fq'] = search_params['fq'] + ' +extras_indicator:1'\n elif int(search_params['extras']['ext_indicator']) == 0:\n search_params['fq'] = search_params[\n 'fq'] + ' -extras_indicator:1'\n return search_params\n\n def after_search(self, search_results, search_params):\n return search_results\n\n def before_view(self, pkg_dict):\n return pkg_dict\n", "path": "ckanext-hdx_search/ckanext/hdx_search/plugin.py"}]}
1,281
169
gh_patches_debug_5673
rasdani/github-patches
git_diff
scikit-hep__pyhf-1546
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Different ordering of channels between model.config.channels and mode.config.channel_nbins # Description We've recently observed that the ordering of channels outputted from `model.config.channels` differs from the order obtained with `model.config.channel_nbins`. This isn't really a bug, but more a quirk which we thought would be worth bringing to the attention of the developers. We ran into some issues when breaking up the `expected_data` list into individual channels by looping through the `model.config.channel_nbins` ordering, rather than the `model.config.channels` ordering (the `model.config.channels` order matches that of the model). Hopefully this issue helps save another user some time in the future, at very least. # Expected Behavior We expected that the order of the channels in the two dictionaries from `model.config.channels` and `model.config.channel_nbins` would be the same. # Actual Behavior The ordering of the channels is different. As an example, we are generating workspaces using two categories, and we're separating our data by year (2016, 2017, and 2018). This gives us six channels total. The outputs are: ``` (Pdb) model.config.channels ['vbf_channel_16_high_cat', 'vbf_channel_16_low_cat', 'vbf_channel_17_high_cat', 'vbf_channel_17_low_cat', 'vbf_channel_18_high_cat', 'vbf_channel_18_low_cat'] (Pdb) model.config.channel_nbins {'vbf_channel_16_low_cat': 12, 'vbf_channel_16_high_cat': 18, 'vbf_channel_17_low_cat': 12, 'vbf_channel_17_high_cat': 18, 'vbf_channel_18_low_cat': 12, 'vbf_channel_18_high_cat': 18} ``` I believe that `model.config.channels` is possibly re-ordering the channels so that the names are in alphabetical order. I have not confirmed this, though. The workspace .json file is filled with the ordering produced by `model.config.channel_nbins`. # Steps to Reproduce I'm using pyhf version 0.6.2 along with python 3.8.8. I can make a dummy workspace for this issue, but I thought that since this is a pretty small issue, it might be overkill. Please let me know if this would be helpful, though. # Checklist - [ ] Run `git fetch` to get the most up to date version of `master` - [X] Searched through existing Issues to confirm this is not a duplicate issue - [X] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue Thanks! -Rachel </issue> <code> [start of src/pyhf/mixins.py] 1 import logging 2 3 log = logging.getLogger(__name__) 4 5 6 class _ChannelSummaryMixin: 7 """ 8 A mixin that provides summary data of the provided channels. 9 10 This mixin will forward all other information to other classes defined in the Child class. 11 12 Args: 13 **channels: A list of channels to provide summary information about. Follows the `defs.json#/definitions/channel` schema. 14 """ 15 16 def __init__(self, *args, **kwargs): 17 channels = kwargs.pop('channels') 18 super().__init__(*args, **kwargs) 19 self.channels = [] 20 self.samples = [] 21 self.parameters = [] 22 self.modifiers = [] 23 # keep track of the width of each channel (how many bins) 24 self.channel_nbins = {} 25 # need to keep track in which order we added the constraints 26 # so that we can generate correctly-ordered data 27 for channel in channels: 28 self.channels.append(channel['name']) 29 self.channel_nbins[channel['name']] = len(channel['samples'][0]['data']) 30 for sample in channel['samples']: 31 self.samples.append(sample['name']) 32 for modifier_def in sample['modifiers']: 33 self.parameters.append(modifier_def['name']) 34 self.modifiers.append( 35 ( 36 modifier_def['name'], # mod name 37 modifier_def['type'], # mod type 38 ) 39 ) 40 41 self.channels = sorted(list(set(self.channels))) 42 self.samples = sorted(list(set(self.samples))) 43 self.parameters = sorted(list(set(self.parameters))) 44 self.modifiers = sorted(list(set(self.modifiers))) 45 46 self.channel_slices = {} 47 begin = 0 48 for c in self.channels: 49 end = begin + self.channel_nbins[c] 50 self.channel_slices[c] = slice(begin, end) 51 begin = end 52 [end of src/pyhf/mixins.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pyhf/mixins.py b/src/pyhf/mixins.py --- a/src/pyhf/mixins.py +++ b/src/pyhf/mixins.py @@ -42,6 +42,9 @@ self.samples = sorted(list(set(self.samples))) self.parameters = sorted(list(set(self.parameters))) self.modifiers = sorted(list(set(self.modifiers))) + self.channel_nbins = { + channel: self.channel_nbins[channel] for channel in self.channels + } self.channel_slices = {} begin = 0
{"golden_diff": "diff --git a/src/pyhf/mixins.py b/src/pyhf/mixins.py\n--- a/src/pyhf/mixins.py\n+++ b/src/pyhf/mixins.py\n@@ -42,6 +42,9 @@\n self.samples = sorted(list(set(self.samples)))\n self.parameters = sorted(list(set(self.parameters)))\n self.modifiers = sorted(list(set(self.modifiers)))\n+ self.channel_nbins = {\n+ channel: self.channel_nbins[channel] for channel in self.channels\n+ }\n \n self.channel_slices = {}\n begin = 0\n", "issue": "Different ordering of channels between model.config.channels and mode.config.channel_nbins\n# Description\r\n\r\nWe've recently observed that the ordering of channels outputted from `model.config.channels` differs from the order obtained with `model.config.channel_nbins`. This isn't really a bug, but more a quirk which we thought would be worth bringing to the attention of the developers. We ran into some issues when breaking up the `expected_data` list into individual channels by looping through the `model.config.channel_nbins` ordering, rather than the `model.config.channels` ordering (the `model.config.channels` order matches that of the model). Hopefully this issue helps save another user some time in the future, at very least. \r\n\r\n# Expected Behavior\r\n\r\nWe expected that the order of the channels in the two dictionaries from `model.config.channels` and `model.config.channel_nbins` would be the same. \r\n\r\n# Actual Behavior\r\n\r\nThe ordering of the channels is different. As an example, we are generating workspaces using two categories, and we're separating our data by year (2016, 2017, and 2018). This gives us six channels total. The outputs are: \r\n```\r\n(Pdb) model.config.channels\r\n['vbf_channel_16_high_cat', 'vbf_channel_16_low_cat', 'vbf_channel_17_high_cat', 'vbf_channel_17_low_cat', 'vbf_channel_18_high_cat', 'vbf_channel_18_low_cat']\r\n\r\n(Pdb) model.config.channel_nbins\r\n{'vbf_channel_16_low_cat': 12, 'vbf_channel_16_high_cat': 18, 'vbf_channel_17_low_cat': 12, 'vbf_channel_17_high_cat': 18, 'vbf_channel_18_low_cat': 12, 'vbf_channel_18_high_cat': 18}\r\n```\r\nI believe that `model.config.channels` is possibly re-ordering the channels so that the names are in alphabetical order. I have not confirmed this, though. The workspace .json file is filled with the ordering produced by `model.config.channel_nbins`. \r\n\r\n# Steps to Reproduce\r\n\r\nI'm using pyhf version 0.6.2 along with python 3.8.8. \r\nI can make a dummy workspace for this issue, but I thought that since this is a pretty small issue, it might be overkill. Please let me know if this would be helpful, though. \r\n\r\n# Checklist\r\n\r\n- [ ] Run `git fetch` to get the most up to date version of `master`\r\n- [X] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [X] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\r\n\r\nThanks! -Rachel \r\n\n", "before_files": [{"content": "import logging\n\nlog = logging.getLogger(__name__)\n\n\nclass _ChannelSummaryMixin:\n \"\"\"\n A mixin that provides summary data of the provided channels.\n\n This mixin will forward all other information to other classes defined in the Child class.\n\n Args:\n **channels: A list of channels to provide summary information about. Follows the `defs.json#/definitions/channel` schema.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n channels = kwargs.pop('channels')\n super().__init__(*args, **kwargs)\n self.channels = []\n self.samples = []\n self.parameters = []\n self.modifiers = []\n # keep track of the width of each channel (how many bins)\n self.channel_nbins = {}\n # need to keep track in which order we added the constraints\n # so that we can generate correctly-ordered data\n for channel in channels:\n self.channels.append(channel['name'])\n self.channel_nbins[channel['name']] = len(channel['samples'][0]['data'])\n for sample in channel['samples']:\n self.samples.append(sample['name'])\n for modifier_def in sample['modifiers']:\n self.parameters.append(modifier_def['name'])\n self.modifiers.append(\n (\n modifier_def['name'], # mod name\n modifier_def['type'], # mod type\n )\n )\n\n self.channels = sorted(list(set(self.channels)))\n self.samples = sorted(list(set(self.samples)))\n self.parameters = sorted(list(set(self.parameters)))\n self.modifiers = sorted(list(set(self.modifiers)))\n\n self.channel_slices = {}\n begin = 0\n for c in self.channels:\n end = begin + self.channel_nbins[c]\n self.channel_slices[c] = slice(begin, end)\n begin = end\n", "path": "src/pyhf/mixins.py"}]}
1,630
125
gh_patches_debug_12825
rasdani/github-patches
git_diff
fossasia__open-event-server-4403
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Verify Email endpoint doesn’t work. **I'm submitting a ...** (check one with "x") - [X] bug report - [ ] feature request - [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server There are two specific errors in the function. - [x] Firstly the email verification state change isn’t saved in db - [x] Response for the view function is missing. </issue> <code> [start of app/api/auth.py] 1 import base64 2 from flask import request, jsonify, abort, make_response, Blueprint 3 from flask_jwt import current_identity as current_user, jwt_required 4 from sqlalchemy.orm.exc import NoResultFound 5 6 from app import get_settings 7 from app.api.helpers.db import save_to_db 8 from app.api.helpers.files import make_frontend_url 9 from app.api.helpers.mail import send_email_with_action 10 11 from app.api.helpers.utilities import get_serializer 12 from app.models.mail import PASSWORD_RESET 13 from app.models.user import User 14 15 auth_routes = Blueprint('auth', __name__, url_prefix='/v1/auth') 16 17 18 @auth_routes.route('/verify-email', methods=['POST']) 19 def verify_email(): 20 token = base64.b64decode(request.json['data']['token']) 21 s = get_serializer() 22 23 try: 24 data = s.loads(token) 25 except Exception: 26 return abort( 27 make_response(jsonify(error="Invalid Token"), 400) 28 ) 29 30 try: 31 user = User.query.filter_by(email=data[0]).one() 32 except Exception: 33 return abort( 34 make_response(jsonify(error="Invalid Token"), 400) 35 ) 36 else: 37 user.is_verified = True 38 39 40 @auth_routes.route('/reset-password', methods=['POST']) 41 def reset_password_post(): 42 email = request.json['data']['email'] 43 44 try: 45 user = User.query.filter_by(email=email).one() 46 except NoResultFound: 47 return abort( 48 make_response(jsonify(error="User not found"), 404) 49 ) 50 else: 51 link = make_frontend_url('/reset-password', {'token': user.reset_password}) 52 send_email_with_action(user, PASSWORD_RESET, app_name=get_settings()['app_name'], link=link) 53 54 return make_response(jsonify(message="Email Sent"), 200) 55 56 57 @auth_routes.route('/reset-password', methods=['PATCH']) 58 def reset_password_patch(): 59 token = request.json['data']['token'] 60 password = request.json['data']['password'] 61 62 try: 63 user = User.query.filter_by(reset_password=token).one() 64 except NoResultFound: 65 return abort( 66 make_response(jsonify(error="User not found"), 404) 67 ) 68 else: 69 user.password = password 70 save_to_db(user) 71 72 return jsonify({ 73 "id": user.id, 74 "email": user.email, 75 "name": user.name if user.get('name') else None 76 }) 77 78 79 @auth_routes.route('/change-password', methods=['POST']) 80 @jwt_required() 81 def change_password(): 82 old_password = request.json['data']['old-password'] 83 new_password = request.json['data']['new-password'] 84 85 try: 86 user = User.query.filter_by(id=current_user.id).one() 87 except NoResultFound: 88 return abort( 89 make_response(jsonify(error="User not found"), 404) 90 ) 91 else: 92 if user.is_correct_password(old_password): 93 94 user.password = new_password 95 save_to_db(user) 96 else: 97 return make_response(jsonify(error="Wrong Password"), 400) 98 99 return jsonify({ 100 "id": user.id, 101 "email": user.email, 102 "name": user.fullname if user.fullname else None, 103 "password_changed": True 104 }) 105 [end of app/api/auth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/auth.py b/app/api/auth.py --- a/app/api/auth.py +++ b/app/api/auth.py @@ -35,6 +35,8 @@ ) else: user.is_verified = True + save_to_db(user) + return make_response(jsonify(message="Email Verified"), 200) @auth_routes.route('/reset-password', methods=['POST']) @@ -94,7 +96,9 @@ user.password = new_password save_to_db(user) else: - return make_response(jsonify(error="Wrong Password"), 400) + return abort( + make_response(jsonify(error="Wrong Password"), 400) + ) return jsonify({ "id": user.id,
{"golden_diff": "diff --git a/app/api/auth.py b/app/api/auth.py\n--- a/app/api/auth.py\n+++ b/app/api/auth.py\n@@ -35,6 +35,8 @@\n )\n else:\n user.is_verified = True\n+ save_to_db(user)\n+ return make_response(jsonify(message=\"Email Verified\"), 200)\n \n \n @auth_routes.route('/reset-password', methods=['POST'])\n@@ -94,7 +96,9 @@\n user.password = new_password\n save_to_db(user)\n else:\n- return make_response(jsonify(error=\"Wrong Password\"), 400)\n+ return abort(\n+ make_response(jsonify(error=\"Wrong Password\"), 400)\n+ )\n \n return jsonify({\n \"id\": user.id,\n", "issue": " Verify Email endpoint doesn\u2019t work.\n**I'm submitting a ...** (check one with \"x\")\r\n- [X] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\nThere are two specific errors in the function.\r\n- [x] Firstly the email verification state change isn\u2019t saved in db\r\n- [x] Response for the view function is missing.\r\n\n", "before_files": [{"content": "import base64\nfrom flask import request, jsonify, abort, make_response, Blueprint\nfrom flask_jwt import current_identity as current_user, jwt_required\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app import get_settings\nfrom app.api.helpers.db import save_to_db\nfrom app.api.helpers.files import make_frontend_url\nfrom app.api.helpers.mail import send_email_with_action\n\nfrom app.api.helpers.utilities import get_serializer\nfrom app.models.mail import PASSWORD_RESET\nfrom app.models.user import User\n\nauth_routes = Blueprint('auth', __name__, url_prefix='/v1/auth')\n\n\n@auth_routes.route('/verify-email', methods=['POST'])\ndef verify_email():\n token = base64.b64decode(request.json['data']['token'])\n s = get_serializer()\n\n try:\n data = s.loads(token)\n except Exception:\n return abort(\n make_response(jsonify(error=\"Invalid Token\"), 400)\n )\n\n try:\n user = User.query.filter_by(email=data[0]).one()\n except Exception:\n return abort(\n make_response(jsonify(error=\"Invalid Token\"), 400)\n )\n else:\n user.is_verified = True\n\n\n@auth_routes.route('/reset-password', methods=['POST'])\ndef reset_password_post():\n email = request.json['data']['email']\n\n try:\n user = User.query.filter_by(email=email).one()\n except NoResultFound:\n return abort(\n make_response(jsonify(error=\"User not found\"), 404)\n )\n else:\n link = make_frontend_url('/reset-password', {'token': user.reset_password})\n send_email_with_action(user, PASSWORD_RESET, app_name=get_settings()['app_name'], link=link)\n\n return make_response(jsonify(message=\"Email Sent\"), 200)\n\n\n@auth_routes.route('/reset-password', methods=['PATCH'])\ndef reset_password_patch():\n token = request.json['data']['token']\n password = request.json['data']['password']\n\n try:\n user = User.query.filter_by(reset_password=token).one()\n except NoResultFound:\n return abort(\n make_response(jsonify(error=\"User not found\"), 404)\n )\n else:\n user.password = password\n save_to_db(user)\n\n return jsonify({\n \"id\": user.id,\n \"email\": user.email,\n \"name\": user.name if user.get('name') else None\n })\n\n\n@auth_routes.route('/change-password', methods=['POST'])\n@jwt_required()\ndef change_password():\n old_password = request.json['data']['old-password']\n new_password = request.json['data']['new-password']\n\n try:\n user = User.query.filter_by(id=current_user.id).one()\n except NoResultFound:\n return abort(\n make_response(jsonify(error=\"User not found\"), 404)\n )\n else:\n if user.is_correct_password(old_password):\n\n user.password = new_password\n save_to_db(user)\n else:\n return make_response(jsonify(error=\"Wrong Password\"), 400)\n\n return jsonify({\n \"id\": user.id,\n \"email\": user.email,\n \"name\": user.fullname if user.fullname else None,\n \"password_changed\": True\n })\n", "path": "app/api/auth.py"}]}
1,553
171
gh_patches_debug_7577
rasdani/github-patches
git_diff
webkom__lego-24
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add shell pluss and django extensions </issue> <code> [start of lego/settings/base.py] 1 import sys 2 import os 3 BASE_DIR = os.path.dirname(os.path.dirname(__file__)) 4 5 SECRET_KEY = 'This is supersecret' 6 7 TESTING = 'test' in sys.argv # Check if manage.py test has been run 8 9 DEBUG = True 10 TEMPLATE_DEBUG = True 11 ALLOWED_HOSTS = [] 12 13 AUTH_USER_MODEL = 'users.User' 14 15 INSTALLED_APPS = ( 16 'django.contrib.admin', 17 'django.contrib.auth', 18 'django.contrib.contenttypes', 19 'django.contrib.sessions', 20 'django.contrib.messages', 21 'django.contrib.staticfiles', 22 23 'oauth2_provider', 24 'rest_framework', 25 26 'lego.apps.LegoConfig', 27 'lego.users', 28 29 'lego.app.oauth', 30 ) 31 32 AUTHENTICATION_BACKEND = ( 33 'oauth2_provider.backends.OAuth2Backend' 34 ) 35 36 MIDDLEWARE_CLASSES = ( 37 'django.contrib.sessions.middleware.SessionMiddleware', 38 'django.middleware.common.CommonMiddleware', 39 'django.middleware.csrf.CsrfViewMiddleware', 40 'oauth2_provider.middleware.OAuth2TokenMiddleware', 41 'django.contrib.auth.middleware.AuthenticationMiddleware', 42 'django.contrib.messages.middleware.MessageMiddleware', 43 'django.middleware.clickjacking.XFrameOptionsMiddleware', 44 ) 45 46 OAUTH2_PROVIDER_APPLICATION_MODEL = 'oauth.APIApplication' 47 48 ROOT_URLCONF = 'lego.urls' 49 50 WSGI_APPLICATION = 'lego.wsgi.application' 51 52 LANGUAGE_CODE = 'en-us' 53 TIME_ZONE = 'UTC' 54 USE_I18N = True 55 USE_L10N = True 56 USE_TZ = True 57 DEBUG = True 58 59 STATIC_URL = '/static/' 60 STATIC_ROOT = os.path.join(BASE_DIR, '../static') 61 MEDIA_URL = '/uploads/' 62 MEDIA_ROOT = os.path.join(BASE_DIR, '../uploads') 63 64 TEMPLATE_DIRS = ( 65 os.path.join(BASE_DIR, 'templates'), 66 ) 67 [end of lego/settings/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lego/settings/base.py b/lego/settings/base.py --- a/lego/settings/base.py +++ b/lego/settings/base.py @@ -6,6 +6,8 @@ TESTING = 'test' in sys.argv # Check if manage.py test has been run +SHELL_PLUS = "ipython" + DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] @@ -20,6 +22,8 @@ 'django.contrib.messages', 'django.contrib.staticfiles', + 'django_extensions', + 'oauth2_provider', 'rest_framework',
{"golden_diff": "diff --git a/lego/settings/base.py b/lego/settings/base.py\n--- a/lego/settings/base.py\n+++ b/lego/settings/base.py\n@@ -6,6 +6,8 @@\n \n TESTING = 'test' in sys.argv # Check if manage.py test has been run\n \n+SHELL_PLUS = \"ipython\"\n+\n DEBUG = True\n TEMPLATE_DEBUG = True\n ALLOWED_HOSTS = []\n@@ -20,6 +22,8 @@\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n \n+ 'django_extensions',\n+\n 'oauth2_provider',\n 'rest_framework',\n", "issue": "Add shell pluss and django extensions\n\n", "before_files": [{"content": "import sys\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nSECRET_KEY = 'This is supersecret'\n\nTESTING = 'test' in sys.argv # Check if manage.py test has been run\n\nDEBUG = True\nTEMPLATE_DEBUG = True\nALLOWED_HOSTS = []\n\nAUTH_USER_MODEL = 'users.User'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'oauth2_provider',\n 'rest_framework',\n\n 'lego.apps.LegoConfig',\n 'lego.users',\n\n 'lego.app.oauth',\n)\n\nAUTHENTICATION_BACKEND = (\n 'oauth2_provider.backends.OAuth2Backend'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'oauth2_provider.middleware.OAuth2TokenMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nOAUTH2_PROVIDER_APPLICATION_MODEL = 'oauth.APIApplication'\n\nROOT_URLCONF = 'lego.urls'\n\nWSGI_APPLICATION = 'lego.wsgi.application'\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nDEBUG = True\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, '../static')\nMEDIA_URL = '/uploads/'\nMEDIA_ROOT = os.path.join(BASE_DIR, '../uploads')\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'templates'),\n)\n", "path": "lego/settings/base.py"}]}
1,048
135
gh_patches_debug_25227
rasdani/github-patches
git_diff
Textualize__textual-584
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Lazy load Widgets We have a `widgets` module that imports all the widgets. THis allows for easy imports like: ```python from textual.widgets import DataTable, Button ``` The downside is that all widgets are imported even if you only need one, increasing startup time. I think we should be able to do this lazily some how, possibly by implementing a module level `__getattr__`. </issue> <code> [start of src/textual/widgets/__init__.py] 1 from ._footer import Footer 2 from ._header import Header 3 from ._button import Button 4 from ._placeholder import Placeholder 5 from ._static import Static 6 from ._tree_control import TreeControl, TreeClick, TreeNode, NodeID 7 from ._directory_tree import DirectoryTree, FileClick 8 9 __all__ = [ 10 "Button", 11 "DirectoryTree", 12 "FileClick", 13 "Footer", 14 "Header", 15 "Placeholder", 16 "Static", 17 "TreeClick", 18 "TreeControl", 19 "TreeNode", 20 "NodeID", 21 ] 22 [end of src/textual/widgets/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/textual/widgets/__init__.py b/src/textual/widgets/__init__.py --- a/src/textual/widgets/__init__.py +++ b/src/textual/widgets/__init__.py @@ -1,21 +1,41 @@ -from ._footer import Footer -from ._header import Header -from ._button import Button -from ._placeholder import Placeholder -from ._static import Static -from ._tree_control import TreeControl, TreeClick, TreeNode, NodeID -from ._directory_tree import DirectoryTree, FileClick +from __future__ import annotations +from importlib import import_module +import typing +from ..case import camel_to_snake + +if typing.TYPE_CHECKING: + from ..widget import Widget + + +# ⚠️For any new built-in Widget we create, not only we have to add them to the following list, but also to the +# `__init__.pyi` file in this same folder - otherwise text editors and type checkers won't be able to "see" them. __all__ = [ "Button", "DirectoryTree", - "FileClick", "Footer", "Header", "Placeholder", "Static", - "TreeClick", "TreeControl", - "TreeNode", - "NodeID", ] + + +_WIDGETS_LAZY_LOADING_CACHE: dict[str, type[Widget]] = {} + +# Let's decrease startup time by lazy loading our Widgets: +def __getattr__(widget_class: str) -> type[Widget]: + try: + return _WIDGETS_LAZY_LOADING_CACHE[widget_class] + except KeyError: + pass + + if widget_class not in __all__: + raise ImportError(f"Package 'textual.widgets' has no class '{widget_class}'") + + widget_module_path = f"._{camel_to_snake(widget_class)}" + module = import_module(widget_module_path, package="textual.widgets") + class_ = getattr(module, widget_class) + + _WIDGETS_LAZY_LOADING_CACHE[widget_class] = class_ + return class_
{"golden_diff": "diff --git a/src/textual/widgets/__init__.py b/src/textual/widgets/__init__.py\n--- a/src/textual/widgets/__init__.py\n+++ b/src/textual/widgets/__init__.py\n@@ -1,21 +1,41 @@\n-from ._footer import Footer\n-from ._header import Header\n-from ._button import Button\n-from ._placeholder import Placeholder\n-from ._static import Static\n-from ._tree_control import TreeControl, TreeClick, TreeNode, NodeID\n-from ._directory_tree import DirectoryTree, FileClick\n+from __future__ import annotations\n+from importlib import import_module\n+import typing\n \n+from ..case import camel_to_snake\n+\n+if typing.TYPE_CHECKING:\n+ from ..widget import Widget\n+\n+\n+# \u26a0\ufe0fFor any new built-in Widget we create, not only we have to add them to the following list, but also to the\n+# `__init__.pyi` file in this same folder - otherwise text editors and type checkers won't be able to \"see\" them.\n __all__ = [\n \"Button\",\n \"DirectoryTree\",\n- \"FileClick\",\n \"Footer\",\n \"Header\",\n \"Placeholder\",\n \"Static\",\n- \"TreeClick\",\n \"TreeControl\",\n- \"TreeNode\",\n- \"NodeID\",\n ]\n+\n+\n+_WIDGETS_LAZY_LOADING_CACHE: dict[str, type[Widget]] = {}\n+\n+# Let's decrease startup time by lazy loading our Widgets:\n+def __getattr__(widget_class: str) -> type[Widget]:\n+ try:\n+ return _WIDGETS_LAZY_LOADING_CACHE[widget_class]\n+ except KeyError:\n+ pass\n+\n+ if widget_class not in __all__:\n+ raise ImportError(f\"Package 'textual.widgets' has no class '{widget_class}'\")\n+\n+ widget_module_path = f\"._{camel_to_snake(widget_class)}\"\n+ module = import_module(widget_module_path, package=\"textual.widgets\")\n+ class_ = getattr(module, widget_class)\n+\n+ _WIDGETS_LAZY_LOADING_CACHE[widget_class] = class_\n+ return class_\n", "issue": "Lazy load Widgets\nWe have a `widgets` module that imports all the widgets. THis allows for easy imports like:\r\n\r\n```python\r\nfrom textual.widgets import DataTable, Button\r\n```\r\n\r\nThe downside is that all widgets are imported even if you only need one, increasing startup time.\r\n\r\nI think we should be able to do this lazily some how, possibly by implementing a module level `__getattr__`.\n", "before_files": [{"content": "from ._footer import Footer\nfrom ._header import Header\nfrom ._button import Button\nfrom ._placeholder import Placeholder\nfrom ._static import Static\nfrom ._tree_control import TreeControl, TreeClick, TreeNode, NodeID\nfrom ._directory_tree import DirectoryTree, FileClick\n\n__all__ = [\n \"Button\",\n \"DirectoryTree\",\n \"FileClick\",\n \"Footer\",\n \"Header\",\n \"Placeholder\",\n \"Static\",\n \"TreeClick\",\n \"TreeControl\",\n \"TreeNode\",\n \"NodeID\",\n]\n", "path": "src/textual/widgets/__init__.py"}]}
772
459
gh_patches_debug_14294
rasdani/github-patches
git_diff
scikit-image__scikit-image-4172
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> IO: unhandled exception, in case the URL cannot be opened ## Description ![image](https://user-images.githubusercontent.com/30770221/53887634-4a1eb600-3fe8-11e9-97db-f374815eb620.png) ## Way to reproduce ```python def url_to_image(url): """download image from url and return it""" im_mat = io.imread(url) image = cv2.imdecode(im_mat, cv2.IMREAD_COLOR) # return the image return image ``` ## Version information ```python >>> from __future__ import print_function >>> import sys; print(sys.version) 3.7.2 (tags/v3.7.2:9a3ffc0492, Dec 23 2018, 23:09:28) [MSC v.1916 64 bit (AMD64)] >>> import platform; print(platform.platform()) Windows-10-10.0.17134-SP0 >>> import skimage; print("scikit-image version: {}".format(skimage.__version__)) scikit-image version: 0.14.2 >>> import numpy; print("numpy version: {}".format(numpy.__version__)) ``` ```python Traceback (most recent call last): File "C:\Python37\lib\site-packages\skimage\io\util.py", line 28, in file_or_url_context with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f: File "C:\Python37\lib\tempfile.py", line 547, in NamedTemporaryFile (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type) File "C:\Python37\lib\tempfile.py", line 258, in _mkstemp_inner fd = _os.open(file, flags, 0o600) OSError: [Errno 22] Invalid argument: 'C:\\Users\\kelvin\\AppData\\Local\\Temp\\tmpegjt2y8r.png?mi_guid=CFCDF62B5115D31389F2893D3CCFFA5CB42BDE7F8CD696BB57B97B8BB567940F&mi_language=EN' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "imagecheck.py", line 28, in <module> print(url_to_image("http://www.movable-ink-6437.com/p/rp/0fd853751b1edbb0.png?mi_guid=CFCDF62B5115D31389F2893D3CCFFA5CB42BDE7F8CD696BB57B97B8BB567940F&mi_language=EN")) File "imagecheck.py", line 9, in url_to_image im_mat = io.imread(url) File "C:\Python37\lib\site-packages\skimage\io\_io.py", line 61, in imread with file_or_url_context(fname) as fname: File "C:\Python37\lib\contextlib.py", line 112, in __enter__ return next(self.gen) File "C:\Python37\lib\site-packages\skimage\io\util.py", line 34, in file_or_url_context os.remove(f.name) UnboundLocalError: local variable 'f' referenced before assignment ``` </issue> <code> [start of skimage/io/util.py] 1 import urllib.parse 2 import urllib.request 3 4 import os 5 import re 6 import tempfile 7 from contextlib import contextmanager 8 9 10 URL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\') 11 12 13 def is_url(filename): 14 """Return True if string is an http or ftp path.""" 15 return (isinstance(filename, str) and 16 URL_REGEX.match(filename) is not None) 17 18 19 @contextmanager 20 def file_or_url_context(resource_name): 21 """Yield name of file from the given resource (i.e. file or url).""" 22 if is_url(resource_name): 23 url_components = urllib.parse.urlparse(resource_name) 24 _, ext = os.path.splitext(url_components.path) 25 try: 26 with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f: 27 u = urllib.request.urlopen(resource_name) 28 f.write(u.read()) 29 # f must be closed before yielding 30 yield f.name 31 finally: 32 os.remove(f.name) 33 else: 34 yield resource_name 35 [end of skimage/io/util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/io/util.py b/skimage/io/util.py --- a/skimage/io/util.py +++ b/skimage/io/util.py @@ -1,5 +1,6 @@ import urllib.parse import urllib.request +from urllib.error import URLError, HTTPError import os import re @@ -28,7 +29,15 @@ f.write(u.read()) # f must be closed before yielding yield f.name - finally: + except (URLError, HTTPError): + # could not open URL + os.remove(f.name) + raise + except (FileNotFoundError, FileExistsError, + PermissionError, BaseException): + # could not create temporary file + raise + else: os.remove(f.name) else: yield resource_name
{"golden_diff": "diff --git a/skimage/io/util.py b/skimage/io/util.py\n--- a/skimage/io/util.py\n+++ b/skimage/io/util.py\n@@ -1,5 +1,6 @@\n import urllib.parse\n import urllib.request\n+from urllib.error import URLError, HTTPError\n \n import os\n import re\n@@ -28,7 +29,15 @@\n f.write(u.read())\n # f must be closed before yielding\n yield f.name\n- finally:\n+ except (URLError, HTTPError):\n+ # could not open URL\n+ os.remove(f.name)\n+ raise\n+ except (FileNotFoundError, FileExistsError,\n+ PermissionError, BaseException):\n+ # could not create temporary file\n+ raise\n+ else:\n os.remove(f.name)\n else:\n yield resource_name\n", "issue": "IO: unhandled exception, in case the URL cannot be opened\n## Description\r\n![image](https://user-images.githubusercontent.com/30770221/53887634-4a1eb600-3fe8-11e9-97db-f374815eb620.png)\r\n\r\n\r\n## Way to reproduce\r\n```python\r\ndef url_to_image(url):\r\n \"\"\"download image from url and return it\"\"\"\r\n im_mat = io.imread(url)\r\n image = cv2.imdecode(im_mat, cv2.IMREAD_COLOR)\r\n \r\n # return the image\r\n return image\r\n```\r\n\r\n\r\n## Version information\r\n```python\r\n>>> from __future__ import print_function\r\n>>> import sys; print(sys.version)\r\n3.7.2 (tags/v3.7.2:9a3ffc0492, Dec 23 2018, 23:09:28) [MSC v.1916 64 bit (AMD64)]\r\n>>> import platform; print(platform.platform())\r\nWindows-10-10.0.17134-SP0\r\n>>> import skimage; print(\"scikit-image version: {}\".format(skimage.__version__))\r\nscikit-image version: 0.14.2\r\n>>> import numpy; print(\"numpy version: {}\".format(numpy.__version__))\r\n```\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"C:\\Python37\\lib\\site-packages\\skimage\\io\\util.py\", line 28, in file_or_url_context\r\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\r\n File \"C:\\Python37\\lib\\tempfile.py\", line 547, in NamedTemporaryFile\r\n (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)\r\n File \"C:\\Python37\\lib\\tempfile.py\", line 258, in _mkstemp_inner\r\n fd = _os.open(file, flags, 0o600)\r\nOSError: [Errno 22] Invalid argument: 'C:\\\\Users\\\\kelvin\\\\AppData\\\\Local\\\\Temp\\\\tmpegjt2y8r.png?mi_guid=CFCDF62B5115D31389F2893D3CCFFA5CB42BDE7F8CD696BB57B97B8BB567940F&mi_language=EN'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"imagecheck.py\", line 28, in <module>\r\n print(url_to_image(\"http://www.movable-ink-6437.com/p/rp/0fd853751b1edbb0.png?mi_guid=CFCDF62B5115D31389F2893D3CCFFA5CB42BDE7F8CD696BB57B97B8BB567940F&mi_language=EN\"))\r\n File \"imagecheck.py\", line 9, in url_to_image\r\n im_mat = io.imread(url)\r\n File \"C:\\Python37\\lib\\site-packages\\skimage\\io\\_io.py\", line 61, in imread\r\n with file_or_url_context(fname) as fname:\r\n File \"C:\\Python37\\lib\\contextlib.py\", line 112, in __enter__\r\n return next(self.gen)\r\n File \"C:\\Python37\\lib\\site-packages\\skimage\\io\\util.py\", line 34, in file_or_url_context\r\n os.remove(f.name)\r\nUnboundLocalError: local variable 'f' referenced before assignment\r\n```\r\n\r\n\n", "before_files": [{"content": "import urllib.parse\nimport urllib.request\n\nimport os\nimport re\nimport tempfile\nfrom contextlib import contextmanager\n\n\nURL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\\\')\n\n\ndef is_url(filename):\n \"\"\"Return True if string is an http or ftp path.\"\"\"\n return (isinstance(filename, str) and\n URL_REGEX.match(filename) is not None)\n\n\n@contextmanager\ndef file_or_url_context(resource_name):\n \"\"\"Yield name of file from the given resource (i.e. file or url).\"\"\"\n if is_url(resource_name):\n url_components = urllib.parse.urlparse(resource_name)\n _, ext = os.path.splitext(url_components.path)\n try:\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\n u = urllib.request.urlopen(resource_name)\n f.write(u.read())\n # f must be closed before yielding\n yield f.name\n finally:\n os.remove(f.name)\n else:\n yield resource_name\n", "path": "skimage/io/util.py"}]}
1,656
188
gh_patches_debug_33372
rasdani/github-patches
git_diff
rasterio__rasterio-241
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> rio merge assumes nodata Running `rio merge` over a set of images without a nodata value results in an error: ``` (rio-test)$ rio merge warped.tif merged.tif ERROR:rio:Failed. Exception caught Traceback (most recent call last): File "/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py", line 50, in merge dest.fill(nodataval) TypeError: long() argument must be a string or a number, not 'NoneType' ``` Checking for nodata in `rasterio/rio/merge.py` still results in a lower level error (https://github.com/mapbox/rasterio/commit/6b251f8261fd1b3e785dc73462212741b6bd62b7): ``` (rio-test)$ rio merge warped.tif merged.tif ERROR:rio:Failed. Exception caught Traceback (most recent call last): File "/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py", line 55, in merge data = src.read() File "rasterio/_io.pyx", line 654, in rasterio._io.RasterReader.read (rasterio/_io.c:8075) arr.fill(ndv) TypeError: long() argument must be a string or a number, not 'NoneType' ``` Patching this case throws the error down further until a regular numpy array is confused for a masked array (https://github.com/mapbox/rasterio/commit/c271c4ee23531db7c24208c85f56d04d79acc851): ``` (rio-test)$ rio merge warped.tif merged.tif /Users/amit/Mapbox/rasterio/rasterio/rio/merge.py:58: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future. dest==nodataval, data.mask==False)) ERROR:rio:Failed. Exception caught Traceback (most recent call last): File "/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py", line 58, in merge dest==nodataval, data.mask==False)) AttributeError: 'numpy.ndarray' object has no attribute 'mask' ``` /cc @sgillies </issue> <code> [start of rasterio/rio/merge.py] 1 # Merge command. 2 3 import logging 4 import os.path 5 import sys 6 7 import click 8 from cligj import files_inout_arg, format_opt 9 10 import rasterio 11 12 from rasterio.rio.cli import cli 13 14 15 @cli.command(short_help="Merge a stack of raster datasets.") 16 @files_inout_arg 17 @format_opt 18 @click.pass_context 19 def merge(ctx, files, driver): 20 """Copy valid pixels from input files to an output file. 21 22 All files must have the same shape, number of bands, and data type. 23 24 Input files are merged in their listed order using a reverse 25 painter's algorithm. 26 """ 27 import numpy as np 28 29 verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1 30 logger = logging.getLogger('rio') 31 try: 32 with rasterio.drivers(CPL_DEBUG=verbosity>2): 33 output = files[-1] 34 files = files[:-1] 35 36 with rasterio.open(files[0]) as first: 37 kwargs = first.meta 38 kwargs['transform'] = kwargs.pop('affine') 39 dest = np.empty((first.count,) + first.shape, 40 dtype=first.dtypes[0]) 41 42 if os.path.exists(output): 43 dst = rasterio.open(output, 'r+') 44 nodataval = dst.nodatavals[0] 45 else: 46 kwargs['driver'] == driver 47 dst = rasterio.open(output, 'w', **kwargs) 48 nodataval = first.nodatavals[0] 49 50 dest.fill(nodataval) 51 52 for fname in reversed(files): 53 with rasterio.open(fname) as src: 54 data = src.read() 55 np.copyto(dest, data, 56 where=np.logical_and( 57 dest==nodataval, data.mask==False)) 58 59 if dst.mode == 'r+': 60 data = dst.read() 61 np.copyto(dest, data, 62 where=np.logical_and( 63 dest==nodataval, data.mask==False)) 64 65 dst.write(dest) 66 dst.close() 67 68 sys.exit(0) 69 except Exception: 70 logger.exception("Failed. Exception caught") 71 sys.exit(1) 72 [end of rasterio/rio/merge.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py --- a/rasterio/rio/merge.py +++ b/rasterio/rio/merge.py @@ -36,31 +36,39 @@ with rasterio.open(files[0]) as first: kwargs = first.meta kwargs['transform'] = kwargs.pop('affine') - dest = np.empty((first.count,) + first.shape, + dest = np.zeros((first.count,) + first.shape, dtype=first.dtypes[0]) + nodataval = next(dest.flat) if os.path.exists(output): dst = rasterio.open(output, 'r+') - nodataval = dst.nodatavals[0] + nodataval = dst.nodatavals[0] or nodataval else: kwargs['driver'] == driver dst = rasterio.open(output, 'w', **kwargs) - nodataval = first.nodatavals[0] + nodataval = first.nodatavals[0] or nodataval - dest.fill(nodataval) + if nodataval: + dest.fill(nodataval) for fname in reversed(files): with rasterio.open(fname) as src: data = src.read() - np.copyto(dest, data, - where=np.logical_and( - dest==nodataval, data.mask==False)) + try: + where = np.logical_and( + dest==nodataval, data.mask==False) + except AttributeError: + where = dest==nodataval + np.copyto(dest, data, where=where) if dst.mode == 'r+': data = dst.read() - np.copyto(dest, data, - where=np.logical_and( - dest==nodataval, data.mask==False)) + try: + where = np.logical_and( + dest==nodataval, data.mask==False) + except AttributeError: + where = dest==nodataval + np.copyto(dest, data, where=where) dst.write(dest) dst.close()
{"golden_diff": "diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py\n--- a/rasterio/rio/merge.py\n+++ b/rasterio/rio/merge.py\n@@ -36,31 +36,39 @@\n with rasterio.open(files[0]) as first:\n kwargs = first.meta\n kwargs['transform'] = kwargs.pop('affine')\n- dest = np.empty((first.count,) + first.shape, \n+ dest = np.zeros((first.count,) + first.shape, \n dtype=first.dtypes[0])\n \n+ nodataval = next(dest.flat)\n if os.path.exists(output):\n dst = rasterio.open(output, 'r+')\n- nodataval = dst.nodatavals[0]\n+ nodataval = dst.nodatavals[0] or nodataval\n else:\n kwargs['driver'] == driver\n dst = rasterio.open(output, 'w', **kwargs)\n- nodataval = first.nodatavals[0]\n+ nodataval = first.nodatavals[0] or nodataval\n \n- dest.fill(nodataval)\n+ if nodataval:\n+ dest.fill(nodataval)\n \n for fname in reversed(files):\n with rasterio.open(fname) as src:\n data = src.read()\n- np.copyto(dest, data,\n- where=np.logical_and(\n- dest==nodataval, data.mask==False))\n+ try:\n+ where = np.logical_and(\n+ dest==nodataval, data.mask==False)\n+ except AttributeError:\n+ where = dest==nodataval\n+ np.copyto(dest, data, where=where)\n \n if dst.mode == 'r+':\n data = dst.read()\n- np.copyto(dest, data,\n- where=np.logical_and(\n- dest==nodataval, data.mask==False))\n+ try:\n+ where = np.logical_and(\n+ dest==nodataval, data.mask==False)\n+ except AttributeError:\n+ where = dest==nodataval\n+ np.copyto(dest, data, where=where)\n \n dst.write(dest)\n dst.close()\n", "issue": "rio merge assumes nodata\nRunning `rio merge` over a set of images without a nodata value results in an error:\n\n```\n(rio-test)$ rio merge warped.tif merged.tif\nERROR:rio:Failed. Exception caught\nTraceback (most recent call last):\n File \"/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py\", line 50, in merge\n dest.fill(nodataval)\nTypeError: long() argument must be a string or a number, not 'NoneType'\n```\n\nChecking for nodata in `rasterio/rio/merge.py` still results in a lower level error (https://github.com/mapbox/rasterio/commit/6b251f8261fd1b3e785dc73462212741b6bd62b7):\n\n```\n(rio-test)$ rio merge warped.tif merged.tif\nERROR:rio:Failed. Exception caught\nTraceback (most recent call last):\n File \"/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py\", line 55, in merge\n data = src.read()\n File \"rasterio/_io.pyx\", line 654, in rasterio._io.RasterReader.read (rasterio/_io.c:8075)\n arr.fill(ndv)\nTypeError: long() argument must be a string or a number, not 'NoneType'\n```\n\nPatching this case throws the error down further until a regular numpy array is confused for a masked array (https://github.com/mapbox/rasterio/commit/c271c4ee23531db7c24208c85f56d04d79acc851):\n\n```\n(rio-test)$ rio merge warped.tif merged.tif\n/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py:58: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.\n dest==nodataval, data.mask==False))\nERROR:rio:Failed. Exception caught\nTraceback (most recent call last):\n File \"/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py\", line 58, in merge\n dest==nodataval, data.mask==False))\nAttributeError: 'numpy.ndarray' object has no attribute 'mask'\n```\n\n/cc @sgillies \n\n", "before_files": [{"content": "# Merge command.\n\nimport logging\nimport os.path\nimport sys\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nimport rasterio\n\nfrom rasterio.rio.cli import cli\n\n\[email protected](short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\n@format_opt\[email protected]_context\ndef merge(ctx, files, driver):\n \"\"\"Copy valid pixels from input files to an output file.\n\n All files must have the same shape, number of bands, and data type.\n\n Input files are merged in their listed order using a reverse\n painter's algorithm.\n \"\"\"\n import numpy as np\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n logger = logging.getLogger('rio')\n try:\n with rasterio.drivers(CPL_DEBUG=verbosity>2):\n output = files[-1]\n files = files[:-1]\n\n with rasterio.open(files[0]) as first:\n kwargs = first.meta\n kwargs['transform'] = kwargs.pop('affine')\n dest = np.empty((first.count,) + first.shape, \n dtype=first.dtypes[0])\n\n if os.path.exists(output):\n dst = rasterio.open(output, 'r+')\n nodataval = dst.nodatavals[0]\n else:\n kwargs['driver'] == driver\n dst = rasterio.open(output, 'w', **kwargs)\n nodataval = first.nodatavals[0]\n\n dest.fill(nodataval)\n\n for fname in reversed(files):\n with rasterio.open(fname) as src:\n data = src.read()\n np.copyto(dest, data,\n where=np.logical_and(\n dest==nodataval, data.mask==False))\n\n if dst.mode == 'r+':\n data = dst.read()\n np.copyto(dest, data,\n where=np.logical_and(\n dest==nodataval, data.mask==False))\n\n dst.write(dest)\n dst.close()\n\n sys.exit(0)\n except Exception:\n logger.exception(\"Failed. Exception caught\")\n sys.exit(1)\n", "path": "rasterio/rio/merge.py"}]}
1,670
492
End of preview. Expand in Data Studio
README.md exists but content is empty.
Downloads last month
0