Dataset schema (field, type, observed value range):

  problem_id          string, lengths 18 to 22
  source              string, 1 distinct value
  task_type           string, 1 distinct value
  in_source_id        string, lengths 13 to 58
  prompt              string, lengths 1.71k to 9.01k
  golden_diff         string, lengths 151 to 4.94k
  verification_info   string, lengths 465 to 11.3k
  num_tokens_prompt   int64, 557 to 2.05k
  num_tokens_diff     int64, 48 to 1.02k
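Each record below flattens these fields in schema order: the four short identifier fields, then the long prompt, golden_diff, and verification_info strings, then the two token counts. As a quick orientation, here is a minimal sketch of loading and inspecting one record with the Hugging Face datasets library. It assumes the dataset is published on the Hub under the repository id that appears in every record's source field ("rasdani/github-patches") and that it exposes a "train" split; both are assumptions to verify against the actual hosting.

import json

from datasets import load_dataset

# Assumed Hub id (taken from the `source` field) and split name.
ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["in_source_id"])
print(row["prompt"][:300])         # issue statement plus partial code base
print(row["golden_diff"][:300])    # reference patch in git diff format

# verification_info is a JSON-encoded string (see the schema above).
info = json.loads(row["verification_info"])
print(sorted(info))                # expected keys: before_files, golden_diff, issue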

problem_id: gh_patches_debug_50089
source: rasdani/github-patches
task_type: git_diff
in_source_id: python-telegram-bot__python-telegram-bot-1228
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Can't change filename when send document after upgrading to v11.1.0 ### Steps to reproduce 1. Generate a pickle file "test" (I didn't test other common files yet) 2. Send this file to user `bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'), filename="test")` or `bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'))` ### Expected behaviour User will receive a file named **test** ### Actual behaviour User received a file named **application.octet-stream** ### Configuration **Operating System:** Debian (Server, where I first found this issue) Ubuntu(Local, **I test on v10.1.0, everything is fine**, so I upgrade to v11.1.0, then I have the same issue as Debian Server) **Version of Python, python-telegram-bot & dependencies:** ``$ python -m telegram`` *My Local Ubuntu After Upgrade:* python-telegram-bot 11.1.0 certifi 2018.08.24 future 0.16.0 Python 3.6.6 (default, Sep 12 2018, 18:26:19) [GCC 8.0.1 20180414 (experimental) [trunk revision 259383]] The pictures shows results of python-telegram-bot v10.1.0 (the first one) and v11.1.0 (the second one) : ![screenshot from 2018-09-29 14-16-06](https://user-images.githubusercontent.com/16657782/46246951-2382a080-c3f4-11e8-9f01-b4dd90960ccd.png) </issue> <code> [start of telegram/files/inputfile.py] 1 #!/usr/bin/env python 2 # pylint: disable=W0622,E0611 3 # 4 # A library that provides a Python interface to the Telegram Bot API 5 # Copyright (C) 2015-2018 6 # Leandro Toledo de Souza <[email protected]> 7 # 8 # This program is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU Lesser Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # This program is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU Lesser Public License for more details. 17 # 18 # You should have received a copy of the GNU Lesser Public License 19 # along with this program. If not, see [http://www.gnu.org/licenses/]. 20 """This module contains an object that represents a Telegram InputFile.""" 21 22 import imghdr 23 import mimetypes 24 import os 25 from uuid import uuid4 26 27 from telegram import TelegramError 28 29 DEFAULT_MIME_TYPE = 'application/octet-stream' 30 31 32 class InputFile(object): 33 """This object represents a Telegram InputFile. 34 35 Attributes: 36 input_file_content (:obj:`bytes`): The binaray content of the file to send. 37 filename (:obj:`str`): Optional, Filename for the file to be sent. 38 attach (:obj:`str`): Optional, attach id for sending multiple files. 39 40 Args: 41 obj (:obj:`File handler`): An open file descriptor. 42 filename (:obj:`str`, optional): Filename for this InputFile. 43 attach (:obj:`bool`, optional): Whether this should be send as one file or is part of a 44 collection of files. 
45 46 Raises: 47 TelegramError 48 49 """ 50 51 def __init__(self, obj, filename=None, attach=None): 52 self.filename = None 53 self.input_file_content = obj.read() 54 self.attach = 'attached' + uuid4().hex if attach else None 55 56 if filename: 57 self.filename = filename 58 elif (hasattr(obj, 'name') and 59 not isinstance(obj.name, int) and # py3 60 obj.name != '<fdopen>'): # py2 61 # on py2.7, pylint fails to understand this properly 62 # pylint: disable=E1101 63 self.filename = os.path.basename(obj.name) 64 65 try: 66 self.mimetype = self.is_image(self.input_file_content) 67 except TelegramError: 68 if self.filename: 69 self.mimetype = mimetypes.guess_type( 70 self.filename)[0] or DEFAULT_MIME_TYPE 71 else: 72 self.mimetype = DEFAULT_MIME_TYPE 73 if not self.filename or '.' not in self.filename: 74 self.filename = self.mimetype.replace('/', '.') 75 76 @property 77 def field_tuple(self): 78 return self.filename, self.input_file_content, self.mimetype 79 80 @staticmethod 81 def is_image(stream): 82 """Check if the content file is an image by analyzing its headers. 83 84 Args: 85 stream (:obj:`str`): A str representing the content of a file. 86 87 Returns: 88 :obj:`str`: The str mime-type of an image. 89 90 """ 91 image = imghdr.what(None, stream) 92 if image: 93 return 'image/%s' % image 94 95 raise TelegramError('Could not parse file content') 96 97 @staticmethod 98 def is_file(obj): 99 return hasattr(obj, 'read') 100 101 def to_dict(self): 102 if self.attach: 103 return 'attach://' + self.attach 104 [end of telegram/files/inputfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/telegram/files/inputfile.py b/telegram/files/inputfile.py --- a/telegram/files/inputfile.py +++ b/telegram/files/inputfile.py @@ -70,7 +70,7 @@ self.filename)[0] or DEFAULT_MIME_TYPE else: self.mimetype = DEFAULT_MIME_TYPE - if not self.filename or '.' not in self.filename: + if not self.filename: self.filename = self.mimetype.replace('/', '.') @property
{"golden_diff": "diff --git a/telegram/files/inputfile.py b/telegram/files/inputfile.py\n--- a/telegram/files/inputfile.py\n+++ b/telegram/files/inputfile.py\n@@ -70,7 +70,7 @@\n self.filename)[0] or DEFAULT_MIME_TYPE\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n- if not self.filename or '.' not in self.filename:\n+ if not self.filename:\n self.filename = self.mimetype.replace('/', '.')\n \n @property\n", "issue": "Can't change filename when send document after upgrading to v11.1.0\n### Steps to reproduce\r\n1. Generate a pickle file \"test\" (I didn't test other common files yet)\r\n\r\n2. Send this file to user\r\n\r\n`bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'), filename=\"test\")`\r\n\r\nor\r\n\r\n`bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'))`\r\n\r\n### Expected behaviour\r\nUser will receive a file named **test**\r\n\r\n### Actual behaviour\r\nUser received a file named **application.octet-stream**\r\n\r\n### Configuration\r\n**Operating System:** \r\n\r\nDebian (Server, where I first found this issue)\r\n\r\nUbuntu(Local, **I test on v10.1.0, everything is fine**, so I upgrade to v11.1.0, then I have the same issue as Debian Server)\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n``$ python -m telegram``\r\n\r\n*My Local Ubuntu After Upgrade:*\r\npython-telegram-bot 11.1.0\r\ncertifi 2018.08.24\r\nfuture 0.16.0\r\nPython 3.6.6 (default, Sep 12 2018, 18:26:19) [GCC 8.0.1 20180414 (experimental) [trunk revision 259383]]\r\n\r\nThe pictures shows results of python-telegram-bot v10.1.0 (the first one) and v11.1.0 (the second one) :\r\n\r\n![screenshot from 2018-09-29 14-16-06](https://user-images.githubusercontent.com/16657782/46246951-2382a080-c3f4-11e8-9f01-b4dd90960ccd.png)\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=W0622,E0611\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n\nimport imghdr\nimport mimetypes\nimport os\nfrom uuid import uuid4\n\nfrom telegram import TelegramError\n\nDEFAULT_MIME_TYPE = 'application/octet-stream'\n\n\nclass InputFile(object):\n \"\"\"This object represents a Telegram InputFile.\n\n Attributes:\n input_file_content (:obj:`bytes`): The binaray content of the file to send.\n filename (:obj:`str`): Optional, Filename for the file to be sent.\n attach (:obj:`str`): Optional, attach id for sending multiple files.\n\n Args:\n obj (:obj:`File handler`): An open file descriptor.\n filename (:obj:`str`, optional): Filename for this InputFile.\n attach (:obj:`bool`, optional): Whether this should be send as one file or is part of a\n collection of files.\n\n Raises:\n TelegramError\n\n \"\"\"\n\n def __init__(self, obj, filename=None, attach=None):\n self.filename = None\n self.input_file_content = obj.read()\n self.attach = 'attached' + uuid4().hex if attach else None\n\n if filename:\n self.filename = filename\n elif (hasattr(obj, 'name') and\n not isinstance(obj.name, int) and # py3\n obj.name != '<fdopen>'): # py2\n # on py2.7, pylint fails to understand this properly\n # pylint: disable=E1101\n self.filename = os.path.basename(obj.name)\n\n try:\n self.mimetype = self.is_image(self.input_file_content)\n except TelegramError:\n if self.filename:\n self.mimetype = mimetypes.guess_type(\n self.filename)[0] or DEFAULT_MIME_TYPE\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n if not self.filename or '.' not in self.filename:\n self.filename = self.mimetype.replace('/', '.')\n\n @property\n def field_tuple(self):\n return self.filename, self.input_file_content, self.mimetype\n\n @staticmethod\n def is_image(stream):\n \"\"\"Check if the content file is an image by analyzing its headers.\n\n Args:\n stream (:obj:`str`): A str representing the content of a file.\n\n Returns:\n :obj:`str`: The str mime-type of an image.\n\n \"\"\"\n image = imghdr.what(None, stream)\n if image:\n return 'image/%s' % image\n\n raise TelegramError('Could not parse file content')\n\n @staticmethod\n def is_file(obj):\n return hasattr(obj, 'read')\n\n def to_dict(self):\n if self.attach:\n return 'attach://' + self.attach\n", "path": "telegram/files/inputfile.py"}]}

num_tokens_prompt: 1,979
num_tokens_diff: 110
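Every prompt in these records ends the same way: produce one patch file that can be applied with git apply. A sketch of the corresponding check a grading harness might run is below; repo_dir is hypothetical, since a record carries only the files touched by the patch, not a full checkout of the repository at the matching commit.

import subprocess
import tempfile


def patch_applies(repo_dir, patch_text):
    """Return True if `git apply --check` accepts patch_text inside repo_dir."""
    # delete=False keeps the path valid while the git subprocess reads it.
    with tempfile.NamedTemporaryFile("w", suffix=".patch", delete=False) as f:
        f.write(patch_text)
        patch_path = f.name
    result = subprocess.run(
        ["git", "apply", "--check", patch_path],
        cwd=repo_dir,  # assumed checkout of the target repo at the right commit
        capture_output=True,
        text=True,
    )
    return result.returncode == 0

For the python-telegram-bot record above, patch_text would be either the golden_diff string or a model-generated candidate, and repo_dir a checkout of python-telegram-bot before the fix landed.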

problem_id: gh_patches_debug_26
source: rasdani/github-patches
task_type: git_diff
in_source_id: nautobot__nautobot-3317
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove legacy `manage.py` <!-- NOTE: This template is for use by maintainers only. Please do not submit an issue using this template unless you have been specifically asked to do so. --> ### Proposed Changes Simply remove `manage.py` from the project root. <!-- Provide justification for the proposed change(s). --> ### Justification This was left there initially in v1.0.0 as a fallback, however it is absolutely no longer needed. </issue> <code> [start of manage.py] 1 #!/usr/bin/env python3 2 3 import sys 4 5 from nautobot.core.cli import main 6 7 8 if __name__ == "__main__": 9 main() 10 [end of manage.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/manage.py b/manage.py deleted file mode 100755 --- a/manage.py +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python3 - -import sys - -from nautobot.core.cli import main - - -if __name__ == "__main__": - main()
{"golden_diff": "diff --git a/manage.py b/manage.py\ndeleted file mode 100755\n--- a/manage.py\n+++ /dev/null\n@@ -1,9 +0,0 @@\n-#!/usr/bin/env python3\n-\n-import sys\n-\n-from nautobot.core.cli import main\n-\n-\n-if __name__ == \"__main__\":\n- main()\n", "issue": "Remove legacy `manage.py` \n<!--\r\n NOTE: This template is for use by maintainers only. Please do not submit\r\n an issue using this template unless you have been specifically asked to\r\n do so.\r\n-->\r\n### Proposed Changes\r\n\r\nSimply remove `manage.py` from the project root.\r\n\r\n<!-- Provide justification for the proposed change(s). -->\r\n### Justification\r\n\r\nThis was left there initially in v1.0.0 as a fallback, however it is absolutely no longer needed.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport sys\n\nfrom nautobot.core.cli import main\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "manage.py"}]}

num_tokens_prompt: 675
num_tokens_diff: 78

problem_id: gh_patches_debug_439
source: rasdani/github-patches
task_type: git_diff
in_source_id: localstack__localstack-1075
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update code climate and badge https://codeclimate.com/github/atlassian/localstack is the old repo, is there a new code climate check for the new repo? The README is pointing to this old code climate project. </issue> <code> [start of localstack/constants.py] 1 import os 2 import localstack_client.config 3 4 # LocalStack version 5 VERSION = '0.8.9' 6 7 # default AWS region 8 if 'DEFAULT_REGION' not in os.environ: 9 os.environ['DEFAULT_REGION'] = 'us-east-1' 10 DEFAULT_REGION = os.environ['DEFAULT_REGION'] 11 12 # constant to represent the "local" region, i.e., local machine 13 REGION_LOCAL = 'local' 14 15 # dev environment 16 ENV_DEV = 'dev' 17 18 # backend service ports, for services that are behind a proxy (counting down from 4566) 19 DEFAULT_PORT_APIGATEWAY_BACKEND = 4566 20 DEFAULT_PORT_KINESIS_BACKEND = 4565 21 DEFAULT_PORT_DYNAMODB_BACKEND = 4564 22 DEFAULT_PORT_S3_BACKEND = 4563 23 DEFAULT_PORT_SNS_BACKEND = 4562 24 DEFAULT_PORT_SQS_BACKEND = 4561 25 DEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560 26 DEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559 27 28 DEFAULT_PORT_WEB_UI = 8080 29 30 LOCALHOST = 'localhost' 31 32 # version of the Maven dependency with Java utility code 33 LOCALSTACK_MAVEN_VERSION = '0.1.15' 34 35 # map of default service APIs and ports to be spun up (fetch map from localstack_client) 36 DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports() 37 38 # host to bind to when starting the services 39 BIND_HOST = '0.0.0.0' 40 41 # AWS user account ID used for tests 42 TEST_AWS_ACCOUNT_ID = '000000000000' 43 os.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID 44 45 # root code folder 46 LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) 47 48 # virtualenv folder 49 LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv') 50 if not os.path.isdir(LOCALSTACK_VENV_FOLDER): 51 # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/ 52 LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..')) 53 54 # API Gateway path to indicate a user request sent to the gateway 55 PATH_USER_REQUEST = '_user_request_' 56 57 # name of LocalStack Docker image 58 DOCKER_IMAGE_NAME = 'localstack/localstack' 59 60 # environment variable name to tag local test runs 61 ENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN' 62 63 # content types 64 APPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0' 65 APPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1' 66 APPLICATION_JSON = 'application/json' 67 68 # Lambda defaults 69 LAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID 70 71 # installation constants 72 ELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip' 73 DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip' 74 ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar' 75 STS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar' 76 77 # API endpoint for analytics events 78 API_ENDPOINT = 'https://api.localstack.cloud/v1' 79 [end of localstack/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/localstack/constants.py b/localstack/constants.py --- a/localstack/constants.py +++ b/localstack/constants.py @@ -2,7 +2,7 @@ import localstack_client.config # LocalStack version -VERSION = '0.8.9' +VERSION = '0.8.10' # default AWS region if 'DEFAULT_REGION' not in os.environ:
{"golden_diff": "diff --git a/localstack/constants.py b/localstack/constants.py\n--- a/localstack/constants.py\n+++ b/localstack/constants.py\n@@ -2,7 +2,7 @@\n import localstack_client.config\n \n # LocalStack version\n-VERSION = '0.8.9'\n+VERSION = '0.8.10'\n \n # default AWS region\n if 'DEFAULT_REGION' not in os.environ:\n", "issue": "Update code climate and badge\nhttps://codeclimate.com/github/atlassian/localstack is the old repo, is there a new code climate check for the new repo? The README is pointing to this old code climate project.\n", "before_files": [{"content": "import os\nimport localstack_client.config\n\n# LocalStack version\nVERSION = '0.8.9'\n\n# default AWS region\nif 'DEFAULT_REGION' not in os.environ:\n os.environ['DEFAULT_REGION'] = 'us-east-1'\nDEFAULT_REGION = os.environ['DEFAULT_REGION']\n\n# constant to represent the \"local\" region, i.e., local machine\nREGION_LOCAL = 'local'\n\n# dev environment\nENV_DEV = 'dev'\n\n# backend service ports, for services that are behind a proxy (counting down from 4566)\nDEFAULT_PORT_APIGATEWAY_BACKEND = 4566\nDEFAULT_PORT_KINESIS_BACKEND = 4565\nDEFAULT_PORT_DYNAMODB_BACKEND = 4564\nDEFAULT_PORT_S3_BACKEND = 4563\nDEFAULT_PORT_SNS_BACKEND = 4562\nDEFAULT_PORT_SQS_BACKEND = 4561\nDEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560\nDEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559\n\nDEFAULT_PORT_WEB_UI = 8080\n\nLOCALHOST = 'localhost'\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = '0.1.15'\n\n# map of default service APIs and ports to be spun up (fetch map from localstack_client)\nDEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n\n# host to bind to when starting the services\nBIND_HOST = '0.0.0.0'\n\n# AWS user account ID used for tests\nTEST_AWS_ACCOUNT_ID = '000000000000'\nos.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID\n\n# root code folder\nLOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\n# virtualenv folder\nLOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')\nif not os.path.isdir(LOCALSTACK_VENV_FOLDER):\n # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/\n LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))\n\n# API Gateway path to indicate a user request sent to the gateway\nPATH_USER_REQUEST = '_user_request_'\n\n# name of LocalStack Docker image\nDOCKER_IMAGE_NAME = 'localstack/localstack'\n\n# environment variable name to tag local test runs\nENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'\n\n# content types\nAPPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'\nAPPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'\nAPPLICATION_JSON = 'application/json'\n\n# Lambda defaults\nLAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID\n\n# installation constants\nELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'\nDYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\nELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'\nSTS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\n\n# API endpoint for analytics events\nAPI_ENDPOINT = 'https://api.localstack.cloud/v1'\n", "path": "localstack/constants.py"}]}

num_tokens_prompt: 1,527
num_tokens_diff: 87
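The verification_info payloads make these records self-contained: alongside the golden_diff they carry a before_files list of {content, path} pairs for every file the patch touches, so the reference diff can be replayed without cloning anything. A sketch, assuming every record follows the same JSON shape as the examples shown here (file modes and line endings are glossed over, which real diffs can be picky about):

import json
import pathlib
import subprocess
import tempfile


def replay_golden_diff(verification_info):
    """Materialize before_files into a scratch repo and apply the reference patch."""
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as repo:
        # Recreate the pre-patch files exactly as recorded.
        for entry in info["before_files"]:
            target = pathlib.Path(repo) / entry["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(entry["content"])
        subprocess.run(["git", "init", "-q"], cwd=repo, check=True)
        patch_file = pathlib.Path(repo) / "golden.patch"
        patch_file.write_text(info["golden_diff"])
        subprocess.run(["git", "apply", "golden.patch"], cwd=repo, check=True)

Applied to the localstack record above, this recreates localstack/constants.py and bumps VERSION from '0.8.9' to '0.8.10', exactly as the golden diff specifies.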

problem_id: gh_patches_debug_13138
source: rasdani/github-patches
task_type: git_diff
in_source_id: ivy-llc__ivy-15454
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cosh </issue> <code> [start of ivy/functional/frontends/paddle/tensor/math.py] 1 # global 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes 4 from ivy.functional.frontends.paddle.func_wrapper import ( 5 to_ivy_arrays_and_back, 6 ) 7 8 9 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 10 @to_ivy_arrays_and_back 11 def sin(x, name=None): 12 return ivy.sin(x) 13 14 15 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 16 @to_ivy_arrays_and_back 17 def cos(x, name=None): 18 return ivy.cos(x) 19 20 21 22 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 23 @to_ivy_arrays_and_back 24 def acos(x, name=None): 25 return ivy.acos(x) 26 27 28 29 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 30 @to_ivy_arrays_and_back 31 def tanh(x, name=None): 32 return ivy.tanh(x) 33 34 [end of ivy/functional/frontends/paddle/tensor/math.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py --- a/ivy/functional/frontends/paddle/tensor/math.py +++ b/ivy/functional/frontends/paddle/tensor/math.py @@ -18,16 +18,19 @@ return ivy.cos(x) - @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") @to_ivy_arrays_and_back def acos(x, name=None): return ivy.acos(x) +@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") +@to_ivy_arrays_and_back +def cosh(x, name=None): + return ivy.cosh(x) + @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") @to_ivy_arrays_and_back def tanh(x, name=None): return ivy.tanh(x) -
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -18,16 +18,19 @@\n return ivy.cos(x)\n \n \n-\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n @to_ivy_arrays_and_back\n def acos(x, name=None):\n return ivy.acos(x)\n \n \n+@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def cosh(x, name=None):\n+ return ivy.cosh(x)\n+\n \n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n @to_ivy_arrays_and_back\n def tanh(x, name=None):\n return ivy.tanh(x)\n-\n", "issue": "cosh\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]}

num_tokens_prompt: 878
num_tokens_diff: 256

problem_id: gh_patches_debug_569
source: rasdani/github-patches
task_type: git_diff
in_source_id: pex-tool__pex-945
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.8 On the docket: + [x] Cache pip.pex. #937 + [x] Ensure the interpreter path is a file #938 + [x] Support an unzip toggle for PEXes. #939 + [x] Better support unzip mode PEXes. #941 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '2.1.7' 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '2.1.7' +__version__ = '2.1.8'
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.7'\n+__version__ = '2.1.8'\n", "issue": "Release 2.1.8\nOn the docket:\r\n+ [x] Cache pip.pex. #937\r\n+ [x] Ensure the interpreter path is a file #938\r\n+ [x] Support an unzip toggle for PEXes. #939\r\n+ [x] Better support unzip mode PEXes. #941\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.7'\n", "path": "pex/version.py"}]}

num_tokens_prompt: 660
num_tokens_diff: 95

problem_id: gh_patches_debug_8594
source: rasdani/github-patches
task_type: git_diff
in_source_id: mozilla__bugbug-1094
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support multiclass classifiers in bug_classifier script The bug_classifier script at scripts/bug_classifier.py currently assumes the model is a binary model. We need to make it work for multiclass models too (e.g. defectenhancementtask). In particular, https://github.com/mozilla/bugbug/blob/65bf1b4604ca55a67490d27adc99c6441bad38c8/scripts/bug_classifier.py#L75-L78 needs to be changed. To test your changes, simply run `python3 -m scripts.bug_classifier defectenhancementtask` </issue> <code> [start of scripts/bug_classifier.py] 1 # -*- coding: utf-8 -*- 2 3 import argparse 4 import os 5 from logging import INFO, basicConfig, getLogger 6 7 import numpy as np 8 import requests 9 10 from bugbug import bugzilla 11 from bugbug.models import get_model_class 12 from bugbug.utils import download_check_etag, zstd_decompress 13 14 MODELS_WITH_TYPE = ("component",) 15 16 basicConfig(level=INFO) 17 logger = getLogger(__name__) 18 19 20 def classify_bugs(model_name, classifier, bug_id): 21 if classifier != "default": 22 assert ( 23 model_name in MODELS_WITH_TYPE 24 ), f"{classifier} is not a valid classifier type for {model_name}" 25 26 model_file_name = f"{model_name}{classifier}model" 27 model_name = f"{model_name}_{classifier}" 28 else: 29 model_file_name = f"{model_name}model" 30 31 if not os.path.exists(model_file_name): 32 logger.info(f"{model_file_name} does not exist. Downloading the model....") 33 try: 34 download_check_etag( 35 f"https://community-tc.services.mozilla.com/api/index/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst", 36 f"{model_file_name}.zst", 37 ) 38 except requests.HTTPError: 39 logger.error( 40 f"A pre-trained model is not available, you will need to train it yourself using the trainer script" 41 ) 42 raise SystemExit(1) 43 44 zstd_decompress(model_file_name) 45 assert os.path.exists(model_file_name), "Decompressed file doesn't exist" 46 47 model_class = get_model_class(model_name) 48 model = model_class.load(model_file_name) 49 50 if bug_id: 51 bugs = bugzilla.get(bug_id).values() 52 assert bugs, f"A bug with a bug id of {bug_id} was not found" 53 else: 54 bugs = bugzilla.get_bugs() 55 56 for bug in bugs: 57 print( 58 f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]} - {bug["summary"]} ' 59 ) 60 61 if model.calculate_importance: 62 probas, importance = model.classify( 63 bug, probabilities=True, importances=True 64 ) 65 66 model.print_feature_importances( 67 importance["importances"], class_probabilities=probas 68 ) 69 else: 70 probas = model.classify(bug, probabilities=True, importances=False) 71 72 if np.argmax(probas) == 1: 73 print(f"Positive! {probas}") 74 else: 75 print(f"Negative! {probas}") 76 input() 77 78 79 def main(): 80 description = "Perform evaluation on bugs using the specified model" 81 parser = argparse.ArgumentParser(description=description) 82 83 parser.add_argument("model", help="Which model to use for evaluation") 84 parser.add_argument( 85 "--classifier", 86 help="Type of the classifier. 
Only used for component classification.", 87 choices=["default", "nn"], 88 default="default", 89 ) 90 parser.add_argument("--bug-id", help="Classify the given bug id") 91 92 args = parser.parse_args() 93 94 classify_bugs(args.model, args.classifier, args.bug_id) 95 96 97 if __name__ == "__main__": 98 main() 99 [end of scripts/bug_classifier.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py --- a/scripts/bug_classifier.py +++ b/scripts/bug_classifier.py @@ -69,10 +69,13 @@ else: probas = model.classify(bug, probabilities=True, importances=False) - if np.argmax(probas) == 1: - print(f"Positive! {probas}") + probability = probas[0] + pred_index = np.argmax(probability) + if len(probability) > 2: + pred_class = model.le.inverse_transform([pred_index])[0] else: - print(f"Negative! {probas}") + pred_class = "Positive" if pred_index == 1 else "Negative" + print(f"{pred_class} {probability}") input()
{"golden_diff": "diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py\n--- a/scripts/bug_classifier.py\n+++ b/scripts/bug_classifier.py\n@@ -69,10 +69,13 @@\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n \n- if np.argmax(probas) == 1:\n- print(f\"Positive! {probas}\")\n+ probability = probas[0]\n+ pred_index = np.argmax(probability)\n+ if len(probability) > 2:\n+ pred_class = model.le.inverse_transform([pred_index])[0]\n else:\n- print(f\"Negative! {probas}\")\n+ pred_class = \"Positive\" if pred_index == 1 else \"Negative\"\n+ print(f\"{pred_class} {probability}\")\n input()\n", "issue": "Support multiclass classifiers in bug_classifier script\nThe bug_classifier script at scripts/bug_classifier.py currently assumes the model is a binary model. We need to make it work for multiclass models too (e.g. defectenhancementtask).\r\n\r\nIn particular, https://github.com/mozilla/bugbug/blob/65bf1b4604ca55a67490d27adc99c6441bad38c8/scripts/bug_classifier.py#L75-L78 needs to be changed.\r\n\r\nTo test your changes, simply run `python3 -m scripts.bug_classifier defectenhancementtask`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nfrom logging import INFO, basicConfig, getLogger\n\nimport numpy as np\nimport requests\n\nfrom bugbug import bugzilla\nfrom bugbug.models import get_model_class\nfrom bugbug.utils import download_check_etag, zstd_decompress\n\nMODELS_WITH_TYPE = (\"component\",)\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\n\ndef classify_bugs(model_name, classifier, bug_id):\n if classifier != \"default\":\n assert (\n model_name in MODELS_WITH_TYPE\n ), f\"{classifier} is not a valid classifier type for {model_name}\"\n\n model_file_name = f\"{model_name}{classifier}model\"\n model_name = f\"{model_name}_{classifier}\"\n else:\n model_file_name = f\"{model_name}model\"\n\n if not os.path.exists(model_file_name):\n logger.info(f\"{model_file_name} does not exist. Downloading the model....\")\n try:\n download_check_etag(\n f\"https://community-tc.services.mozilla.com/api/index/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst\",\n f\"{model_file_name}.zst\",\n )\n except requests.HTTPError:\n logger.error(\n f\"A pre-trained model is not available, you will need to train it yourself using the trainer script\"\n )\n raise SystemExit(1)\n\n zstd_decompress(model_file_name)\n assert os.path.exists(model_file_name), \"Decompressed file doesn't exist\"\n\n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n\n if bug_id:\n bugs = bugzilla.get(bug_id).values()\n assert bugs, f\"A bug with a bug id of {bug_id} was not found\"\n else:\n bugs = bugzilla.get_bugs()\n\n for bug in bugs:\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]} - {bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importance = model.classify(\n bug, probabilities=True, importances=True\n )\n\n model.print_feature_importances(\n importance[\"importances\"], class_probabilities=probas\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! 
{probas}\")\n input()\n\n\ndef main():\n description = \"Perform evaluation on bugs using the specified model\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to use for evaluation\")\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--bug-id\", help=\"Classify the given bug id\")\n\n args = parser.parse_args()\n\n classify_bugs(args.model, args.classifier, args.bug_id)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/bug_classifier.py"}]}

num_tokens_prompt: 1,553
num_tokens_diff: 182

problem_id: gh_patches_debug_19749
source: rasdani/github-patches
task_type: git_diff
in_source_id: openstates__openstates-scrapers-2162
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> OH: `get_session_list` cannot "see" current session Ohio's `get_session_list` appears to have the current session _manually appended_. Not to mention, it hasn't been updated in a year. This should be fixed, to automatically "guess" the current session instead. https://github.com/openstates/openstates/blob/master/openstates/oh/__init__.py#L91-L92 </issue> <code> [start of openstates/oh/__init__.py] 1 from pupa.scrape import Jurisdiction, Organization 2 from openstates.utils import url_xpath 3 4 from .people import OHLegislatorScraper 5 # from .events import OHEventScraper 6 from .bills import OHBillScraper 7 8 9 class Ohio(Jurisdiction): 10 division_id = "ocd-division/country:us/state:oh" 11 classification = "government" 12 name = "Ohio" 13 url = "http://www.legislature.state.oh.us/" 14 scrapers = { 15 'people': OHLegislatorScraper, 16 # 'events': OHEventScraper, 17 'bills': OHBillScraper, 18 } 19 legislative_sessions = [ 20 { 21 "_scraped_name": "128", 22 "identifier": "128", 23 "name": "128th Legislature (2009-2010)" 24 }, 25 { 26 "_scraped_name": "129", 27 "identifier": "129", 28 "name": "129th Legislature (2011-2012)", 29 "start_date": "2011-01-03" 30 }, 31 { 32 "_scraped_name": "130", 33 "identifier": "130", 34 "name": "130th Legislature (2013-2014)" 35 }, 36 { 37 "_scraped_name": "131", 38 "identifier": "131", 39 "name": "131st Legislature (2015-2016)" 40 }, 41 { 42 "_scraped_name": "132", 43 "identifier": "132", 44 "name": "132st Legislature (2017-2018)", 45 "start_date": "2017-01-02", 46 "end_date": "2017-12-31" 47 } 48 ] 49 ignored_scraped_sessions = [ 50 "127", 51 "126", 52 "125", 53 "124", 54 "123", 55 "122" 56 ] 57 58 def get_organizations(self): 59 legislature_name = "Ohio General Assembly" 60 lower_chamber_name = "House" 61 lower_seats = 99 62 lower_title = "Representative" 63 upper_chamber_name = "Senate" 64 upper_seats = 33 65 upper_title = "Senator" 66 67 legislature = Organization(name=legislature_name, 68 classification="legislature") 69 upper = Organization(upper_chamber_name, classification='upper', 70 parent_id=legislature._id) 71 lower = Organization(lower_chamber_name, classification='lower', 72 parent_id=legislature._id) 73 74 for n in range(1, upper_seats+1): 75 upper.add_post( 76 label=str(n), role=upper_title, 77 division_id='{}/sldu:{}'.format(self.division_id, n)) 78 for n in range(1, lower_seats+1): 79 lower.add_post( 80 label=str(n), role=lower_title, 81 division_id='{}/sldl:{}'.format(self.division_id, n)) 82 83 yield legislature 84 yield upper 85 yield lower 86 87 def get_session_list(self): 88 sessions = url_xpath('http://archives.legislature.state.oh.us', 89 '//form[@action="bill_search.cfm"]//input[@type="radio"' 90 ' and @name="SESSION"]/@value') 91 # Archive does not include current session 92 sessions.append('131') 93 return sessions 94 [end of openstates/oh/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/openstates/oh/__init__.py b/openstates/oh/__init__.py --- a/openstates/oh/__init__.py +++ b/openstates/oh/__init__.py @@ -46,14 +46,7 @@ "end_date": "2017-12-31" } ] - ignored_scraped_sessions = [ - "127", - "126", - "125", - "124", - "123", - "122" - ] + ignored_scraped_sessions = [] def get_organizations(self): legislature_name = "Ohio General Assembly" @@ -85,9 +78,7 @@ yield lower def get_session_list(self): - sessions = url_xpath('http://archives.legislature.state.oh.us', - '//form[@action="bill_search.cfm"]//input[@type="radio"' - ' and @name="SESSION"]/@value') + sessions = url_xpath('https://www.legislature.ohio.gov/legislation/search-legislation', + '//div[@class="selectedValues"]/ul/span/li/text()') # Archive does not include current session - sessions.append('131') return sessions
{"golden_diff": "diff --git a/openstates/oh/__init__.py b/openstates/oh/__init__.py\n--- a/openstates/oh/__init__.py\n+++ b/openstates/oh/__init__.py\n@@ -46,14 +46,7 @@\n \"end_date\": \"2017-12-31\"\n }\n ]\n- ignored_scraped_sessions = [\n- \"127\",\n- \"126\",\n- \"125\",\n- \"124\",\n- \"123\",\n- \"122\"\n- ]\n+ ignored_scraped_sessions = []\n \n def get_organizations(self):\n legislature_name = \"Ohio General Assembly\"\n@@ -85,9 +78,7 @@\n yield lower\n \n def get_session_list(self):\n- sessions = url_xpath('http://archives.legislature.state.oh.us',\n- '//form[@action=\"bill_search.cfm\"]//input[@type=\"radio\"'\n- ' and @name=\"SESSION\"]/@value')\n+ sessions = url_xpath('https://www.legislature.ohio.gov/legislation/search-legislation',\n+ '//div[@class=\"selectedValues\"]/ul/span/li/text()')\n # Archive does not include current session\n- sessions.append('131')\n return sessions\n", "issue": "OH: `get_session_list` cannot \"see\" current session\nOhio's `get_session_list` appears to have the current session _manually appended_. Not to mention, it hasn't been updated in a year. This should be fixed, to automatically \"guess\" the current session instead.\r\n\r\nhttps://github.com/openstates/openstates/blob/master/openstates/oh/__init__.py#L91-L92\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\nfrom openstates.utils import url_xpath\n\nfrom .people import OHLegislatorScraper\n# from .events import OHEventScraper\nfrom .bills import OHBillScraper\n\n\nclass Ohio(Jurisdiction):\n division_id = \"ocd-division/country:us/state:oh\"\n classification = \"government\"\n name = \"Ohio\"\n url = \"http://www.legislature.state.oh.us/\"\n scrapers = {\n 'people': OHLegislatorScraper,\n # 'events': OHEventScraper,\n 'bills': OHBillScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"128\",\n \"identifier\": \"128\",\n \"name\": \"128th Legislature (2009-2010)\"\n },\n {\n \"_scraped_name\": \"129\",\n \"identifier\": \"129\",\n \"name\": \"129th Legislature (2011-2012)\",\n \"start_date\": \"2011-01-03\"\n },\n {\n \"_scraped_name\": \"130\",\n \"identifier\": \"130\",\n \"name\": \"130th Legislature (2013-2014)\"\n },\n {\n \"_scraped_name\": \"131\",\n \"identifier\": \"131\",\n \"name\": \"131st Legislature (2015-2016)\"\n },\n {\n \"_scraped_name\": \"132\",\n \"identifier\": \"132\",\n \"name\": \"132st Legislature (2017-2018)\",\n \"start_date\": \"2017-01-02\",\n \"end_date\": \"2017-12-31\"\n }\n ]\n ignored_scraped_sessions = [\n \"127\",\n \"126\",\n \"125\",\n \"124\",\n \"123\",\n \"122\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Ohio General Assembly\"\n lower_chamber_name = \"House\"\n lower_seats = 99\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 33\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats+1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats+1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n sessions = 
url_xpath('http://archives.legislature.state.oh.us',\n '//form[@action=\"bill_search.cfm\"]//input[@type=\"radio\"'\n ' and @name=\"SESSION\"]/@value')\n # Archive does not include current session\n sessions.append('131')\n return sessions\n", "path": "openstates/oh/__init__.py"}]}

num_tokens_prompt: 1,601
num_tokens_diff: 295

problem_id: gh_patches_debug_13150
source: rasdani/github-patches
task_type: git_diff
in_source_id: comic__grand-challenge.org-1631
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Show domain name of the email address that was used to verify the account Google Scholar shows on profiles instead of the entire email address only the domain name. Something like "Verified email address at radboudumc.nl". Would be a good feature for grand challenge as well, this would make it possible to check if users used an email address from their institution to verify their account. A similar text to what Google Scholar shows could be displayed when hovering over the check mark icon, for example. </issue> <code> [start of app/grandchallenge/profiles/templatetags/profiles.py] 1 from typing import Union 2 3 from django import template 4 from django.contrib.auth import get_user_model 5 from django.contrib.auth.models import AbstractUser 6 from django.core.exceptions import ObjectDoesNotExist 7 from django.utils.html import format_html 8 from django.utils.safestring import mark_safe 9 10 from grandchallenge.subdomains.utils import reverse 11 12 register = template.Library() 13 14 15 @register.filter 16 def user_profile_link(user: Union[AbstractUser, None]) -> str: 17 verified = "" 18 19 if user: 20 username = user.username 21 profile_url = reverse( 22 "userena_profile_detail", kwargs={"username": user.username} 23 ) 24 mugshot = format_html( 25 ( 26 '<img class="mugshot" loading="lazy" src="{0}" ' 27 'alt="User Mugshot" ' 28 # Match the "fa-lg" class style 29 'style="height: 1.33em; vertical-align: -25%;"/>' 30 ), 31 user.user_profile.get_mugshot_url(), 32 ) 33 34 try: 35 if user.verification.is_verified: 36 verified = mark_safe( 37 '<i class="fas fa-user-check text-success" ' 38 'title="Verified User"></i>' 39 ) 40 except ObjectDoesNotExist: 41 # No verification request 42 pass 43 else: 44 username = "Unknown" 45 profile_url = "#" 46 mugshot = mark_safe('<i class="fas fa-user fa-lg"></i>') 47 48 return format_html( 49 '<span class="text-nowrap"><a href="{0}">{1}</a>&nbsp;<a href="{0}">{2}</a>&nbsp;{3}</span>', 50 profile_url, 51 mugshot, 52 username, 53 verified, 54 ) 55 56 57 @register.filter 58 def user_profile_link_username(username: str) -> str: 59 User = get_user_model() # noqa: N806 60 return user_profile_link(User.objects.get(username=username)) 61 [end of app/grandchallenge/profiles/templatetags/profiles.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/grandchallenge/profiles/templatetags/profiles.py b/app/grandchallenge/profiles/templatetags/profiles.py --- a/app/grandchallenge/profiles/templatetags/profiles.py +++ b/app/grandchallenge/profiles/templatetags/profiles.py @@ -33,9 +33,15 @@ try: if user.verification.is_verified: - verified = mark_safe( + email = ( + user.verification.email + if user.verification.email + else user.email + ) + verified = format_html( '<i class="fas fa-user-check text-success" ' - 'title="Verified User"></i>' + 'title="Verified email address at {}"></i>', + email.split("@")[1], ) except ObjectDoesNotExist: # No verification request
{"golden_diff": "diff --git a/app/grandchallenge/profiles/templatetags/profiles.py b/app/grandchallenge/profiles/templatetags/profiles.py\n--- a/app/grandchallenge/profiles/templatetags/profiles.py\n+++ b/app/grandchallenge/profiles/templatetags/profiles.py\n@@ -33,9 +33,15 @@\n \n try:\n if user.verification.is_verified:\n- verified = mark_safe(\n+ email = (\n+ user.verification.email\n+ if user.verification.email\n+ else user.email\n+ )\n+ verified = format_html(\n '<i class=\"fas fa-user-check text-success\" '\n- 'title=\"Verified User\"></i>'\n+ 'title=\"Verified email address at {}\"></i>',\n+ email.split(\"@\")[1],\n )\n except ObjectDoesNotExist:\n # No verification request\n", "issue": "Show domain name of the email address that was used to verify the account\nGoogle Scholar shows on profiles instead of the entire email address only the domain name. Something like \"Verified email address at radboudumc.nl\". Would be a good feature for grand challenge as well, this would make it possible to check if users used an email address from their institution to verify their account. A similar text to what Google Scholar shows could be displayed when hovering over the check mark icon, for example.\n", "before_files": [{"content": "from typing import Union\n\nfrom django import template\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom grandchallenge.subdomains.utils import reverse\n\nregister = template.Library()\n\n\[email protected]\ndef user_profile_link(user: Union[AbstractUser, None]) -> str:\n verified = \"\"\n\n if user:\n username = user.username\n profile_url = reverse(\n \"userena_profile_detail\", kwargs={\"username\": user.username}\n )\n mugshot = format_html(\n (\n '<img class=\"mugshot\" loading=\"lazy\" src=\"{0}\" '\n 'alt=\"User Mugshot\" '\n # Match the \"fa-lg\" class style\n 'style=\"height: 1.33em; vertical-align: -25%;\"/>'\n ),\n user.user_profile.get_mugshot_url(),\n )\n\n try:\n if user.verification.is_verified:\n verified = mark_safe(\n '<i class=\"fas fa-user-check text-success\" '\n 'title=\"Verified User\"></i>'\n )\n except ObjectDoesNotExist:\n # No verification request\n pass\n else:\n username = \"Unknown\"\n profile_url = \"#\"\n mugshot = mark_safe('<i class=\"fas fa-user fa-lg\"></i>')\n\n return format_html(\n '<span class=\"text-nowrap\"><a href=\"{0}\">{1}</a>&nbsp;<a href=\"{0}\">{2}</a>&nbsp;{3}</span>',\n profile_url,\n mugshot,\n username,\n verified,\n )\n\n\[email protected]\ndef user_profile_link_username(username: str) -> str:\n User = get_user_model() # noqa: N806\n return user_profile_link(User.objects.get(username=username))\n", "path": "app/grandchallenge/profiles/templatetags/profiles.py"}]}

num_tokens_prompt: 1,175
num_tokens_diff: 195

problem_id: gh_patches_debug_7871
source: rasdani/github-patches
task_type: git_diff
in_source_id: explosion__spaCy-1905
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dummy command argument wasn't removed from auto-link after download Auto-linking of downloaded languages was broken in https://github.com/explosion/spaCy/commit/7f0ab145e95036a55af4802184a4b1c496557d0a. The dummy argument wasn't removed from the `link` call at https://github.com/explosion/spaCy/blob/master/spacy/cli/download.py#L44. I can make a PR for the fix unless it's easier for a maintainer to just fix this quickly. ## Your Environment * Operating System: Docker python:3.6 * Python Version Used: 3.6.2 * spaCy Version Used: Changes made since 2.0.5 * Environment Information: Docker python:3.6 </issue> <code> [start of spacy/cli/download.py] 1 # coding: utf8 2 from __future__ import unicode_literals 3 4 import plac 5 import requests 6 import os 7 import subprocess 8 import sys 9 10 from .link import link 11 from ..util import prints, get_package_path 12 from .. import about 13 14 15 @plac.annotations( 16 model=("model to download, shortcut or name)", "positional", None, str), 17 direct=("force direct download. Needs model name with version and won't " 18 "perform compatibility check", "flag", "d", bool)) 19 def download(model, direct=False): 20 """ 21 Download compatible model from default download path using pip. Model 22 can be shortcut, model name or, if --direct flag is set, full model name 23 with version. 24 """ 25 if direct: 26 dl = download_model('{m}/{m}.tar.gz'.format(m=model)) 27 else: 28 shortcuts = get_json(about.__shortcuts__, "available shortcuts") 29 model_name = shortcuts.get(model, model) 30 compatibility = get_compatibility() 31 version = get_version(model_name, compatibility) 32 dl = download_model('{m}-{v}/{m}-{v}.tar.gz'.format(m=model_name, 33 v=version)) 34 if dl != 0: 35 # if download subprocess doesn't return 0, exit with the respective 36 # exit code before doing anything else 37 sys.exit(dl) 38 try: 39 # Get package path here because link uses 40 # pip.get_installed_distributions() to check if model is a 41 # package, which fails if model was just installed via 42 # subprocess 43 package_path = get_package_path(model_name) 44 link(None, model_name, model, force=True, 45 model_path=package_path) 46 except: 47 # Dirty, but since spacy.download and the auto-linking is 48 # mostly a convenience wrapper, it's best to show a success 49 # message and loading instructions, even if linking fails. 50 prints( 51 "Creating a shortcut link for 'en' didn't work (maybe " 52 "you don't have admin permissions?), but you can still " 53 "load the model via its full package name:", 54 "nlp = spacy.load('%s')" % model_name, 55 title="Download successful but linking failed") 56 57 58 def get_json(url, desc): 59 r = requests.get(url) 60 if r.status_code != 200: 61 msg = ("Couldn't fetch %s. Please find a model for your spaCy " 62 "installation (v%s), and download it manually.") 63 prints(msg % (desc, about.__version__), about.__docs_models__, 64 title="Server error (%d)" % r.status_code, exits=1) 65 return r.json() 66 67 68 def get_compatibility(): 69 version = about.__version__ 70 version = version.rsplit('.dev', 1)[0] 71 comp_table = get_json(about.__compatibility__, "compatibility table") 72 comp = comp_table['spacy'] 73 if version not in comp: 74 prints("No compatible models found for v%s of spaCy." 
% version, 75 title="Compatibility error", exits=1) 76 return comp[version] 77 78 79 def get_version(model, comp): 80 model = model.rsplit('.dev', 1)[0] 81 if model not in comp: 82 version = about.__version__ 83 msg = "No compatible model found for '%s' (spaCy v%s)." 84 prints(msg % (model, version), title="Compatibility error", exits=1) 85 return comp[model][0] 86 87 88 def download_model(filename): 89 download_url = about.__download_url__ + '/' + filename 90 return subprocess.call( 91 [sys.executable, '-m', 'pip', 'install', '--no-cache-dir', '--no-deps', 92 download_url], env=os.environ.copy()) 93 [end of spacy/cli/download.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/spacy/cli/download.py b/spacy/cli/download.py --- a/spacy/cli/download.py +++ b/spacy/cli/download.py @@ -41,7 +41,7 @@ # package, which fails if model was just installed via # subprocess package_path = get_package_path(model_name) - link(None, model_name, model, force=True, + link(model_name, model, force=True, model_path=package_path) except: # Dirty, but since spacy.download and the auto-linking is
{"golden_diff": "diff --git a/spacy/cli/download.py b/spacy/cli/download.py\n--- a/spacy/cli/download.py\n+++ b/spacy/cli/download.py\n@@ -41,7 +41,7 @@\n # package, which fails if model was just installed via\n # subprocess\n package_path = get_package_path(model_name)\n- link(None, model_name, model, force=True,\n+ link(model_name, model, force=True,\n model_path=package_path)\n except:\n # Dirty, but since spacy.download and the auto-linking is\n", "issue": "Dummy command argument wasn't removed from auto-link after download\nAuto-linking of downloaded languages was broken in https://github.com/explosion/spaCy/commit/7f0ab145e95036a55af4802184a4b1c496557d0a. The dummy argument wasn't removed from the `link` call at https://github.com/explosion/spaCy/blob/master/spacy/cli/download.py#L44. I can make a PR for the fix unless it's easier for a maintainer to just fix this quickly.\r\n\r\n## Your Environment\r\n* Operating System: Docker python:3.6\r\n* Python Version Used: 3.6.2\r\n* spaCy Version Used: Changes made since 2.0.5\r\n* Environment Information: Docker python:3.6\r\n\n", "before_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nimport plac\nimport requests\nimport os\nimport subprocess\nimport sys\n\nfrom .link import link\nfrom ..util import prints, get_package_path\nfrom .. import about\n\n\[email protected](\n model=(\"model to download, shortcut or name)\", \"positional\", None, str),\n direct=(\"force direct download. Needs model name with version and won't \"\n \"perform compatibility check\", \"flag\", \"d\", bool))\ndef download(model, direct=False):\n \"\"\"\n Download compatible model from default download path using pip. Model\n can be shortcut, model name or, if --direct flag is set, full model name\n with version.\n \"\"\"\n if direct:\n dl = download_model('{m}/{m}.tar.gz'.format(m=model))\n else:\n shortcuts = get_json(about.__shortcuts__, \"available shortcuts\")\n model_name = shortcuts.get(model, model)\n compatibility = get_compatibility()\n version = get_version(model_name, compatibility)\n dl = download_model('{m}-{v}/{m}-{v}.tar.gz'.format(m=model_name,\n v=version))\n if dl != 0:\n # if download subprocess doesn't return 0, exit with the respective\n # exit code before doing anything else\n sys.exit(dl)\n try:\n # Get package path here because link uses\n # pip.get_installed_distributions() to check if model is a\n # package, which fails if model was just installed via\n # subprocess\n package_path = get_package_path(model_name)\n link(None, model_name, model, force=True,\n model_path=package_path)\n except:\n # Dirty, but since spacy.download and the auto-linking is\n # mostly a convenience wrapper, it's best to show a success\n # message and loading instructions, even if linking fails.\n prints(\n \"Creating a shortcut link for 'en' didn't work (maybe \"\n \"you don't have admin permissions?), but you can still \"\n \"load the model via its full package name:\",\n \"nlp = spacy.load('%s')\" % model_name,\n title=\"Download successful but linking failed\")\n\n\ndef get_json(url, desc):\n r = requests.get(url)\n if r.status_code != 200:\n msg = (\"Couldn't fetch %s. 
Please find a model for your spaCy \"\n \"installation (v%s), and download it manually.\")\n prints(msg % (desc, about.__version__), about.__docs_models__,\n title=\"Server error (%d)\" % r.status_code, exits=1)\n return r.json()\n\n\ndef get_compatibility():\n version = about.__version__\n version = version.rsplit('.dev', 1)[0]\n comp_table = get_json(about.__compatibility__, \"compatibility table\")\n comp = comp_table['spacy']\n if version not in comp:\n prints(\"No compatible models found for v%s of spaCy.\" % version,\n title=\"Compatibility error\", exits=1)\n return comp[version]\n\n\ndef get_version(model, comp):\n model = model.rsplit('.dev', 1)[0]\n if model not in comp:\n version = about.__version__\n msg = \"No compatible model found for '%s' (spaCy v%s).\"\n prints(msg % (model, version), title=\"Compatibility error\", exits=1)\n return comp[model][0]\n\n\ndef download_model(filename):\n download_url = about.__download_url__ + '/' + filename\n return subprocess.call(\n [sys.executable, '-m', 'pip', 'install', '--no-cache-dir', '--no-deps',\n download_url], env=os.environ.copy())\n", "path": "spacy/cli/download.py"}]}
1,701
122
gh_patches_debug_433
rasdani/github-patches
git_diff
kornia__kornia-2476
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Next release? ## 🚀 Feature Hi, when will the next kornia release on conda or pypi be? ## Motivation Last conda release was in April, and new features have landed since then, but are unavailable in wheels. </issue> <code> [start of kornia/__init__.py] 1 # NOTE: kornia filters and geometry must go first since are the core of the library 2 # and by changing the import order you might get into a circular dependencies issue. 3 from . import filters 4 from . import geometry 5 from . import grad_estimator 6 7 # import the other modules for convenience 8 from . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x 9 10 # NOTE: we are going to expose to top level very few things 11 from kornia.constants import pi 12 from kornia.testing import xla_is_available 13 from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image 14 15 # Version variable 16 __version__ = "0.6.13-dev" 17 [end of kornia/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kornia/__init__.py b/kornia/__init__.py --- a/kornia/__init__.py +++ b/kornia/__init__.py @@ -13,4 +13,4 @@ from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image # Version variable -__version__ = "0.6.13-dev" +__version__ = "0.7.0"
{"golden_diff": "diff --git a/kornia/__init__.py b/kornia/__init__.py\n--- a/kornia/__init__.py\n+++ b/kornia/__init__.py\n@@ -13,4 +13,4 @@\n from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n \n # Version variable\n-__version__ = \"0.6.13-dev\"\n+__version__ = \"0.7.0\"\n", "issue": "Next release?\n## \ud83d\ude80 Feature\r\nHi, when will the next kornia release on conda or pypi be?\r\n\r\n## Motivation\r\n\r\nLast conda release was in April, and new features have landed since then, but are unavailable in wheels.\n", "before_files": [{"content": "# NOTE: kornia filters and geometry must go first since are the core of the library\n# and by changing the import order you might get into a circular dependencies issue.\nfrom . import filters\nfrom . import geometry\nfrom . import grad_estimator\n\n# import the other modules for convenience\nfrom . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x\n\n# NOTE: we are going to expose to top level very few things\nfrom kornia.constants import pi\nfrom kornia.testing import xla_is_available\nfrom kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n\n# Version variable\n__version__ = \"0.6.13-dev\"\n", "path": "kornia/__init__.py"}]}
786
107
gh_patches_debug_22898
rasdani/github-patches
git_diff
gratipay__gratipay.com-3485
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> revenue model is '' for everyone cf. #3479 </issue> <code> [start of gratipay/models/team.py] 1 """Teams on Gratipay are plural participants with members. 2 """ 3 from postgres.orm import Model 4 5 6 class Team(Model): 7 """Represent a Gratipay team. 8 """ 9 10 typname = 'teams' 11 12 def __eq__(self, other): 13 if not isinstance(other, Team): 14 return False 15 return self.id == other.id 16 17 def __ne__(self, other): 18 if not isinstance(other, Team): 19 return True 20 return self.id != other.id 21 22 23 # Constructors 24 # ============ 25 26 @classmethod 27 def from_id(cls, id): 28 """Return an existing team based on id. 29 """ 30 return cls._from_thing("id", id) 31 32 @classmethod 33 def from_slug(cls, slug): 34 """Return an existing team based on slug. 35 """ 36 return cls._from_thing("slug_lower", slug.lower()) 37 38 @classmethod 39 def _from_thing(cls, thing, value): 40 assert thing in ("id", "slug_lower") 41 return cls.db.one(""" 42 43 SELECT teams.*::teams 44 FROM teams 45 WHERE {}=%s 46 47 """.format(thing), (value,)) 48 49 @classmethod 50 def create_new(cls, owner, fields): 51 return cls.db.one(""" 52 53 INSERT INTO teams 54 (slug, slug_lower, name, homepage, product_or_service, 55 getting_involved, getting_paid, owner) 56 VALUES (%s, %s, %s, %s, %s, %s, %s, %s) 57 RETURNING teams.*::teams 58 59 """, (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'], 60 fields['product_or_service'], fields['getting_involved'], fields['getting_paid'], 61 owner.username)) 62 63 def get_og_title(self): 64 out = self.name 65 receiving = self.receiving 66 if receiving > 0: 67 out += " receives $%.2f/wk" % receiving 68 else: 69 out += " is" 70 return out + " on Gratipay" 71 72 73 def update_receiving(self, cursor=None): 74 # Stubbed out for now. Migrate this over from Participant. 75 pass 76 77 78 @property 79 def status(self): 80 return { None: 'unreviewed' 81 , False: 'rejected' 82 , True: 'approved' 83 }[self.is_approved] 84 [end of gratipay/models/team.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gratipay/models/team.py b/gratipay/models/team.py --- a/gratipay/models/team.py +++ b/gratipay/models/team.py @@ -47,18 +47,22 @@ """.format(thing), (value,)) @classmethod - def create_new(cls, owner, fields): + def insert(cls, owner, **fields): + fields['slug_lower'] = fields['slug'].lower() + fields['owner'] = owner.username return cls.db.one(""" INSERT INTO teams - (slug, slug_lower, name, homepage, product_or_service, - getting_involved, getting_paid, owner) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s) + (slug, slug_lower, name, homepage, + product_or_service, revenue_model, getting_involved, getting_paid, + owner) + VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s, + %(product_or_service)s, %(revenue_model)s, %(getting_involved)s, + %(getting_paid)s, + %(owner)s) RETURNING teams.*::teams - """, (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'], - fields['product_or_service'], fields['getting_involved'], fields['getting_paid'], - owner.username)) + """, fields) def get_og_title(self): out = self.name
{"golden_diff": "diff --git a/gratipay/models/team.py b/gratipay/models/team.py\n--- a/gratipay/models/team.py\n+++ b/gratipay/models/team.py\n@@ -47,18 +47,22 @@\n \"\"\".format(thing), (value,))\n \n @classmethod\n- def create_new(cls, owner, fields):\n+ def insert(cls, owner, **fields):\n+ fields['slug_lower'] = fields['slug'].lower()\n+ fields['owner'] = owner.username\n return cls.db.one(\"\"\"\n \n INSERT INTO teams\n- (slug, slug_lower, name, homepage, product_or_service,\n- getting_involved, getting_paid, owner)\n- VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\n+ (slug, slug_lower, name, homepage,\n+ product_or_service, revenue_model, getting_involved, getting_paid,\n+ owner)\n+ VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,\n+ %(product_or_service)s, %(revenue_model)s, %(getting_involved)s,\n+ %(getting_paid)s,\n+ %(owner)s)\n RETURNING teams.*::teams\n \n- \"\"\", (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'],\n- fields['product_or_service'], fields['getting_involved'], fields['getting_paid'],\n- owner.username))\n+ \"\"\", fields)\n \n def get_og_title(self):\n out = self.name\n", "issue": "revenue model is '' for everyone\ncf. #3479\n\n", "before_files": [{"content": "\"\"\"Teams on Gratipay are plural participants with members.\n\"\"\"\nfrom postgres.orm import Model\n\n\nclass Team(Model):\n \"\"\"Represent a Gratipay team.\n \"\"\"\n\n typname = 'teams'\n\n def __eq__(self, other):\n if not isinstance(other, Team):\n return False\n return self.id == other.id\n\n def __ne__(self, other):\n if not isinstance(other, Team):\n return True\n return self.id != other.id\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_id(cls, id):\n \"\"\"Return an existing team based on id.\n \"\"\"\n return cls._from_thing(\"id\", id)\n\n @classmethod\n def from_slug(cls, slug):\n \"\"\"Return an existing team based on slug.\n \"\"\"\n return cls._from_thing(\"slug_lower\", slug.lower())\n\n @classmethod\n def _from_thing(cls, thing, value):\n assert thing in (\"id\", \"slug_lower\")\n return cls.db.one(\"\"\"\n\n SELECT teams.*::teams\n FROM teams\n WHERE {}=%s\n\n \"\"\".format(thing), (value,))\n\n @classmethod\n def create_new(cls, owner, fields):\n return cls.db.one(\"\"\"\n\n INSERT INTO teams\n (slug, slug_lower, name, homepage, product_or_service,\n getting_involved, getting_paid, owner)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\n RETURNING teams.*::teams\n\n \"\"\", (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'],\n fields['product_or_service'], fields['getting_involved'], fields['getting_paid'],\n owner.username))\n\n def get_og_title(self):\n out = self.name\n receiving = self.receiving\n if receiving > 0:\n out += \" receives $%.2f/wk\" % receiving\n else:\n out += \" is\"\n return out + \" on Gratipay\"\n\n\n def update_receiving(self, cursor=None):\n # Stubbed out for now. Migrate this over from Participant.\n pass\n\n\n @property\n def status(self):\n return { None: 'unreviewed'\n , False: 'rejected'\n , True: 'approved'\n }[self.is_approved]\n", "path": "gratipay/models/team.py"}]}
1,225
338
gh_patches_debug_22373
rasdani/github-patches
git_diff
zulip__zulip-10098
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> slash commands: Add /dark and /light commands. We have /night and /day, and people are starting to use them. We should add the aliases /dark and /light. </issue> <code> [start of zerver/lib/zcommand.py] 1 from typing import Any, Dict 2 from django.utils.translation import ugettext as _ 3 4 from zerver.models import UserProfile 5 from zerver.lib.actions import do_set_user_display_setting 6 from zerver.lib.exceptions import JsonableError 7 8 def process_zcommands(content: str, user_profile: UserProfile) -> Dict[str, Any]: 9 if not content.startswith('/'): 10 raise JsonableError(_('There should be a leading slash in the zcommand.')) 11 command = content[1:] 12 13 if command == 'ping': 14 ret = dict() # type: Dict[str, Any] 15 return ret 16 17 if command == 'night': 18 if user_profile.night_mode: 19 msg = 'You are still in night mode.' 20 else: 21 msg = 'Changed to night mode! To revert night mode, type `/day`.' 22 do_set_user_display_setting(user_profile, 'night_mode', True) 23 ret = dict(msg=msg) 24 return ret 25 26 if command == 'day': 27 if user_profile.night_mode: 28 msg = 'Changed to day mode! To revert day mode, type `/night`.' 29 do_set_user_display_setting(user_profile, 'night_mode', False) 30 else: 31 msg = 'You are still in day mode.' 32 ret = dict(msg=msg) 33 return ret 34 35 raise JsonableError(_('No such command: %s') % (command,)) 36 [end of zerver/lib/zcommand.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zerver/lib/zcommand.py b/zerver/lib/zcommand.py --- a/zerver/lib/zcommand.py +++ b/zerver/lib/zcommand.py @@ -14,18 +14,21 @@ ret = dict() # type: Dict[str, Any] return ret - if command == 'night': + night_commands = ['night', 'dark'] + day_commands = ['day', 'light'] + + if command in night_commands: if user_profile.night_mode: msg = 'You are still in night mode.' else: - msg = 'Changed to night mode! To revert night mode, type `/day`.' + msg = 'Changed to night mode! To revert night mode, type `%s`.' % (content,) do_set_user_display_setting(user_profile, 'night_mode', True) ret = dict(msg=msg) return ret - if command == 'day': + if command in day_commands: if user_profile.night_mode: - msg = 'Changed to day mode! To revert day mode, type `/night`.' + msg = 'Changed to day mode! To revert day mode, type `%s`.' % (content,) do_set_user_display_setting(user_profile, 'night_mode', False) else: msg = 'You are still in day mode.'
{"golden_diff": "diff --git a/zerver/lib/zcommand.py b/zerver/lib/zcommand.py\n--- a/zerver/lib/zcommand.py\n+++ b/zerver/lib/zcommand.py\n@@ -14,18 +14,21 @@\n ret = dict() # type: Dict[str, Any]\n return ret\n \n- if command == 'night':\n+ night_commands = ['night', 'dark']\n+ day_commands = ['day', 'light']\n+\n+ if command in night_commands:\n if user_profile.night_mode:\n msg = 'You are still in night mode.'\n else:\n- msg = 'Changed to night mode! To revert night mode, type `/day`.'\n+ msg = 'Changed to night mode! To revert night mode, type `%s`.' % (content,)\n do_set_user_display_setting(user_profile, 'night_mode', True)\n ret = dict(msg=msg)\n return ret\n \n- if command == 'day':\n+ if command in day_commands:\n if user_profile.night_mode:\n- msg = 'Changed to day mode! To revert day mode, type `/night`.'\n+ msg = 'Changed to day mode! To revert day mode, type `%s`.' % (content,)\n do_set_user_display_setting(user_profile, 'night_mode', False)\n else:\n msg = 'You are still in day mode.'\n", "issue": "slash commands: Add /dark and /light commands.\nWe have /night and /day, and people are starting to use them. We should add the aliases /dark and /light.\n", "before_files": [{"content": "from typing import Any, Dict\nfrom django.utils.translation import ugettext as _\n\nfrom zerver.models import UserProfile\nfrom zerver.lib.actions import do_set_user_display_setting\nfrom zerver.lib.exceptions import JsonableError\n\ndef process_zcommands(content: str, user_profile: UserProfile) -> Dict[str, Any]:\n if not content.startswith('/'):\n raise JsonableError(_('There should be a leading slash in the zcommand.'))\n command = content[1:]\n\n if command == 'ping':\n ret = dict() # type: Dict[str, Any]\n return ret\n\n if command == 'night':\n if user_profile.night_mode:\n msg = 'You are still in night mode.'\n else:\n msg = 'Changed to night mode! To revert night mode, type `/day`.'\n do_set_user_display_setting(user_profile, 'night_mode', True)\n ret = dict(msg=msg)\n return ret\n\n if command == 'day':\n if user_profile.night_mode:\n msg = 'Changed to day mode! To revert day mode, type `/night`.'\n do_set_user_display_setting(user_profile, 'night_mode', False)\n else:\n msg = 'You are still in day mode.'\n ret = dict(msg=msg)\n return ret\n\n raise JsonableError(_('No such command: %s') % (command,))\n", "path": "zerver/lib/zcommand.py"}]}
932
297
gh_patches_debug_39460
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-151
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Get filtered set of records from table
**Problem**
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->

At the moment, when we request records from a table, it's _all_ of the records or none. We should be able to filter in a way that supports retrieving "groups" of records, where a group is defined as a set of records where some subset of the columns match a set of values, equivalent to the SQL:

```sql
WHERE col1 = val1 AND col2 = val2 AND ... AND coln = valn
```

**Proposed solution**
<!-- A clear and concise description of your proposed solution or feature. -->

We should create a function that lets us filter string columns using the pattern above, and another function that retrieves the distinct tuples for a set of columns (i.e., the groups in this context).

**Additional context**
<!-- Add any other context or screenshots about the feature request here.-->

The interesting bit will be figuring out how to paginate the results, but without having to reperform the (costly) filtering query each time.
</issue>

<code>
[start of db/records.py]
1 from sqlalchemy import delete, select
2 from sqlalchemy.inspection import inspect
3 
4 
5 def _get_primary_key_column(table):
6     primary_key_list = list(inspect(table).primary_key)
7     # We do not support getting by composite primary keys
8     assert len(primary_key_list) == 1
9     return primary_key_list[0]
10 
11 
12 def get_record(table, engine, id_value):
13     primary_key_column = _get_primary_key_column(table)
14     query = select(table).where(primary_key_column == id_value)
15     with engine.begin() as conn:
16         result = conn.execute(query).fetchall()
17         assert len(result) <= 1
18         return result[0] if result else None
19 
20 
21 def get_records(table, engine, limit=None, offset=None, order_by=[]):
22     """
23     Returns records from a table.
24 
25     Args:
26         table: SQLAlchemy table object
27         engine: SQLAlchemy engine object
28         limit: int, gives number of rows to return
29         offset: int, gives number of rows to skip
30         order_by: list of SQLAlchemy ColumnElements to order by. Should
31             usually be either a list of string column names, or a
32             list of columns from the given table.
33     """
34     query = select(table).order_by(*order_by).limit(limit).offset(offset)
35     with engine.begin() as conn:
36         return conn.execute(query).fetchall()
37 
38 
39 def create_record_or_records(table, engine, record_data):
40     """
41     record_data can be a dictionary, tuple, or list of dictionaries or tuples.
42     if record_data is a list, it creates multiple records.
43     """
44     id_value = None
45     with engine.begin() as connection:
46         result = connection.execute(table.insert(), record_data)
47         # If there was only a single record created, return the record.
48         if result.rowcount == 1:
49             # We need to manually commit insertion so that we can retrieve the record.
50             connection.commit()
51             id_value = result.inserted_primary_key[0]
52     if id_value is not None:
53         return get_record(table, engine, id_value)
54     # Do not return any records if multiple rows were added.
55 return None 56 57 58 def create_records_from_csv(table, engine, csv_filename, column_names): 59 with open(csv_filename, 'rb') as csv_file: 60 with engine.begin() as conn: 61 cursor = conn.connection.cursor() 62 relation = '.'.join('"{}"'.format(part) for part in (table.schema, table.name)) 63 formatted_columns = '({})'.format(','.join([f'"{column_name}"' for column_name in column_names])) 64 copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV HEADER' 65 cursor.copy_expert(copy_sql, csv_file) 66 67 68 def update_record(table, engine, id_value, record_data): 69 primary_key_column = _get_primary_key_column(table) 70 with engine.begin() as connection: 71 connection.execute( 72 table.update().where(primary_key_column == id_value).values(record_data) 73 ) 74 return get_record(table, engine, id_value) 75 76 77 def delete_record(table, engine, id_value): 78 primary_key_column = _get_primary_key_column(table) 79 query = delete(table).where(primary_key_column == id_value) 80 with engine.begin() as conn: 81 return conn.execute(query) 82 [end of db/records.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/db/records.py b/db/records.py --- a/db/records.py +++ b/db/records.py @@ -1,6 +1,9 @@ -from sqlalchemy import delete, select +import logging +from sqlalchemy import delete, select, and_, Column from sqlalchemy.inspection import inspect +logger = logging.getLogger(__name__) + def _get_primary_key_column(table): primary_key_list = list(inspect(table).primary_key) @@ -18,7 +21,9 @@ return result[0] if result else None -def get_records(table, engine, limit=None, offset=None, order_by=[]): +def get_records( + table, engine, limit=None, offset=None, order_by=[], filters=[] +): """ Returns records from a table. @@ -30,12 +35,72 @@ order_by: list of SQLAlchemy ColumnElements to order by. Should usually be either a list of string column names, or a list of columns from the given table. + filters: list of tuples of type (ColumnElement, value), where + ColumnElement is an SQLAlchemy ColumnElement, and value + is a valid value for the associated column (i.e., the + type must be correct) """ - query = select(table).order_by(*order_by).limit(limit).offset(offset) + query = ( + select(table) + .order_by(*order_by) + .limit(limit) + .offset(offset) + .where(_build_filter_conjunction(table, filters)) + ) with engine.begin() as conn: return conn.execute(query).fetchall() +def _build_filter_conjunction(table, filters): + refined_filters = [ + (table.columns[col] if type(col) == str else col, value) + for col, value in filters + ] + # We need a default of True (rather than empty), since invoking and_ + # without arguments is deprecated. + return and_(True, *[col == value for col, value in refined_filters]) + + +def get_distinct_tuple_values( + column_list, engine, table=None, limit=None, offset=None, +): + """ + Returns distinct tuples from a given list of columns. + + Args: + column_list: list of column names or SQLAlchemy column objects + engine: SQLAlchemy engine object + table: SQLAlchemy table object + limit: int, gives number of rows to return + offset: int, gives number of rows to skip + + If no table is given, the column_list must consist entirely of + SQLAlchemy column objects associated with a table. + """ + if table is not None: + column_objects = [ + table.columns[col] if type(col) == str else col + for col in column_list + ] + else: + column_objects = column_list + try: + assert all([type(col) == Column for col in column_objects]) + except AssertionError as e: + logger.error("All columns must be str or sqlalchemy.Column type") + raise e + + query = ( + select(*column_objects) + .distinct() + .limit(limit) + .offset(offset) + ) + with engine.begin() as conn: + res = conn.execute(query).fetchall() + return [tuple(zip(column_objects, row)) for row in res] + + def create_record_or_records(table, engine, record_data): """ record_data can be a dictionary, tuple, or list of dictionaries or tuples.
{"golden_diff": "diff --git a/db/records.py b/db/records.py\n--- a/db/records.py\n+++ b/db/records.py\n@@ -1,6 +1,9 @@\n-from sqlalchemy import delete, select\n+import logging\n+from sqlalchemy import delete, select, and_, Column\n from sqlalchemy.inspection import inspect\n \n+logger = logging.getLogger(__name__)\n+\n \n def _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n@@ -18,7 +21,9 @@\n return result[0] if result else None\n \n \n-def get_records(table, engine, limit=None, offset=None, order_by=[]):\n+def get_records(\n+ table, engine, limit=None, offset=None, order_by=[], filters=[]\n+):\n \"\"\"\n Returns records from a table.\n \n@@ -30,12 +35,72 @@\n order_by: list of SQLAlchemy ColumnElements to order by. Should\n usually be either a list of string column names, or a\n list of columns from the given table.\n+ filters: list of tuples of type (ColumnElement, value), where\n+ ColumnElement is an SQLAlchemy ColumnElement, and value\n+ is a valid value for the associated column (i.e., the\n+ type must be correct)\n \"\"\"\n- query = select(table).order_by(*order_by).limit(limit).offset(offset)\n+ query = (\n+ select(table)\n+ .order_by(*order_by)\n+ .limit(limit)\n+ .offset(offset)\n+ .where(_build_filter_conjunction(table, filters))\n+ )\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n \n \n+def _build_filter_conjunction(table, filters):\n+ refined_filters = [\n+ (table.columns[col] if type(col) == str else col, value)\n+ for col, value in filters\n+ ]\n+ # We need a default of True (rather than empty), since invoking and_\n+ # without arguments is deprecated.\n+ return and_(True, *[col == value for col, value in refined_filters])\n+\n+\n+def get_distinct_tuple_values(\n+ column_list, engine, table=None, limit=None, offset=None,\n+):\n+ \"\"\"\n+ Returns distinct tuples from a given list of columns.\n+\n+ Args:\n+ column_list: list of column names or SQLAlchemy column objects\n+ engine: SQLAlchemy engine object\n+ table: SQLAlchemy table object\n+ limit: int, gives number of rows to return\n+ offset: int, gives number of rows to skip\n+\n+ If no table is given, the column_list must consist entirely of\n+ SQLAlchemy column objects associated with a table.\n+ \"\"\"\n+ if table is not None:\n+ column_objects = [\n+ table.columns[col] if type(col) == str else col\n+ for col in column_list\n+ ]\n+ else:\n+ column_objects = column_list\n+ try:\n+ assert all([type(col) == Column for col in column_objects])\n+ except AssertionError as e:\n+ logger.error(\"All columns must be str or sqlalchemy.Column type\")\n+ raise e\n+\n+ query = (\n+ select(*column_objects)\n+ .distinct()\n+ .limit(limit)\n+ .offset(offset)\n+ )\n+ with engine.begin() as conn:\n+ res = conn.execute(query).fetchall()\n+ return [tuple(zip(column_objects, row)) for row in res]\n+\n+\n def create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n", "issue": "Get filtered set of records from table\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\n\r\nAt the moment, when we request records from a table, it's _all_ of the records or none. 
We should be able to filter in a way that supports retrieving \"groups\" of records, where a group is defined as a set of records where some subset of the columns match a set of values, equivalent to the SQL:\r\n\r\n```sql\r\nWHERE col1 = val1 AND col2 = val2 AND ... AND coln = valn\r\n```\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\n\r\nWe should create a function that lets us filter string columns using the pattern above, and another function that retrieves the distinct tuples for a set of columns (i.e., the groups in this context).\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n\r\nThe interesting bit will be figuring out how to paginate the results, but without having to reperform the (costly) filtering query each time.\n", "before_files": [{"content": "from sqlalchemy import delete, select\nfrom sqlalchemy.inspection import inspect\n\n\ndef _get_primary_key_column(table):\n    primary_key_list = list(inspect(table).primary_key)\n    # We do not support getting by composite primary keys\n    assert len(primary_key_list) == 1\n    return primary_key_list[0]\n\n\ndef get_record(table, engine, id_value):\n    primary_key_column = _get_primary_key_column(table)\n    query = select(table).where(primary_key_column == id_value)\n    with engine.begin() as conn:\n        result = conn.execute(query).fetchall()\n        assert len(result) <= 1\n        return result[0] if result else None\n\n\ndef get_records(table, engine, limit=None, offset=None, order_by=[]):\n    \"\"\"\n    Returns records from a table.\n\n    Args:\n        table:    SQLAlchemy table object\n        engine:   SQLAlchemy engine object\n        limit:    int, gives number of rows to return\n        offset:   int, gives number of rows to skip\n        order_by: list of SQLAlchemy ColumnElements to order by.
Should\n usually be either a list of string column names, or a\n list of columns from the given table.\n \"\"\"\n query = select(table).order_by(*order_by).limit(limit).offset(offset)\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n\n\ndef create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n if record_data is a list, it creates multiple records.\n \"\"\"\n id_value = None\n with engine.begin() as connection:\n result = connection.execute(table.insert(), record_data)\n # If there was only a single record created, return the record.\n if result.rowcount == 1:\n # We need to manually commit insertion so that we can retrieve the record.\n connection.commit()\n id_value = result.inserted_primary_key[0]\n if id_value is not None:\n return get_record(table, engine, id_value)\n # Do not return any records if multiple rows were added.\n return None\n\n\ndef create_records_from_csv(table, engine, csv_filename, column_names):\n with open(csv_filename, 'rb') as csv_file:\n with engine.begin() as conn:\n cursor = conn.connection.cursor()\n relation = '.'.join('\"{}\"'.format(part) for part in (table.schema, table.name))\n formatted_columns = '({})'.format(','.join([f'\"{column_name}\"' for column_name in column_names]))\n copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV HEADER'\n cursor.copy_expert(copy_sql, csv_file)\n\n\ndef update_record(table, engine, id_value, record_data):\n primary_key_column = _get_primary_key_column(table)\n with engine.begin() as connection:\n connection.execute(\n table.update().where(primary_key_column == id_value).values(record_data)\n )\n return get_record(table, engine, id_value)\n\n\ndef delete_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = delete(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n return conn.execute(query)\n", "path": "db/records.py"}]}
1,626
799
gh_patches_debug_2891
rasdani/github-patches
git_diff
getsentry__sentry-5094
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Webhook data does not have event id
Webhook data contains issue id only. It would be nice to have event id as well.

Discussed with @mattrobenolt on IRC. Documenting it here with this issue.
</issue>

<code>
[start of src/sentry/plugins/sentry_webhooks/plugin.py]
1 from __future__ import absolute_import
2 
3 import logging
4 import six
5 import sentry
6 
7 from django import forms
8 from django.conf import settings
9 from django.utils.translation import ugettext_lazy as _
10 
11 from sentry.exceptions import PluginError
12 from sentry.plugins.bases import notify
13 from sentry.http import is_valid_url, safe_urlopen
14 from sentry.utils.safe import safe_execute
15 
16 
17 def validate_urls(value, **kwargs):
18     output = []
19     for url in value.split('\n'):
20         url = url.strip()
21         if not url:
22             continue
23         if not url.startswith(('http://', 'https://')):
24             raise PluginError('Not a valid URL.')
25         if not is_valid_url(url):
26             raise PluginError('Not a valid URL.')
27         output.append(url)
28     return '\n'.join(output)
29 
30 
31 class WebHooksOptionsForm(notify.NotificationConfigurationForm):
32     urls = forms.CharField(
33         label=_('Callback URLs'),
34         widget=forms.Textarea(attrs={
35             'class': 'span6', 'placeholder': 'https://sentry.io/callback/url'}),
36         help_text=_('Enter callback URLs to POST new events to (one per line).'))
37 
38     def clean_url(self):
39         value = self.cleaned_data.get('url')
40         return validate_urls(value)
41 
42 
43 class WebHooksPlugin(notify.NotificationPlugin):
44     author = 'Sentry Team'
45     author_url = 'https://github.com/getsentry/sentry'
46     version = sentry.VERSION
47     description = "Integrates web hooks."
48     resource_links = [
49         ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),
50         ('Source', 'https://github.com/getsentry/sentry'),
51     ]
52 
53     slug = 'webhooks'
54     title = 'WebHooks'
55     conf_title = title
56     conf_key = 'webhooks'
57     # TODO(dcramer): remove when this is migrated to React
58     project_conf_form = WebHooksOptionsForm
59     timeout = getattr(settings, 'SENTRY_WEBHOOK_TIMEOUT', 3)
60     logger = logging.getLogger('sentry.plugins.webhooks')
61     user_agent = 'sentry-webhooks/%s' % version
62 
63     def is_configured(self, project, **kwargs):
64         return bool(self.get_option('urls', project))
65 
66     def get_config(self, project, **kwargs):
67         return [{
68             'name': 'urls',
69             'label': 'Callback URLs',
70             'type': 'textarea',
71             'help': 'Enter callback URLs to POST new events to (one per line).',
72             'placeholder': 'https://sentry.io/callback/url',
73             'validators': [validate_urls],
74             'required': False
75         }]
76 
77     def get_group_data(self, group, event):
78         data = {
79             'id': six.text_type(group.id),
80             'project': group.project.slug,
81             'project_name': group.project.name,
82             'logger': event.get_tag('logger'),
83             'level': event.get_tag('level'),
84             'culprit': group.culprit,
85             'message': event.get_legacy_message(),
86             'url': group.get_absolute_url(),
87         }
88         data['event'] = dict(event.data or {})
89         data['event']['tags'] = event.get_tags()
90         return data
91 
92     def get_webhook_urls(self, project):
93         urls = self.get_option('urls', project)
94         if not urls:
95             return ()
96         return filter(bool, urls.strip().splitlines())
97 
98     def send_webhook(self, url, payload):
99         return safe_urlopen(
100             url=url,
101             json=payload,
102             timeout=self.timeout,
103             verify_ssl=False,
104         )
105 
106     def notify_users(self, group, event, fail_silently=False):
107         payload = self.get_group_data(group, event)
108         for url in self.get_webhook_urls(group.project):
109             safe_execute(self.send_webhook, url, payload, _with_transaction=False)
110 
[end of src/sentry/plugins/sentry_webhooks/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/src/sentry/plugins/sentry_webhooks/plugin.py b/src/sentry/plugins/sentry_webhooks/plugin.py --- a/src/sentry/plugins/sentry_webhooks/plugin.py +++ b/src/sentry/plugins/sentry_webhooks/plugin.py @@ -87,6 +87,8 @@ } data['event'] = dict(event.data or {}) data['event']['tags'] = event.get_tags() + data['event']['event_id'] = event.event_id + data['event']['id'] = event.id return data def get_webhook_urls(self, project):
{"golden_diff": "diff --git a/src/sentry/plugins/sentry_webhooks/plugin.py b/src/sentry/plugins/sentry_webhooks/plugin.py\n--- a/src/sentry/plugins/sentry_webhooks/plugin.py\n+++ b/src/sentry/plugins/sentry_webhooks/plugin.py\n@@ -87,6 +87,8 @@\n }\n data['event'] = dict(event.data or {})\n data['event']['tags'] = event.get_tags()\n+ data['event']['event_id'] = event.event_id\n+ data['event']['id'] = event.id\n return data\n \n def get_webhook_urls(self, project):\n", "issue": "Webhook data does not have event id\nWebhook data contains issue id only. It would be nice to have event id as well.\r\n\r\nDiscussed with @mattrobenolt on IRC. Documenting it here with this issue.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport six\nimport sentry\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry.exceptions import PluginError\nfrom sentry.plugins.bases import notify\nfrom sentry.http import is_valid_url, safe_urlopen\nfrom sentry.utils.safe import safe_execute\n\n\ndef validate_urls(value, **kwargs):\n output = []\n for url in value.split('\\n'):\n url = url.strip()\n if not url:\n continue\n if not url.startswith(('http://', 'https://')):\n raise PluginError('Not a valid URL.')\n if not is_valid_url(url):\n raise PluginError('Not a valid URL.')\n output.append(url)\n return '\\n'.join(output)\n\n\nclass WebHooksOptionsForm(notify.NotificationConfigurationForm):\n urls = forms.CharField(\n label=_('Callback URLs'),\n widget=forms.Textarea(attrs={\n 'class': 'span6', 'placeholder': 'https://sentry.io/callback/url'}),\n help_text=_('Enter callback URLs to POST new events to (one per line).'))\n\n def clean_url(self):\n value = self.cleaned_data.get('url')\n return validate_urls(value)\n\n\nclass WebHooksPlugin(notify.NotificationPlugin):\n author = 'Sentry Team'\n author_url = 'https://github.com/getsentry/sentry'\n version = sentry.VERSION\n description = \"Integrates web hooks.\"\n resource_links = [\n ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),\n ('Source', 'https://github.com/getsentry/sentry'),\n ]\n\n slug = 'webhooks'\n title = 'WebHooks'\n conf_title = title\n conf_key = 'webhooks'\n # TODO(dcramer): remove when this is migrated to React\n project_conf_form = WebHooksOptionsForm\n timeout = getattr(settings, 'SENTRY_WEBHOOK_TIMEOUT', 3)\n logger = logging.getLogger('sentry.plugins.webhooks')\n user_agent = 'sentry-webhooks/%s' % version\n\n def is_configured(self, project, **kwargs):\n return bool(self.get_option('urls', project))\n\n def get_config(self, project, **kwargs):\n return [{\n 'name': 'urls',\n 'label': 'Callback URLs',\n 'type': 'textarea',\n 'help': 'Enter callback URLs to POST new events to (one per line).',\n 'placeholder': 'https://sentry.io/callback/url',\n 'validators': [validate_urls],\n 'required': False\n }]\n\n def get_group_data(self, group, event):\n data = {\n 'id': six.text_type(group.id),\n 'project': group.project.slug,\n 'project_name': group.project.name,\n 'logger': event.get_tag('logger'),\n 'level': event.get_tag('level'),\n 'culprit': group.culprit,\n 'message': event.get_legacy_message(),\n 'url': group.get_absolute_url(),\n }\n data['event'] = dict(event.data or {})\n data['event']['tags'] = event.get_tags()\n return data\n\n def get_webhook_urls(self, project):\n urls = self.get_option('urls', project)\n if not urls:\n return ()\n return filter(bool, urls.strip().splitlines())\n\n def send_webhook(self, 
url, payload):\n return safe_urlopen(\n url=url,\n json=payload,\n timeout=self.timeout,\n verify_ssl=False,\n )\n\n def notify_users(self, group, event, fail_silently=False):\n payload = self.get_group_data(group, event)\n for url in self.get_webhook_urls(group.project):\n safe_execute(self.send_webhook, url, payload, _with_transaction=False)\n", "path": "src/sentry/plugins/sentry_webhooks/plugin.py"}]}
1,632
130
gh_patches_debug_604
rasdani/github-patches
git_diff
pex-tool__pex-1419
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.46 On the docket: + [x] Fix Pip proprietary URL env marker handling. #1417 + [x] Un-reify installed wheel script shebangs. #1410 + [x] Support deterministic repository extract tool. #1411 + [x] support setuptools scripts #1379 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.45" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.45" +__version__ = "2.1.46"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.45\"\n+__version__ = \"2.1.46\"\n", "issue": "Release 2.1.46\nOn the docket:\r\n+ [x] Fix Pip proprietary URL env marker handling. #1417 \r\n+ [x] Un-reify installed wheel script shebangs. #1410\r\n+ [x] Support deterministic repository extract tool. #1411\r\n+ [x] support setuptools scripts #1379\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.45\"\n", "path": "pex/version.py"}]}
665
97
gh_patches_debug_22416
rasdani/github-patches
git_diff
privacyidea__privacyidea-2563
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Container audit fails in 3.5
The container audit will fail in version 3.5 due to a missing parameter in the constructor.

https://community.privacyidea.org/t/logging-error-after-update-to-3-5/1811/2
</issue>

<code>
[start of privacyidea/lib/auditmodules/containeraudit.py]
1 # -*- coding: utf-8 -*-
2 #
3 # 2019-11-07 Cornelius Kölbel <[email protected]>
4 #            initial code for writing audit information to a file
5 #
6 # This code is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
8 # License as published by the Free Software Foundation; either
9 # version 3 of the License, or any later version.
10 #
11 # This code is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public
17 # License along with this program.  If not, see <http://www.gnu.org/licenses/>.
18 #
19 #
20 __doc__ = """The Container Audit Module allows to write audit information to several different
21 audit modules at the same time. E.g. it can write audit information to the SQL Audit Module and to the
22 Logger Audit Module. This way audit information can be saved in the SQL database and at the same time
23 be passed to a file or external services via the Python logging facility.
24 
25 The Container Audit Module is configured like this:
26 
27     PI_AUDIT_MODULE = 'privacyidea.lib.auditmodules.containeraudit'
28     PI_AUDIT_CONTAINER_WRITE = ['privacyidea.lib.auditmodules.sqlaudit','privacyidea.lib.auditmodules.loggeraudit']
29     PI_AUDIT_CONTAINER_READ = 'privacyidea.lib.auditmodules.sqlaudit'
30 
31 You also have to provide the configuration parameters for the referenced audit modules.
32 
33 """
34 
35 import logging
36 from privacyidea.lib.auditmodules.base import (Audit as AuditBase)
37 from privacyidea.lib.utils import get_module_class
38 
39 
40 log = logging.getLogger(__name__)
41 
42 
43 class Audit(AuditBase):
44     """
45     This is the ContainerAudit module, which writes the audit entries
46     to a list of audit modules.
47 """ 48 49 def __init__(self, config=None): 50 super(Audit, self).__init__(config) 51 self.name = "containeraudit" 52 write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE') 53 read_conf = self.config.get('PI_AUDIT_CONTAINER_READ') 54 # Initialize all modules 55 self.write_modules = [get_module_class(audit_module, "Audit", "log")(config) for audit_module in write_conf] 56 self.read_module = get_module_class(read_conf, "Audit", "log")(config) 57 if not self.read_module.is_readable: 58 log.warning(u"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.".format(self.read_module)) 59 60 @property 61 def has_data(self): 62 return any([x.has_data for x in self.write_modules]) 63 64 def log(self, param): 65 """ 66 Call the log method for all writeable modules 67 """ 68 for module in self.write_modules: 69 module.log(param) 70 71 def add_to_log(self, param, add_with_comma=False): 72 """ 73 Call the add_to_log method for all writeable modules 74 """ 75 for module in self.write_modules: 76 module.add_to_log(param, add_with_comma) 77 78 def add_policy(self, policyname): 79 """ 80 Call the add_policy method for all writeable modules 81 """ 82 for module in self.write_modules: 83 module.add_policy(policyname) 84 85 def search(self, search_dict, page_size=15, page=1, sortorder="asc", 86 timelimit=None): 87 """ 88 Call the search method for the one readable module 89 """ 90 return self.read_module.search(search_dict, page_size=page_size, page=page, 91 sortorder=sortorder, timelimit=timelimit) 92 93 def get_count(self, search_dict, timedelta=None, success=None): 94 """ 95 Call the count method for the one readable module 96 """ 97 return self.read_module.get_count(search_dict, timedelta=timedelta, success=success) 98 99 def csv_generator(self, param=None, user=None, timelimit=None): 100 """ 101 Call the csv_generator method for the one readable module 102 """ 103 return self.read_module.csv_generator(param=param, user=user, 104 timelimit=timelimit) 105 106 def get_total(self, param, AND=True, display_error=True, timelimit=None): 107 """ 108 Call the total method for the one readable module 109 """ 110 return self.read_module.get_total(param, AND=AND, display_error=display_error, timelimit=timelimit) 111 112 def finalize_log(self): 113 """ 114 Call the finalize method of all writeable audit modules 115 """ 116 for module in self.write_modules: 117 module.finalize_log() 118 [end of privacyidea/lib/auditmodules/containeraudit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/privacyidea/lib/auditmodules/containeraudit.py b/privacyidea/lib/auditmodules/containeraudit.py --- a/privacyidea/lib/auditmodules/containeraudit.py +++ b/privacyidea/lib/auditmodules/containeraudit.py @@ -46,14 +46,15 @@ to a list of audit modules. """ - def __init__(self, config=None): - super(Audit, self).__init__(config) + def __init__(self, config=None, startdate=None): + super(Audit, self).__init__(config, startdate) self.name = "containeraudit" write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE') read_conf = self.config.get('PI_AUDIT_CONTAINER_READ') # Initialize all modules - self.write_modules = [get_module_class(audit_module, "Audit", "log")(config) for audit_module in write_conf] - self.read_module = get_module_class(read_conf, "Audit", "log")(config) + self.write_modules = [get_module_class(audit_module, "Audit", "log")(config, startdate) + for audit_module in write_conf] + self.read_module = get_module_class(read_conf, "Audit", "log")(config, startdate) if not self.read_module.is_readable: log.warning(u"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.".format(self.read_module))
{"golden_diff": "diff --git a/privacyidea/lib/auditmodules/containeraudit.py b/privacyidea/lib/auditmodules/containeraudit.py\n--- a/privacyidea/lib/auditmodules/containeraudit.py\n+++ b/privacyidea/lib/auditmodules/containeraudit.py\n@@ -46,14 +46,15 @@\n to a list of audit modules.\n \"\"\"\n \n- def __init__(self, config=None):\n- super(Audit, self).__init__(config)\n+ def __init__(self, config=None, startdate=None):\n+ super(Audit, self).__init__(config, startdate)\n self.name = \"containeraudit\"\n write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')\n read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')\n # Initialize all modules\n- self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config) for audit_module in write_conf]\n- self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config)\n+ self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config, startdate)\n+ for audit_module in write_conf]\n+ self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config, startdate)\n if not self.read_module.is_readable:\n log.warning(u\"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.\".format(self.read_module))\n", "issue": "Container audit fails in 3.5\nThe container audit will fail in version 3.5 due to a missing parameter in the constructor.\r\n\r\nhttps://community.privacyidea.org/t/logging-error-after-update-to-3-5/1811/2\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2019-11-07 Cornelius K\u00f6lbel <[email protected]>\n# initial code for writing audit information to a file\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n#\n__doc__ = \"\"\"The Container Audit Module allows to write audit information to several different\naudit modules at the same time. E.g. it can write audit information to the SQL Audit Module and to the \nLogger Audit Module. This way audit information can be saved in the SQL database and at the same time\nbe passed to a file or external services via the Python logging facility. 
\n\nThe Container Audit Module is configured like this:\n\n PI_AUDIT_MODULE = 'privacyidea.lib.auditmodules.containeraudit'\n PI_AUDIT_CONTAINER_WRITE = ['privacyidea.lib.auditmodules.sqlaudit','privacyidea.lib.auditmodules.loggeraudit']\n PI_AUDIT_CONTAINER_READ = 'privacyidea.lib.auditmodules.sqlaudit'\n\nYou also have to provide the configuration parameters for the referenced audit modules.\n\n\"\"\"\n\nimport logging\nfrom privacyidea.lib.auditmodules.base import (Audit as AuditBase)\nfrom privacyidea.lib.utils import get_module_class\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Audit(AuditBase):\n \"\"\"\n This is the ContainerAudit module, which writes the audit entries\n to a list of audit modules.\n \"\"\"\n\n def __init__(self, config=None):\n super(Audit, self).__init__(config)\n self.name = \"containeraudit\"\n write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')\n read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')\n # Initialize all modules\n self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config) for audit_module in write_conf]\n self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config)\n if not self.read_module.is_readable:\n log.warning(u\"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.\".format(self.read_module))\n\n @property\n def has_data(self):\n return any([x.has_data for x in self.write_modules])\n\n def log(self, param):\n \"\"\"\n Call the log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.log(param)\n\n def add_to_log(self, param, add_with_comma=False):\n \"\"\"\n Call the add_to_log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_to_log(param, add_with_comma)\n\n def add_policy(self, policyname):\n \"\"\"\n Call the add_policy method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_policy(policyname)\n\n def search(self, search_dict, page_size=15, page=1, sortorder=\"asc\",\n timelimit=None):\n \"\"\"\n Call the search method for the one readable module\n \"\"\"\n return self.read_module.search(search_dict, page_size=page_size, page=page,\n sortorder=sortorder, timelimit=timelimit)\n\n def get_count(self, search_dict, timedelta=None, success=None):\n \"\"\"\n Call the count method for the one readable module\n \"\"\"\n return self.read_module.get_count(search_dict, timedelta=timedelta, success=success)\n\n def csv_generator(self, param=None, user=None, timelimit=None):\n \"\"\"\n Call the csv_generator method for the one readable module\n \"\"\"\n return self.read_module.csv_generator(param=param, user=user,\n timelimit=timelimit)\n\n def get_total(self, param, AND=True, display_error=True, timelimit=None):\n \"\"\"\n Call the total method for the one readable module\n \"\"\"\n return self.read_module.get_total(param, AND=AND, display_error=display_error, timelimit=timelimit)\n\n def finalize_log(self):\n \"\"\"\n Call the finalize method of all writeable audit modules\n \"\"\"\n for module in self.write_modules:\n module.finalize_log()\n", "path": "privacyidea/lib/auditmodules/containeraudit.py"}]}
1864
313
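A minimal, self-contained sketch of the pattern this record's golden diff applies: threading a newly added `startdate` constructor argument through the container into every wrapped audit module. The class names below are simplified stand-ins, not the real privacyidea API, and the framework is assumed to instantiate audit modules as `Audit(config, startdate)`.

```python
# Hypothetical stand-ins for privacyidea's audit classes; only the
# constructor-forwarding pattern from the golden diff is modeled here.

class BaseAudit:
    def __init__(self, config=None, startdate=None):
        self.config = config or {}
        self.startdate = startdate  # parameter added to the base class in 3.5


class SqlAudit(BaseAudit):
    is_readable = True

    def log(self, param):
        print("sql log:", param, "since", self.startdate)


class ContainerAudit(BaseAudit):
    def __init__(self, config=None, startdate=None):
        # The fix: accept startdate and forward it, both to the base class
        # and to every dynamically created sub-module.
        super().__init__(config, startdate)
        self.write_modules = [SqlAudit(config, startdate)]
        self.read_module = SqlAudit(config, startdate)

    def log(self, param):
        for module in self.write_modules:
            module.log(param)


if __name__ == "__main__":
    # Before the fix, a call like this raised TypeError, because the
    # container's __init__ had no startdate parameter to receive.
    audit = ContainerAudit({}, "2020-11-01")
    audit.log({"action": "login"})
```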
gh_patches_debug_47859
rasdani/github-patches
git_diff
saleor__saleor-903
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Server Error (500) when adding attribute Hi, First of all thanks for this excellent software, makes my life easier. I deployed it on heroku using the heroku elements (https://elements.heroku.com/buttons/mirumee/saleor). Then I tried to add an attribute but that resulted in Server Error (500) page. Is this expected behavior? Any settings that have to be changed? If yes then any way to do that on heroku? Thanks! </issue> <code> [start of saleor/product/models/utils.py] 1 from django.utils.encoding import smart_text 2 3 4 def get_attributes_display_map(obj, attributes): 5 display_map = {} 6 for attribute in attributes: 7 value = obj.attributes.get(smart_text(attribute.pk)) 8 if value: 9 choices = {smart_text(a.pk): a for a in attribute.values.all()} 10 choice_obj = choices.get(value) 11 if choice_obj: 12 display_map[attribute.pk] = choice_obj 13 else: 14 display_map[attribute.pk] = value_pk 15 return display_map 16 [end of saleor/product/models/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/product/models/utils.py b/saleor/product/models/utils.py --- a/saleor/product/models/utils.py +++ b/saleor/product/models/utils.py @@ -11,5 +11,5 @@ if choice_obj: display_map[attribute.pk] = choice_obj else: - display_map[attribute.pk] = value_pk + display_map[attribute.pk] = value return display_map
{"golden_diff": "diff --git a/saleor/product/models/utils.py b/saleor/product/models/utils.py\n--- a/saleor/product/models/utils.py\n+++ b/saleor/product/models/utils.py\n@@ -11,5 +11,5 @@\n if choice_obj:\n display_map[attribute.pk] = choice_obj\n else:\n- display_map[attribute.pk] = value_pk\n+ display_map[attribute.pk] = value\n return display_map\n", "issue": "Server Error (500) when adding attribute\nHi,\r\n\r\nFirst of all thanks for this excellent software, makes my life easier.\r\n\r\nI deployed it on heroku using the heroku elements (https://elements.heroku.com/buttons/mirumee/saleor).\r\n\r\nThen I tried to add an attribute but that resulted in Server Error (500) page. Is this expected behavior? Any settings that have to be changed? If yes then any way to do that on heroku?\r\n\r\nThanks!\r\n\n", "before_files": [{"content": "from django.utils.encoding import smart_text\n\n\ndef get_attributes_display_map(obj, attributes):\n display_map = {}\n for attribute in attributes:\n value = obj.attributes.get(smart_text(attribute.pk))\n if value:\n choices = {smart_text(a.pk): a for a in attribute.values.all()}\n choice_obj = choices.get(value)\n if choice_obj:\n display_map[attribute.pk] = choice_obj\n else:\n display_map[attribute.pk] = value_pk\n return display_map\n", "path": "saleor/product/models/utils.py"}]}
771
100
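The 500 error in this record is a plain `NameError`: the fallback branch assigns `value_pk`, a name never bound anywhere in the function. The sketch below reproduces the code path with plain dicts standing in for the Django model objects; the one-word change from the golden diff is already applied.

```python
# Plain-dict stand-ins for the Django attribute objects; the logic mirrors
# saleor's get_attributes_display_map with the golden diff applied.

def get_attributes_display_map(obj_attributes, attributes):
    display_map = {}
    for attribute in attributes:
        value = obj_attributes.get(str(attribute["pk"]))
        if value:
            choices = {str(v["pk"]): v for v in attribute["values"]}
            choice_obj = choices.get(value)
            if choice_obj:
                display_map[attribute["pk"]] = choice_obj
            else:
                # Fixed line: the original assigned `value_pk`, which was
                # never defined, so this branch raised NameError.
                display_map[attribute["pk"]] = value
    return display_map


if __name__ == "__main__":
    attrs = [{"pk": 1, "values": [{"pk": 10, "name": "red"}]}]
    # "99" matches no choice, which used to send the request into the
    # broken branch and produce the Server Error (500) page.
    print(get_attributes_display_map({"1": "99"}, attrs))  # {1: '99'}
```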
gh_patches_debug_48735
rasdani/github-patches
git_diff
microsoft__torchgeo-309
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Open in Colab URL broken in 0.1.1 The latest 0.1.1 release broke the "Open in Colab" URL in our tutorials. Still trying to fix this. </issue> <code> [start of docs/conf.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 # Configuration file for the Sphinx documentation builder. 5 # 6 # This file only contains a selection of the most common options. For a full 7 # list see the documentation: 8 # https://www.sphinx-doc.org/en/master/usage/configuration.html 9 10 # -- Path setup -------------------------------------------------------------- 11 12 import os 13 import sys 14 15 import pytorch_sphinx_theme 16 17 # If extensions (or modules to document with autodoc) are in another directory, 18 # add these directories to sys.path here. If the directory is relative to the 19 # documentation root, use os.path.abspath to make it absolute, like shown here. 20 sys.path.insert(0, os.path.abspath("..")) 21 22 import torchgeo # noqa: E402 23 24 # -- Project information ----------------------------------------------------- 25 26 project = "torchgeo" 27 copyright = "2021, Microsoft Corporation" 28 author = torchgeo.__author__ 29 version = ".".join(torchgeo.__version__.split(".")[:2]) 30 release = torchgeo.__version__ 31 32 33 # -- General configuration --------------------------------------------------- 34 35 # Add any Sphinx extension module names here, as strings. They can be 36 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 37 # ones. 38 extensions = [ 39 "sphinx.ext.autodoc", 40 "sphinx.ext.intersphinx", 41 "sphinx.ext.napoleon", 42 "sphinx.ext.todo", 43 "sphinx.ext.viewcode", 44 "nbsphinx", 45 ] 46 47 # List of patterns, relative to source directory, that match files and 48 # directories to ignore when looking for source files. 49 # This pattern also affects html_static_path and html_extra_path. 50 exclude_patterns = ["_build"] 51 52 # Sphinx 3.0+ required for: 53 # autodoc_typehints = "description" 54 needs_sphinx = "3.0" 55 56 nitpicky = True 57 nitpick_ignore = [ 58 # https://github.com/sphinx-doc/sphinx/issues/8127 59 ("py:class", ".."), 60 # TODO: can't figure out why this isn't found 61 ("py:class", "LightningDataModule"), 62 # Undocumented class 63 ("py:class", "torchvision.models.resnet.ResNet"), 64 ] 65 66 67 # -- Options for HTML output ------------------------------------------------- 68 69 # The theme to use for HTML and HTML Help pages. See the documentation for 70 # a list of builtin themes. 71 html_theme = "pytorch_sphinx_theme" 72 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] 73 74 # Theme options are theme-specific and customize the look and feel of a theme 75 # further. For a list of options available for each theme, see the 76 # documentation. 
77 html_theme_options = { 78 "collapse_navigation": False, 79 "display_version": True, 80 "logo_only": True, 81 "pytorch_project": "docs", 82 "navigation_with_keys": True, 83 "analytics_id": "UA-209075005-1", 84 } 85 86 html_favicon = os.path.join("..", "logo", "favicon.ico") 87 88 html_static_path = ["_static"] 89 html_css_files = ["workaround.css"] 90 91 # -- Extension configuration ------------------------------------------------- 92 93 # sphinx.ext.autodoc 94 autodoc_default_options = { 95 "members": True, 96 "special-members": True, 97 "show-inheritance": True, 98 } 99 autodoc_member_order = "bysource" 100 autodoc_typehints = "description" 101 102 # sphinx.ext.intersphinx 103 intersphinx_mapping = { 104 "matplotlib": ("https://matplotlib.org/stable/", None), 105 "python": ("https://docs.python.org/3", None), 106 "pytorch-lightning": ("https://pytorch-lightning.readthedocs.io/en/latest/", None), 107 "rasterio": ("https://rasterio.readthedocs.io/en/latest/", None), 108 "rtree": ("https://rtree.readthedocs.io/en/latest/", None), 109 "torch": ("https://pytorch.org/docs/stable", None), 110 "torchvision": ("https://pytorch.org/vision/stable", None), 111 } 112 113 # nbsphinx 114 nbsphinx_execute = "never" 115 # TODO: branch/tag should change depending on which version of docs you look at 116 # TODO: width option of image directive is broken, see: 117 # https://github.com/pytorch/pytorch_sphinx_theme/issues/140 118 nbsphinx_prolog = """ 119 {% set colab = "https://colab.research.google.com" %} 120 {% set repo = "microsoft/torchgeo" %} 121 {% set branch = "main" %} 122 123 .. image:: {{ colab }}/assets/colab-badge.svg 124 :class: colabbadge 125 :alt: Open in Colab 126 :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb 127 """ 128 129 # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme 130 # See more information here https://github.com/spatialaudio/nbsphinx/issues/599 131 # NOTE: This will likely break nbsphinx widgets 132 nbsphinx_requirejs_path = "" 133 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -118,7 +118,11 @@ nbsphinx_prolog = """ {% set colab = "https://colab.research.google.com" %} {% set repo = "microsoft/torchgeo" %} -{% set branch = "main" %} +{% if "dev" in env.config.release %} + {% set branch = "main" %} +{% else %} + {% set branch = "releases/v" ~ env.config.version %} +{% endif %} .. image:: {{ colab }}/assets/colab-badge.svg :class: colabbadge
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -118,7 +118,11 @@\n nbsphinx_prolog = \"\"\"\n {% set colab = \"https://colab.research.google.com\" %}\n {% set repo = \"microsoft/torchgeo\" %}\n-{% set branch = \"main\" %}\n+{% if \"dev\" in env.config.release %}\n+ {% set branch = \"main\" %}\n+{% else %}\n+ {% set branch = \"releases/v\" ~ env.config.version %}\n+{% endif %}\n \n .. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n", "issue": "Open in Colab URL broken in 0.1.1\nThe latest 0.1.1 release broke the \"Open in Colab\" URL in our tutorials. Still trying to fix this.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = torchgeo.__author__\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n # Undocumented class\n (\"py:class\", \"torchvision.models.resnet.ResNet\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-209075005-1\",\n}\n\nhtml_favicon = os.path.join(\"..\", \"logo\", \"favicon.ico\")\n\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"workaround.css\"]\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: width option of image directive is broken, see:\n# https://github.com/pytorch/pytorch_sphinx_theme/issues/140\nnbsphinx_prolog = \"\"\"\n{% set colab = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% set branch = \"main\" %}\n\n.. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n\"\"\"\n\n# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n# See more information here https://github.com/spatialaudio/nbsphinx/issues/599\n# NOTE: This will likely break nbsphinx widgets\nnbsphinx_requirejs_path = \"\"\n", "path": "docs/conf.py"}]}
1972
152
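The breakage in this record came from hard-coding `branch = "main"` in the Colab badge URL, which stops matching the notebook paths once docs are built from a release tag. The `conf.py` fragment below reproduces the fixed assignment from the golden diff; the `env.config.*` values are supplied by Sphinx at build time.

```python
# conf.py fragment (from the golden diff): choose the branch for the
# "Open in Colab" badge based on whether this is a dev build or a release.
nbsphinx_prolog = """
{% set colab = "https://colab.research.google.com" %}
{% set repo = "microsoft/torchgeo" %}
{% if "dev" in env.config.release %}
    {% set branch = "main" %}
{% else %}
    {% set branch = "releases/v" ~ env.config.version %}
{% endif %}

.. image:: {{ colab }}/assets/colab-badge.svg
   :class: colabbadge
   :alt: Open in Colab
   :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb
"""
```

Because `version` is trimmed to two components earlier in this `conf.py`, a 0.1.1 build would presumably resolve to `releases/v0.1`.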
gh_patches_debug_18980
rasdani/github-patches
git_diff
mne-tools__mne-bids-320
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [joss] list supported python versions in pypi piggy-backing off of one of @TomDonoghue's comments: > Is there a minimum version of Python3 required? I guess that there is some minimum required sub-version of Py3 required, but it's not clear what that would be, and it might be useful to note that. you can add supported python versions for pypi, you can add these: ``` Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 ``` [in your setup.py](https://github.com/mne-tools/mne-bids/blob/20fbb881afaab75206db3d8c2d4b226c4c3bc212/setup.py#L50) you can also set [`python_requires`](https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires) in `setup.py` to ensure people do not use the incorrect python version with your package. </issue> <code> [start of setup.py] 1 #! /usr/bin/env python 2 """Setup MNE-BIDS.""" 3 import os 4 from setuptools import setup, find_packages 5 6 # get the version 7 version = None 8 with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid: 9 for line in (line.strip() for line in fid): 10 if line.startswith('__version__'): 11 version = line.split('=')[1].strip().strip('\'') 12 break 13 if version is None: 14 raise RuntimeError('Could not determine version') 15 16 17 descr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS ' 18 'specification and facilitating their analysis with MNE-Python') 19 20 DISTNAME = 'mne-bids' 21 DESCRIPTION = descr 22 MAINTAINER = 'Mainak Jas' 23 MAINTAINER_EMAIL = '[email protected]' 24 URL = 'https://mne.tools/mne-bids/' 25 LICENSE = 'BSD (3-clause)' 26 DOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git' 27 VERSION = version 28 29 if __name__ == "__main__": 30 setup(name=DISTNAME, 31 maintainer=MAINTAINER, 32 maintainer_email=MAINTAINER_EMAIL, 33 description=DESCRIPTION, 34 license=LICENSE, 35 url=URL, 36 version=VERSION, 37 download_url=DOWNLOAD_URL, 38 long_description=open('README.rst').read(), 39 long_description_content_type='text/x-rst', 40 classifiers=[ 41 'Intended Audience :: Science/Research', 42 'Intended Audience :: Developers', 43 'License :: OSI Approved', 44 'Programming Language :: Python', 45 'Topic :: Software Development', 46 'Topic :: Scientific/Engineering', 47 'Operating System :: Microsoft :: Windows', 48 'Operating System :: POSIX', 49 'Operating System :: Unix', 50 'Operating System :: MacOS', 51 ], 52 platforms='any', 53 packages=find_packages(), 54 entry_points={'console_scripts': [ 55 'mne_bids = mne_bids.commands.run:main', 56 ]}, 57 project_urls={ 58 'Documentation': 'https://mne.tools/mne-bids', 59 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues', 60 'Source': 'https://github.com/mne-tools/mne-bids', 61 }, 62 ) 63 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -37,6 +37,7 @@ download_url=DOWNLOAD_URL, long_description=open('README.rst').read(), long_description_content_type='text/x-rst', + python_requires='~=3.5', classifiers=[ 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', @@ -48,6 +49,9 @@ 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', ], platforms='any', packages=find_packages(),
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,6 +37,7 @@\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n+ python_requires='~=3.5',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n@@ -48,6 +49,9 @@\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n+ 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n+ 'Programming Language :: Python :: 3.7',\n ],\n platforms='any',\n packages=find_packages(),\n", "issue": "[joss] list supported python versions in pypi\npiggy-backing off of one of @TomDonoghue's comments:\r\n> Is there a minimum version of Python3 required? I guess that there is some minimum required sub-version of Py3 required, but it's not clear what that would be, and it might be useful to note that.\r\n\r\nyou can add supported python versions for pypi, you can add these:\r\n```\r\nProgramming Language :: Python :: 3.5\r\nProgramming Language :: Python :: 3.6\r\nProgramming Language :: Python :: 3.7\r\n```\r\n[in your setup.py](https://github.com/mne-tools/mne-bids/blob/20fbb881afaab75206db3d8c2d4b226c4c3bc212/setup.py#L50)\r\nyou can also set [`python_requires`](https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires) in `setup.py` to ensure people do not use the incorrect python version with your package.\n", "before_files": [{"content": "#! /usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS '\n 'specification and facilitating their analysis with MNE-Python')\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne.tools/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n entry_points={'console_scripts': [\n 'mne_bids = mne_bids.commands.run:main',\n ]},\n project_urls={\n 'Documentation': 'https://mne.tools/mne-bids',\n 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues',\n 'Source': 'https://github.com/mne-tools/mne-bids',\n },\n )\n", "path": "setup.py"}]}
1367
177
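A sketch of the two `setup.py` additions the golden diff makes for this record: PyPI trove classifiers for the supported interpreters plus a `python_requires` guard. Everything except the touched keywords is elided or a placeholder.

```python
from setuptools import setup

setup(
    name="mne-bids",
    version="0.0.0",  # placeholder; the real file reads this from __init__.py
    python_requires="~=3.5",  # PEP 440 compatible release: >= 3.5, < 4.0
    classifiers=[
        # ... existing classifiers kept as-is ...
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
)
```

The classifiers only change the PyPI listing; it is `python_requires` that makes pip refuse to install the package on unsupported interpreters.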
gh_patches_debug_18943
rasdani/github-patches
git_diff
ansible__ansible-lint-996
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> E208 is too aggressive ##### Summary When E208 is triggered, it should be sure that a file is being created. There are modules which only optionally create the file and, when not used in that form, then they should not require a mode be set. Such an example are ini_file, lineinfile, or blockinfile. These modules are frequently used in their default mode with `create: false`. Someone should not be required to set `mode: "0644"` or such in this situation. ##### Issue Type - Bug Report ##### Ansible and Ansible Lint details <!--- Paste verbatim output between tripple backticks --> ```console (paste below) ansible --version ansible 2.9.12 config file = /home/greg/.ansible.cfg configured module search path = ['/home/greg/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/greg/src/ansible_collections/devroles/system/.tox/lint_all/lib/python3.8/site-packages/ansible executable location = .tox/lint_all/bin/ansible python version = 3.8.5 (default, Aug 12 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)] ansible-lint --version ansible-lint 4.3.1 ``` - ansible installation method: one of source, pip, OS package pip - ansible-lint installation method: one of source, pip, OS package pip ##### OS / ENVIRONMENT Fedora 32 ##### STEPS TO REPRODUCE ```yaml - name: should not produce E208 lineinfile: line: "# some content here" file: "{{ ansible_user_dir }}/.bashrc" - name: SHOULD produce E208 lineinfile: line: "# more content file: "{{ ansible_user_dir }}/.bash_profile" create: true ``` ##### Desired Behaviour ansible-lint should not error when it encounters a missing "mode" parameter for these modules, when `create: false`. Note that `create: false` is the default behavior of those modules ##### Actual Behaviour ansible-lint throws E208 on both of the above steps </issue> <code> [start of lib/ansiblelint/rules/MissingFilePermissionsRule.py] 1 # Copyright (c) 2020 Sorin Sbarnea <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to deal 5 # in the Software without restriction, including without limitation the rights 6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 # copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 # THE SOFTWARE. 20 from ansiblelint.rules import AnsibleLintRule 21 22 23 class MissingFilePermissionsRule(AnsibleLintRule): 24 id = "208" 25 shortdesc = 'File permissions not mentioned' 26 description = ( 27 "Missing mode parameter can cause unexpected file permissions based " 28 "on version of Ansible being used. 
Be explicit, or if you still " 29 "want the default behavior you can use ``mode: preserve`` to avoid " 30 "hitting this rule. See " 31 "https://github.com/ansible/ansible/issues/71200" 32 ) 33 severity = 'VERY_HIGH' 34 tags = ['unpredictability'] 35 version_added = 'v4.3.0' 36 37 _modules = ( 38 'copy', 39 'file', 40 'ini_file', 41 'lineinfile', 42 'replace', 43 'template', 44 'unarchive', 45 ) 46 47 def matchtask(self, file, task): 48 if task["action"]["__ansible_module__"] not in self._modules: 49 return False 50 51 if task['action'].get('state', None) == "absent": 52 return False 53 54 if task['action'].get('state', None) == "link": 55 return False 56 57 mode = task['action'].get('mode', None) 58 return mode is None 59 [end of lib/ansiblelint/rules/MissingFilePermissionsRule.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/ansiblelint/rules/MissingFilePermissionsRule.py b/lib/ansiblelint/rules/MissingFilePermissionsRule.py --- a/lib/ansiblelint/rules/MissingFilePermissionsRule.py +++ b/lib/ansiblelint/rules/MissingFilePermissionsRule.py @@ -35,17 +35,28 @@ version_added = 'v4.3.0' _modules = ( + 'assemble', + 'archive', 'copy', 'file', - 'ini_file', - 'lineinfile', 'replace', 'template', 'unarchive', ) + _modules_with_create = ( + 'blockinfile', + 'ini_file', + 'lineinfile' + ) + def matchtask(self, file, task): - if task["action"]["__ansible_module__"] not in self._modules: + if task["action"]["__ansible_module__"] not in self._modules and \ + task["action"]["__ansible_module__"] not in self._modules_with_create: + return False + + if task["action"]["__ansible_module__"] in self._modules_with_create and \ + not task["action"].get("create", False): return False if task['action'].get('state', None) == "absent":
{"golden_diff": "diff --git a/lib/ansiblelint/rules/MissingFilePermissionsRule.py b/lib/ansiblelint/rules/MissingFilePermissionsRule.py\n--- a/lib/ansiblelint/rules/MissingFilePermissionsRule.py\n+++ b/lib/ansiblelint/rules/MissingFilePermissionsRule.py\n@@ -35,17 +35,28 @@\n version_added = 'v4.3.0'\n \n _modules = (\n+ 'assemble',\n+ 'archive',\n 'copy',\n 'file',\n- 'ini_file',\n- 'lineinfile',\n 'replace',\n 'template',\n 'unarchive',\n )\n \n+ _modules_with_create = (\n+ 'blockinfile',\n+ 'ini_file',\n+ 'lineinfile'\n+ )\n+\n def matchtask(self, file, task):\n- if task[\"action\"][\"__ansible_module__\"] not in self._modules:\n+ if task[\"action\"][\"__ansible_module__\"] not in self._modules and \\\n+ task[\"action\"][\"__ansible_module__\"] not in self._modules_with_create:\n+ return False\n+\n+ if task[\"action\"][\"__ansible_module__\"] in self._modules_with_create and \\\n+ not task[\"action\"].get(\"create\", False):\n return False\n \n if task['action'].get('state', None) == \"absent\":\n", "issue": "E208 is too aggressive\n##### Summary\r\nWhen E208 is triggered, it should be sure that a file is being created. There are modules which only optionally create the file and, when not used in that form, then they should not require a mode be set. Such an example are ini_file, lineinfile, or blockinfile. These modules are frequently used in their default mode with `create: false`. Someone should not be required to set `mode: \"0644\"` or such in this situation.\r\n\r\n\r\n##### Issue Type\r\n\r\n- Bug Report\r\n\r\n##### Ansible and Ansible Lint details\r\n<!--- Paste verbatim output between tripple backticks -->\r\n```console (paste below)\r\nansible --version\r\nansible 2.9.12\r\n config file = /home/greg/.ansible.cfg\r\n configured module search path = ['/home/greg/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/greg/src/ansible_collections/devroles/system/.tox/lint_all/lib/python3.8/site-packages/ansible\r\n executable location = .tox/lint_all/bin/ansible\r\n python version = 3.8.5 (default, Aug 12 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]\r\n\r\nansible-lint --version\r\nansible-lint 4.3.1\r\n```\r\n\r\n- ansible installation method: one of source, pip, OS package\r\npip\r\n- ansible-lint installation method: one of source, pip, OS package\r\npip\r\n\r\n##### OS / ENVIRONMENT\r\nFedora 32\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n```yaml\r\n- name: should not produce E208\r\n lineinfile:\r\n line: \"# some content here\"\r\n file: \"{{ ansible_user_dir }}/.bashrc\"\r\n\r\n- name: SHOULD produce E208\r\n lineinfile:\r\n line: \"# more content\r\n file: \"{{ ansible_user_dir }}/.bash_profile\"\r\n create: true\r\n```\r\n##### Desired Behaviour\r\nansible-lint should not error when it encounters a missing \"mode\" parameter for these modules, when `create: false`. 
Note that `create: false` is the default behavior of those modules\r\n\r\n##### Actual Behaviour\r\nansible-lint throws E208 on both of the above steps\n", "before_files": [{"content": "# Copyright (c) 2020 Sorin Sbarnea <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom ansiblelint.rules import AnsibleLintRule\n\n\nclass MissingFilePermissionsRule(AnsibleLintRule):\n id = \"208\"\n shortdesc = 'File permissions not mentioned'\n description = (\n \"Missing mode parameter can cause unexpected file permissions based \"\n \"on version of Ansible being used. Be explicit, or if you still \"\n \"want the default behavior you can use ``mode: preserve`` to avoid \"\n \"hitting this rule. See \"\n \"https://github.com/ansible/ansible/issues/71200\"\n )\n severity = 'VERY_HIGH'\n tags = ['unpredictability']\n version_added = 'v4.3.0'\n\n _modules = (\n 'copy',\n 'file',\n 'ini_file',\n 'lineinfile',\n 'replace',\n 'template',\n 'unarchive',\n )\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] not in self._modules:\n return False\n\n if task['action'].get('state', None) == \"absent\":\n return False\n\n if task['action'].get('state', None) == \"link\":\n return False\n\n mode = task['action'].get('mode', None)\n return mode is None\n", "path": "lib/ansiblelint/rules/MissingFilePermissionsRule.py"}]}
1705
291
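The fix in this record splits the watched modules into two groups: those that always write a file, and those (`blockinfile`, `ini_file`, `lineinfile`) that only do so with `create: true`. The sketch below models the fixed `matchtask` decision as a standalone function; the task dict shape mirrors what ansible-lint passes in, and everything else is simplified.

```python
# Standalone model of the fixed E208 rule logic from the golden diff.
MODULES = {"assemble", "archive", "copy", "file", "replace", "template", "unarchive"}
MODULES_WITH_CREATE = {"blockinfile", "ini_file", "lineinfile"}


def missing_mode(task):
    module = task["action"]["__ansible_module__"]
    if module not in MODULES and module not in MODULES_WITH_CREATE:
        return False
    # The fix: only flag create-capable modules when they actually create.
    if module in MODULES_WITH_CREATE and not task["action"].get("create", False):
        return False
    if task["action"].get("state") in ("absent", "link"):
        return False
    return task["action"].get("mode") is None


if __name__ == "__main__":
    task = {"action": {"__ansible_module__": "lineinfile", "line": "x"}}
    print(missing_mode(task))   # False: create defaults to false
    task["action"]["create"] = True
    print(missing_mode(task))   # True: the task creates a file but sets no mode
```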
gh_patches_debug_22774
rasdani/github-patches
git_diff
vispy__vispy-1596
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> XYZAxisVisuals Override Defaults It looks like XYZAxisVisual is not overridable in the **init** function for the verts and color arguments? Passing in `pos=my_custom_verts` results in `TypeError: __init__() got multiple values for keyword argument 'pos'`. The `**kwds` argument looks like it is being passed through to the Line class, via LineVisual. Does a method exist to specify the verts, color, and / or connect kwargs? I am hesitant to submit a PR modifying **kwds since I am not 100% sure how the passing is working. </issue> <code> [start of vispy/visuals/xyz_axis.py] 1 2 import numpy as np 3 4 from .line import LineVisual 5 6 7 class XYZAxisVisual(LineVisual): 8 """ 9 Simple 3D axis for indicating coordinate system orientation. Axes are 10 x=red, y=green, z=blue. 11 """ 12 def __init__(self, **kwargs): 13 verts = np.array([[0, 0, 0], 14 [1, 0, 0], 15 [0, 0, 0], 16 [0, 1, 0], 17 [0, 0, 0], 18 [0, 0, 1]]) 19 color = np.array([[1, 0, 0, 1], 20 [1, 0, 0, 1], 21 [0, 1, 0, 1], 22 [0, 1, 0, 1], 23 [0, 0, 1, 1], 24 [0, 0, 1, 1]]) 25 LineVisual.__init__(self, pos=verts, color=color, connect='segments', 26 method='gl', **kwargs) 27 [end of vispy/visuals/xyz_axis.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vispy/visuals/xyz_axis.py b/vispy/visuals/xyz_axis.py --- a/vispy/visuals/xyz_axis.py +++ b/vispy/visuals/xyz_axis.py @@ -10,17 +10,24 @@ x=red, y=green, z=blue. """ def __init__(self, **kwargs): - verts = np.array([[0, 0, 0], - [1, 0, 0], - [0, 0, 0], - [0, 1, 0], - [0, 0, 0], - [0, 0, 1]]) + pos = np.array([[0, 0, 0], + [1, 0, 0], + [0, 0, 0], + [0, 1, 0], + [0, 0, 0], + [0, 0, 1]]) color = np.array([[1, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 1, 1]]) - LineVisual.__init__(self, pos=verts, color=color, connect='segments', - method='gl', **kwargs) + connect = 'segments' + method = 'gl' + + kwargs.setdefault('pos', pos) + kwargs.setdefault('color', color) + kwargs.setdefault('connect', connect) + kwargs.setdefault('method', method) + + LineVisual.__init__(self, **kwargs)
{"golden_diff": "diff --git a/vispy/visuals/xyz_axis.py b/vispy/visuals/xyz_axis.py\n--- a/vispy/visuals/xyz_axis.py\n+++ b/vispy/visuals/xyz_axis.py\n@@ -10,17 +10,24 @@\n x=red, y=green, z=blue.\n \"\"\"\n def __init__(self, **kwargs):\n- verts = np.array([[0, 0, 0],\n- [1, 0, 0],\n- [0, 0, 0],\n- [0, 1, 0],\n- [0, 0, 0],\n- [0, 0, 1]])\n+ pos = np.array([[0, 0, 0],\n+ [1, 0, 0],\n+ [0, 0, 0],\n+ [0, 1, 0],\n+ [0, 0, 0],\n+ [0, 0, 1]])\n color = np.array([[1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1],\n [0, 1, 0, 1],\n [0, 0, 1, 1],\n [0, 0, 1, 1]])\n- LineVisual.__init__(self, pos=verts, color=color, connect='segments',\n- method='gl', **kwargs)\n+ connect = 'segments'\n+ method = 'gl'\n+\n+ kwargs.setdefault('pos', pos)\n+ kwargs.setdefault('color', color)\n+ kwargs.setdefault('connect', connect)\n+ kwargs.setdefault('method', method)\n+\n+ LineVisual.__init__(self, **kwargs)\n", "issue": "XYZAxisVisuals Override Defaults\nIt looks like XYZAxisVisual is not overridable in the **init** function for the verts and color arguments? Passing in `pos=my_custom_verts` results in `TypeError: __init__() got multiple values for keyword argument 'pos'`.\n\nThe `**kwds` argument looks like it is being passed through to the Line class, via LineVisual. Does a method exist to specify the verts, color, and / or connect kwargs? I am hesitant to submit a PR modifying **kwds since I am not 100% sure how the passing is working.\n\n", "before_files": [{"content": "\nimport numpy as np\n\nfrom .line import LineVisual\n\n\nclass XYZAxisVisual(LineVisual):\n \"\"\"\n Simple 3D axis for indicating coordinate system orientation. Axes are\n x=red, y=green, z=blue.\n \"\"\"\n def __init__(self, **kwargs):\n verts = np.array([[0, 0, 0],\n [1, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, 0],\n [0, 0, 1]])\n color = np.array([[1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1],\n [0, 1, 0, 1],\n [0, 0, 1, 1],\n [0, 0, 1, 1]])\n LineVisual.__init__(self, pos=verts, color=color, connect='segments',\n method='gl', **kwargs)\n", "path": "vispy/visuals/xyz_axis.py"}]}
954
407
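The `TypeError: __init__() got multiple values for keyword argument 'pos'` in this record came from passing `pos=verts` explicitly while also forwarding `**kwargs`. The golden diff's pattern — seed `kwargs` with `dict.setdefault` and forward only `**kwargs` — is sketched below with a trivial stand-in for vispy's `LineVisual`, and the default vertex/color arrays shortened for brevity.

```python
# Stand-in for vispy's LineVisual; only the kwargs-forwarding pattern matters.
class LineVisual:
    def __init__(self, pos=None, color=None, connect=None, method=None):
        self.pos, self.color, self.connect, self.method = pos, color, connect, method


class XYZAxisVisual(LineVisual):
    def __init__(self, **kwargs):
        # setdefault supplies defaults without clashing with caller overrides.
        kwargs.setdefault("pos", [[0, 0, 0], [1, 0, 0]])
        kwargs.setdefault("color", [[1, 0, 0, 1], [1, 0, 0, 1]])
        kwargs.setdefault("connect", "segments")
        kwargs.setdefault("method", "gl")
        super().__init__(**kwargs)


if __name__ == "__main__":
    axis = XYZAxisVisual(pos=[[0, 0, 0], [2, 0, 0]])  # no TypeError anymore
    print(axis.pos, axis.connect)
```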
gh_patches_debug_30150
rasdani/github-patches
git_diff
ocf__ocfweb-141
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Docs don't auto-reload in dev Because we only read docs once, changes to the Markdown files require manually killing and restarting the server. </issue> <code> [start of ocfweb/docs/markdown_based.py] 1 """Documents backed by Markdown. 2 3 This is the most common type of doc. It reads a Markdown fil and renders it in 4 a standard template. 5 6 Markdown documents can specify the meta attributes: 7 8 - [[!meta title="Page title"]] 9 Changes the page title; all templates must specify this. 10 11 - [[!meta template="my_template.html"]] 12 Changes the page template; most templates should *not* specify this, 13 unless they want to customize something (e.g. the sidebar) 14 """ 15 import os 16 from functools import partial 17 from pathlib import Path 18 19 from django.shortcuts import render 20 21 from ocfweb.component.markdown import markdown_and_toc 22 from ocfweb.component.markdown import text_and_meta 23 from ocfweb.docs.doc import Document 24 25 26 DOCS_DIR = Path(__file__).parent.joinpath('docs') 27 28 29 def render_markdown_doc(meta, text, doc, request): 30 html, toc = markdown_and_toc(text) 31 return render( 32 request, 33 meta.get('template', 'doc.html'), 34 { 35 'title': doc.title, 36 'doc': doc, 37 'html': html, 38 'toc': toc, 39 }, 40 ) 41 42 43 def get_markdown_docs(): 44 for path in DOCS_DIR.glob('**/*.md'): 45 name, _ = os.path.splitext(str(path.relative_to(DOCS_DIR))) 46 47 # sanity check that the file is under the directory we expect 48 assert DOCS_DIR in path.parents 49 50 with path.open() as f: 51 text, meta = text_and_meta(f) 52 53 if 'title' not in meta: 54 raise ValueError('Document {} lacks required title meta variable.'.format(name)) 55 56 yield Document( 57 name='/' + name, 58 title=meta['title'], 59 render=partial(render_markdown_doc, meta, text) 60 ) 61 [end of ocfweb/docs/markdown_based.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ocfweb/docs/markdown_based.py b/ocfweb/docs/markdown_based.py --- a/ocfweb/docs/markdown_based.py +++ b/ocfweb/docs/markdown_based.py @@ -1,6 +1,6 @@ """Documents backed by Markdown. -This is the most common type of doc. It reads a Markdown fil and renders it in +This is the most common type of doc. It reads a Markdown file and renders it in a standard template. Markdown documents can specify the meta attributes: @@ -16,6 +16,7 @@ from functools import partial from pathlib import Path +from django.conf import settings from django.shortcuts import render from ocfweb.component.markdown import markdown_and_toc @@ -26,13 +27,20 @@ DOCS_DIR = Path(__file__).parent.joinpath('docs') -def render_markdown_doc(meta, text, doc, request): +def render_markdown_doc(path, meta, text, doc, request): + + # Reload markdown docs if in development + if settings.DEBUG: + with path.open() as f: + text, meta = text_and_meta(f) + html, toc = markdown_and_toc(text) + return render( request, meta.get('template', 'doc.html'), { - 'title': doc.title, + 'title': meta['title'], 'doc': doc, 'html': html, 'toc': toc, @@ -56,5 +64,5 @@ yield Document( name='/' + name, title=meta['title'], - render=partial(render_markdown_doc, meta, text) + render=partial(render_markdown_doc, path, meta, text), )
{"golden_diff": "diff --git a/ocfweb/docs/markdown_based.py b/ocfweb/docs/markdown_based.py\n--- a/ocfweb/docs/markdown_based.py\n+++ b/ocfweb/docs/markdown_based.py\n@@ -1,6 +1,6 @@\n \"\"\"Documents backed by Markdown.\n \n-This is the most common type of doc. It reads a Markdown fil and renders it in\n+This is the most common type of doc. It reads a Markdown file and renders it in\n a standard template.\n \n Markdown documents can specify the meta attributes:\n@@ -16,6 +16,7 @@\n from functools import partial\n from pathlib import Path\n \n+from django.conf import settings\n from django.shortcuts import render\n \n from ocfweb.component.markdown import markdown_and_toc\n@@ -26,13 +27,20 @@\n DOCS_DIR = Path(__file__).parent.joinpath('docs')\n \n \n-def render_markdown_doc(meta, text, doc, request):\n+def render_markdown_doc(path, meta, text, doc, request):\n+\n+ # Reload markdown docs if in development\n+ if settings.DEBUG:\n+ with path.open() as f:\n+ text, meta = text_and_meta(f)\n+\n html, toc = markdown_and_toc(text)\n+\n return render(\n request,\n meta.get('template', 'doc.html'),\n {\n- 'title': doc.title,\n+ 'title': meta['title'],\n 'doc': doc,\n 'html': html,\n 'toc': toc,\n@@ -56,5 +64,5 @@\n yield Document(\n name='/' + name,\n title=meta['title'],\n- render=partial(render_markdown_doc, meta, text)\n+ render=partial(render_markdown_doc, path, meta, text),\n )\n", "issue": "Docs don't auto-reload in dev\nBecause we only read docs once, changes to the Markdown files require manually killing and restarting the server.\n\n", "before_files": [{"content": "\"\"\"Documents backed by Markdown.\n\nThis is the most common type of doc. It reads a Markdown fil and renders it in\na standard template.\n\nMarkdown documents can specify the meta attributes:\n\n - [[!meta title=\"Page title\"]]\n Changes the page title; all templates must specify this.\n\n - [[!meta template=\"my_template.html\"]]\n Changes the page template; most templates should *not* specify this,\n unless they want to customize something (e.g. the sidebar)\n\"\"\"\nimport os\nfrom functools import partial\nfrom pathlib import Path\n\nfrom django.shortcuts import render\n\nfrom ocfweb.component.markdown import markdown_and_toc\nfrom ocfweb.component.markdown import text_and_meta\nfrom ocfweb.docs.doc import Document\n\n\nDOCS_DIR = Path(__file__).parent.joinpath('docs')\n\n\ndef render_markdown_doc(meta, text, doc, request):\n html, toc = markdown_and_toc(text)\n return render(\n request,\n meta.get('template', 'doc.html'),\n {\n 'title': doc.title,\n 'doc': doc,\n 'html': html,\n 'toc': toc,\n },\n )\n\n\ndef get_markdown_docs():\n for path in DOCS_DIR.glob('**/*.md'):\n name, _ = os.path.splitext(str(path.relative_to(DOCS_DIR)))\n\n # sanity check that the file is under the directory we expect\n assert DOCS_DIR in path.parents\n\n with path.open() as f:\n text, meta = text_and_meta(f)\n\n if 'title' not in meta:\n raise ValueError('Document {} lacks required title meta variable.'.format(name))\n\n yield Document(\n name='/' + name,\n title=meta['title'],\n render=partial(render_markdown_doc, meta, text)\n )\n", "path": "ocfweb/docs/markdown_based.py"}]}
1073
391
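The docs in this record were static in dev because the Markdown text was read once at import time and captured in a `partial`. The sketch below isolates the fixed behavior — re-reading the file per request when `settings.DEBUG` is true; the `settings` class and `text_and_meta` are stand-ins for Django's settings object and ocfweb's front-matter parser.

```python
import os
import tempfile


class settings:  # stand-in for django.conf.settings
    DEBUG = True


def text_and_meta(f):  # stand-in for ocfweb's front-matter parser
    return f.read(), {"title": "stub"}


def render_markdown_doc(path, meta, text):
    if settings.DEBUG:
        # Reload from disk so edits show up without restarting the server;
        # production keeps the text captured at import time.
        with open(path) as f:
            text, meta = text_and_meta(f)
    return meta["title"], text


if __name__ == "__main__":
    fd, path = tempfile.mkstemp(suffix=".md")
    with os.fdopen(fd, "w") as f:
        f.write("fresh content")
    print(render_markdown_doc(path, {"title": "old"}, "stale import-time text"))
    os.remove(path)
```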
gh_patches_debug_12226
rasdani/github-patches
git_diff
googleapis__python-bigquery-643
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> deps: expand extras to support pyarrow v4 We're actually already testing with pyarrow v4 in some of the samples tests, so this should be safe to expand in our `setup.py` </issue> <code> [start of setup.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import io 16 import os 17 18 import setuptools 19 20 21 # Package metadata. 22 23 name = "google-cloud-bigquery" 24 description = "Google BigQuery API client library" 25 26 # Should be one of: 27 # 'Development Status :: 3 - Alpha' 28 # 'Development Status :: 4 - Beta' 29 # 'Development Status :: 5 - Production/Stable' 30 release_status = "Development Status :: 5 - Production/Stable" 31 dependencies = [ 32 "google-api-core[grpc] >= 1.23.0, < 2.0.0dev", 33 "proto-plus >= 1.10.0", 34 "google-cloud-core >= 1.4.1, < 2.0dev", 35 "google-resumable-media >= 0.6.0, < 2.0dev", 36 "packaging >= 14.3", 37 "protobuf >= 3.12.0", 38 "requests >= 2.18.0, < 3.0.0dev", 39 ] 40 extras = { 41 "bqstorage": [ 42 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev", 43 # Due to an issue in pip's dependency resolver, the `grpc` extra is not 44 # installed, even though `google-cloud-bigquery-storage` specifies it 45 # as `google-api-core[grpc]`. We thus need to explicitly specify it here. 46 # See: https://github.com/googleapis/python-bigquery/issues/83 The 47 # grpc.Channel.close() method isn't added until 1.32.0. 48 # https://github.com/grpc/grpc/pull/15254 49 "grpcio >= 1.32.0, < 2.0dev", 50 "pyarrow >= 1.0.0, < 4.0dev", 51 ], 52 "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"], 53 "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"], 54 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"], 55 "opentelemetry": [ 56 "opentelemetry-api >= 0.11b0", 57 "opentelemetry-sdk >= 0.11b0", 58 "opentelemetry-instrumentation >= 0.11b0", 59 ], 60 } 61 62 all_extras = [] 63 64 for extra in extras: 65 # Exclude this extra from all to avoid overly strict dependencies on core 66 # libraries such as pyarrow. 67 # https://github.com/googleapis/python-bigquery/issues/563 68 if extra in {"bignumeric_type"}: 69 continue 70 all_extras.extend(extras[extra]) 71 72 extras["all"] = all_extras 73 74 # Setup boilerplate below this line. 75 76 package_root = os.path.abspath(os.path.dirname(__file__)) 77 78 readme_filename = os.path.join(package_root, "README.rst") 79 with io.open(readme_filename, encoding="utf-8") as readme_file: 80 readme = readme_file.read() 81 82 version = {} 83 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp: 84 exec(fp.read(), version) 85 version = version["__version__"] 86 87 # Only include packages under the 'google' namespace. Do not include tests, 88 # benchmarks, etc. 89 packages = [ 90 package 91 for package in setuptools.PEP420PackageFinder.find() 92 if package.startswith("google") 93 ] 94 95 # Determine which namespaces are needed. 
96 namespaces = ["google"] 97 if "google.cloud" in packages: 98 namespaces.append("google.cloud") 99 100 101 setuptools.setup( 102 name=name, 103 version=version, 104 description=description, 105 long_description=readme, 106 author="Google LLC", 107 author_email="[email protected]", 108 license="Apache 2.0", 109 url="https://github.com/googleapis/python-bigquery", 110 classifiers=[ 111 release_status, 112 "Intended Audience :: Developers", 113 "License :: OSI Approved :: Apache Software License", 114 "Programming Language :: Python", 115 "Programming Language :: Python :: 3", 116 "Programming Language :: Python :: 3.6", 117 "Programming Language :: Python :: 3.7", 118 "Programming Language :: Python :: 3.8", 119 "Programming Language :: Python :: 3.9", 120 "Operating System :: OS Independent", 121 "Topic :: Internet", 122 ], 123 platforms="Posix; MacOS X; Windows", 124 packages=packages, 125 namespace_packages=namespaces, 126 install_requires=dependencies, 127 extras_require=extras, 128 python_requires=">=3.6, <3.10", 129 include_package_data=True, 130 zip_safe=False, 131 ) 132 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -47,10 +47,10 @@ # grpc.Channel.close() method isn't added until 1.32.0. # https://github.com/grpc/grpc/pull/15254 "grpcio >= 1.32.0, < 2.0dev", - "pyarrow >= 1.0.0, < 4.0dev", + "pyarrow >= 1.0.0, < 5.0dev", ], - "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"], - "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"], + "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 5.0dev"], + "bignumeric_type": ["pyarrow >= 3.0.0, < 5.0dev"], "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"], "opentelemetry": [ "opentelemetry-api >= 0.11b0",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -47,10 +47,10 @@\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n- \"pyarrow >= 1.0.0, < 4.0dev\",\n+ \"pyarrow >= 1.0.0, < 5.0dev\",\n ],\n- \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n- \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n+ \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 5.0dev\"],\n+ \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 5.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n", "issue": "deps: expand extras to support pyarrow v4\nWe're actually already testing with pyarrow v4 in some of the samples tests, so this should be safe to expand in our `setup.py`\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n \"opentelemetry-sdk >= 0.11b0\",\n \"opentelemetry-instrumentation >= 0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]}
2,030
303
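The widened caps in the diff above can be sanity-checked with `packaging`, which is already a declared dependency of this package. A minimal sketch; the version numbers below are illustrative, not exhaustive:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

# The widened pyarrow range from the golden diff.
widened = SpecifierSet(">=1.0.0, <5.0dev")

print(Version("4.0.1") in widened)  # True  -- pyarrow v4 is now allowed
print(Version("5.0.0") in widened)  # False -- still capped below v5
```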
gh_patches_debug_15693
rasdani/github-patches
git_diff
pypa__pip-2396
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pip.utils.filesystem.check_path_owner should treat group membership properly I have two users who share same .pip/cache directory: `a` and `b`. Both users are members of the group `pip`. There is third user, `pip` who is also member of group `pip` and that user handles all download and caching for users a and b. `.pip/cache` directory configured to have group writeable permission and sticky bit, to make sure all newly created files and folders are owned by group `pip`. Function [check_path_owner](https://github.com/pypa/pip/blob/6.0.6/pip/utils/filesystem.py#L7) only checks if owner is same user, but it is somewhat wrong. You should check group membership as well. Another option, is to create new command-line option, which will allow to ignore this check. Either solution will work. </issue> <code> [start of pip/utils/filesystem.py] 1 import os 2 import os.path 3 4 from pip.compat import get_path_uid 5 6 7 def check_path_owner(path): 8 # If we don't have a way to check the effective uid of this process, then 9 # we'll just assume that we own the directory. 10 if not hasattr(os, "geteuid"): 11 return True 12 13 previous = None 14 while path != previous: 15 if os.path.lexists(path): 16 # Actually do the ownership check 17 try: 18 if get_path_uid(path) != os.geteuid(): 19 return False 20 except OSError: 21 return False 22 return True 23 else: 24 previous, path = path, os.path.dirname(path) 25 [end of pip/utils/filesystem.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pip/utils/filesystem.py b/pip/utils/filesystem.py --- a/pip/utils/filesystem.py +++ b/pip/utils/filesystem.py @@ -13,12 +13,16 @@ previous = None while path != previous: if os.path.lexists(path): - # Actually do the ownership check - try: - if get_path_uid(path) != os.geteuid(): + # Check if path is writable by current user. + if os.geteuid() == 0: + # Special handling for root user in order to handle properly + # cases where users use sudo without -H flag. + try: + path_uid = get_path_uid(path) + except OSError: return False - except OSError: - return False - return True + return path_uid == 0 + else: + return os.access(path, os.W_OK) else: previous, path = path, os.path.dirname(path)
{"golden_diff": "diff --git a/pip/utils/filesystem.py b/pip/utils/filesystem.py\n--- a/pip/utils/filesystem.py\n+++ b/pip/utils/filesystem.py\n@@ -13,12 +13,16 @@\n previous = None\n while path != previous:\n if os.path.lexists(path):\n- # Actually do the ownership check\n- try:\n- if get_path_uid(path) != os.geteuid():\n+ # Check if path is writable by current user.\n+ if os.geteuid() == 0:\n+ # Special handling for root user in order to handle properly\n+ # cases where users use sudo without -H flag.\n+ try:\n+ path_uid = get_path_uid(path)\n+ except OSError:\n return False\n- except OSError:\n- return False\n- return True\n+ return path_uid == 0\n+ else:\n+ return os.access(path, os.W_OK)\n else:\n previous, path = path, os.path.dirname(path)\n", "issue": "pip.utils.filesystem.check_path_owner should treat group membership properly\nI have two users who share same .pip/cache directory: `a` and `b`. Both users are members of the group `pip`. There is third user, `pip` who is also member of group `pip` and that user handles all download and caching for users a and b. `.pip/cache` directory configured to have group writeable permission and sticky bit, to make sure all newly created files and folders are owned by group `pip`. \n\nFunction [check_path_owner](https://github.com/pypa/pip/blob/6.0.6/pip/utils/filesystem.py#L7) only checks if owner is same user, but it is somewhat wrong. You should check group membership as well. \n\nAnother option, is to create new command-line option, which will allow to ignore this check. \n\nEither solution will work.\n\n", "before_files": [{"content": "import os\nimport os.path\n\nfrom pip.compat import get_path_uid\n\n\ndef check_path_owner(path):\n # If we don't have a way to check the effective uid of this process, then\n # we'll just assume that we own the directory.\n if not hasattr(os, \"geteuid\"):\n return True\n\n previous = None\n while path != previous:\n if os.path.lexists(path):\n # Actually do the ownership check\n try:\n if get_path_uid(path) != os.geteuid():\n return False\n except OSError:\n return False\n return True\n else:\n previous, path = path, os.path.dirname(path)\n", "path": "pip/utils/filesystem.py"}]}
909
225
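A standalone sketch of the patched logic, for readers who want to run it outside pip. It is POSIX-only (it relies on `os.geteuid`), `is_path_writable` is a hypothetical name, and it is simplified: pip's real `get_path_uid` also guards against symlinks, while plain `os.stat` below follows them.

```python
import os

def is_path_writable(path):
    # Walk up to the first existing ancestor, as pip does.
    previous = None
    while path != previous:
        if os.path.lexists(path):
            if os.geteuid() == 0:
                # Root: keep an ownership check so `sudo` without -H
                # does not silently write into another user's cache.
                return os.stat(path).st_uid == 0
            # Everyone else: a plain write test, which honours the
            # group permissions the issue asks about.
            return os.access(path, os.W_OK)
        previous, path = path, os.path.dirname(path)
    return False

print(is_path_writable(os.path.expanduser("~/.pip/cache")))
```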
gh_patches_debug_21949
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-205
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Capture Celery Queue Name </issue> <code> [start of src/scout_apm/celery.py] 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 from celery.signals import task_postrun, task_prerun 5 6 import scout_apm.core 7 from scout_apm.core.tracked_request import TrackedRequest 8 9 10 # TODO: Capture queue. 11 # https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa 12 def prerun_callback(sender=None, headers=None, body=None, **kwargs): 13 name = kwargs["task"].name 14 15 tr = TrackedRequest.instance() 16 tr.mark_real_request() 17 span = tr.start_span(operation=("Job/" + name)) 18 span.tag("queue", "default") 19 20 21 def postrun_callback(sender=None, headers=None, body=None, **kwargs): 22 tr = TrackedRequest.instance() 23 tr.stop_span() 24 25 26 def install(): 27 installed = scout_apm.core.install() 28 if not installed: 29 return 30 31 task_prerun.connect(prerun_callback) 32 task_postrun.connect(postrun_callback) 33 34 35 def uninstall(): 36 task_prerun.disconnect(prerun_callback) 37 task_postrun.disconnect(postrun_callback) 38 [end of src/scout_apm/celery.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py --- a/src/scout_apm/celery.py +++ b/src/scout_apm/celery.py @@ -7,20 +7,22 @@ from scout_apm.core.tracked_request import TrackedRequest -# TODO: Capture queue. -# https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa -def prerun_callback(sender=None, headers=None, body=None, **kwargs): - name = kwargs["task"].name +def prerun_callback(task=None, **kwargs): + tracked_request = TrackedRequest.instance() + tracked_request.mark_real_request() - tr = TrackedRequest.instance() - tr.mark_real_request() - span = tr.start_span(operation=("Job/" + name)) - span.tag("queue", "default") + delivery_info = task.request.delivery_info + tracked_request.tag("is_eager", delivery_info.get("is_eager", False)) + tracked_request.tag("exchange", delivery_info.get("exchange", "unknown")) + tracked_request.tag("routing_key", delivery_info.get("routing_key", "unknown")) + tracked_request.tag("queue", delivery_info.get("queue", "unknown")) + tracked_request.start_span(operation=("Job/" + task.name)) -def postrun_callback(sender=None, headers=None, body=None, **kwargs): - tr = TrackedRequest.instance() - tr.stop_span() + +def postrun_callback(task=None, **kwargs): + tracked_request = TrackedRequest.instance() + tracked_request.stop_span() def install():
{"golden_diff": "diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py\n--- a/src/scout_apm/celery.py\n+++ b/src/scout_apm/celery.py\n@@ -7,20 +7,22 @@\n from scout_apm.core.tracked_request import TrackedRequest\n \n \n-# TODO: Capture queue.\n-# https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa\n-def prerun_callback(sender=None, headers=None, body=None, **kwargs):\n- name = kwargs[\"task\"].name\n+def prerun_callback(task=None, **kwargs):\n+ tracked_request = TrackedRequest.instance()\n+ tracked_request.mark_real_request()\n \n- tr = TrackedRequest.instance()\n- tr.mark_real_request()\n- span = tr.start_span(operation=(\"Job/\" + name))\n- span.tag(\"queue\", \"default\")\n+ delivery_info = task.request.delivery_info\n+ tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n+ tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n+ tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n+ tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n \n+ tracked_request.start_span(operation=(\"Job/\" + task.name))\n \n-def postrun_callback(sender=None, headers=None, body=None, **kwargs):\n- tr = TrackedRequest.instance()\n- tr.stop_span()\n+\n+def postrun_callback(task=None, **kwargs):\n+ tracked_request = TrackedRequest.instance()\n+ tracked_request.stop_span()\n \n \n def install():\n", "issue": "Capture Celery Queue Name\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom celery.signals import task_postrun, task_prerun\n\nimport scout_apm.core\nfrom scout_apm.core.tracked_request import TrackedRequest\n\n\n# TODO: Capture queue.\n# https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa\ndef prerun_callback(sender=None, headers=None, body=None, **kwargs):\n name = kwargs[\"task\"].name\n\n tr = TrackedRequest.instance()\n tr.mark_real_request()\n span = tr.start_span(operation=(\"Job/\" + name))\n span.tag(\"queue\", \"default\")\n\n\ndef postrun_callback(sender=None, headers=None, body=None, **kwargs):\n tr = TrackedRequest.instance()\n tr.stop_span()\n\n\ndef install():\n installed = scout_apm.core.install()\n if not installed:\n return\n\n task_prerun.connect(prerun_callback)\n task_postrun.connect(postrun_callback)\n\n\ndef uninstall():\n task_prerun.disconnect(prerun_callback)\n task_postrun.disconnect(postrun_callback)\n", "path": "src/scout_apm/celery.py"}]}
902
398
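For context on where those tags come from, here is a sketch that runs without a broker by using Celery's eager mode. In eager mode only `is_eager` is reliably populated, which is exactly why the patch reads every field with a default; the app and task names below are made up.

```python
from celery import Celery

app = Celery("demo")
app.conf.task_always_eager = True  # run tasks inline, no broker needed

@app.task(bind=True)
def inspect_delivery(self):
    info = self.request.delivery_info or {}
    return {key: info.get(key) or "unknown"
            for key in ("is_eager", "exchange", "routing_key", "queue")}

print(inspect_delivery.delay().get())
# e.g. {'is_eager': True, 'exchange': 'unknown',
#       'routing_key': 'unknown', 'queue': 'unknown'}
```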
gh_patches_debug_2999
rasdani/github-patches
git_diff
iterative__dvc-2457
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dvc remove CLI documentation inconsistency `dvc remove` (without `targets`) prints help which states that `targets` are optional, and if not specified will remove all DVC-files. Clearly not the case. ```bash $ dvc remove [...] targets DVC-files to remove. Optional. (Finds all DVC-files in the workspace by default.) ``` </issue> <code> [start of dvc/command/remove.py] 1 from __future__ import unicode_literals 2 3 import argparse 4 import logging 5 6 import dvc.prompt as prompt 7 from dvc.exceptions import DvcException 8 from dvc.command.base import CmdBase, append_doc_link 9 10 11 logger = logging.getLogger(__name__) 12 13 14 class CmdRemove(CmdBase): 15 def _is_outs_only(self, target): 16 if not self.args.purge: 17 return True 18 19 if self.args.force: 20 return False 21 22 msg = "Are you sure you want to remove {} with its outputs?".format( 23 target 24 ) 25 26 if prompt.confirm(msg): 27 return False 28 29 raise DvcException( 30 "Cannot purge without a confirmation from the user." 31 " Use '-f' to force." 32 ) 33 34 def run(self): 35 for target in self.args.targets: 36 try: 37 outs_only = self._is_outs_only(target) 38 self.repo.remove(target, outs_only=outs_only) 39 except DvcException: 40 logger.exception("failed to remove {}".format(target)) 41 return 1 42 return 0 43 44 45 def add_parser(subparsers, parent_parser): 46 REMOVE_HELP = "Remove DVC-file outputs." 47 remove_parser = subparsers.add_parser( 48 "remove", 49 parents=[parent_parser], 50 description=append_doc_link(REMOVE_HELP, "remove"), 51 help=REMOVE_HELP, 52 formatter_class=argparse.RawDescriptionHelpFormatter, 53 ) 54 remove_parser_group = remove_parser.add_mutually_exclusive_group() 55 remove_parser_group.add_argument( 56 "-o", 57 "--outs", 58 action="store_true", 59 default=True, 60 help="Only remove DVC-file outputs. (Default)", 61 ) 62 remove_parser_group.add_argument( 63 "-p", 64 "--purge", 65 action="store_true", 66 default=False, 67 help="Remove DVC-file and all its outputs.", 68 ) 69 remove_parser.add_argument( 70 "-f", 71 "--force", 72 action="store_true", 73 default=False, 74 help="Force purge.", 75 ) 76 remove_parser.add_argument( 77 "targets", 78 nargs="+", 79 help="DVC-files to remove. Optional. " 80 "(Finds all DVC-files in the workspace by default.)", 81 ) 82 remove_parser.set_defaults(func=CmdRemove) 83 [end of dvc/command/remove.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/command/remove.py b/dvc/command/remove.py --- a/dvc/command/remove.py +++ b/dvc/command/remove.py @@ -74,9 +74,6 @@ help="Force purge.", ) remove_parser.add_argument( - "targets", - nargs="+", - help="DVC-files to remove. Optional. " - "(Finds all DVC-files in the workspace by default.)", + "targets", nargs="+", help="DVC-files to remove." ) remove_parser.set_defaults(func=CmdRemove)
{"golden_diff": "diff --git a/dvc/command/remove.py b/dvc/command/remove.py\n--- a/dvc/command/remove.py\n+++ b/dvc/command/remove.py\n@@ -74,9 +74,6 @@\n help=\"Force purge.\",\n )\n remove_parser.add_argument(\n- \"targets\",\n- nargs=\"+\",\n- help=\"DVC-files to remove. Optional. \"\n- \"(Finds all DVC-files in the workspace by default.)\",\n+ \"targets\", nargs=\"+\", help=\"DVC-files to remove.\"\n )\n remove_parser.set_defaults(func=CmdRemove)\n", "issue": "dvc remove CLI documentation inconsistency\n`dvc remove` (without `targets`) prints help which states that `targets` are optional, and if not specified will remove all DVC-files. Clearly not the case.\r\n\r\n```bash\r\n$ dvc remove\r\n[...]\r\n targets DVC-files to remove. Optional. (Finds all DVC-files in the\r\n workspace by default.)\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport argparse\nimport logging\n\nimport dvc.prompt as prompt\nfrom dvc.exceptions import DvcException\nfrom dvc.command.base import CmdBase, append_doc_link\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdRemove(CmdBase):\n def _is_outs_only(self, target):\n if not self.args.purge:\n return True\n\n if self.args.force:\n return False\n\n msg = \"Are you sure you want to remove {} with its outputs?\".format(\n target\n )\n\n if prompt.confirm(msg):\n return False\n\n raise DvcException(\n \"Cannot purge without a confirmation from the user.\"\n \" Use '-f' to force.\"\n )\n\n def run(self):\n for target in self.args.targets:\n try:\n outs_only = self._is_outs_only(target)\n self.repo.remove(target, outs_only=outs_only)\n except DvcException:\n logger.exception(\"failed to remove {}\".format(target))\n return 1\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n REMOVE_HELP = \"Remove DVC-file outputs.\"\n remove_parser = subparsers.add_parser(\n \"remove\",\n parents=[parent_parser],\n description=append_doc_link(REMOVE_HELP, \"remove\"),\n help=REMOVE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n remove_parser_group = remove_parser.add_mutually_exclusive_group()\n remove_parser_group.add_argument(\n \"-o\",\n \"--outs\",\n action=\"store_true\",\n default=True,\n help=\"Only remove DVC-file outputs. (Default)\",\n )\n remove_parser_group.add_argument(\n \"-p\",\n \"--purge\",\n action=\"store_true\",\n default=False,\n help=\"Remove DVC-file and all its outputs.\",\n )\n remove_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Force purge.\",\n )\n remove_parser.add_argument(\n \"targets\",\n nargs=\"+\",\n help=\"DVC-files to remove. Optional. \"\n \"(Finds all DVC-files in the workspace by default.)\",\n )\n remove_parser.set_defaults(func=CmdRemove)\n", "path": "dvc/command/remove.py"}]}
1,259
126
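The corrected help text pairs with `nargs="+"`, which already makes the argument mandatory. A standalone argparse sketch of the same shape, showing why the old "Optional" wording was misleading:

```python
import argparse

parser = argparse.ArgumentParser(prog="dvc remove")
parser.add_argument("targets", nargs="+", help="DVC-files to remove.")

print(parser.parse_args(["data.dvc", "model.dvc"]).targets)
# ['data.dvc', 'model.dvc']

# Calling parser.parse_args([]) instead exits with:
#   dvc remove: error: the following arguments are required: targets
```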
gh_patches_debug_82
rasdani/github-patches
git_diff
fidals__shopelectro-719
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add canonicals to category page For example this two pages contains no canonicals: - https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/?page=2 - ~https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/?page=2~ checked - it contains canonical Add canonicals to category page For example this two pages contains no canonicals: - https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/?page=2 - ~https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/?page=2~ checked - it contains canonical </issue> <code> [start of shopelectro/context.py] 1 from functools import partial 2 3 from catalog.newcontext import Context, Tags 4 5 6 class Page(Context): 7 8 def __init__(self, page, tags: Tags): 9 self._page = page 10 self._tags = tags 11 12 def context(self): 13 def template_context(page, tag_titles, tags): 14 return { 15 'page': page, 16 'tag_titles': tag_titles, 17 'tags': tags, 18 } 19 20 tags_qs = self._tags.qs() 21 self._page.get_template_render_context = partial( 22 template_context, self._page, tags_qs.as_title(), tags_qs 23 ) 24 25 return { 26 'page': self._page, 27 'skip_canonical': tags_qs.exists(), 28 } 29 [end of shopelectro/context.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shopelectro/context.py b/shopelectro/context.py --- a/shopelectro/context.py +++ b/shopelectro/context.py @@ -24,5 +24,4 @@ return { 'page': self._page, - 'skip_canonical': tags_qs.exists(), }
{"golden_diff": "diff --git a/shopelectro/context.py b/shopelectro/context.py\n--- a/shopelectro/context.py\n+++ b/shopelectro/context.py\n@@ -24,5 +24,4 @@\n \n return {\n 'page': self._page,\n- 'skip_canonical': tags_qs.exists(),\n }\n", "issue": "Add canonicals to category page\nFor example this two pages contains no canonicals:\r\n- https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/?page=2\r\n- ~https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/?page=2~ checked - it contains canonical\nAdd canonicals to category page\nFor example this two pages contains no canonicals:\r\n- https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/?page=2\r\n- ~https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/?page=2~ checked - it contains canonical\n", "before_files": [{"content": "from functools import partial\n\nfrom catalog.newcontext import Context, Tags\n\n\nclass Page(Context):\n\n def __init__(self, page, tags: Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n tags_qs = self._tags.qs()\n self._page.get_template_render_context = partial(\n template_context, self._page, tags_qs.as_title(), tags_qs\n )\n\n return {\n 'page': self._page,\n 'skip_canonical': tags_qs.exists(),\n }\n", "path": "shopelectro/context.py"}]}
902
74
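With `skip_canonical` removed from the context, paginated tag pages should emit a canonical link again. A minimal sketch of deriving that link by stripping the query string (`canonical_url` is a hypothetical helper, not Shopelectro's actual template code), using the URL from the issue:

```python
from urllib.parse import urlsplit, urlunsplit

def canonical_url(url):
    # Drop the query string (?page=2 etc.) and fragment.
    parts = urlsplit(url)
    return urlunsplit((parts.scheme, parts.netloc, parts.path, "", ""))

print(canonical_url(
    "https://www.shopelectro.ru/catalog/categories/akkumuliatory-270"
    "/tags/li-ro_hbced/?page=2"
))
# https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/
```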
gh_patches_debug_17860
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-6112
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cleanup exception that are not logged as error After #4495 got merged @agjohnson suggested to have an attribute in the Exception class and check for that attribute before log the exception, instead of defining a list for the warning exceptions as I did at: https://github.com/rtfd/readthedocs.org/pull/4495/files#diff-ca52b098301dd315a834b3556ab9a7d5R424 Also, there are more exceptions that have to treat in the same way: `ProjectConfigurationError` for example. https://sentry.io/read-the-docs/readthedocs-org/issues/668248681/ </issue> <code> [start of readthedocs/vcs_support/base.py] 1 # -*- coding: utf-8 -*- 2 3 """Base classes for VCS backends.""" 4 import logging 5 import os 6 import shutil 7 8 9 log = logging.getLogger(__name__) 10 11 12 class VCSVersion: 13 14 """ 15 Represents a Version (tag or branch) in a VCS. 16 17 This class should only be instantiated in BaseVCS subclasses. 18 19 It can act as a context manager to temporarily switch to this tag (eg to 20 build docs for this tag). 21 """ 22 23 def __init__(self, repository, identifier, verbose_name): 24 self.repository = repository 25 self.identifier = identifier 26 self.verbose_name = verbose_name 27 28 def __repr__(self): 29 return '<VCSVersion: {}:{}'.format( 30 self.repository.repo_url, 31 self.verbose_name, 32 ) 33 34 35 class BaseVCS: 36 37 """ 38 Base for VCS Classes. 39 40 VCS commands are ran inside a ``LocalEnvironment``. 41 """ 42 43 supports_tags = False # Whether this VCS supports tags or not. 44 supports_branches = False # Whether this VCS supports branches or not. 45 supports_submodules = False 46 47 # ========================================================================= 48 # General methods 49 # ========================================================================= 50 51 # Defining a base API, so we'll have unused args 52 # pylint: disable=unused-argument 53 def __init__( 54 self, project, version_slug, environment=None, 55 verbose_name=None, version_type=None, **kwargs 56 ): 57 self.default_branch = project.default_branch 58 self.project = project 59 self.name = project.name 60 self.repo_url = project.clean_repo 61 self.working_dir = project.checkout_path(version_slug) 62 # required for External versions 63 self.verbose_name = verbose_name 64 self.version_type = version_type 65 66 from readthedocs.doc_builder.environments import LocalEnvironment 67 self.environment = environment or LocalEnvironment(project) 68 69 # Update the env variables with the proper VCS env variables 70 self.environment.environment.update(self.env) 71 72 def check_working_dir(self): 73 if not os.path.exists(self.working_dir): 74 os.makedirs(self.working_dir) 75 76 def make_clean_working_dir(self): 77 """Ensures that the working dir exists and is empty.""" 78 shutil.rmtree(self.working_dir, ignore_errors=True) 79 self.check_working_dir() 80 81 @property 82 def env(self): 83 environment = os.environ.copy() 84 85 # TODO: kind of a hack 86 del environment['PATH'] 87 88 return environment 89 90 def update(self): 91 """ 92 Update a local copy of the repository in self.working_dir. 93 94 If self.working_dir is already a valid local copy of the repository, 95 update the repository, else create a new local copy of the repository. 
96 """ 97 self.check_working_dir() 98 99 def run(self, *cmd, **kwargs): 100 kwargs.update({ 101 'cwd': self.working_dir, 102 'shell': False, 103 }) 104 105 build_cmd = self.environment.run(*cmd, **kwargs) 106 # Return a tuple to keep compatibility 107 return (build_cmd.exit_code, build_cmd.output, build_cmd.error) 108 109 # ========================================================================= 110 # Tag / Branch related methods 111 # These methods only apply if supports_tags = True and/or 112 # support_branches = True 113 # ========================================================================= 114 115 @property 116 def tags(self): 117 """ 118 Returns a list of VCSVersion objects. 119 120 See VCSVersion for more information. 121 """ 122 raise NotImplementedError 123 124 @property 125 def branches(self): 126 """ 127 Returns a list of VCSVersion objects. 128 129 See VCSVersion for more information. 130 """ 131 raise NotImplementedError 132 133 @property 134 def commit(self): 135 """Returns a string representing the current commit.""" 136 raise NotImplementedError 137 138 def checkout(self, identifier=None): 139 """ 140 Set the state to the given identifier. 141 142 If identifier is None, checkout to the latest revision. 143 144 The type and format of identifier may change from VCS to VCS, so each 145 backend is responsible to understand it's identifiers. 146 """ 147 self.check_working_dir() 148 149 def update_submodules(self, config): 150 """ 151 Update the submodules of the current checkout. 152 153 :type config: readthedocs.config.BuildConfigBase 154 """ 155 raise NotImplementedError 156 [end of readthedocs/vcs_support/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/vcs_support/base.py b/readthedocs/vcs_support/base.py --- a/readthedocs/vcs_support/base.py +++ b/readthedocs/vcs_support/base.py @@ -1,10 +1,11 @@ -# -*- coding: utf-8 -*- - """Base classes for VCS backends.""" import logging import os import shutil +from readthedocs.doc_builder.exceptions import BuildEnvironmentWarning +from readthedocs.projects.exceptions import RepositoryError + log = logging.getLogger(__name__) @@ -102,7 +103,13 @@ 'shell': False, }) - build_cmd = self.environment.run(*cmd, **kwargs) + try: + build_cmd = self.environment.run(*cmd, **kwargs) + except BuildEnvironmentWarning as e: + # Re raise as RepositoryError, + # so isn't logged as ERROR. + raise RepositoryError(str(e)) + # Return a tuple to keep compatibility return (build_cmd.exit_code, build_cmd.output, build_cmd.error)
{"golden_diff": "diff --git a/readthedocs/vcs_support/base.py b/readthedocs/vcs_support/base.py\n--- a/readthedocs/vcs_support/base.py\n+++ b/readthedocs/vcs_support/base.py\n@@ -1,10 +1,11 @@\n-# -*- coding: utf-8 -*-\n-\n \"\"\"Base classes for VCS backends.\"\"\"\n import logging\n import os\n import shutil\n \n+from readthedocs.doc_builder.exceptions import BuildEnvironmentWarning\n+from readthedocs.projects.exceptions import RepositoryError\n+\n \n log = logging.getLogger(__name__)\n \n@@ -102,7 +103,13 @@\n 'shell': False,\n })\n \n- build_cmd = self.environment.run(*cmd, **kwargs)\n+ try:\n+ build_cmd = self.environment.run(*cmd, **kwargs)\n+ except BuildEnvironmentWarning as e:\n+ # Re raise as RepositoryError,\n+ # so isn't logged as ERROR.\n+ raise RepositoryError(str(e))\n+\n # Return a tuple to keep compatibility\n return (build_cmd.exit_code, build_cmd.output, build_cmd.error)\n", "issue": "Cleanup exception that are not logged as error\nAfter #4495 got merged @agjohnson suggested to have an attribute in the Exception class and check for that attribute before log the exception, instead of defining a list for the warning exceptions as I did at:\r\n\r\nhttps://github.com/rtfd/readthedocs.org/pull/4495/files#diff-ca52b098301dd315a834b3556ab9a7d5R424\r\n\r\nAlso, there are more exceptions that have to treat in the same way: `ProjectConfigurationError` for example.\r\n\r\nhttps://sentry.io/read-the-docs/readthedocs-org/issues/668248681/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Base classes for VCS backends.\"\"\"\nimport logging\nimport os\nimport shutil\n\n\nlog = logging.getLogger(__name__)\n\n\nclass VCSVersion:\n\n \"\"\"\n Represents a Version (tag or branch) in a VCS.\n\n This class should only be instantiated in BaseVCS subclasses.\n\n It can act as a context manager to temporarily switch to this tag (eg to\n build docs for this tag).\n \"\"\"\n\n def __init__(self, repository, identifier, verbose_name):\n self.repository = repository\n self.identifier = identifier\n self.verbose_name = verbose_name\n\n def __repr__(self):\n return '<VCSVersion: {}:{}'.format(\n self.repository.repo_url,\n self.verbose_name,\n )\n\n\nclass BaseVCS:\n\n \"\"\"\n Base for VCS Classes.\n\n VCS commands are ran inside a ``LocalEnvironment``.\n \"\"\"\n\n supports_tags = False # Whether this VCS supports tags or not.\n supports_branches = False # Whether this VCS supports branches or not.\n supports_submodules = False\n\n # =========================================================================\n # General methods\n # =========================================================================\n\n # Defining a base API, so we'll have unused args\n # pylint: disable=unused-argument\n def __init__(\n self, project, version_slug, environment=None,\n verbose_name=None, version_type=None, **kwargs\n ):\n self.default_branch = project.default_branch\n self.project = project\n self.name = project.name\n self.repo_url = project.clean_repo\n self.working_dir = project.checkout_path(version_slug)\n # required for External versions\n self.verbose_name = verbose_name\n self.version_type = version_type\n\n from readthedocs.doc_builder.environments import LocalEnvironment\n self.environment = environment or LocalEnvironment(project)\n\n # Update the env variables with the proper VCS env variables\n self.environment.environment.update(self.env)\n\n def check_working_dir(self):\n if not os.path.exists(self.working_dir):\n os.makedirs(self.working_dir)\n\n def 
make_clean_working_dir(self):\n \"\"\"Ensures that the working dir exists and is empty.\"\"\"\n shutil.rmtree(self.working_dir, ignore_errors=True)\n self.check_working_dir()\n\n @property\n def env(self):\n environment = os.environ.copy()\n\n # TODO: kind of a hack\n del environment['PATH']\n\n return environment\n\n def update(self):\n \"\"\"\n Update a local copy of the repository in self.working_dir.\n\n If self.working_dir is already a valid local copy of the repository,\n update the repository, else create a new local copy of the repository.\n \"\"\"\n self.check_working_dir()\n\n def run(self, *cmd, **kwargs):\n kwargs.update({\n 'cwd': self.working_dir,\n 'shell': False,\n })\n\n build_cmd = self.environment.run(*cmd, **kwargs)\n # Return a tuple to keep compatibility\n return (build_cmd.exit_code, build_cmd.output, build_cmd.error)\n\n # =========================================================================\n # Tag / Branch related methods\n # These methods only apply if supports_tags = True and/or\n # support_branches = True\n # =========================================================================\n\n @property\n def tags(self):\n \"\"\"\n Returns a list of VCSVersion objects.\n\n See VCSVersion for more information.\n \"\"\"\n raise NotImplementedError\n\n @property\n def branches(self):\n \"\"\"\n Returns a list of VCSVersion objects.\n\n See VCSVersion for more information.\n \"\"\"\n raise NotImplementedError\n\n @property\n def commit(self):\n \"\"\"Returns a string representing the current commit.\"\"\"\n raise NotImplementedError\n\n def checkout(self, identifier=None):\n \"\"\"\n Set the state to the given identifier.\n\n If identifier is None, checkout to the latest revision.\n\n The type and format of identifier may change from VCS to VCS, so each\n backend is responsible to understand it's identifiers.\n \"\"\"\n self.check_working_dir()\n\n def update_submodules(self, config):\n \"\"\"\n Update the submodules of the current checkout.\n\n :type config: readthedocs.config.BuildConfigBase\n \"\"\"\n raise NotImplementedError\n", "path": "readthedocs/vcs_support/base.py"}]}
1,977
237
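A self-contained sketch of the re-raise idiom from the golden diff: a noisy exception type is converted into one the error reporter treats as an expected failure. The stub `run_vcs_command` below is hypothetical and exists only to force the failure path.

```python
class BuildEnvironmentWarning(Exception):
    pass

class RepositoryError(Exception):
    pass

def run_vcs_command():
    raise BuildEnvironmentWarning("git exited with code 128")

try:
    try:
        run_vcs_command()
    except BuildEnvironmentWarning as e:
        # Re-raise as RepositoryError so it isn't logged as ERROR.
        raise RepositoryError(str(e))
except RepositoryError as e:
    print("handled as expected failure:", e)
```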
gh_patches_debug_19309
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-2048
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support absolute-form HTTP requests with IPv6 addresses ##### Steps to reproduce the problem: 1. MITMDump proxy IPv6 flow 2. Log ``` 172.17.15.1:53074: HTTP protocol error in client request: Bad HTTP request line: b'GET http://[::ffff:180.97.8.37]/mmsns/9KavCVwReibwDKBMmibrWUdVZZbHCQ0bV3R89mboKO6QDls7Sxcl4tfbHvLIHFbj3NASftTH2VAGw/150?tp=wxpc&length=2208&width=1242&idx=1&token=WSEN6qDsKwV8A02w3onOGQYfxnkibdqSOkmHhZGNB4DGicdGyTltMQXCTF7lr4IJR8Jz4lKQBBW47EV1CP33SGjg HTTP/1.1' 172.17.15.1:53075: HTTP protocol error in client request: Bad HTTP request line: b'GET http://[::ffff:b461:819]/mmcrhead/Q3auHgzwzM606QEH0kXoF60vMh5Iiay7B3DiauET3kCpbBwEfgzhNqOSeJ6y4geORGPxEcKf36Totd4sHQcwvBEg/0 HTTP/1.1' ``` ##### Any other comments? What have you tried so far? No ##### System information ``` Mitmproxy version: 1.0.2 Python version: 3.6.0 Platform: Darwin-15.6.0-x86_64-i386-64bit SSL version: OpenSSL 1.0.2j 26 Sep 2016 Mac version: 10.11.6 ('', '', '') x86_6 ``` </issue> <code> [start of mitmproxy/net/check.py] 1 import re 2 3 # Allow underscore in host name 4 _label_valid = re.compile(b"(?!-)[A-Z\d\-_]{1,63}(?<!-)$", re.IGNORECASE) 5 6 7 def is_valid_host(host: bytes) -> bool: 8 """ 9 Checks if a hostname is valid. 10 """ 11 try: 12 host.decode("idna") 13 except ValueError: 14 return False 15 if len(host) > 255: 16 return False 17 if host and host[-1:] == b".": 18 host = host[:-1] 19 return all(_label_valid.match(x) for x in host.split(b".")) 20 21 22 def is_valid_port(port): 23 return 0 <= port <= 65535 24 [end of mitmproxy/net/check.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mitmproxy/net/check.py b/mitmproxy/net/check.py --- a/mitmproxy/net/check.py +++ b/mitmproxy/net/check.py @@ -1,3 +1,4 @@ +import ipaddress import re # Allow underscore in host name @@ -6,17 +7,26 @@ def is_valid_host(host: bytes) -> bool: """ - Checks if a hostname is valid. + Checks if the passed bytes are a valid DNS hostname or an IPv4/IPv6 address. """ try: host.decode("idna") except ValueError: return False + # RFC1035: 255 bytes or less. if len(host) > 255: return False if host and host[-1:] == b".": host = host[:-1] - return all(_label_valid.match(x) for x in host.split(b".")) + # DNS hostname + if all(_label_valid.match(x) for x in host.split(b".")): + return True + # IPv4/IPv6 address + try: + ipaddress.ip_address(host.decode('idna')) + return True + except ValueError: + return False def is_valid_port(port):
{"golden_diff": "diff --git a/mitmproxy/net/check.py b/mitmproxy/net/check.py\n--- a/mitmproxy/net/check.py\n+++ b/mitmproxy/net/check.py\n@@ -1,3 +1,4 @@\n+import ipaddress\n import re\n \n # Allow underscore in host name\n@@ -6,17 +7,26 @@\n \n def is_valid_host(host: bytes) -> bool:\n \"\"\"\n- Checks if a hostname is valid.\n+ Checks if the passed bytes are a valid DNS hostname or an IPv4/IPv6 address.\n \"\"\"\n try:\n host.decode(\"idna\")\n except ValueError:\n return False\n+ # RFC1035: 255 bytes or less.\n if len(host) > 255:\n return False\n if host and host[-1:] == b\".\":\n host = host[:-1]\n- return all(_label_valid.match(x) for x in host.split(b\".\"))\n+ # DNS hostname\n+ if all(_label_valid.match(x) for x in host.split(b\".\")):\n+ return True\n+ # IPv4/IPv6 address\n+ try:\n+ ipaddress.ip_address(host.decode('idna'))\n+ return True\n+ except ValueError:\n+ return False\n \n \n def is_valid_port(port):\n", "issue": "Support absolute-form HTTP requests with IPv6 addresses\n##### Steps to reproduce the problem:\r\n\r\n1. MITMDump proxy IPv6 flow\r\n2. Log\r\n```\r\n172.17.15.1:53074: HTTP protocol error in client request: Bad HTTP request line: b'GET http://[::ffff:180.97.8.37]/mmsns/9KavCVwReibwDKBMmibrWUdVZZbHCQ0bV3R89mboKO6QDls7Sxcl4tfbHvLIHFbj3NASftTH2VAGw/150?tp=wxpc&length=2208&width=1242&idx=1&token=WSEN6qDsKwV8A02w3onOGQYfxnkibdqSOkmHhZGNB4DGicdGyTltMQXCTF7lr4IJR8Jz4lKQBBW47EV1CP33SGjg HTTP/1.1'\r\n172.17.15.1:53075: HTTP protocol error in client request: Bad HTTP request line: b'GET http://[::ffff:b461:819]/mmcrhead/Q3auHgzwzM606QEH0kXoF60vMh5Iiay7B3DiauET3kCpbBwEfgzhNqOSeJ6y4geORGPxEcKf36Totd4sHQcwvBEg/0 HTTP/1.1'\r\n```\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\nNo\r\n\r\n\r\n##### System information\r\n```\r\nMitmproxy version: 1.0.2\r\nPython version: 3.6.0\r\nPlatform: Darwin-15.6.0-x86_64-i386-64bit\r\nSSL version: OpenSSL 1.0.2j 26 Sep 2016\r\nMac version: 10.11.6 ('', '', '') x86_6\r\n```\r\n\r\n\n", "before_files": [{"content": "import re\n\n# Allow underscore in host name\n_label_valid = re.compile(b\"(?!-)[A-Z\\d\\-_]{1,63}(?<!-)$\", re.IGNORECASE)\n\n\ndef is_valid_host(host: bytes) -> bool:\n \"\"\"\n Checks if a hostname is valid.\n \"\"\"\n try:\n host.decode(\"idna\")\n except ValueError:\n return False\n if len(host) > 255:\n return False\n if host and host[-1:] == b\".\":\n host = host[:-1]\n return all(_label_valid.match(x) for x in host.split(b\".\"))\n\n\ndef is_valid_port(port):\n return 0 <= port <= 65535\n", "path": "mitmproxy/net/check.py"}]}
1,218
284
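The key addition in the diff is the `ipaddress` fallback after the DNS-label check fails. Isolated into a sketch (operating on `str` rather than the idna-decoded bytes the real function uses), with the hosts from the error log in the issue:

```python
import ipaddress

def looks_like_ip(host: str) -> bool:
    try:
        ipaddress.ip_address(host)
        return True
    except ValueError:
        return False

print(looks_like_ip("::ffff:180.97.8.37"))  # True  (IPv4-mapped IPv6)
print(looks_like_ip("::ffff:b461:819"))     # True
print(looks_like_ip("example.com"))         # False (left to the DNS-label rules)
```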
gh_patches_debug_16329
rasdani/github-patches
git_diff
gratipay__gratipay.com-3934
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Can't submit new team after changing image. Can't believe this didn't come up yet. I noticed this while exploring [create.json.spt](https://github.com/gratipay/gratipay.com/blob/master/www/teams/create.json.spt) which inspires the new [edit.json.spt](https://github.com/gratipay/gratipay.com/pull/3923/files#diff-6). The way it is written right now, we first write the team details to the db (with a unique generated `slug`) and _then_ try to save the team image. If a user uploads an image of size > 1Mb or an image which is not a jpg or png, the team creation won't be successful as far as the user is concerned and he'll resubmit the team application form with an appropriate image. But when he does again, we would have already created a slug for that team name resulting in a misleading message of `Sorry, there is already a team using <slug>.` when in fact the `slug` was created because we wrote the team details to the db first. </issue> <code> [start of gratipay/utils/images.py] 1 import zipfile 2 from cStringIO import StringIO 3 4 import requests 5 6 def imgize(image, image_type): 7 large = None 8 small = None 9 crops = requests.post( 'http://gip.rocks/v1', 10 data=image, 11 headers={'Content-Type': image_type} 12 ) 13 if crops.status_code == 200: 14 zf = zipfile.ZipFile(StringIO(crops.content)) 15 large = zf.open('160').read() 16 small = zf.open('48').read() 17 18 return crops.status_code, large, small [end of gratipay/utils/images.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gratipay/utils/images.py b/gratipay/utils/images.py --- a/gratipay/utils/images.py +++ b/gratipay/utils/images.py @@ -8,11 +8,22 @@ small = None crops = requests.post( 'http://gip.rocks/v1', data=image, - headers={'Content-Type': image_type} - ) + headers={'Content-Type': image_type}) + if crops.status_code == 200: zf = zipfile.ZipFile(StringIO(crops.content)) large = zf.open('160').read() small = zf.open('48').read() + return large, small + elif crops.status_code == 413: + raise ImageTooLarge + elif crops.status_code == 415: + raise InvalidImageType + else: + raise UnknownImageError + +class ImageTooLarge(Exception): pass + +class InvalidImageType(Exception): pass - return crops.status_code, large, small \ No newline at end of file +class UnknownImageError(Exception): pass
{"golden_diff": "diff --git a/gratipay/utils/images.py b/gratipay/utils/images.py\n--- a/gratipay/utils/images.py\n+++ b/gratipay/utils/images.py\n@@ -8,11 +8,22 @@\n small = None\n crops = requests.post( 'http://gip.rocks/v1',\n data=image,\n- headers={'Content-Type': image_type}\n- )\n+ headers={'Content-Type': image_type})\n+\n if crops.status_code == 200:\n zf = zipfile.ZipFile(StringIO(crops.content))\n large = zf.open('160').read()\n small = zf.open('48').read()\n+ return large, small\n+ elif crops.status_code == 413:\n+ raise ImageTooLarge\n+ elif crops.status_code == 415:\n+ raise InvalidImageType\n+ else:\n+ raise UnknownImageError\n+\n+class ImageTooLarge(Exception): pass\n+\n+class InvalidImageType(Exception): pass\n \n- return crops.status_code, large, small\n\\ No newline at end of file\n+class UnknownImageError(Exception): pass\n", "issue": "Can't submit new team after changing image.\nCan't believe this didn't come up yet. I noticed this while exploring [create.json.spt](https://github.com/gratipay/gratipay.com/blob/master/www/teams/create.json.spt) which inspires the new [edit.json.spt](https://github.com/gratipay/gratipay.com/pull/3923/files#diff-6). \n\nThe way it is written right now, we first write the team details to the db (with a unique generated `slug`) and _then_ try to save the team image. If a user uploads an image of size > 1Mb or an image which is not a jpg or png, the team creation won't be successful as far as the user is concerned and he'll resubmit the team application form with an appropriate image. But when he does again, we would have already created a slug for that team name resulting in a misleading message of `Sorry, there is already a team using <slug>.` when in fact the `slug` was created because we wrote the team details to the db first.\n\n", "before_files": [{"content": "import zipfile\nfrom cStringIO import StringIO\n\nimport requests\n\ndef imgize(image, image_type):\n large = None\n small = None\n crops = requests.post( 'http://gip.rocks/v1',\n data=image,\n headers={'Content-Type': image_type}\n )\n if crops.status_code == 200:\n zf = zipfile.ZipFile(StringIO(crops.content))\n large = zf.open('160').read()\n small = zf.open('48').read()\n\n return crops.status_code, large, small", "path": "gratipay/utils/images.py"}]}
914
248
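A runnable stand-in showing why the typed exceptions help the caller: the image can now be rejected before any team row (and slug) is written. The `imgize` below fakes the cropping service's behaviour and is not Gratipay's code.

```python
class ImageTooLarge(Exception): pass

class InvalidImageType(Exception): pass

def imgize(image, image_type):
    # Stand-in for the HTTP call to the cropping service.
    if len(image) > 1024 * 1024:
        raise ImageTooLarge
    if image_type not in ("image/png", "image/jpeg"):
        raise InvalidImageType
    return image, image  # pretend "large" and "small" crops

try:
    imgize(b"GIF89a...", "image/gif")
except InvalidImageType:
    print("bad image rejected before any slug was created")
```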
gh_patches_debug_8456
rasdani/github-patches
git_diff
microsoft__ptvsd-797
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add ability to launch the debugger in non-debug mode Currently we can only launch the debugger in non-debug mode when using `-m`. I'd like to have the same feature by importing PTVSD and invoking a function, similar to debugging using the `debug` function in `debugger.py` Basically this is necessary to launch the debugger in non-debug mode when using a launcher script. </issue> <code> [start of ptvsd/debugger.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. See LICENSE in the project root 3 # for license information. 4 5 import sys 6 7 from ptvsd._local import run_module, run_file 8 9 10 # TODO: not needed? 11 DONT_DEBUG = [] 12 13 LOCALHOST = 'localhost' 14 15 RUNNERS = { 16 'module': run_module, # python -m spam 17 'script': run_file, # python spam.py 18 'code': run_file, # python -c 'print("spam")' 19 None: run_file, # catchall 20 } 21 22 23 def debug(filename, port_num, debug_id, debug_options, run_as, 24 _runners=RUNNERS, _extra=None, *args, **kwargs): 25 # TODO: docstring 26 if _extra is None: 27 _extra = sys.argv[1:] 28 address = (LOCALHOST, port_num) 29 try: 30 run = _runners[run_as] 31 except KeyError: 32 # TODO: fail? 33 run = _runners[None] 34 if _extra: 35 args = _extra + list(args) 36 kwargs.setdefault('singlesession', True) 37 run(address, filename, *args, **kwargs) 38 [end of ptvsd/debugger.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py --- a/ptvsd/debugger.py +++ b/ptvsd/debugger.py @@ -4,7 +4,7 @@ import sys -from ptvsd._local import run_module, run_file +from ptvsd._local import run_module, run_file, run_main # TODO: not needed? @@ -35,3 +35,9 @@ args = _extra + list(args) kwargs.setdefault('singlesession', True) run(address, filename, *args, **kwargs) + + +def run(filename, port_num, run_as, + *args, **kwargs): + address = (LOCALHOST, port_num) + run_main(address, filename, run_as, *args, **kwargs)
{"golden_diff": "diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py\n--- a/ptvsd/debugger.py\n+++ b/ptvsd/debugger.py\n@@ -4,7 +4,7 @@\n \n import sys\n \n-from ptvsd._local import run_module, run_file\n+from ptvsd._local import run_module, run_file, run_main\n \n \n # TODO: not needed?\n@@ -35,3 +35,9 @@\n args = _extra + list(args)\n kwargs.setdefault('singlesession', True)\n run(address, filename, *args, **kwargs)\n+\n+\n+def run(filename, port_num, run_as,\n+ *args, **kwargs):\n+ address = (LOCALHOST, port_num)\n+ run_main(address, filename, run_as, *args, **kwargs)\n", "issue": "Add ability to launch the debugger in non-debug mode\nCurrently we can only launch the debugger in non-debug mode when using `-m`.\r\nI'd like to have the same feature by importing PTVSD and invoking a function, similar to debugging using the `debug` function in `debugger.py`\r\n\r\nBasically this is necessary to launch the debugger in non-debug mode when using a launcher script.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\nfrom ptvsd._local import run_module, run_file\n\n\n# TODO: not needed?\nDONT_DEBUG = []\n\nLOCALHOST = 'localhost'\n\nRUNNERS = {\n 'module': run_module, # python -m spam\n 'script': run_file, # python spam.py\n 'code': run_file, # python -c 'print(\"spam\")'\n None: run_file, # catchall\n}\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as,\n _runners=RUNNERS, _extra=None, *args, **kwargs):\n # TODO: docstring\n if _extra is None:\n _extra = sys.argv[1:]\n address = (LOCALHOST, port_num)\n try:\n run = _runners[run_as]\n except KeyError:\n # TODO: fail?\n run = _runners[None]\n if _extra:\n args = _extra + list(args)\n kwargs.setdefault('singlesession', True)\n run(address, filename, *args, **kwargs)\n", "path": "ptvsd/debugger.py"}]}
955
184
gh_patches_debug_1811
rasdani/github-patches
git_diff
iterative__dvc-2364
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> status: change nothing to reproduce message If I use DVC only to version data/models and don't care about pipelines, this message: `Pipelines are up to date. Nothing to reproduce.` looks really strange. Let's change it to something more generic: `Data and pipelines are up to date.` or something similar </issue> <code> [start of dvc/command/status.py] 1 from __future__ import unicode_literals 2 3 import logging 4 5 from dvc.command.data_sync import CmdDataBase 6 from dvc.utils.compat import str 7 8 9 logger = logging.getLogger(__name__) 10 11 12 class CmdDataStatus(CmdDataBase): 13 STATUS_LEN = 20 14 STATUS_INDENT = "\t" 15 UP_TO_DATE_MSG = "Pipelines are up to date. Nothing to reproduce." 16 17 def _normalize(self, s): 18 s += ":" 19 assert len(s) < self.STATUS_LEN 20 return s + (self.STATUS_LEN - len(s)) * " " 21 22 def _show(self, status, indent=0): 23 ind = indent * self.STATUS_INDENT 24 25 if isinstance(status, str): 26 logger.info("{}{}".format(ind, status)) 27 return 28 29 if isinstance(status, list): 30 for entry in status: 31 self._show(entry, indent) 32 return 33 34 assert isinstance(status, dict) 35 36 for key, value in status.items(): 37 if isinstance(value, str): 38 logger.info("{}{}{}".format(ind, self._normalize(value), key)) 39 elif value: 40 logger.info("{}{}:".format(ind, key)) 41 self._show(value, indent + 1) 42 43 def run(self): 44 indent = 1 if self.args.cloud else 0 45 try: 46 st = self.repo.status( 47 targets=self.args.targets, 48 jobs=self.args.jobs, 49 cloud=self.args.cloud, 50 remote=self.args.remote, 51 all_branches=self.args.all_branches, 52 all_tags=self.args.all_tags, 53 with_deps=self.args.with_deps, 54 ) 55 if st: 56 if self.args.quiet: 57 return 1 58 else: 59 self._show(st, indent) 60 else: 61 logger.info(self.UP_TO_DATE_MSG) 62 63 except Exception: 64 logger.exception("failed to obtain data status") 65 return 1 66 return 0 67 [end of dvc/command/status.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/command/status.py b/dvc/command/status.py --- a/dvc/command/status.py +++ b/dvc/command/status.py @@ -12,7 +12,7 @@ class CmdDataStatus(CmdDataBase): STATUS_LEN = 20 STATUS_INDENT = "\t" - UP_TO_DATE_MSG = "Pipelines are up to date. Nothing to reproduce." + UP_TO_DATE_MSG = "Data and pipelines are up to date." def _normalize(self, s): s += ":"
{"golden_diff": "diff --git a/dvc/command/status.py b/dvc/command/status.py\n--- a/dvc/command/status.py\n+++ b/dvc/command/status.py\n@@ -12,7 +12,7 @@\n class CmdDataStatus(CmdDataBase):\n STATUS_LEN = 20\n STATUS_INDENT = \"\\t\"\n- UP_TO_DATE_MSG = \"Pipelines are up to date. Nothing to reproduce.\"\n+ UP_TO_DATE_MSG = \"Data and pipelines are up to date.\"\n \n def _normalize(self, s):\n s += \":\"\n", "issue": "status: change nothing to reproduce message\nIf I use DVC only to version data/models and don't care about pipelines, this message:\r\n\r\n`Pipelines are up to date. Nothing to reproduce.` \r\n\r\nlooks really strange.\r\n\r\nLet's change it to something more generic:\r\n\r\n`Data and pipelines are up to date.` \r\n\r\nor something similar\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\n\nfrom dvc.command.data_sync import CmdDataBase\nfrom dvc.utils.compat import str\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdDataStatus(CmdDataBase):\n STATUS_LEN = 20\n STATUS_INDENT = \"\\t\"\n UP_TO_DATE_MSG = \"Pipelines are up to date. Nothing to reproduce.\"\n\n def _normalize(self, s):\n s += \":\"\n assert len(s) < self.STATUS_LEN\n return s + (self.STATUS_LEN - len(s)) * \" \"\n\n def _show(self, status, indent=0):\n ind = indent * self.STATUS_INDENT\n\n if isinstance(status, str):\n logger.info(\"{}{}\".format(ind, status))\n return\n\n if isinstance(status, list):\n for entry in status:\n self._show(entry, indent)\n return\n\n assert isinstance(status, dict)\n\n for key, value in status.items():\n if isinstance(value, str):\n logger.info(\"{}{}{}\".format(ind, self._normalize(value), key))\n elif value:\n logger.info(\"{}{}:\".format(ind, key))\n self._show(value, indent + 1)\n\n def run(self):\n indent = 1 if self.args.cloud else 0\n try:\n st = self.repo.status(\n targets=self.args.targets,\n jobs=self.args.jobs,\n cloud=self.args.cloud,\n remote=self.args.remote,\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n with_deps=self.args.with_deps,\n )\n if st:\n if self.args.quiet:\n return 1\n else:\n self._show(st, indent)\n else:\n logger.info(self.UP_TO_DATE_MSG)\n\n except Exception:\n logger.exception(\"failed to obtain data status\")\n return 1\n return 0\n", "path": "dvc/command/status.py"}]}
1134
118
gh_patches_debug_16628
rasdani/github-patches
git_diff
jazzband__pip-tools-595
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> README broken on PyPI (must be reStructuredText) The [package description](https://pypi.python.org/pypi/pip-tools/) on PyPI is unreadable since PyPI expects the README in [reStructuredText](http://www.sphinx-doc.org/en/stable/rest.html) file format and we use MarkDown. Solution A: Convert to reST --------------------- 1. Rename the current `README.md` to `README.rst` 1. Replace the markdown of the badges and the code samples ([example](https://github.com/Organice/djangocms-maps/blob/master/README.rst)) 1. Add a `long_description=read_file('README.rst')` line to `setup.py` ([example](https://github.com/Organice/djangocms-maps/blob/master/setup.py#L50)) Solution B: Process before Upload ------------------- 1. Integrate [pypandoc](https://pypi.python.org/pypi/pypandoc) in `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L7-L14)) 1. Add a `long_description=convert('README.md', 'rst')` line to `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L49)) ------------ Both solutions above will render a nicely formatted, HTML-styled package description on PyPI. Quality Assurance -------------- Optionally, you may check your README with [checkdocs](https://github.com/Organice/djangocms-maps/blob/master/tox.ini#L13-L19) before uploading the package to PyPI, because sometimes the reST-to-HTML conversion that PyPI uses fails -- and renders a still hard-to-read, broken, unformatted package description. </issue> <code> [start of setup.py] 1 """ 2 pip-tools keeps your pinned dependencies fresh. 3 """ 4 from setuptools import find_packages, setup 5 6 setup( 7 name='pip-tools', 8 use_scm_version=True, 9 url='https://github.com/jazzband/pip-tools/', 10 license='BSD', 11 author='Vincent Driessen', 12 author_email='[email protected]', 13 description=__doc__, 14 packages=find_packages(exclude=['tests']), 15 setup_requires=['setuptools_scm'], 16 install_requires=[ 17 'click>=6', 18 'first', 19 'six', 20 'setuptools' 21 ], 22 extras_require={ 23 ':python_version < "3.0"': ['contextlib2'] 24 }, 25 zip_safe=False, 26 entry_points={ 27 'console_scripts': [ 28 'pip-compile = piptools.scripts.compile:cli', 29 'pip-sync = piptools.scripts.sync:cli', 30 ], 31 }, 32 platforms='any', 33 classifiers=[ 34 'Development Status :: 5 - Production/Stable', 35 'Intended Audience :: Developers', 36 'Intended Audience :: System Administrators', 37 'License :: OSI Approved :: BSD License', 38 'Operating System :: OS Independent', 39 'Programming Language :: Python', 40 'Programming Language :: Python :: 2', 41 'Programming Language :: Python :: 2.7', 42 'Programming Language :: Python :: 3', 43 'Programming Language :: Python :: 3.4', 44 'Programming Language :: Python :: 3.5', 45 'Programming Language :: Python :: 3.6', 46 'Topic :: System :: Systems Administration', 47 ] 48 ) 49 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,8 +1,14 @@ """ pip-tools keeps your pinned dependencies fresh. """ +from os.path import abspath, dirname, join from setuptools import find_packages, setup +def read_file(filename): + """Read the contents of a file located relative to setup.py""" + with open(join(abspath(dirname(__file__)), filename)) as thefile: + return thefile.read() + setup( name='pip-tools', use_scm_version=True, @@ -11,6 +17,7 @@ author='Vincent Driessen', author_email='[email protected]', description=__doc__, + long_description=read_file('README.rst'), packages=find_packages(exclude=['tests']), setup_requires=['setuptools_scm'], install_requires=[
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,8 +1,14 @@\n \"\"\"\n pip-tools keeps your pinned dependencies fresh.\n \"\"\"\n+from os.path import abspath, dirname, join\n from setuptools import find_packages, setup\n \n+def read_file(filename):\n+    \"\"\"Read the contents of a file located relative to setup.py\"\"\"\n+    with open(join(abspath(dirname(__file__)), filename)) as thefile:\n+        return thefile.read()\n+\n setup(\n     name='pip-tools',\n     use_scm_version=True,\n@@ -11,6 +17,7 @@\n     author='Vincent Driessen',\n     author_email='[email protected]',\n     description=__doc__,\n+    long_description=read_file('README.rst'),\n     packages=find_packages(exclude=['tests']),\n     setup_requires=['setuptools_scm'],\n     install_requires=[\n", "issue": "README broken on PyPI (must be reStructuredText)\nThe [package description](https://pypi.python.org/pypi/pip-tools/) on PyPI is unreadable since PyPI expects the README in [reStructuredText](http://www.sphinx-doc.org/en/stable/rest.html) file format and we use MarkDown.\r\n\r\nSolution A: Convert to reST\r\n---------------------\r\n\r\n1. Rename the current `README.md` to `README.rst`\r\n1. Replace the markdown of the badges and the code samples ([example](https://github.com/Organice/djangocms-maps/blob/master/README.rst))\r\n1. Add a `long_description=read_file('README.rst')` line to `setup.py` ([example](https://github.com/Organice/djangocms-maps/blob/master/setup.py#L50))\r\n\r\nSolution B: Process before Upload\r\n-------------------\r\n\r\n1. Integrate [pypandoc](https://pypi.python.org/pypi/pypandoc) in `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L7-L14))\r\n1. Add a `long_description=convert('README.md', 'rst')` line to `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L49))\r\n\r\n------------\r\n\r\nBoth solutions above will render a nicely formatted, HTML-styled package description on PyPI.\r\n\r\nQuality Assurance\r\n--------------\r\n\r\nOptionally, you may check your README with [checkdocs](https://github.com/Organice/djangocms-maps/blob/master/tox.ini#L13-L19) before uploading the package to PyPI, because sometimes the reST-to-HTML conversion that PyPI uses fails -- and renders a still hard-to-read, broken, unformatted package description.\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nfrom setuptools import find_packages, setup\n\nsetup(\n    name='pip-tools',\n    use_scm_version=True,\n    url='https://github.com/jazzband/pip-tools/',\n    license='BSD',\n    author='Vincent Driessen',\n    author_email='[email protected]',\n    description=__doc__,\n    packages=find_packages(exclude=['tests']),\n    setup_requires=['setuptools_scm'],\n    install_requires=[\n        'click>=6',\n        'first',\n        'six',\n        'setuptools'\n    ],\n    extras_require={\n        ':python_version < \"3.0\"': ['contextlib2']\n    },\n    zip_safe=False,\n    entry_points={\n        'console_scripts': [\n            'pip-compile = piptools.scripts.compile:cli',\n            'pip-sync = piptools.scripts.sync:cli',\n        ],\n    },\n    platforms='any',\n    classifiers=[\n        'Development Status :: 5 - Production/Stable',\n        'Intended Audience :: Developers',\n        'Intended Audience :: System Administrators',\n        'License :: OSI Approved :: BSD License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.4',\n        'Programming Language :: Python :: 3.5',\n        'Programming Language :: Python :: 3.6',\n        'Topic :: System :: Systems Administration',\n    ]\n)\n", "path": "setup.py"}]}
1340
193
gh_patches_debug_21303
rasdani/github-patches
git_diff
nltk__nltk-2819
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> WordNetLemmatizer in nltk.stem module What's the parameter of WordNetLemmatizer.lemmatize() in nltk.stem module? Turn to the document, what are the candidate value of the parameter **'pos'**? ![image](https://user-images.githubusercontent.com/62245023/134791412-1ff85ba5-5eb9-4859-a3f1-3b48bdd5a6fa.png) The default value is 'Noun'. But use the function pos_tag() to get the pos of the word, the value appears to come from several options. </issue> <code> [start of nltk/stem/wordnet.py] 1 # Natural Language Toolkit: WordNet stemmer interface 2 # 3 # Copyright (C) 2001-2021 NLTK Project 4 # Author: Steven Bird <[email protected]> 5 # Edward Loper <[email protected]> 6 # URL: <http://nltk.org/> 7 # For license information, see LICENSE.TXT 8 9 from nltk.corpus import wordnet 10 from nltk.corpus.reader.wordnet import NOUN 11 12 13 class WordNetLemmatizer: 14 """ 15 WordNet Lemmatizer 16 17 Lemmatize using WordNet's built-in morphy function. 18 Returns the input word unchanged if it cannot be found in WordNet. 19 20 >>> from nltk.stem import WordNetLemmatizer 21 >>> wnl = WordNetLemmatizer() 22 >>> print(wnl.lemmatize('dogs')) 23 dog 24 >>> print(wnl.lemmatize('churches')) 25 church 26 >>> print(wnl.lemmatize('aardwolves')) 27 aardwolf 28 >>> print(wnl.lemmatize('abaci')) 29 abacus 30 >>> print(wnl.lemmatize('hardrock')) 31 hardrock 32 """ 33 34 def __init__(self): 35 pass 36 37 def lemmatize(self, word, pos=NOUN): 38 lemmas = wordnet._morphy(word, pos) 39 return min(lemmas, key=len) if lemmas else word 40 41 def __repr__(self): 42 return "<WordNetLemmatizer>" 43 [end of nltk/stem/wordnet.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nltk/stem/wordnet.py b/nltk/stem/wordnet.py --- a/nltk/stem/wordnet.py +++ b/nltk/stem/wordnet.py @@ -6,8 +6,7 @@ # URL: <http://nltk.org/> # For license information, see LICENSE.TXT -from nltk.corpus import wordnet -from nltk.corpus.reader.wordnet import NOUN +from nltk.corpus import wordnet as wn class WordNetLemmatizer: @@ -31,11 +30,19 @@ hardrock """ - def __init__(self): - pass - - def lemmatize(self, word, pos=NOUN): - lemmas = wordnet._morphy(word, pos) + def lemmatize(self, word: str, pos: str = wn.NOUN) -> str: + """Lemmatize `word` using WordNet's built-in morphy function. + Returns the input word unchanged if it cannot be found in WordNet. + + :param word: The input word to lemmatize. + :type word: str + :param pos: The Part Of Speech tag. Valid options are `"n"` for nouns, + `"v"` for verbs, `"a"` for adjectives, `"r"` for adverbs and `"s"` + for satellite adjectives. + :param pos: str + :return: The lemma of `word`, for the given `pos`. + """ + lemmas = wn._morphy(word, pos) return min(lemmas, key=len) if lemmas else word def __repr__(self):
{"golden_diff": "diff --git a/nltk/stem/wordnet.py b/nltk/stem/wordnet.py\n--- a/nltk/stem/wordnet.py\n+++ b/nltk/stem/wordnet.py\n@@ -6,8 +6,7 @@\n # URL: <http://nltk.org/>\n # For license information, see LICENSE.TXT\n \n-from nltk.corpus import wordnet\n-from nltk.corpus.reader.wordnet import NOUN\n+from nltk.corpus import wordnet as wn\n \n \n class WordNetLemmatizer:\n@@ -31,11 +30,19 @@\n hardrock\n \"\"\"\n \n- def __init__(self):\n- pass\n-\n- def lemmatize(self, word, pos=NOUN):\n- lemmas = wordnet._morphy(word, pos)\n+ def lemmatize(self, word: str, pos: str = wn.NOUN) -> str:\n+ \"\"\"Lemmatize `word` using WordNet's built-in morphy function.\n+ Returns the input word unchanged if it cannot be found in WordNet.\n+\n+ :param word: The input word to lemmatize.\n+ :type word: str\n+ :param pos: The Part Of Speech tag. Valid options are `\"n\"` for nouns,\n+ `\"v\"` for verbs, `\"a\"` for adjectives, `\"r\"` for adverbs and `\"s\"`\n+ for satellite adjectives.\n+ :param pos: str\n+ :return: The lemma of `word`, for the given `pos`.\n+ \"\"\"\n+ lemmas = wn._morphy(word, pos)\n return min(lemmas, key=len) if lemmas else word\n \n def __repr__(self):\n", "issue": "WordNetLemmatizer in nltk.stem module\nWhat's the parameter of WordNetLemmatizer.lemmatize() in nltk.stem module?\r\nTurn to the document, what are the candidate value of the parameter **'pos'**?\r\n![image](https://user-images.githubusercontent.com/62245023/134791412-1ff85ba5-5eb9-4859-a3f1-3b48bdd5a6fa.png)\r\nThe default value is 'Noun'. But use the function pos_tag() to get the pos of the word, the value appears to come from several options.\n", "before_files": [{"content": "# Natural Language Toolkit: WordNet stemmer interface\n#\n# Copyright (C) 2001-2021 NLTK Project\n# Author: Steven Bird <[email protected]>\n# Edward Loper <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\nfrom nltk.corpus import wordnet\nfrom nltk.corpus.reader.wordnet import NOUN\n\n\nclass WordNetLemmatizer:\n \"\"\"\n WordNet Lemmatizer\n\n Lemmatize using WordNet's built-in morphy function.\n Returns the input word unchanged if it cannot be found in WordNet.\n\n >>> from nltk.stem import WordNetLemmatizer\n >>> wnl = WordNetLemmatizer()\n >>> print(wnl.lemmatize('dogs'))\n dog\n >>> print(wnl.lemmatize('churches'))\n church\n >>> print(wnl.lemmatize('aardwolves'))\n aardwolf\n >>> print(wnl.lemmatize('abaci'))\n abacus\n >>> print(wnl.lemmatize('hardrock'))\n hardrock\n \"\"\"\n\n def __init__(self):\n pass\n\n def lemmatize(self, word, pos=NOUN):\n lemmas = wordnet._morphy(word, pos)\n return min(lemmas, key=len) if lemmas else word\n\n def __repr__(self):\n return \"<WordNetLemmatizer>\"\n", "path": "nltk/stem/wordnet.py"}]}
1095
377
gh_patches_debug_21029
rasdani/github-patches
git_diff
PlasmaPy__PlasmaPy-655
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create classes to represent ionization state distributions My plan for this PR is to create classes to represent the ionization state distributions of one or more elements. I am going to add in a bunch of dunder methods like `__getitem__` and maybe `__call__` to help making access to the ionization states more straightfoward and intuitive. Any suggestions on the naming convention will be helpful so that we can maximize readability. Eventually we'll need a way to calculate ionization state distributions assuming collisional ionization equilibrium, but that will be for a different PR. The purpose of this PR is to set up how to store and access the ionization distributions. This will be discussed in #352. This will address some of #352. It will probably be best to wait until after the `0.1.0` release to merge this, since this PR is only for a partial implementation anyway. </issue> <code> [start of plasmapy/examples/plot_dispersion_function.py] 1 """ 2 The plasma dispersion function 3 ============================== 4 5 Let's import some basics (and `PlasmaPy`!) 6 """ 7 8 9 import numpy as np 10 import matplotlib.pyplot as plt 11 import plasmapy 12 13 14 ####################################################################### 15 help(plasmapy.mathematics.plasma_dispersion_func) 16 17 18 ####################################################################### 19 # We'll now make some sample data to visualize the dispersion function: 20 21 x = np.linspace(-1, 1, 1000) 22 X, Y = np.meshgrid(x, x) 23 Z = X + 1j * Y 24 print(Z.shape) 25 26 ####################################################################### 27 # Before we start plotting, let's make a visualization function first: 28 29 30 def plot_complex(X, Y, Z, N=50): 31 fig, (real_axis, imag_axis) = plt.subplots(1, 2) 32 real_axis.contourf(X, Y, Z.real, N) 33 imag_axis.contourf(X, Y, Z.imag, N) 34 real_axis.set_title("Real values") 35 imag_axis.set_title("Imaginary values") 36 for ax in [real_axis, imag_axis]: 37 ax.set_xlabel("Real values") 38 ax.set_ylabel("Imaginary values") 39 fig.tight_layout() 40 41 42 plot_complex(X, Y, Z) 43 44 ####################################################################### 45 # We can now apply our visualization function to our simple 46 47 F = plasmapy.mathematics.plasma_dispersion_func(Z) 48 plot_complex(X, Y, F) 49 50 51 ####################################################################### 52 # So this is going to be a hack and I'm not 100% sure the dispersion function 53 # is quite what I think it is, but let's find the area where the dispersion 54 # function has a lesser than zero real part because I think it may be important 55 # (brb reading Fried and Conte): 56 57 plot_complex(X, Y, F.real < 0) 58 59 60 ####################################################################### 61 # We can also visualize the derivative: 62 63 F = plasmapy.mathematics.plasma_dispersion_func_deriv(Z) 64 plot_complex(X, Y, F) 65 66 ####################################################################### 67 # Plotting the same function on a larger area: 68 69 x = np.linspace(-2, 2, 2000) 70 X, Y = np.meshgrid(x, x) 71 Z = X + 1j * Y 72 print(Z.shape) 73 74 ####################################################################### 75 76 F = plasmapy.mathematics.plasma_dispersion_func(Z) 77 plot_complex(X, Y, F, 100) 78 79 ####################################################################### 80 # Now we examine the derivative of the dispersion function as a function 81 # of the phase velocity of an electromagnetic wave propagating through 82 # the plasma. This is recreating figure 5.1 in: 83 # J. Sheffield, D. Froula, S. H. Glenzer, and N. C. Luhmann Jr, 84 # Plasma scattering of electromagnetic radiation: theory and measurement 85 # techniques. Chapter 5 Pg 106 (Academic press, 2010). 86 87 xs = np.linspace(0, 4, 100) 88 ws = (-1 / 2) * plasmapy.mathematics.plasma_dispersion_func_deriv(xs) 89 wRe = np.real(ws) 90 wIm = np.imag(ws) 91 92 plt.plot(xs, wRe, label="Re") 93 plt.plot(xs, wIm, label="Im") 94 plt.axis([0, 4, -0.3, 1]) 95 plt.legend(loc='upper right', 96 frameon=False, 97 labelspacing=0.001, 98 fontsize=14, 99 borderaxespad=0.1) 100 plt.show() [end of plasmapy/examples/plot_dispersion_function.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plasmapy/examples/plot_dispersion_function.py b/plasmapy/examples/plot_dispersion_function.py --- a/plasmapy/examples/plot_dispersion_function.py +++ b/plasmapy/examples/plot_dispersion_function.py @@ -10,7 +10,6 @@ import matplotlib.pyplot as plt import plasmapy - ####################################################################### help(plasmapy.mathematics.plasma_dispersion_func) @@ -41,9 +40,10 @@ plot_complex(X, Y, Z) -####################################################################### -# We can now apply our visualization function to our simple +############################################################################### +# We can now apply our visualization function to our simple dispersion relation +# sphinx_gallery_thumbnail_number = 2 F = plasmapy.mathematics.plasma_dispersion_func(Z) plot_complex(X, Y, F) @@ -97,4 +97,4 @@ labelspacing=0.001, fontsize=14, borderaxespad=0.1) -plt.show() \ No newline at end of file +plt.show()
{"golden_diff": "diff --git a/plasmapy/examples/plot_dispersion_function.py b/plasmapy/examples/plot_dispersion_function.py\n--- a/plasmapy/examples/plot_dispersion_function.py\n+++ b/plasmapy/examples/plot_dispersion_function.py\n@@ -10,7 +10,6 @@\n import matplotlib.pyplot as plt\n import plasmapy\n \n-\n #######################################################################\n help(plasmapy.mathematics.plasma_dispersion_func)\n \n@@ -41,9 +40,10 @@\n \n plot_complex(X, Y, Z)\n \n-#######################################################################\n-# We can now apply our visualization function to our simple\n+###############################################################################\n+# We can now apply our visualization function to our simple dispersion relation\n \n+# sphinx_gallery_thumbnail_number = 2\n F = plasmapy.mathematics.plasma_dispersion_func(Z)\n plot_complex(X, Y, F)\n \n@@ -97,4 +97,4 @@\n            labelspacing=0.001,\n            fontsize=14,\n            borderaxespad=0.1)\n-plt.show()\n\\ No newline at end of file\n+plt.show()\n", "issue": "Create classes to represent ionization state distributions\nMy plan for this PR is to create classes to represent the ionization state distributions of one or more elements. I am going to add in a bunch of dunder methods like `__getitem__` and maybe `__call__` to help making access to the ionization states more straightfoward and intuitive. Any suggestions on the naming convention will be helpful so that we can maximize readability. \r\n\r\nEventually we'll need a way to calculate ionization state distributions assuming collisional ionization equilibrium, but that will be for a different PR. The purpose of this PR is to set up how to store and access the ionization distributions. This will be discussed in #352.\r\n\r\nThis will address some of #352. It will probably be best to wait until after the `0.1.0` release to merge this, since this PR is only for a partial implementation anyway.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThe plasma dispersion function\n==============================\n\nLet's import some basics (and `PlasmaPy`!)\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plasmapy\n\n\n#######################################################################\nhelp(plasmapy.mathematics.plasma_dispersion_func)\n\n\n#######################################################################\n# We'll now make some sample data to visualize the dispersion function:\n\nx = np.linspace(-1, 1, 1000)\nX, Y = np.meshgrid(x, x)\nZ = X + 1j * Y\nprint(Z.shape)\n\n#######################################################################\n# Before we start plotting, let's make a visualization function first:\n\n\ndef plot_complex(X, Y, Z, N=50):\n    fig, (real_axis, imag_axis) = plt.subplots(1, 2)\n    real_axis.contourf(X, Y, Z.real, N)\n    imag_axis.contourf(X, Y, Z.imag, N)\n    real_axis.set_title(\"Real values\")\n    imag_axis.set_title(\"Imaginary values\")\n    for ax in [real_axis, imag_axis]:\n        ax.set_xlabel(\"Real values\")\n        ax.set_ylabel(\"Imaginary values\")\n    fig.tight_layout()\n\n\nplot_complex(X, Y, Z)\n\n#######################################################################\n# We can now apply our visualization function to our simple\n\nF = plasmapy.mathematics.plasma_dispersion_func(Z)\nplot_complex(X, Y, F)\n\n\n#######################################################################\n# So this is going to be a hack and I'm not 100% sure the dispersion function\n# is quite what I think it is, but let's find the area where the dispersion\n# function has a lesser than zero real part because I think it may be important\n# (brb reading Fried and Conte):\n\nplot_complex(X, Y, F.real < 0)\n\n\n#######################################################################\n# We can also visualize the derivative:\n\nF = plasmapy.mathematics.plasma_dispersion_func_deriv(Z)\nplot_complex(X, Y, F)\n\n#######################################################################\n# Plotting the same function on a larger area:\n\nx = np.linspace(-2, 2, 2000)\nX, Y = np.meshgrid(x, x)\nZ = X + 1j * Y\nprint(Z.shape)\n\n#######################################################################\n\nF = plasmapy.mathematics.plasma_dispersion_func(Z)\nplot_complex(X, Y, F, 100)\n\n#######################################################################\n# Now we examine the derivative of the dispersion function as a function\n# of the phase velocity of an electromagnetic wave propagating through\n# the plasma. This is recreating figure 5.1 in:\n# J. Sheffield, D. Froula, S. H. Glenzer, and N. C. Luhmann Jr,\n# Plasma scattering of electromagnetic radiation: theory and measurement\n# techniques. Chapter 5 Pg 106 (Academic press, 2010).\n\nxs = np.linspace(0, 4, 100)\nws = (-1 / 2) * plasmapy.mathematics.plasma_dispersion_func_deriv(xs)\nwRe = np.real(ws)\nwIm = np.imag(ws)\n\nplt.plot(xs, wRe, label=\"Re\")\nplt.plot(xs, wIm, label=\"Im\")\nplt.axis([0, 4, -0.3, 1])\nplt.legend(loc='upper right',\n           frameon=False,\n           labelspacing=0.001,\n           fontsize=14,\n           borderaxespad=0.1)\nplt.show()", "path": "plasmapy/examples/plot_dispersion_function.py"}]}
1687
240
gh_patches_debug_10872
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-1887
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Exception processing E3037 for AWS::S3::Bucket.Transition.TransitionDate ``` $ cfn-lint --version cfn-lint 0.44.5 ``` The `TransitionDate` property is defined with `PrimitiveType: "Timestamp"`: ```yaml AWSTemplateFormatVersion: 2010-09-09 Resources: Bucket: Type: AWS::S3::Bucket Properties: LifecycleConfiguration: Rules: - Status: Enabled Transitions: - StorageClass: INTELLIGENT_TIERING TransitionDate: 2021-01-01T00:00:00.000Z ``` This is a valid template and can be successfully deployed, but `cfn-lint` fails with: ``` $ cfn-lint scratch.yml E0002 Unknown exception while processing rule E3037: Object of type datetime is not JSON serializable scratch.yml:1:1 ``` Running with `--debug` shows the exception is generated at https://github.com/aws-cloudformation/cfn-python-lint/blob/c7658511bd7066417682103f21f71983c67ea6d0/src/cfnlint/rules/resources/properties/ListDuplicates.py#L36 Quoting the TransitionDate value suppresses this error, e.g. `TransitionDate: "2021-01-01T00:00:00.000Z"` </issue> <code> [start of src/cfnlint/rules/resources/properties/ListDuplicates.py] 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 import hashlib 6 import json 7 from cfnlint.rules import CloudFormationLintRule 8 from cfnlint.rules import RuleMatch 9 10 from cfnlint.helpers import RESOURCE_SPECS 11 12 13 class ListDuplicates(CloudFormationLintRule): 14 """Check if duplicates exist in a List""" 15 id = 'E3037' 16 shortdesc = 'Check if a list has duplicate values' 17 description = 'Certain lists don\'t support duplicate items. ' \ 18 'Check when duplicates are provided but not supported.' 19 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedvalue' 20 tags = ['resources', 'property', 'list'] 21 22 def initialize(self, cfn): 23 """Initialize the rule""" 24 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'): 25 self.resource_property_types.append(resource_type_spec) 26 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'): 27 self.resource_sub_property_types.append(property_type_spec) 28 29 def _check_duplicates(self, values, path, scenario=None): 30 """ Check for Duplicates """ 31 matches = [] 32 33 list_items = [] 34 if isinstance(values, list): 35 for index, value in enumerate(values): 36 value_hash = hashlib.sha1(json.dumps( 37 value, sort_keys=True).encode('utf-8')).hexdigest() 38 if value_hash in list_items: 39 if not scenario: 40 message = 'List has a duplicate value at {0}' 41 matches.append( 42 RuleMatch(path + [index], message.format('/'.join(map(str, path + [index]))))) 43 else: 44 scenario_text = ' and '.join( 45 ['condition "%s" is %s' % (k, v) for (k, v) in scenario.items()]) 46 message = 'List has a duplicate value at {0} when {1}' 47 matches.append(RuleMatch(path, message.format( 48 '/'.join(map(str, path)), scenario_text))) 49 50 list_items.append(value_hash) 51 52 return matches 53 54 def check_duplicates(self, values, path, cfn): 55 """ Check for duplicates """ 56 matches = [] 57 58 if isinstance(values, list): 59 matches.extend(self._check_duplicates(values, path)) 60 elif isinstance(values, dict): 61 props = cfn.get_object_without_conditions(values) 62 for prop in props: 63 matches.extend(self._check_duplicates( 64 prop.get('Object'), path, prop.get('Scenario'))) 65 66 return matches 67 68 def check(self, cfn, properties, value_specs, path): 69 """Check itself""" 70 matches = list() 71 for p_value, p_path in properties.items_safe(path[:]): 72 for prop in p_value: 73 if prop in value_specs: 74 property_type = value_specs.get(prop).get('Type') 75 duplicates_allowed = value_specs.get(prop).get('DuplicatesAllowed', True) 76 if property_type == 'List' and not duplicates_allowed: 77 matches.extend( 78 self.check_duplicates( 79 p_value[prop], p_path + [prop], cfn 80 ) 81 ) 82 83 return matches 84 85 def match_resource_sub_properties(self, properties, property_type, path, cfn): 86 """Match for sub properties""" 87 matches = list() 88 89 specs = RESOURCE_SPECS.get(cfn.regions[0]).get( 90 'PropertyTypes').get(property_type, {}).get('Properties', {}) 91 matches.extend(self.check(cfn, properties, specs, path)) 92 93 return matches 94 95 def match_resource_properties(self, properties, resource_type, path, cfn): 96 """Check CloudFormation Properties""" 97 matches = list() 98 99 specs = RESOURCE_SPECS.get(cfn.regions[0]).get( 100 'ResourceTypes').get(resource_type, {}).get('Properties', {}) 101 matches.extend(self.check(cfn, properties, specs, path)) 102 103 return matches 104 [end of src/cfnlint/rules/resources/properties/ListDuplicates.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/properties/ListDuplicates.py b/src/cfnlint/rules/resources/properties/ListDuplicates.py --- a/src/cfnlint/rules/resources/properties/ListDuplicates.py +++ b/src/cfnlint/rules/resources/properties/ListDuplicates.py @@ -34,7 +34,7 @@ if isinstance(values, list): for index, value in enumerate(values): value_hash = hashlib.sha1(json.dumps( - value, sort_keys=True).encode('utf-8')).hexdigest() + value, sort_keys=True, default=str).encode('utf-8')).hexdigest() if value_hash in list_items: if not scenario: message = 'List has a duplicate value at {0}'
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/ListDuplicates.py b/src/cfnlint/rules/resources/properties/ListDuplicates.py\n--- a/src/cfnlint/rules/resources/properties/ListDuplicates.py\n+++ b/src/cfnlint/rules/resources/properties/ListDuplicates.py\n@@ -34,7 +34,7 @@\n         if isinstance(values, list):\n             for index, value in enumerate(values):\n                 value_hash = hashlib.sha1(json.dumps(\n-                    value, sort_keys=True).encode('utf-8')).hexdigest()\n+                    value, sort_keys=True, default=str).encode('utf-8')).hexdigest()\n                 if value_hash in list_items:\n                     if not scenario:\n                         message = 'List has a duplicate value at {0}'\n", "issue": "Exception processing E3037 for AWS::S3::Bucket.Transition.TransitionDate\n```\r\n$ cfn-lint --version\r\ncfn-lint 0.44.5\r\n```\r\n\r\nThe `TransitionDate` property is defined with `PrimitiveType: \"Timestamp\"`:\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\n\r\nResources:\r\n  Bucket:\r\n    Type: AWS::S3::Bucket\r\n    Properties:\r\n      LifecycleConfiguration:\r\n        Rules:\r\n          - Status: Enabled\r\n            Transitions:\r\n              - StorageClass: INTELLIGENT_TIERING\r\n                TransitionDate: 2021-01-01T00:00:00.000Z\r\n```\r\n\r\nThis is a valid template and can be successfully deployed, but `cfn-lint` fails with:\r\n\r\n```\r\n$ cfn-lint scratch.yml\r\nE0002 Unknown exception while processing rule E3037: Object of type datetime is not JSON serializable\r\nscratch.yml:1:1\r\n```\r\n\r\nRunning with `--debug` shows the exception is generated at https://github.com/aws-cloudformation/cfn-python-lint/blob/c7658511bd7066417682103f21f71983c67ea6d0/src/cfnlint/rules/resources/properties/ListDuplicates.py#L36\r\n\r\nQuoting the TransitionDate value suppresses this error, e.g. `TransitionDate: \"2021-01-01T00:00:00.000Z\"`\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport hashlib\nimport json\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass ListDuplicates(CloudFormationLintRule):\n    \"\"\"Check if duplicates exist in a List\"\"\"\n    id = 'E3037'\n    shortdesc = 'Check if a list has duplicate values'\n    description = 'Certain lists don\\'t support duplicate items. ' \\\n                  'Check when duplicates are provided but not supported.'\n    source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedvalue'\n    tags = ['resources', 'property', 'list']\n\n    def initialize(self, cfn):\n        \"\"\"Initialize the rule\"\"\"\n        for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n            self.resource_property_types.append(resource_type_spec)\n        for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n            self.resource_sub_property_types.append(property_type_spec)\n\n    def _check_duplicates(self, values, path, scenario=None):\n        \"\"\" Check for Duplicates \"\"\"\n        matches = []\n\n        list_items = []\n        if isinstance(values, list):\n            for index, value in enumerate(values):\n                value_hash = hashlib.sha1(json.dumps(\n                    value, sort_keys=True).encode('utf-8')).hexdigest()\n                if value_hash in list_items:\n                    if not scenario:\n                        message = 'List has a duplicate value at {0}'\n                        matches.append(\n                            RuleMatch(path + [index], message.format('/'.join(map(str, path + [index])))))\n                    else:\n                        scenario_text = ' and '.join(\n                            ['condition \"%s\" is %s' % (k, v) for (k, v) in scenario.items()])\n                        message = 'List has a duplicate value at {0} when {1}'\n                        matches.append(RuleMatch(path, message.format(\n                            '/'.join(map(str, path)), scenario_text)))\n\n                list_items.append(value_hash)\n\n        return matches\n\n    def check_duplicates(self, values, path, cfn):\n        \"\"\" Check for duplicates \"\"\"\n        matches = []\n\n        if isinstance(values, list):\n            matches.extend(self._check_duplicates(values, path))\n        elif isinstance(values, dict):\n            props = cfn.get_object_without_conditions(values)\n            for prop in props:\n                matches.extend(self._check_duplicates(\n                    prop.get('Object'), path, prop.get('Scenario')))\n\n        return matches\n\n    def check(self, cfn, properties, value_specs, path):\n        \"\"\"Check itself\"\"\"\n        matches = list()\n        for p_value, p_path in properties.items_safe(path[:]):\n            for prop in p_value:\n                if prop in value_specs:\n                    property_type = value_specs.get(prop).get('Type')\n                    duplicates_allowed = value_specs.get(prop).get('DuplicatesAllowed', True)\n                    if property_type == 'List' and not duplicates_allowed:\n                        matches.extend(\n                            self.check_duplicates(\n                                p_value[prop], p_path + [prop], cfn\n                            )\n                        )\n\n        return matches\n\n    def match_resource_sub_properties(self, properties, property_type, path, cfn):\n        \"\"\"Match for sub properties\"\"\"\n        matches = list()\n\n        specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n            'PropertyTypes').get(property_type, {}).get('Properties', {})\n        matches.extend(self.check(cfn, properties, specs, path))\n\n        return matches\n\n    def match_resource_properties(self, properties, resource_type, path, cfn):\n        \"\"\"Check CloudFormation Properties\"\"\"\n        matches = list()\n\n        specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n            'ResourceTypes').get(resource_type, {}).get('Properties', {})\n        matches.extend(self.check(cfn, properties, specs, path))\n\n        return matches\n", "path": "src/cfnlint/rules/resources/properties/ListDuplicates.py"}]}
1957
156
gh_patches_debug_33558
rasdani/github-patches
git_diff
wagtail__wagtail-170
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Broken URL for jquery.ui.datepicker when 'en-US' used as lang This isn't a big deal at all, but wanted to post just in case anyone wants to take a look. When loading a page with `jquery.ui.datepicker.js`, I notice in console that a call to http://jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-en-US.js returns a 404. I searched out the CDN for the directory in which the file is attempting to be called: http://jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/ As you can see, there is no `../jquery.ui.datepicker-en-US.js` present (not that there necessarily ought to be) The call stems from: https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailadmin/templatetags/localize.py#L42 The interpolation inserts `en-US` into the URI Again, no big deal... just FYI Cheers, all! Edit: I should add, this issue does _not_ break usability - a fallback seems to be in place. </issue> <code> [start of wagtail/wagtailadmin/templatetags/localize.py] 1 from django import template 2 from django.conf import settings 3 from django.utils import formats 4 from django.utils.translation import get_language 5 6 register = template.Library() 7 8 # For reasons unkown, the el (greek) locale in django/conf/locale/el/formats.py 9 # *did not* contain a DATE_INPUT_FORMATS -- so it fell back to using the US 10 # date format (mm/dd/yy) which is not the correct one for Greece (dd/mm/yy). 11 # This means that if we used a localized datepicker django *won't* be able to 12 # parse the dates! So a test here checks if DATE_INPUT_FORMATS is actually 13 # defined in a format module. If yes then it will just return an empty string 14 # so that the normal, localized date format from datepicker will be used. 15 # If DATE_INPUT_FORMATS is not defined then it will return 16 @register.assignment_tag 17 def get_date_format_override(): 18 if hasattr(settings, 'USE_I18N') and settings.USE_I18N==True: 19 20 for m in formats.get_format_modules(): 21 if hasattr(m, 'DATE_INPUT_FORMATS'): 22 return '' 23 else: # fall back to the ISO to be sure date will be parsed 24 return 'yy-mm-dd' 25 else: # Fall back to ISO if I18N is *not* used 26 return 'yy-mm-dd' 27 28 # Get the correct i18n + l10n settings for datepicker depending on current 29 # thread language 30 @register.simple_tag 31 def get_localized_datepicker_js(): 32 if hasattr(settings, 'USE_I18N') and settings.USE_I18N==True and \ 33 hasattr(settings, 'USE_L10N') and settings.USE_L10N==True: 34 35 lang = get_language() 36 37 if '-' in lang: 38 lang_parts = lang.split('-') 39 lang = lang_parts[0].lower() +'-'+ lang_parts[1].upper() 40 else: 41 lang=lang.lower() 42 return '<script src="//jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-{0}.js"></script>'.format( 43 lang 44 ) 45 46 else: # Don't write anything if we don't use I18N and L10N 47 return '' 48 [end of wagtail/wagtailadmin/templatetags/localize.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailadmin/templatetags/localize.py b/wagtail/wagtailadmin/templatetags/localize.py --- a/wagtail/wagtailadmin/templatetags/localize.py +++ b/wagtail/wagtailadmin/templatetags/localize.py @@ -1,5 +1,6 @@ from django import template from django.conf import settings +from django.templatetags.static import static from django.utils import formats from django.utils.translation import get_language @@ -25,6 +26,15 @@ else: # Fall back to ISO if I18N is *not* used return 'yy-mm-dd' +# This is a list of all supported langs for jquery-ui datepicker which exist in +# wagtailadmin/js/venor/i18n/. In case any new translations are added there the +# language code should also be added in this list. +SUPPORTED_DATEPICKER_LANGS = ['af', 'ar-DZ', 'ar', 'az', 'be', 'bg', 'bs', 'ca', 'cs', 'cy-GB', 'da', 'de', + 'el', 'en-AU', 'en-GB', 'en-NZ', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fo', 'fr-CA', 'fr-CH', 'fr', 'gl', + 'he', 'hi', 'hr', 'hu', 'hy', 'id', 'is', 'it', 'ja', 'ka', 'kk', 'km', 'ko', 'ky', 'lb', 'lt', 'lv', + 'mk', 'ml', 'ms', 'nb', 'nl-BE', 'nl', 'nn', 'no', 'pl', 'pt-BR', 'pt', 'rm', 'ro', 'ru', 'sk', 'sl', 'sq', + 'sr-SR', 'sr', 'sv', 'ta', 'th', 'tj', 'tr', 'uk', 'vi', 'zh-CN', 'zh-HK', 'zh-TW' +] # Get the correct i18n + l10n settings for datepicker depending on current # thread language @register.simple_tag @@ -39,10 +49,14 @@ lang = lang_parts[0].lower() +'-'+ lang_parts[1].upper() else: lang=lang.lower() - return '<script src="//jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-{0}.js"></script>'.format( - lang - ) + if lang in SUPPORTED_DATEPICKER_LANGS: + translation_file = static("wagtailadmin/js/vendor/i18n/jquery.ui.datepicker-{0}.js".format( + lang + )) + return '<script src="{0}"></script>'.format(translation_file) + else: # Don't return anything if language is not supported + return '' - else: # Don't write anything if we don't use I18N and L10N + else: # Don't return anything if we don't use I18N and L10N return '' \ No newline at end of file
{"golden_diff": "diff --git a/wagtail/wagtailadmin/templatetags/localize.py b/wagtail/wagtailadmin/templatetags/localize.py\n--- a/wagtail/wagtailadmin/templatetags/localize.py\n+++ b/wagtail/wagtailadmin/templatetags/localize.py\n@@ -1,5 +1,6 @@\n from django import template\n from django.conf import settings\n+from django.templatetags.static import static\n from django.utils import formats\n from django.utils.translation import get_language\n \n@@ -25,6 +26,15 @@\n     else: # Fall back to ISO if I18N is *not* used\n         return 'yy-mm-dd'\n \n+# This is a list of all supported langs for jquery-ui datepicker which exist in\n+# wagtailadmin/js/venor/i18n/. In case any new translations are added there the\n+# language code should also be added in this list.\n+SUPPORTED_DATEPICKER_LANGS = ['af', 'ar-DZ', 'ar', 'az', 'be', 'bg', 'bs', 'ca', 'cs', 'cy-GB', 'da', 'de',\n+    'el', 'en-AU', 'en-GB', 'en-NZ', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fo', 'fr-CA', 'fr-CH', 'fr', 'gl',\n+    'he', 'hi', 'hr', 'hu', 'hy', 'id', 'is', 'it', 'ja', 'ka', 'kk', 'km', 'ko', 'ky', 'lb', 'lt', 'lv',\n+    'mk', 'ml', 'ms', 'nb', 'nl-BE', 'nl', 'nn', 'no', 'pl', 'pt-BR', 'pt', 'rm', 'ro', 'ru', 'sk', 'sl', 'sq',\n+    'sr-SR', 'sr', 'sv', 'ta', 'th', 'tj', 'tr', 'uk', 'vi', 'zh-CN', 'zh-HK', 'zh-TW'\n+]\n # Get the correct i18n + l10n settings for datepicker depending on current \n # thread language \n @register.simple_tag\n@@ -39,10 +49,14 @@\n             lang = lang_parts[0].lower() +'-'+ lang_parts[1].upper()\n         else:\n             lang=lang.lower()\n-        return '<script src=\"//jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-{0}.js\"></script>'.format(\n-            lang\n-        )\n+        if lang in SUPPORTED_DATEPICKER_LANGS:\n+            translation_file = static(\"wagtailadmin/js/vendor/i18n/jquery.ui.datepicker-{0}.js\".format(\n+                lang\n+            ))\n+            return '<script src=\"{0}\"></script>'.format(translation_file)\n+        else: # Don't return anything if language is not supported\n+            return ''\n \n-    else: # Don't write anything if we don't use I18N and L10N\n+    else: # Don't return anything if we don't use I18N and L10N\n         return '' \n \n\\ No newline at end of file\n", "issue": "Broken URL for jquery.ui.datepicker when 'en-US' used as lang \nThis isn't a big deal at all, but wanted to post just in case anyone wants to take a look.\n\nWhen loading a page with `jquery.ui.datepicker.js`, I notice in console that a call to http://jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-en-US.js returns a 404.\n\nI searched out the CDN for the directory in which the file is attempting to be called:\nhttp://jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/\n\nAs you can see, there is no `../jquery.ui.datepicker-en-US.js` present (not that there necessarily ought to be)\n\nThe call stems from:\nhttps://github.com/torchbox/wagtail/blob/master/wagtail/wagtailadmin/templatetags/localize.py#L42\n\nThe interpolation inserts `en-US` into the URI\n\nAgain, no big deal... just FYI\n\nCheers, all!\n\nEdit:\n\nI should add, this issue does _not_ break usability - a fallback seems to be in place.\n\n", "before_files": [{"content": "from django import template\nfrom django.conf import settings\nfrom django.utils import formats\nfrom django.utils.translation import get_language\n\nregister = template.Library()\n\n# For reasons unkown, the el (greek) locale in django/conf/locale/el/formats.py \n# *did not* contain a DATE_INPUT_FORMATS -- so it fell back to using the US \n# date format (mm/dd/yy) which is not the correct one for Greece (dd/mm/yy). \n# This means that if we used a localized datepicker django *won't* be able to\n# parse the dates! So a test here checks if DATE_INPUT_FORMATS is actually \n# defined in a format module. If yes then it will just return an empty string \n# so that the normal, localized date format from datepicker will be used.\n# If DATE_INPUT_FORMATS is not defined then it will return\[email protected]_tag\ndef get_date_format_override():\n    if hasattr(settings, 'USE_I18N') and settings.USE_I18N==True:\n        \n        for m in formats.get_format_modules():\n            if hasattr(m, 'DATE_INPUT_FORMATS'):\n                return ''\n            else: # fall back to the ISO to be sure date will be parsed\n                return 'yy-mm-dd'\n    else: # Fall back to ISO if I18N is *not* used\n        return 'yy-mm-dd'\n\n# Get the correct i18n + l10n settings for datepicker depending on current \n# thread language \[email protected]_tag\ndef get_localized_datepicker_js():\n    if hasattr(settings, 'USE_I18N') and settings.USE_I18N==True and \\\n        hasattr(settings, 'USE_L10N') and settings.USE_L10N==True:\n        \n        lang = get_language()\n        \n        if '-' in lang:\n            lang_parts = lang.split('-')\n            lang = lang_parts[0].lower() +'-'+ lang_parts[1].upper()\n        else:\n            lang=lang.lower()\n        return '<script src=\"//jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-{0}.js\"></script>'.format(\n            lang\n        )\n        \n    else: # Don't write anything if we don't use I18N and L10N\n        return '' \n    ", "path": "wagtail/wagtailadmin/templatetags/localize.py"}]}
1,371
736
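A side note on why the whitelist in this record's diff matters: the old template tag interpolated any normalized language code into a remote URL, so locales without a shipped translation file (such as the reporter's en-US) produced 404s. A minimal pure-Python sketch of the fixed behavior; the SUPPORTED set is an abbreviated stand-in for SUPPORTED_DATEPICKER_LANGS and the static prefix is hard-coded, both purely illustrative assumptions:

```python
SUPPORTED = {"el", "fr", "fr-CA", "pt-BR", "zh-CN"}  # abbreviated stand-in for SUPPORTED_DATEPICKER_LANGS

def datepicker_script(lang):
    parts = lang.split("-")
    if len(parts) == 2:
        lang = parts[0].lower() + "-" + parts[1].upper()  # "pt-br" -> "pt-BR"
    else:
        lang = lang.lower()
    if lang not in SUPPORTED:
        return ""  # no local file: emit nothing, datepicker falls back to English
    return '<script src="/static/wagtailadmin/js/vendor/i18n/jquery.ui.datepicker-{0}.js"></script>'.format(lang)

print(datepicker_script("pt-br"))  # shipped translation -> script tag
print(datepicker_script("en-US"))  # missing translation -> "" (the reported 404 case)
```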
gh_patches_debug_6828
rasdani/github-patches
git_diff
kartoza__prj.app-162
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Landing page gives a 404 </issue> <code> [start of django_project/base/views/error_views.py] 1 # coding=utf-8 2 """Our custom error views""" 3 from django.shortcuts import render_to_response 4 from django.template import RequestContext 5 from base.models.project import Project 6 7 8 def custom_404(request, template_name='404.html'): 9 """Our custom 404 view 10 11 We want to include a list of all public and approved Projects in the 404 12 view 13 :param request: Request obj 14 :type request: HttpRequest 15 16 :param template_name: The template to render 17 :type template_name: str 18 19 :return: Response obj 20 :rtype: HttpResponse 21 22 """ 23 public_projects = Project.objects.filter(approved=True, private=False) 24 return render_to_response(template_name, { 25 'request_path': request.path, 26 'projects': public_projects 27 }, context_instance=RequestContext(request)) 28 [end of django_project/base/views/error_views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django_project/base/views/error_views.py b/django_project/base/views/error_views.py --- a/django_project/base/views/error_views.py +++ b/django_project/base/views/error_views.py @@ -21,7 +21,11 @@ """ public_projects = Project.objects.filter(approved=True, private=False) - return render_to_response(template_name, { - 'request_path': request.path, - 'projects': public_projects - }, context_instance=RequestContext(request)) + + response = render_to_response( + template_name, { + 'request_path': request.path, + 'projects': public_projects}, + context_instance=RequestContext(request)) + response.status_code = 404 + return response
{"golden_diff": "diff --git a/django_project/base/views/error_views.py b/django_project/base/views/error_views.py\n--- a/django_project/base/views/error_views.py\n+++ b/django_project/base/views/error_views.py\n@@ -21,7 +21,11 @@\n \n \"\"\"\n public_projects = Project.objects.filter(approved=True, private=False)\n- return render_to_response(template_name, {\n- 'request_path': request.path,\n- 'projects': public_projects\n- }, context_instance=RequestContext(request))\n+\n+ response = render_to_response(\n+ template_name, {\n+ 'request_path': request.path,\n+ 'projects': public_projects},\n+ context_instance=RequestContext(request))\n+ response.status_code = 404\n+ return response\n", "issue": "Landing page gives a 404\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Our custom error views\"\"\"\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom base.models.project import Project\n\n\ndef custom_404(request, template_name='404.html'):\n \"\"\"Our custom 404 view\n\n We want to include a list of all public and approved Projects in the 404\n view\n :param request: Request obj\n :type request: HttpRequest\n\n :param template_name: The template to render\n :type template_name: str\n\n :return: Response obj\n :rtype: HttpResponse\n\n \"\"\"\n public_projects = Project.objects.filter(approved=True, private=False)\n return render_to_response(template_name, {\n 'request_path': request.path,\n 'projects': public_projects\n }, context_instance=RequestContext(request))\n", "path": "django_project/base/views/error_views.py"}]}
782
170
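For readers porting this record forward: render_to_response was removed in Django 3.0, but the substance of the fix, an explicit 404 status on an otherwise ordinary rendered page, carries over directly. A hedged sketch with current APIs (the template name and context key are illustrative, not the repo's exact values):

```python
from django.http import HttpResponse
from django.template.loader import render_to_string

def custom_404(request, template_name="404.html"):
    body = render_to_string(template_name, {"request_path": request.path}, request=request)
    return HttpResponse(body, status=404)  # without status=404 this "error page" would ship as a 200
```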
gh_patches_debug_38348
rasdani/github-patches
git_diff
PaddlePaddle__models-312
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problems with the resnet model configuration
The current resnet configuration has some issues; see https://github.com/PaddlePaddle/models/issues/308#issuecomment-331384031
</issue>
<code>
[start of image_classification/resnet.py]
1 import paddle.v2 as paddle
2 
3 __all__ = ['resnet_imagenet', 'resnet_cifar10']
4 
5 
6 def conv_bn_layer(input,
7                   ch_out,
8                   filter_size,
9                   stride,
10                   padding,
11                   active_type=paddle.activation.Relu(),
12                   ch_in=None):
13     tmp = paddle.layer.img_conv(
14         input=input,
15         filter_size=filter_size,
16         num_channels=ch_in,
17         num_filters=ch_out,
18         stride=stride,
19         padding=padding,
20         act=paddle.activation.Linear(),
21         bias_attr=False)
22     return paddle.layer.batch_norm(input=tmp, act=active_type)
23 
24 
25 def shortcut(input, ch_in, ch_out, stride):
26     if ch_in != ch_out:
27         return conv_bn_layer(input, ch_out, 1, stride, 0,
28                              paddle.activation.Linear())
29     else:
30         return input
31 
32 
33 def basicblock(input, ch_in, ch_out, stride):
34     short = shortcut(input, ch_in, ch_out, stride)
35     conv1 = conv_bn_layer(input, ch_out, 3, stride, 1)
36     conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear())
37     return paddle.layer.addto(
38         input=[short, conv2], act=paddle.activation.Relu())
39 
40 
41 def bottleneck(input, ch_in, ch_out, stride):
42     short = shortcut(input, ch_in, ch_out * 4, stride)
43     conv1 = conv_bn_layer(input, ch_out, 1, stride, 0)
44     conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1)
45     conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0,
46                           paddle.activation.Linear())
47     return paddle.layer.addto(
48         input=[short, conv3], act=paddle.activation.Relu())
49 
50 
51 def layer_warp(block_func, input, ch_in, ch_out, count, stride):
52     conv = block_func(input, ch_in, ch_out, stride)
53     for i in range(1, count):
54         conv = block_func(conv, ch_out, ch_out, 1)
55     return conv
56 
57 
58 def resnet_imagenet(input, class_dim, depth=50):
59     cfg = {
60         18: ([2, 2, 2, 1], basicblock),
61         34: ([3, 4, 6, 3], basicblock),
62         50: ([3, 4, 6, 3], bottleneck),
63         101: ([3, 4, 23, 3], bottleneck),
64         152: ([3, 8, 36, 3], bottleneck)
65     }
66     stages, block_func = cfg[depth]
67     conv1 = conv_bn_layer(
68         input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3)
69     pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2)
70     res1 = layer_warp(block_func, pool1, 64, 64, stages[0], 1)
71     res2 = layer_warp(block_func, res1, 64, 128, stages[1], 2)
72     res3 = layer_warp(block_func, res2, 128, 256, stages[2], 2)
73     res4 = layer_warp(block_func, res3, 256, 512, stages[3], 2)
74     pool2 = paddle.layer.img_pool(
75         input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg())
76     out = paddle.layer.fc(
77         input=pool2, size=class_dim, act=paddle.activation.Softmax())
78     return out
79 
80 
81 def resnet_cifar10(input, class_dim, depth=32):
82     # depth should be one of 20, 32, 44, 56, 110, 1202
83     assert (depth - 2) % 6 == 0
84     n = (depth - 2) / 6
85     nStages = {16, 64, 128}
86     conv1 = conv_bn_layer(
87         input, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1)
88     res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
89     res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
90     res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
91     pool = paddle.layer.img_pool(
92         input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg())
93     out = paddle.layer.fc(
94         input=pool, size=class_dim, act=paddle.activation.Softmax())
95     return out
96 
[end of image_classification/resnet.py]
</code>
I need you to solve this issue by generating a single patch file that I can
apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/image_classification/resnet.py b/image_classification/resnet.py --- a/image_classification/resnet.py +++ b/image_classification/resnet.py @@ -22,24 +22,24 @@ return paddle.layer.batch_norm(input=tmp, act=active_type) -def shortcut(input, ch_in, ch_out, stride): - if ch_in != ch_out: +def shortcut(input, ch_out, stride): + if input.num_filters != ch_out: return conv_bn_layer(input, ch_out, 1, stride, 0, paddle.activation.Linear()) else: return input -def basicblock(input, ch_in, ch_out, stride): - short = shortcut(input, ch_in, ch_out, stride) +def basicblock(input, ch_out, stride): + short = shortcut(input, ch_out, stride) conv1 = conv_bn_layer(input, ch_out, 3, stride, 1) conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear()) return paddle.layer.addto( input=[short, conv2], act=paddle.activation.Relu()) -def bottleneck(input, ch_in, ch_out, stride): - short = shortcut(input, ch_in, ch_out * 4, stride) +def bottleneck(input, ch_out, stride): + short = shortcut(input, ch_out * 4, stride) conv1 = conv_bn_layer(input, ch_out, 1, stride, 0) conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1) conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0, @@ -48,10 +48,10 @@ input=[short, conv3], act=paddle.activation.Relu()) -def layer_warp(block_func, input, ch_in, ch_out, count, stride): - conv = block_func(input, ch_in, ch_out, stride) +def layer_warp(block_func, input, ch_out, count, stride): + conv = block_func(input, ch_out, stride) for i in range(1, count): - conv = block_func(conv, ch_out, ch_out, 1) + conv = block_func(conv, ch_out, 1) return conv @@ -67,10 +67,10 @@ conv1 = conv_bn_layer( input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3) pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2) - res1 = layer_warp(block_func, pool1, 64, 64, stages[0], 1) - res2 = layer_warp(block_func, res1, 64, 128, stages[1], 2) - res3 = layer_warp(block_func, res2, 128, 256, stages[2], 2) - res4 = layer_warp(block_func, res3, 256, 512, stages[3], 2) + res1 = layer_warp(block_func, pool1, 64, stages[0], 1) + res2 = layer_warp(block_func, res1, 128, stages[1], 2) + res3 = layer_warp(block_func, res2, 256, stages[2], 2) + res4 = layer_warp(block_func, res3, 512, stages[3], 2) pool2 = paddle.layer.img_pool( input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg()) out = paddle.layer.fc(
{"golden_diff": "diff --git a/image_classification/resnet.py b/image_classification/resnet.py\n--- a/image_classification/resnet.py\n+++ b/image_classification/resnet.py\n@@ -22,24 +22,24 @@\n return paddle.layer.batch_norm(input=tmp, act=active_type)\n \n \n-def shortcut(input, ch_in, ch_out, stride):\n- if ch_in != ch_out:\n+def shortcut(input, ch_out, stride):\n+ if input.num_filters != ch_out:\n return conv_bn_layer(input, ch_out, 1, stride, 0,\n paddle.activation.Linear())\n else:\n return input\n \n \n-def basicblock(input, ch_in, ch_out, stride):\n- short = shortcut(input, ch_in, ch_out, stride)\n+def basicblock(input, ch_out, stride):\n+ short = shortcut(input, ch_out, stride)\n conv1 = conv_bn_layer(input, ch_out, 3, stride, 1)\n conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear())\n return paddle.layer.addto(\n input=[short, conv2], act=paddle.activation.Relu())\n \n \n-def bottleneck(input, ch_in, ch_out, stride):\n- short = shortcut(input, ch_in, ch_out * 4, stride)\n+def bottleneck(input, ch_out, stride):\n+ short = shortcut(input, ch_out * 4, stride)\n conv1 = conv_bn_layer(input, ch_out, 1, stride, 0)\n conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1)\n conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0,\n@@ -48,10 +48,10 @@\n input=[short, conv3], act=paddle.activation.Relu())\n \n \n-def layer_warp(block_func, input, ch_in, ch_out, count, stride):\n- conv = block_func(input, ch_in, ch_out, stride)\n+def layer_warp(block_func, input, ch_out, count, stride):\n+ conv = block_func(input, ch_out, stride)\n for i in range(1, count):\n- conv = block_func(conv, ch_out, ch_out, 1)\n+ conv = block_func(conv, ch_out, 1)\n return conv\n \n \n@@ -67,10 +67,10 @@\n conv1 = conv_bn_layer(\n input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3)\n pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2)\n- res1 = layer_warp(block_func, pool1, 64, 64, stages[0], 1)\n- res2 = layer_warp(block_func, res1, 64, 128, stages[1], 2)\n- res3 = layer_warp(block_func, res2, 128, 256, stages[2], 2)\n- res4 = layer_warp(block_func, res3, 256, 512, stages[3], 2)\n+ res1 = layer_warp(block_func, pool1, 64, stages[0], 1)\n+ res2 = layer_warp(block_func, res1, 128, stages[1], 2)\n+ res3 = layer_warp(block_func, res2, 256, stages[2], 2)\n+ res4 = layer_warp(block_func, res3, 512, stages[3], 2)\n pool2 = paddle.layer.img_pool(\n input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg())\n out = paddle.layer.fc(\n", "issue": "resnet\u6a21\u578b\u914d\u7f6e\u7684\u95ee\u9898\n\u76ee\u524dresnet\u7684\u914d\u7f6e\u6709\u4e00\u4e9b\u95ee\u9898\uff0c\u53ef\u89c1 https://github.com/PaddlePaddle/models/issues/308#issuecomment-331384031\n", "before_files": [{"content": "import paddle.v2 as paddle\n\n__all__ = ['resnet_imagenet', 'resnet_cifar10']\n\n\ndef conv_bn_layer(input,\n ch_out,\n filter_size,\n stride,\n padding,\n active_type=paddle.activation.Relu(),\n ch_in=None):\n tmp = paddle.layer.img_conv(\n input=input,\n filter_size=filter_size,\n num_channels=ch_in,\n num_filters=ch_out,\n stride=stride,\n padding=padding,\n act=paddle.activation.Linear(),\n bias_attr=False)\n return paddle.layer.batch_norm(input=tmp, act=active_type)\n\n\ndef shortcut(input, ch_in, ch_out, stride):\n if ch_in != ch_out:\n return conv_bn_layer(input, ch_out, 1, stride, 0,\n paddle.activation.Linear())\n else:\n return input\n\n\ndef basicblock(input, ch_in, ch_out, stride):\n short = shortcut(input, ch_in, ch_out, stride)\n conv1 = conv_bn_layer(input, ch_out, 3, stride, 1)\n conv2 = 
conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear())\n return paddle.layer.addto(\n input=[short, conv2], act=paddle.activation.Relu())\n\n\ndef bottleneck(input, ch_in, ch_out, stride):\n short = shortcut(input, ch_in, ch_out * 4, stride)\n conv1 = conv_bn_layer(input, ch_out, 1, stride, 0)\n conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1)\n conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0,\n paddle.activation.Linear())\n return paddle.layer.addto(\n input=[short, conv3], act=paddle.activation.Relu())\n\n\ndef layer_warp(block_func, input, ch_in, ch_out, count, stride):\n conv = block_func(input, ch_in, ch_out, stride)\n for i in range(1, count):\n conv = block_func(conv, ch_out, ch_out, 1)\n return conv\n\n\ndef resnet_imagenet(input, class_dim, depth=50):\n cfg = {\n 18: ([2, 2, 2, 1], basicblock),\n 34: ([3, 4, 6, 3], basicblock),\n 50: ([3, 4, 6, 3], bottleneck),\n 101: ([3, 4, 23, 3], bottleneck),\n 152: ([3, 8, 36, 3], bottleneck)\n }\n stages, block_func = cfg[depth]\n conv1 = conv_bn_layer(\n input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3)\n pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2)\n res1 = layer_warp(block_func, pool1, 64, 64, stages[0], 1)\n res2 = layer_warp(block_func, res1, 64, 128, stages[1], 2)\n res3 = layer_warp(block_func, res2, 128, 256, stages[2], 2)\n res4 = layer_warp(block_func, res3, 256, 512, stages[3], 2)\n pool2 = paddle.layer.img_pool(\n input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg())\n out = paddle.layer.fc(\n input=pool2, size=class_dim, act=paddle.activation.Softmax())\n return out\n\n\ndef resnet_cifar10(input, class_dim, depth=32):\n # depth should be one of 20, 32, 44, 56, 110, 1202\n assert (depth - 2) % 6 == 0\n n = (depth - 2) / 6\n nStages = {16, 64, 128}\n conv1 = conv_bn_layer(\n input, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1)\n res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)\n res2 = layer_warp(basicblock, res1, 16, 32, n, 2)\n res3 = layer_warp(basicblock, res2, 32, 64, n, 2)\n pool = paddle.layer.img_pool(\n input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg())\n out = paddle.layer.fc(\n input=pool, size=class_dim, act=paddle.activation.Softmax())\n return out\n", "path": "image_classification/resnet.py"}]}
1,880
851
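The heart of this record's diff is a bookkeeping change rather than a new layer: the shortcut decides whether to project based on the channel count already recorded on the layer object, instead of a hand-threaded ch_in that can drift out of sync with the tensor's real width (the bug the issue links to). A framework-neutral sketch of that rule, where the Layer class is a stand-in for a Paddle layer exposing num_filters:

```python
class Layer:
    def __init__(self, num_filters):
        self.num_filters = num_filters  # channel count the framework tracks for us

def shortcut(input_layer, ch_out):
    if input_layer.num_filters != ch_out:
        return "1x1 conv projection"  # widths differ, so project the residual branch
    return "identity"                 # same width, pass the input through unchanged

print(shortcut(Layer(64), 64))   # identity
print(shortcut(Layer(64), 256))  # 1x1 conv projection (e.g. a bottleneck expansion)
```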
gh_patches_debug_30359
rasdani/github-patches
git_diff
apluslms__a-plus-1293
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Course staff may create duplicate student groups Course staff may create student groups (course/models.py class StudentGroup) that contain exactly the same group members as an existing group. Duplicate groups should not be allowed. The course staff UI for editing groups is in the URL http://localhost:8000/def/current/teachers/groups/ (in the left navigation menu, it is the "Groups" link under the heading Course staff). Course staff may also create new groups (or edit existing groups) that are empty (no members) or only have one member. Groups should always have at least two members. When students create groups in the "form a group" page (with user personal codes), A+ already prevents empty and duplicate groups. </issue> <code> [start of course/forms.py] 1 from typing import Any 2 3 from django import forms 4 from django.contrib.humanize.templatetags.humanize import ordinal 5 from django.utils.safestring import mark_safe 6 from django.utils.text import format_lazy 7 from django.utils.translation import gettext_lazy as _ 8 9 from aplus.api import api_reverse 10 from exercise.models import SubmissionDraft 11 from lib.fields import UsersSearchSelectField 12 from .models import Enrollment, StudentGroup 13 from userprofile.models import UserProfile 14 15 16 class GroupsForm(forms.Form): 17 18 def __init__(self, *args, **kwargs): 19 self.profile = kwargs.pop('profile') 20 self.instance = kwargs.pop('instance') 21 self.content = kwargs.pop('content') 22 super().__init__(*args, **kwargs) 23 total = self.content.total() 24 min_size = max(total.min_group_size, 2) 25 max_size = total.max_group_size 26 27 for n in range(2, max_size + 1): 28 widget = forms.TextInput(attrs={'class':'form-control'}) 29 field = forms.CharField(widget=widget, required=(n <= min_size)) 30 field.label = mark_safe(format_lazy(_('GROUP_MEMBER_LABEL -- {num}'), num=ordinal(n))) 31 self.fields['member{:d}'.format(n)] = field 32 33 def clean(self): 34 super().clean() 35 36 self.member_profiles = [self.profile] 37 for key in self.fields.keys(): 38 if key in self.cleaned_data and self.cleaned_data[key]: 39 enrollment = Enrollment.objects.filter( 40 course_instance=self.instance, 41 personal_code=self.cleaned_data[key].upper() 42 ).first() 43 if not enrollment: 44 self.add_error(key, _('ERROR_CODE_NOT_RECOGNIZED')) 45 elif enrollment.user_profile in self.member_profiles: 46 self.add_error(key, _('ERROR_USER_ALREADY_IN_GROUP')) 47 else: 48 self.member_profiles.append(enrollment.user_profile) 49 50 if not self.errors and len(self.member_profiles) > 1: 51 if StudentGroup.get_exact(self.instance, self.member_profiles): 52 self.add_error(None, _('ERROR_GROUP_ALREADY_EXISTS')) 53 54 return self.cleaned_data 55 56 def save(self): 57 group = StudentGroup(course_instance=self.instance) 58 group.save() 59 group.members.add(*self.member_profiles) 60 return group 61 62 63 class GroupSelectForm(forms.Form): 64 group = forms.IntegerField(required=True) 65 66 def __init__(self, *args, **kwargs): 67 self.profile = kwargs.pop('profile') 68 self.instance = kwargs.pop('instance') 69 super().__init__(*args, **kwargs) 70 71 def clean(self): 72 super().clean() 73 self.selected_group = None 74 if 'group' in self.cleaned_data: 75 gid = self.cleaned_data['group'] 76 if gid != 0: 77 group = self.profile.groups.filter(id=gid, course_instance=self.instance).first() 78 if group: 79 self.selected_group = group 80 else: 81 self.add_error('group', 'Invalid 
group id') 82 return self.cleaned_data 83 84 def save(self) -> Enrollment: 85 enrollment = self.instance.get_enrollment_for(self.profile.user) 86 enrollment.selected_group = self.selected_group 87 enrollment.save() 88 # Deactivate all drafts when changing groups. 89 SubmissionDraft.objects.filter( 90 exercise__course_module__course_instance=self.instance, 91 submitter=self.profile, 92 active=True, 93 ).update(active=False) 94 return enrollment 95 96 97 class GroupEditForm(forms.ModelForm): 98 99 members = UsersSearchSelectField(queryset=UserProfile.objects.none(), 100 initial_queryset=UserProfile.objects.none(), 101 label=_('LABEL_MEMBERS'), 102 ) 103 104 def __init__(self, *args: Any, **kwargs: Any) -> None: 105 course_instance = kwargs.get('instance').course_instance 106 super().__init__(*args, **kwargs) 107 self.fields['members'].widget.search_api_url = api_reverse( 108 "course-students-list", 109 kwargs={'course_id': course_instance.id}, 110 ) 111 self.fields["members"].queryset = course_instance.get_student_profiles() 112 # Course staff may use this form for modifying and creating student groups. 113 # If an existing group is being modified, its current members must be 114 # set to the initial queryset. 115 if self.instance.id: 116 self.fields["members"].initial_queryset = self.instance.members.all() 117 118 class Meta: 119 model = StudentGroup 120 fields = ['members'] 121 122 123 class EnrollStudentsForm(forms.Form): 124 125 user_profiles = UsersSearchSelectField(queryset=UserProfile.objects.all(), 126 initial_queryset=UserProfile.objects.none(), 127 label=_('LABEL_USERS'), 128 required=False, 129 ) 130 131 def __init__(self, *args: Any, **kwargs: Any) -> None: 132 self.instance = kwargs.pop('instance') 133 super().__init__(*args, **kwargs) 134 self.fields['user_profiles'].widget.search_api_url = api_reverse("user-list") 135 if self.instance.sis_id: 136 self.fields['sis'] = forms.BooleanField( 137 required=False, 138 label=_('LABEL_ENROLL_FROM_SIS'), 139 ) 140 [end of course/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/course/forms.py b/course/forms.py --- a/course/forms.py +++ b/course/forms.py @@ -5,6 +5,7 @@ from django.utils.safestring import mark_safe from django.utils.text import format_lazy from django.utils.translation import gettext_lazy as _ +from django.db.models import Count from aplus.api import api_reverse from exercise.models import SubmissionDraft @@ -115,6 +116,30 @@ if self.instance.id: self.fields["members"].initial_queryset = self.instance.members.all() + def clean(self): + super().clean() + members = self.cleaned_data.get('members') + if members: + if len(members) == 1: + self.add_error('members', _('MUST_HAVE_TWO_MEMBERS')) + course_instance = self.instance.course_instance + # Filter all groups with course instance and that have one or more similar members as in the members list + filtered_groups = StudentGroup.objects.filter(course_instance=course_instance, members__in=members) + # Count number of members in each group + groups_with_member_count = filtered_groups.annotate(member_count=Count('members')) + # Filter only those groups that have same number of members + groups_with_exact_member_count = groups_with_member_count.filter(member_count=len(members)) + # Loop through the returned groups and check if any group with exact same members exist + group_exists = False + for group in groups_with_exact_member_count: + group_members = group.members.all() + if list(group_members) == list(members): + group_exists = True + if group_exists: + self.add_error('members', _('ERROR_GROUP_ALREADY_EXISTS')) + return self.cleaned_data + + class Meta: model = StudentGroup fields = ['members']
{"golden_diff": "diff --git a/course/forms.py b/course/forms.py\n--- a/course/forms.py\n+++ b/course/forms.py\n@@ -5,6 +5,7 @@\n from django.utils.safestring import mark_safe\n from django.utils.text import format_lazy\n from django.utils.translation import gettext_lazy as _\n+from django.db.models import Count\n \n from aplus.api import api_reverse\n from exercise.models import SubmissionDraft\n@@ -115,6 +116,30 @@\n if self.instance.id:\n self.fields[\"members\"].initial_queryset = self.instance.members.all()\n \n+ def clean(self):\n+ super().clean()\n+ members = self.cleaned_data.get('members')\n+ if members:\n+ if len(members) == 1:\n+ self.add_error('members', _('MUST_HAVE_TWO_MEMBERS'))\n+ course_instance = self.instance.course_instance\n+ # Filter all groups with course instance and that have one or more similar members as in the members list\n+ filtered_groups = StudentGroup.objects.filter(course_instance=course_instance, members__in=members)\n+ # Count number of members in each group\n+ groups_with_member_count = filtered_groups.annotate(member_count=Count('members'))\n+ # Filter only those groups that have same number of members\n+ groups_with_exact_member_count = groups_with_member_count.filter(member_count=len(members))\n+ # Loop through the returned groups and check if any group with exact same members exist\n+ group_exists = False\n+ for group in groups_with_exact_member_count:\n+ group_members = group.members.all()\n+ if list(group_members) == list(members):\n+ group_exists = True\n+ if group_exists:\n+ self.add_error('members', _('ERROR_GROUP_ALREADY_EXISTS'))\n+ return self.cleaned_data\n+\n+\n class Meta:\n model = StudentGroup\n fields = ['members']\n", "issue": "Course staff may create duplicate student groups\nCourse staff may create student groups (course/models.py class StudentGroup) that contain exactly the same group members as an existing group. Duplicate groups should not be allowed. The course staff UI for editing groups is in the URL http://localhost:8000/def/current/teachers/groups/ (in the left navigation menu, it is the \"Groups\" link under the heading Course staff).\r\n\r\nCourse staff may also create new groups (or edit existing groups) that are empty (no members) or only have one member. 
Groups should always have at least two members.\r\n\r\nWhen students create groups in the \"form a group\" page (with user personal codes), A+ already prevents empty and duplicate groups.\n", "before_files": [{"content": "from typing import Any\n\nfrom django import forms\nfrom django.contrib.humanize.templatetags.humanize import ordinal\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\n\nfrom aplus.api import api_reverse\nfrom exercise.models import SubmissionDraft\nfrom lib.fields import UsersSearchSelectField\nfrom .models import Enrollment, StudentGroup\nfrom userprofile.models import UserProfile\n\n\nclass GroupsForm(forms.Form):\n\n def __init__(self, *args, **kwargs):\n self.profile = kwargs.pop('profile')\n self.instance = kwargs.pop('instance')\n self.content = kwargs.pop('content')\n super().__init__(*args, **kwargs)\n total = self.content.total()\n min_size = max(total.min_group_size, 2)\n max_size = total.max_group_size\n\n for n in range(2, max_size + 1):\n widget = forms.TextInput(attrs={'class':'form-control'})\n field = forms.CharField(widget=widget, required=(n <= min_size))\n field.label = mark_safe(format_lazy(_('GROUP_MEMBER_LABEL -- {num}'), num=ordinal(n)))\n self.fields['member{:d}'.format(n)] = field\n\n def clean(self):\n super().clean()\n\n self.member_profiles = [self.profile]\n for key in self.fields.keys():\n if key in self.cleaned_data and self.cleaned_data[key]:\n enrollment = Enrollment.objects.filter(\n course_instance=self.instance,\n personal_code=self.cleaned_data[key].upper()\n ).first()\n if not enrollment:\n self.add_error(key, _('ERROR_CODE_NOT_RECOGNIZED'))\n elif enrollment.user_profile in self.member_profiles:\n self.add_error(key, _('ERROR_USER_ALREADY_IN_GROUP'))\n else:\n self.member_profiles.append(enrollment.user_profile)\n\n if not self.errors and len(self.member_profiles) > 1:\n if StudentGroup.get_exact(self.instance, self.member_profiles):\n self.add_error(None, _('ERROR_GROUP_ALREADY_EXISTS'))\n\n return self.cleaned_data\n\n def save(self):\n group = StudentGroup(course_instance=self.instance)\n group.save()\n group.members.add(*self.member_profiles)\n return group\n\n\nclass GroupSelectForm(forms.Form):\n group = forms.IntegerField(required=True)\n\n def __init__(self, *args, **kwargs):\n self.profile = kwargs.pop('profile')\n self.instance = kwargs.pop('instance')\n super().__init__(*args, **kwargs)\n\n def clean(self):\n super().clean()\n self.selected_group = None\n if 'group' in self.cleaned_data:\n gid = self.cleaned_data['group']\n if gid != 0:\n group = self.profile.groups.filter(id=gid, course_instance=self.instance).first()\n if group:\n self.selected_group = group\n else:\n self.add_error('group', 'Invalid group id')\n return self.cleaned_data\n\n def save(self) -> Enrollment:\n enrollment = self.instance.get_enrollment_for(self.profile.user)\n enrollment.selected_group = self.selected_group\n enrollment.save()\n # Deactivate all drafts when changing groups.\n SubmissionDraft.objects.filter(\n exercise__course_module__course_instance=self.instance,\n submitter=self.profile,\n active=True,\n ).update(active=False)\n return enrollment\n\n\nclass GroupEditForm(forms.ModelForm):\n\n members = UsersSearchSelectField(queryset=UserProfile.objects.none(),\n initial_queryset=UserProfile.objects.none(),\n label=_('LABEL_MEMBERS'),\n )\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n course_instance = 
kwargs.get('instance').course_instance\n super().__init__(*args, **kwargs)\n self.fields['members'].widget.search_api_url = api_reverse(\n \"course-students-list\",\n kwargs={'course_id': course_instance.id},\n )\n self.fields[\"members\"].queryset = course_instance.get_student_profiles()\n # Course staff may use this form for modifying and creating student groups.\n # If an existing group is being modified, its current members must be\n # set to the initial queryset.\n if self.instance.id:\n self.fields[\"members\"].initial_queryset = self.instance.members.all()\n\n class Meta:\n model = StudentGroup\n fields = ['members']\n\n\nclass EnrollStudentsForm(forms.Form):\n\n user_profiles = UsersSearchSelectField(queryset=UserProfile.objects.all(),\n initial_queryset=UserProfile.objects.none(),\n label=_('LABEL_USERS'),\n required=False,\n )\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n self.instance = kwargs.pop('instance')\n super().__init__(*args, **kwargs)\n self.fields['user_profiles'].widget.search_api_url = api_reverse(\"user-list\")\n if self.instance.sis_id:\n self.fields['sis'] = forms.BooleanField(\n required=False,\n label=_('LABEL_ENROLL_FROM_SIS'),\n )\n", "path": "course/forms.py"}]}
2,043
400
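The validation added above reads more clearly once stripped of ORM plumbing: reject groups with fewer than two members, and reject any member set that exactly matches an existing group. A plain-Python sketch using sets (the ORM version narrows candidates with members__in plus a Count annotation first, purely for efficiency; comparing as sets also sidesteps the ordering sensitivity of the list equality used in the diff):

```python
def validate_group(candidate, existing_groups):
    members = set(candidate)
    if len(members) < 2:
        return "MUST_HAVE_TWO_MEMBERS"
    if any(set(group) == members for group in existing_groups):
        return "ERROR_GROUP_ALREADY_EXISTS"
    return None  # acceptable group

existing = [{"alice", "bob"}, {"carol", "dave", "erin"}]
print(validate_group(["bob", "alice"], existing))    # ERROR_GROUP_ALREADY_EXISTS
print(validate_group(["alice"], existing))           # MUST_HAVE_TWO_MEMBERS
print(validate_group(["alice", "carol"], existing))  # None
```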
gh_patches_debug_24936
rasdani/github-patches
git_diff
DDMAL__CantusDB-733
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Links to unpublished sources should not appear on Provenance detail pages Example: visit http://206.12.93.196/provenance/3665 (while logged out), click on first link. We get a 403 Forbidden error, since the source is unpublished. Unpublished sources should not be listed on the Provenance Detail page. Credit to @zhannaklimanova and her link checker script for catching this bug! </issue> <code> [start of django/cantusdb_project/main_app/views/provenance.py] 1 from django.views.generic import DetailView 2 from main_app.models import Provenance 3 4 5 class ProvenanceDetailView(DetailView): 6 model = Provenance 7 context_object_name = "provenance" 8 template_name = "provenance_detail.html" 9 [end of django/cantusdb_project/main_app/views/provenance.py] [start of django/cantusdb_project/main_app/views/century.py] 1 from django.views.generic import DetailView 2 from main_app.models import Century, Source 3 from typing import Any 4 5 6 class CenturyDetailView(DetailView): 7 model = Century 8 context_object_name = "century" 9 template_name = "century_detail.html" 10 11 def get_context_data(self, **kwargs: Any) -> dict[str, Any]: 12 context = super().get_context_data(**kwargs) 13 century = self.get_object() 14 user = self.request.user 15 display_unpublished = user.is_authenticated 16 sources = Source.objects.filter(century=century) 17 if not display_unpublished: 18 sources = sources.filter(published=True) 19 sources = sources.only("title", "id") 20 context["sources"] = sources 21 return context 22 [end of django/cantusdb_project/main_app/views/century.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/views/century.py b/django/cantusdb_project/main_app/views/century.py --- a/django/cantusdb_project/main_app/views/century.py +++ b/django/cantusdb_project/main_app/views/century.py @@ -16,6 +16,6 @@ sources = Source.objects.filter(century=century) if not display_unpublished: sources = sources.filter(published=True) - sources = sources.only("title", "id") + sources = sources.only("title", "id", "siglum") context["sources"] = sources return context diff --git a/django/cantusdb_project/main_app/views/provenance.py b/django/cantusdb_project/main_app/views/provenance.py --- a/django/cantusdb_project/main_app/views/provenance.py +++ b/django/cantusdb_project/main_app/views/provenance.py @@ -1,8 +1,21 @@ from django.views.generic import DetailView -from main_app.models import Provenance +from main_app.models import Provenance, Source +from typing import Any class ProvenanceDetailView(DetailView): model = Provenance context_object_name = "provenance" template_name = "provenance_detail.html" + + def get_context_data(self, **kwargs: Any) -> dict[str, Any]: + context = super().get_context_data(**kwargs) + provenance = self.get_object() + user = self.request.user + display_unpublished = user.is_authenticated + sources = Source.objects.filter(provenance=provenance) + if not display_unpublished: + sources = sources.filter(published=True) + sources = sources.only("title", "id", "siglum") + context["sources"] = sources + return context
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/century.py b/django/cantusdb_project/main_app/views/century.py\n--- a/django/cantusdb_project/main_app/views/century.py\n+++ b/django/cantusdb_project/main_app/views/century.py\n@@ -16,6 +16,6 @@\n sources = Source.objects.filter(century=century)\n if not display_unpublished:\n sources = sources.filter(published=True)\n- sources = sources.only(\"title\", \"id\")\n+ sources = sources.only(\"title\", \"id\", \"siglum\")\n context[\"sources\"] = sources\n return context\ndiff --git a/django/cantusdb_project/main_app/views/provenance.py b/django/cantusdb_project/main_app/views/provenance.py\n--- a/django/cantusdb_project/main_app/views/provenance.py\n+++ b/django/cantusdb_project/main_app/views/provenance.py\n@@ -1,8 +1,21 @@\n from django.views.generic import DetailView\n-from main_app.models import Provenance\n+from main_app.models import Provenance, Source\n+from typing import Any\n \n \n class ProvenanceDetailView(DetailView):\n model = Provenance\n context_object_name = \"provenance\"\n template_name = \"provenance_detail.html\"\n+\n+ def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n+ context = super().get_context_data(**kwargs)\n+ provenance = self.get_object()\n+ user = self.request.user\n+ display_unpublished = user.is_authenticated\n+ sources = Source.objects.filter(provenance=provenance)\n+ if not display_unpublished:\n+ sources = sources.filter(published=True)\n+ sources = sources.only(\"title\", \"id\", \"siglum\")\n+ context[\"sources\"] = sources\n+ return context\n", "issue": "Links to unpublished sources should not appear on Provenance detail pages\nExample: visit http://206.12.93.196/provenance/3665 (while logged out), click on first link. We get a 403 Forbidden error, since the source is unpublished.\r\n\r\nUnpublished sources should not be listed on the Provenance Detail page.\r\n\r\nCredit to @zhannaklimanova and her link checker script for catching this bug!\n", "before_files": [{"content": "from django.views.generic import DetailView\nfrom main_app.models import Provenance\n\n\nclass ProvenanceDetailView(DetailView):\n model = Provenance\n context_object_name = \"provenance\"\n template_name = \"provenance_detail.html\"\n", "path": "django/cantusdb_project/main_app/views/provenance.py"}, {"content": "from django.views.generic import DetailView\nfrom main_app.models import Century, Source\nfrom typing import Any\n\n\nclass CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n\n def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n context = super().get_context_data(**kwargs)\n century = self.get_object()\n user = self.request.user\n display_unpublished = user.is_authenticated\n sources = Source.objects.filter(century=century)\n if not display_unpublished:\n sources = sources.filter(published=True)\n sources = sources.only(\"title\", \"id\")\n context[\"sources\"] = sources\n return context\n", "path": "django/cantusdb_project/main_app/views/century.py"}]}
944
426
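Both detail views in this record converge on one small rule that is easy to exercise in isolation: anonymous visitors see only published sources. A sketch with plain dicts standing in for the Source queryset and request.user:

```python
def visible_sources(rows, authenticated):
    if authenticated:
        return rows  # logged-in users may see unpublished sources too
    return [row for row in rows if row["published"]]

rows = [{"id": 1, "published": True}, {"id": 2, "published": False}]
print(visible_sources(rows, authenticated=False))  # anonymous: published row only
print(visible_sources(rows, authenticated=True))   # logged in: both rows
```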
gh_patches_debug_39481
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3317
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider lowes is broken During the global build at 2021-06-02-14-42-40, spider **lowes** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/lowes.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lowes.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lowes.geojson)) </issue> <code> [start of locations/spiders/lowes.py] 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import re 4 import json 5 from locations.items import GeojsonPointItem 6 from locations.hours import OpeningHours 7 8 9 day_mapping = {'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th', 'Friday': 'Fr', 'Saturday': 'Sa', 10 'Sunday': 'Su'} 11 12 13 class LowesSpider(scrapy.Spider): 14 """"This spider scrapes Lowes retail store locations""" 15 name = "lowes" 16 item_attributes = { 'brand': "Lowe's", 'brand_wikidata': "Q1373493" } 17 allowed_domains = ["lowes.com"] 18 start_urls = ('https://www.lowes.com/Lowes-Stores',) 19 download_delay = 0.5 20 21 custom_settings = { 22 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36', 23 } 24 25 def parse_hours(self, store_hours): 26 opening_hours = OpeningHours() 27 28 for weekday in store_hours: 29 day = weekday.get('day').get('day') 30 open_time = weekday.get('day').get('open') 31 hour, minute, sec = open_time.split('.') 32 open_time_formatted = hour + ':' + minute 33 34 close = weekday.get('day').get('close') 35 hour, minute, sec = close.split('.') 36 close_time_formatted = hour + ':' + minute 37 38 if close_time_formatted in {'00:00', '24:00'}: 39 close_time_formatted = "23:59" 40 41 opening_hours.add_range(day=day_mapping[day], 42 open_time=open_time_formatted, 43 close_time=close_time_formatted) 44 45 return opening_hours.as_opening_hours() 46 47 def parse_store(self, response): 48 ref = re.search(r'.+/(.+)', response.url).group(1) 49 50 script_content = response.xpath('//script[contains(text(),"storeHours")]/text()').extract_first() 51 if not script_content: 52 return 53 54 # effectively strip off leading "window.__PRELOADED_STATE__ = " where 55 # the rest is a json blob 56 script_data = script_content.split(" = ", 1)[-1] 57 json_data = json.loads(script_data) 58 store_hours = json_data.get('storeHours') 59 60 state_texts = response.xpath('//span[@itemprop="addressRegion"]/text()').extract() 61 properties = { 62 'lat': float(json_data['storeDetails']['lat']), 63 'lon': float(json_data['storeDetails']['long']), 64 'ref': ref, 65 'addr_full': response.xpath('normalize-space(//span[@itemprop="streetAddress"]/text())').extract_first(), 66 'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first(), 67 'state': " ".join(text.strip() for text in state_texts if text.strip()), 68 'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(), 69 'phone': response.xpath('normalize-space(//meta[@itemprop="telephone"]/@content)').extract_first(), 70 'website': response.request.url, 71 'opening_hours': self.parse_hours(store_hours), 72 'extras': { 73 'amenity:toilets': True, 74 }, 75 } 76 77 yield GeojsonPointItem(**properties) 78 79 def parse_state(self, response): 80 city_urls = response.xpath('//div[@class="v-spacing-small"]/a/@href').extract() 
81 for path in city_urls: 82 yield scrapy.Request(response.urljoin(path), callback=self.parse_store) 83 84 def parse(self, response): 85 urls = response.xpath('//div[@id="mainContent"]//li[@role="listitem"]/a/@href').extract() 86 for path in urls: 87 yield scrapy.Request(response.urljoin(path), callback=self.parse_state) 88 [end of locations/spiders/lowes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/lowes.py b/locations/spiders/lowes.py --- a/locations/spiders/lowes.py +++ b/locations/spiders/lowes.py @@ -6,16 +6,23 @@ from locations.hours import OpeningHours -day_mapping = {'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th', 'Friday': 'Fr', 'Saturday': 'Sa', - 'Sunday': 'Su'} +day_mapping = { + 'Monday': 'Mo', + 'Tuesday': 'Tu', + 'Wednesday': 'We', + 'Thursday': 'Th', + 'Friday': 'Fr', + 'Saturday': 'Sa', + 'Sunday': 'Su', +} class LowesSpider(scrapy.Spider): """"This spider scrapes Lowes retail store locations""" name = "lowes" - item_attributes = { 'brand': "Lowe's", 'brand_wikidata': "Q1373493" } + item_attributes = {'brand': "Lowe's", 'brand_wikidata': "Q1373493"} allowed_domains = ["lowes.com"] - start_urls = ('https://www.lowes.com/Lowes-Stores',) + start_urls = ('https://www.lowes.com/sitemap/store0.xml',) download_delay = 0.5 custom_settings = { @@ -59,14 +66,14 @@ state_texts = response.xpath('//span[@itemprop="addressRegion"]/text()').extract() properties = { - 'lat': float(json_data['storeDetails']['lat']), - 'lon': float(json_data['storeDetails']['long']), - 'ref': ref, - 'addr_full': response.xpath('normalize-space(//span[@itemprop="streetAddress"]/text())').extract_first(), - 'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first(), - 'state': " ".join(text.strip() for text in state_texts if text.strip()), - 'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(), - 'phone': response.xpath('normalize-space(//meta[@itemprop="telephone"]/@content)').extract_first(), + 'lat': json_data['storeDetails']['lat'], + 'lon': json_data['storeDetails']['long'], + 'ref': json_data['storeDetails']['id'], + 'addr_full': json_data['storeDetails']['address'], + 'city': json_data['storeDetails']['city'], + 'state': json_data['storeDetails']['state'], + 'postcode': json_data['storeDetails']['zip'], + 'phone': json_data['storeDetails']['phone'], 'website': response.request.url, 'opening_hours': self.parse_hours(store_hours), 'extras': { @@ -76,12 +83,9 @@ yield GeojsonPointItem(**properties) - def parse_state(self, response): - city_urls = response.xpath('//div[@class="v-spacing-small"]/a/@href').extract() - for path in city_urls: - yield scrapy.Request(response.urljoin(path), callback=self.parse_store) - def parse(self, response): - urls = response.xpath('//div[@id="mainContent"]//li[@role="listitem"]/a/@href').extract() - for path in urls: - yield scrapy.Request(response.urljoin(path), callback=self.parse_state) + response.selector.remove_namespaces() + urls = response.xpath('//url/loc/text()').extract() + + for url in urls: + yield scrapy.Request(url, callback=self.parse_store)
{"golden_diff": "diff --git a/locations/spiders/lowes.py b/locations/spiders/lowes.py\n--- a/locations/spiders/lowes.py\n+++ b/locations/spiders/lowes.py\n@@ -6,16 +6,23 @@\n from locations.hours import OpeningHours\n \n \n-day_mapping = {'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th', 'Friday': 'Fr', 'Saturday': 'Sa',\n- 'Sunday': 'Su'}\n+day_mapping = {\n+ 'Monday': 'Mo',\n+ 'Tuesday': 'Tu',\n+ 'Wednesday': 'We',\n+ 'Thursday': 'Th',\n+ 'Friday': 'Fr',\n+ 'Saturday': 'Sa',\n+ 'Sunday': 'Su',\n+}\n \n \n class LowesSpider(scrapy.Spider):\n \"\"\"\"This spider scrapes Lowes retail store locations\"\"\"\n name = \"lowes\"\n- item_attributes = { 'brand': \"Lowe's\", 'brand_wikidata': \"Q1373493\" }\n+ item_attributes = {'brand': \"Lowe's\", 'brand_wikidata': \"Q1373493\"}\n allowed_domains = [\"lowes.com\"]\n- start_urls = ('https://www.lowes.com/Lowes-Stores',)\n+ start_urls = ('https://www.lowes.com/sitemap/store0.xml',)\n download_delay = 0.5\n \n custom_settings = {\n@@ -59,14 +66,14 @@\n \n state_texts = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract()\n properties = {\n- 'lat': float(json_data['storeDetails']['lat']),\n- 'lon': float(json_data['storeDetails']['long']),\n- 'ref': ref,\n- 'addr_full': response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/text())').extract_first(),\n- 'city': response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first(),\n- 'state': \" \".join(text.strip() for text in state_texts if text.strip()),\n- 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n- 'phone': response.xpath('normalize-space(//meta[@itemprop=\"telephone\"]/@content)').extract_first(),\n+ 'lat': json_data['storeDetails']['lat'],\n+ 'lon': json_data['storeDetails']['long'],\n+ 'ref': json_data['storeDetails']['id'],\n+ 'addr_full': json_data['storeDetails']['address'],\n+ 'city': json_data['storeDetails']['city'],\n+ 'state': json_data['storeDetails']['state'],\n+ 'postcode': json_data['storeDetails']['zip'],\n+ 'phone': json_data['storeDetails']['phone'],\n 'website': response.request.url,\n 'opening_hours': self.parse_hours(store_hours),\n 'extras': {\n@@ -76,12 +83,9 @@\n \n yield GeojsonPointItem(**properties)\n \n- def parse_state(self, response):\n- city_urls = response.xpath('//div[@class=\"v-spacing-small\"]/a/@href').extract()\n- for path in city_urls:\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_store)\n-\n def parse(self, response):\n- urls = response.xpath('//div[@id=\"mainContent\"]//li[@role=\"listitem\"]/a/@href').extract()\n- for path in urls:\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_state)\n+ response.selector.remove_namespaces()\n+ urls = response.xpath('//url/loc/text()').extract()\n+\n+ for url in urls:\n+ yield scrapy.Request(url, callback=self.parse_store)\n", "issue": "Spider lowes is broken\nDuring the global build at 2021-06-02-14-42-40, spider **lowes** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/lowes.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lowes.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lowes.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nimport json\nfrom locations.items import GeojsonPointItem\nfrom locations.hours 
import OpeningHours\n\n\nday_mapping = {'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th', 'Friday': 'Fr', 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\n\nclass LowesSpider(scrapy.Spider):\n \"\"\"\"This spider scrapes Lowes retail store locations\"\"\"\n name = \"lowes\"\n item_attributes = { 'brand': \"Lowe's\", 'brand_wikidata': \"Q1373493\" }\n allowed_domains = [\"lowes.com\"]\n start_urls = ('https://www.lowes.com/Lowes-Stores',)\n download_delay = 0.5\n\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n\n for weekday in store_hours:\n day = weekday.get('day').get('day')\n open_time = weekday.get('day').get('open')\n hour, minute, sec = open_time.split('.')\n open_time_formatted = hour + ':' + minute\n\n close = weekday.get('day').get('close')\n hour, minute, sec = close.split('.')\n close_time_formatted = hour + ':' + minute\n\n if close_time_formatted in {'00:00', '24:00'}:\n close_time_formatted = \"23:59\"\n\n opening_hours.add_range(day=day_mapping[day],\n open_time=open_time_formatted,\n close_time=close_time_formatted)\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n ref = re.search(r'.+/(.+)', response.url).group(1)\n\n script_content = response.xpath('//script[contains(text(),\"storeHours\")]/text()').extract_first()\n if not script_content:\n return\n\n # effectively strip off leading \"window.__PRELOADED_STATE__ = \" where\n # the rest is a json blob\n script_data = script_content.split(\" = \", 1)[-1]\n json_data = json.loads(script_data)\n store_hours = json_data.get('storeHours')\n\n state_texts = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract()\n properties = {\n 'lat': float(json_data['storeDetails']['lat']),\n 'lon': float(json_data['storeDetails']['long']),\n 'ref': ref,\n 'addr_full': response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/text())').extract_first(),\n 'city': response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first(),\n 'state': \" \".join(text.strip() for text in state_texts if text.strip()),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n 'phone': response.xpath('normalize-space(//meta[@itemprop=\"telephone\"]/@content)').extract_first(),\n 'website': response.request.url,\n 'opening_hours': self.parse_hours(store_hours),\n 'extras': {\n 'amenity:toilets': True,\n },\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_state(self, response):\n city_urls = response.xpath('//div[@class=\"v-spacing-small\"]/a/@href').extract()\n for path in city_urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_store)\n\n def parse(self, response):\n urls = response.xpath('//div[@id=\"mainContent\"]//li[@role=\"listitem\"]/a/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_state)\n", "path": "locations/spiders/lowes.py"}]}
1,766
831
gh_patches_debug_28882
rasdani/github-patches
git_diff
GeotrekCE__Geotrek-admin-1391
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> WYSIWYG for static pages Client-side WYSIWYG : - http://sofish.github.io/pen/ - https://github.com/mduvall/grande.js - http://imperavi.com/redactor/ - https://github.com/tholman/zenpen </issue> <code> [start of geotrek/flatpages/views.py] 1 from rest_framework import viewsets 2 3 from geotrek.flatpages.serializers import FlatPageSerializer 4 from geotrek.flatpages import models as flatpages_models 5 6 7 class FlatPageViewSet(viewsets.ModelViewSet): 8 """ 9 A viewset for viewing and editing flat pages instances. 10 """ 11 serializer_class = FlatPageSerializer 12 queryset = flatpages_models.FlatPage.objects.all() 13 [end of geotrek/flatpages/views.py] [start of geotrek/flatpages/admin.py] 1 from django.contrib import admin 2 from django.conf import settings 3 4 from modeltranslation.admin import TranslationAdmin 5 6 from geotrek.flatpages import models as flatpages_models 7 8 9 class FlatPagesAdmin(TranslationAdmin): 10 list_display = ('title', 'published', 'publication_date', 'target') 11 search_fields = ('title', 'content') 12 13 14 if settings.FLATPAGES_ENABLED: 15 admin.site.register(flatpages_models.FlatPage, FlatPagesAdmin) 16 [end of geotrek/flatpages/admin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/geotrek/flatpages/admin.py b/geotrek/flatpages/admin.py --- a/geotrek/flatpages/admin.py +++ b/geotrek/flatpages/admin.py @@ -2,6 +2,7 @@ from django.conf import settings from modeltranslation.admin import TranslationAdmin +from tinymce.widgets import TinyMCE from geotrek.flatpages import models as flatpages_models @@ -10,6 +11,11 @@ list_display = ('title', 'published', 'publication_date', 'target') search_fields = ('title', 'content') + def formfield_for_dbfield(self, db_field, **kwargs): + if db_field.name[:7] == 'content': + return db_field.formfield(widget=TinyMCE) + return super(FlatPagesAdmin, self).formfield_for_dbfield(db_field, **kwargs) + if settings.FLATPAGES_ENABLED: admin.site.register(flatpages_models.FlatPage, FlatPagesAdmin) diff --git a/geotrek/flatpages/views.py b/geotrek/flatpages/views.py --- a/geotrek/flatpages/views.py +++ b/geotrek/flatpages/views.py @@ -1,3 +1,4 @@ +from rest_framework import permissions as rest_permissions from rest_framework import viewsets from geotrek.flatpages.serializers import FlatPageSerializer @@ -8,5 +9,9 @@ """ A viewset for viewing and editing flat pages instances. """ + model = flatpages_models.FlatPage serializer_class = FlatPageSerializer - queryset = flatpages_models.FlatPage.objects.all() + permission_classes = [rest_permissions.DjangoModelPermissionsOrAnonReadOnly] + + def get_queryset(self): + return flatpages_models.FlatPage.objects.filter(published=True)
{"golden_diff": "diff --git a/geotrek/flatpages/admin.py b/geotrek/flatpages/admin.py\n--- a/geotrek/flatpages/admin.py\n+++ b/geotrek/flatpages/admin.py\n@@ -2,6 +2,7 @@\n from django.conf import settings\n \n from modeltranslation.admin import TranslationAdmin\n+from tinymce.widgets import TinyMCE\n \n from geotrek.flatpages import models as flatpages_models\n \n@@ -10,6 +11,11 @@\n list_display = ('title', 'published', 'publication_date', 'target')\n search_fields = ('title', 'content')\n \n+ def formfield_for_dbfield(self, db_field, **kwargs):\n+ if db_field.name[:7] == 'content':\n+ return db_field.formfield(widget=TinyMCE)\n+ return super(FlatPagesAdmin, self).formfield_for_dbfield(db_field, **kwargs)\n+\n \n if settings.FLATPAGES_ENABLED:\n admin.site.register(flatpages_models.FlatPage, FlatPagesAdmin)\ndiff --git a/geotrek/flatpages/views.py b/geotrek/flatpages/views.py\n--- a/geotrek/flatpages/views.py\n+++ b/geotrek/flatpages/views.py\n@@ -1,3 +1,4 @@\n+from rest_framework import permissions as rest_permissions\n from rest_framework import viewsets\n \n from geotrek.flatpages.serializers import FlatPageSerializer\n@@ -8,5 +9,9 @@\n \"\"\"\n A viewset for viewing and editing flat pages instances.\n \"\"\"\n+ model = flatpages_models.FlatPage\n serializer_class = FlatPageSerializer\n- queryset = flatpages_models.FlatPage.objects.all()\n+ permission_classes = [rest_permissions.DjangoModelPermissionsOrAnonReadOnly]\n+\n+ def get_queryset(self):\n+ return flatpages_models.FlatPage.objects.filter(published=True)\n", "issue": "WYSIWYG for static pages\nClient-side WYSIWYG : \n- http://sofish.github.io/pen/\n- https://github.com/mduvall/grande.js\n- http://imperavi.com/redactor/\n- https://github.com/tholman/zenpen\n\n", "before_files": [{"content": "from rest_framework import viewsets\n\nfrom geotrek.flatpages.serializers import FlatPageSerializer\nfrom geotrek.flatpages import models as flatpages_models\n\n\nclass FlatPageViewSet(viewsets.ModelViewSet):\n \"\"\"\n A viewset for viewing and editing flat pages instances.\n \"\"\"\n serializer_class = FlatPageSerializer\n queryset = flatpages_models.FlatPage.objects.all()\n", "path": "geotrek/flatpages/views.py"}, {"content": "from django.contrib import admin\nfrom django.conf import settings\n\nfrom modeltranslation.admin import TranslationAdmin\n\nfrom geotrek.flatpages import models as flatpages_models\n\n\nclass FlatPagesAdmin(TranslationAdmin):\n list_display = ('title', 'published', 'publication_date', 'target')\n search_fields = ('title', 'content')\n\n\nif settings.FLATPAGES_ENABLED:\n admin.site.register(flatpages_models.FlatPage, FlatPagesAdmin)\n", "path": "geotrek/flatpages/admin.py"}]}
843
401
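The Geotrek diff above wires a WYSIWYG editor into the flat-pages admin by overriding Django's `formfield_for_dbfield` hook so every translated `content*` field gets the TinyMCE widget, and it locks the REST viewset down to published pages with `DjangoModelPermissionsOrAnonReadOnly`. The admin half of that pattern is sketched below with Django's built-in `Textarea` standing in for `tinymce.widgets.TinyMCE` (the admin class name is hypothetical; assumes a configured Django project):

```python
from django.contrib import admin
from django.forms import Textarea  # stand-in for tinymce.widgets.TinyMCE


class FlatPageAdmin(admin.ModelAdmin):
    def formfield_for_dbfield(self, db_field, **kwargs):
        # Route every translated variant (content, content_en, content_fr, ...)
        # through the rich-text widget, as the golden diff does with name[:7].
        if db_field.name.startswith("content"):
            return db_field.formfield(widget=Textarea(attrs={"rows": 30}))
        return super().formfield_for_dbfield(db_field, **kwargs)
```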
gh_patches_debug_7795
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-4076
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> azure - unpinn EventGrid SDK version We need AdvancedFilters to be added to the stable version. https://pypi.org/project/azure-mgmt-eventgrid/ </issue> <code> [start of tools/c7n_azure/setup.py] 1 # Copyright 2018 Capital One Services, LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from io import open 16 from os import path 17 from setuptools import setup, find_packages 18 19 # read the contents of your README file 20 this_directory = path.abspath(path.dirname(__file__)) 21 readme = path.join(this_directory, 'readme.md') 22 long_description = '' 23 if path.exists(readme): 24 with open(readme, encoding='utf-8') as f: 25 long_description = f.read() 26 27 setup( 28 name="c7n_azure", 29 version='0.5.3', 30 description="Cloud Custodian - Azure Support", 31 long_description=long_description, 32 long_description_content_type='text/markdown', 33 classifiers=[ 34 "Topic :: System :: Systems Administration", 35 "Topic :: System :: Distributed Computing" 36 ], 37 url="https://github.com/cloud-custodian/cloud-custodian", 38 license="Apache-2.0", 39 packages=find_packages(), 40 entry_points={ 41 "custodian.resources": [ 42 'azure = c7n_azure.entry:initialize_azure'] 43 }, 44 install_requires=["azure-mgmt-authorization", 45 "azure-mgmt-applicationinsights==0.1.1", 46 "azure-mgmt-batch", 47 "azure-mgmt-cognitiveservices", 48 "azure-mgmt-cosmosdb", 49 "azure-mgmt-compute", 50 "azure-mgmt-cdn", 51 "azure-mgmt-containerregistry", 52 "azure-mgmt-containerservice", 53 "azure-mgmt-datalake-store", 54 "azure-mgmt-datafactory", 55 "azure-mgmt-iothub", 56 "azure-mgmt-keyvault", 57 "azure-mgmt-managementgroups", 58 "azure-mgmt-network", 59 "azure-mgmt-redis", 60 "azure-mgmt-resource==2.1.0", 61 "azure-mgmt-sql", 62 "azure-mgmt-storage", 63 "azure-mgmt-web", 64 "azure-mgmt-monitor", 65 "azure-mgmt-policyinsights", 66 "azure-mgmt-eventgrid==2.0.0rc2", # RC2 supports AdvancedFilters 67 "azure-graphrbac", 68 "azure-keyvault", 69 "azure-storage-blob", 70 "azure-storage-queue", 71 "distlib", 72 "requests", 73 "PyJWT", 74 "c7n", 75 "requests", 76 "azure-cli-core", 77 "adal", 78 "backports.functools_lru_cache", 79 "futures>=3.1.1", 80 "netaddr"], 81 package_data={str(''): [str('function_binding_resources/bin/*.dll'), 82 str('function_binding_resources/*.csproj'), 83 str('function_binding_resources/bin/*.json')]} 84 ) 85 [end of tools/c7n_azure/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/c7n_azure/setup.py b/tools/c7n_azure/setup.py --- a/tools/c7n_azure/setup.py +++ b/tools/c7n_azure/setup.py @@ -63,7 +63,7 @@ "azure-mgmt-web", "azure-mgmt-monitor", "azure-mgmt-policyinsights", - "azure-mgmt-eventgrid==2.0.0rc2", # RC2 supports AdvancedFilters + "azure-mgmt-eventgrid", "azure-graphrbac", "azure-keyvault", "azure-storage-blob",
{"golden_diff": "diff --git a/tools/c7n_azure/setup.py b/tools/c7n_azure/setup.py\n--- a/tools/c7n_azure/setup.py\n+++ b/tools/c7n_azure/setup.py\n@@ -63,7 +63,7 @@\n \"azure-mgmt-web\",\n \"azure-mgmt-monitor\",\n \"azure-mgmt-policyinsights\",\n- \"azure-mgmt-eventgrid==2.0.0rc2\", # RC2 supports AdvancedFilters\n+ \"azure-mgmt-eventgrid\",\n \"azure-graphrbac\",\n \"azure-keyvault\",\n \"azure-storage-blob\",\n", "issue": "azure - unpinn EventGrid SDK version\nWe need AdvancedFilters to be added to the stable version.\r\n\r\nhttps://pypi.org/project/azure-mgmt-eventgrid/\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom io import open\nfrom os import path\nfrom setuptools import setup, find_packages\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nreadme = path.join(this_directory, 'readme.md')\nlong_description = ''\nif path.exists(readme):\n with open(readme, encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"c7n_azure\",\n version='0.5.3',\n description=\"Cloud Custodian - Azure Support\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n classifiers=[\n \"Topic :: System :: Systems Administration\",\n \"Topic :: System :: Distributed Computing\"\n ],\n url=\"https://github.com/cloud-custodian/cloud-custodian\",\n license=\"Apache-2.0\",\n packages=find_packages(),\n entry_points={\n \"custodian.resources\": [\n 'azure = c7n_azure.entry:initialize_azure']\n },\n install_requires=[\"azure-mgmt-authorization\",\n \"azure-mgmt-applicationinsights==0.1.1\",\n \"azure-mgmt-batch\",\n \"azure-mgmt-cognitiveservices\",\n \"azure-mgmt-cosmosdb\",\n \"azure-mgmt-compute\",\n \"azure-mgmt-cdn\",\n \"azure-mgmt-containerregistry\",\n \"azure-mgmt-containerservice\",\n \"azure-mgmt-datalake-store\",\n \"azure-mgmt-datafactory\",\n \"azure-mgmt-iothub\",\n \"azure-mgmt-keyvault\",\n \"azure-mgmt-managementgroups\",\n \"azure-mgmt-network\",\n \"azure-mgmt-redis\",\n \"azure-mgmt-resource==2.1.0\",\n \"azure-mgmt-sql\",\n \"azure-mgmt-storage\",\n \"azure-mgmt-web\",\n \"azure-mgmt-monitor\",\n \"azure-mgmt-policyinsights\",\n \"azure-mgmt-eventgrid==2.0.0rc2\", # RC2 supports AdvancedFilters\n \"azure-graphrbac\",\n \"azure-keyvault\",\n \"azure-storage-blob\",\n \"azure-storage-queue\",\n \"distlib\",\n \"requests\",\n \"PyJWT\",\n \"c7n\",\n \"requests\",\n \"azure-cli-core\",\n \"adal\",\n \"backports.functools_lru_cache\",\n \"futures>=3.1.1\",\n \"netaddr\"],\n package_data={str(''): [str('function_binding_resources/bin/*.dll'),\n str('function_binding_resources/*.csproj'),\n str('function_binding_resources/bin/*.json')]}\n)\n", "path": "tools/c7n_azure/setup.py"}]}
1427
134
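The one-line fix above replaces an exact pin on a release candidate (`azure-mgmt-eventgrid==2.0.0rc2`) with an unpinned requirement. The distinction matters because `==2.0.0rc2` can never resolve to the stable release that now carries AdvancedFilters, while a looser specifier can. A quick standalone check with the `packaging` library (an illustrative script, not part of the repo):

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

stable = Version("2.0.0")
print(stable in SpecifierSet("==2.0.0rc2"))  # False: the RC pin excludes the stable release
print(stable in SpecifierSet(">=2.0.0rc2"))  # True: a range (or no pin at all) allows upgrading
```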
gh_patches_debug_41534
rasdani/github-patches
git_diff
dbt-labs__dbt-core-8855
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [CT-3107] [Bug] nested dependencies not installed when package is a tarball ### Is this a new bug in dbt-core? - [X] I believe this is a new bug in dbt-core - [X] I have searched the existing issues, and I could not find an existing issue for this bug ### Current Behavior when running `dbt deps` to install a package specified as a tarball, dbt doesn't install nested dependencies (i.e. packages specified in the imported package's `packages.yml` file) as it does when installing a package from local, git or the dbt hub. ### Expected Behavior consistent behaviour across import methods regarding nested dependencies. dbt should install any dependencies specified in the tarball project's packages.yml file. ### Steps To Reproduce this can be reproduced by importing the tarball of a package with nested dependencies. In this case, importing dbt_expectations should cause dbt_date to be installed, as its included in the package's dependencies here: https://github.com/calogica/dbt-expectations/blob/0.9.0/packages.yml Steps: 1. create a `packages.yml` file in a project with the following structure: ``` yaml packages: - tarball: "https://github.com/calogica/dbt-expectations/archive/refs/tags/0.9.0.tar.gz" name: "dbt_expectations" ``` 2. run `dbt deps` running dbt deps will only install dbt_expectations: ``` 20:08:55 Running with dbt=1.5.6 20:08:55 Installing dbt_expectations 20:08:56 Installed from tarball (url: https://github.com/calogica/dbt-expectations/archive/refs/tags/0.9.0.tar.gz) ``` compare this to installing the same package from dbt hub, with the following `packages.yml`: ``` yaml packages: - package: calogica/dbt_expectations version: "0.9.0" ``` ``` 20:14:24 Running with dbt=1.5.6 20:14:24 Installing calogica/dbt_expectations 20:14:25 Installed from version 0.9.0 20:14:25 Up to date! 20:14:25 Installing calogica/dbt_date 20:14:25 Installed from version 0.8.1 20:14:25 Updated version available: 0.9.1 20:14:25 20:14:25 Updates available for packages: ['calogica/dbt_date'] Update your versions in packages.yml, then run dbt deps ``` ### Relevant log output _No response_ ### Environment ```markdown - OS: Mac OS 13.5.2 (22G91) - Python: 3.9 - dbt: 1.5.6 ``` ### Which database adapter are you using with dbt? 
snowflake ### Additional Context _No response_ </issue> <code> [start of core/dbt/deps/tarball.py] 1 from typing import Dict 2 3 from dbt.contracts.project import RegistryPackageMetadata, TarballPackage 4 from dbt.deps.base import PinnedPackage, UnpinnedPackage 5 6 7 class TarballPackageMixin: 8 def __init__(self, tarball: str) -> None: 9 super().__init__() 10 self.tarball = tarball 11 12 @property 13 def name(self): 14 return self.tarball 15 16 def source_type(self) -> str: 17 return "tarball" 18 19 20 class TarballPinnedPackage(TarballPackageMixin, PinnedPackage): 21 def __init__(self, tarball: str, package: str) -> None: 22 super().__init__(tarball) 23 # setup to recycle RegistryPinnedPackage fns 24 self.package = package 25 self.version = "tarball" 26 27 @property 28 def name(self): 29 return self.package 30 31 def to_dict(self) -> Dict[str, str]: 32 return { 33 "tarball": self.tarball, 34 "version": self.version, 35 "package": self.package, 36 } 37 38 def get_version(self): 39 return self.version 40 41 def nice_version_name(self): 42 return f"tarball (url: {self.tarball})" 43 44 def _fetch_metadata(self, project, renderer): 45 """ 46 recycle RegistryPackageMetadata so that we can use the install and 47 download_and_untar from RegistryPinnedPackage next. 48 build RegistryPackageMetadata from info passed via packages.yml since no 49 'metadata' service exists in this case. 50 """ 51 52 dct = { 53 "name": self.package, 54 "packages": [], # note: required by RegistryPackageMetadata 55 "downloads": {"tarball": self.tarball}, 56 } 57 58 return RegistryPackageMetadata.from_dict(dct) 59 60 def install(self, project, renderer): 61 self._install(project, renderer) 62 63 64 class TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]): 65 def __init__( 66 self, 67 tarball: str, 68 package: str, 69 ) -> None: 70 super().__init__(tarball) 71 # setup to recycle RegistryPinnedPackage fns 72 self.package = package 73 self.version = "tarball" 74 75 @classmethod 76 def from_contract(cls, contract: TarballPackage) -> "TarballUnpinnedPackage": 77 return cls(tarball=contract.tarball, package=contract.name) 78 79 def incorporate(self, other: "TarballUnpinnedPackage") -> "TarballUnpinnedPackage": 80 return TarballUnpinnedPackage(tarball=self.tarball, package=self.package) 81 82 def resolved(self) -> TarballPinnedPackage: 83 return TarballPinnedPackage(tarball=self.tarball, package=self.package) 84 [end of core/dbt/deps/tarball.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py --- a/core/dbt/deps/tarball.py +++ b/core/dbt/deps/tarball.py @@ -1,7 +1,14 @@ +import functools +import os +from pathlib import Path from typing import Dict -from dbt.contracts.project import RegistryPackageMetadata, TarballPackage -from dbt.deps.base import PinnedPackage, UnpinnedPackage +from dbt.clients import system +from dbt.config.project import PartialProject +from dbt.contracts.project import TarballPackage +from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path +from dbt.exceptions import DependencyError +from dbt.utils import _connection_exception_retry as connection_exception_retry class TarballPackageMixin: @@ -20,9 +27,10 @@ class TarballPinnedPackage(TarballPackageMixin, PinnedPackage): def __init__(self, tarball: str, package: str) -> None: super().__init__(tarball) - # setup to recycle RegistryPinnedPackage fns self.package = package self.version = "tarball" + self.tar_path = os.path.join(Path(get_downloads_path()), self.package) + self.untarred_path = f"{self.tar_path}_untarred" @property def name(self): @@ -31,8 +39,7 @@ def to_dict(self) -> Dict[str, str]: return { "tarball": self.tarball, - "version": self.version, - "package": self.package, + "name": self.package, } def get_version(self): @@ -42,23 +49,38 @@ return f"tarball (url: {self.tarball})" def _fetch_metadata(self, project, renderer): - """ - recycle RegistryPackageMetadata so that we can use the install and - download_and_untar from RegistryPinnedPackage next. - build RegistryPackageMetadata from info passed via packages.yml since no - 'metadata' service exists in this case. - """ - - dct = { - "name": self.package, - "packages": [], # note: required by RegistryPackageMetadata - "downloads": {"tarball": self.tarball}, - } - - return RegistryPackageMetadata.from_dict(dct) + """Download and untar the project and parse metadata from the project folder.""" + download_untar_fn = functools.partial( + self.download_and_untar, self.tarball, self.tar_path, self.untarred_path, self.name + ) + connection_exception_retry(download_untar_fn, 5) + + tar_contents = os.listdir(self.untarred_path) + if len(tar_contents) != 1: + raise DependencyError( + f"Incorrect structure for package extracted from {self.tarball}." + f"The extracted package needs to follow the structure {self.name}/<package_contents>." + ) + child_folder = os.listdir(self.untarred_path)[0] + + self.untarred_path = os.path.join(self.untarred_path, child_folder) + partial = PartialProject.from_project_root(self.untarred_path) + metadata = partial.render_package_metadata(renderer) + metadata.name = self.package if self.package else metadata.name + return metadata def install(self, project, renderer): - self._install(project, renderer) + download_untar_fn = functools.partial( + self.download_and_untar, self.tarball, self.tar_path, self.untarred_path, self.name + ) + connection_exception_retry(download_untar_fn, 5) + dest_path = self.get_installation_path(project, renderer) + if os.path.exists(dest_path): + if system.path_is_symlink(dest_path): + system.remove_file(dest_path) + else: + system.rmdir(dest_path) + system.move(self.untarred_path, dest_path) class TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):
{"golden_diff": "diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py\n--- a/core/dbt/deps/tarball.py\n+++ b/core/dbt/deps/tarball.py\n@@ -1,7 +1,14 @@\n+import functools\n+import os\n+from pathlib import Path\n from typing import Dict\n \n-from dbt.contracts.project import RegistryPackageMetadata, TarballPackage\n-from dbt.deps.base import PinnedPackage, UnpinnedPackage\n+from dbt.clients import system\n+from dbt.config.project import PartialProject\n+from dbt.contracts.project import TarballPackage\n+from dbt.deps.base import PinnedPackage, UnpinnedPackage, get_downloads_path\n+from dbt.exceptions import DependencyError\n+from dbt.utils import _connection_exception_retry as connection_exception_retry\n \n \n class TarballPackageMixin:\n@@ -20,9 +27,10 @@\n class TarballPinnedPackage(TarballPackageMixin, PinnedPackage):\n def __init__(self, tarball: str, package: str) -> None:\n super().__init__(tarball)\n- # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n+ self.tar_path = os.path.join(Path(get_downloads_path()), self.package)\n+ self.untarred_path = f\"{self.tar_path}_untarred\"\n \n @property\n def name(self):\n@@ -31,8 +39,7 @@\n def to_dict(self) -> Dict[str, str]:\n return {\n \"tarball\": self.tarball,\n- \"version\": self.version,\n- \"package\": self.package,\n+ \"name\": self.package,\n }\n \n def get_version(self):\n@@ -42,23 +49,38 @@\n return f\"tarball (url: {self.tarball})\"\n \n def _fetch_metadata(self, project, renderer):\n- \"\"\"\n- recycle RegistryPackageMetadata so that we can use the install and\n- download_and_untar from RegistryPinnedPackage next.\n- build RegistryPackageMetadata from info passed via packages.yml since no\n- 'metadata' service exists in this case.\n- \"\"\"\n-\n- dct = {\n- \"name\": self.package,\n- \"packages\": [], # note: required by RegistryPackageMetadata\n- \"downloads\": {\"tarball\": self.tarball},\n- }\n-\n- return RegistryPackageMetadata.from_dict(dct)\n+ \"\"\"Download and untar the project and parse metadata from the project folder.\"\"\"\n+ download_untar_fn = functools.partial(\n+ self.download_and_untar, self.tarball, self.tar_path, self.untarred_path, self.name\n+ )\n+ connection_exception_retry(download_untar_fn, 5)\n+\n+ tar_contents = os.listdir(self.untarred_path)\n+ if len(tar_contents) != 1:\n+ raise DependencyError(\n+ f\"Incorrect structure for package extracted from {self.tarball}.\"\n+ f\"The extracted package needs to follow the structure {self.name}/<package_contents>.\"\n+ )\n+ child_folder = os.listdir(self.untarred_path)[0]\n+\n+ self.untarred_path = os.path.join(self.untarred_path, child_folder)\n+ partial = PartialProject.from_project_root(self.untarred_path)\n+ metadata = partial.render_package_metadata(renderer)\n+ metadata.name = self.package if self.package else metadata.name\n+ return metadata\n \n def install(self, project, renderer):\n- self._install(project, renderer)\n+ download_untar_fn = functools.partial(\n+ self.download_and_untar, self.tarball, self.tar_path, self.untarred_path, self.name\n+ )\n+ connection_exception_retry(download_untar_fn, 5)\n+ dest_path = self.get_installation_path(project, renderer)\n+ if os.path.exists(dest_path):\n+ if system.path_is_symlink(dest_path):\n+ system.remove_file(dest_path)\n+ else:\n+ system.rmdir(dest_path)\n+ system.move(self.untarred_path, dest_path)\n \n \n class TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):\n", "issue": "[CT-3107] [Bug] nested dependencies not 
installed when package is a tarball\n### Is this a new bug in dbt-core?\r\n\r\n- [X] I believe this is a new bug in dbt-core\r\n- [X] I have searched the existing issues, and I could not find an existing issue for this bug\r\n\r\n### Current Behavior\r\n\r\nwhen running `dbt deps` to install a package specified as a tarball, dbt doesn't install nested dependencies (i.e. packages specified in the imported package's `packages.yml` file) as it does when installing a package from local, git or the dbt hub.\r\n\r\n### Expected Behavior\r\n\r\nconsistent behaviour across import methods regarding nested dependencies. dbt should install any dependencies specified in the tarball project's packages.yml file.\r\n\r\n\r\n### Steps To Reproduce\r\n\r\nthis can be reproduced by importing the tarball of a package with nested dependencies. In this case, importing dbt_expectations should cause dbt_date to be installed, as its included in the package's dependencies here: https://github.com/calogica/dbt-expectations/blob/0.9.0/packages.yml\r\n\r\nSteps:\r\n1. create a `packages.yml` file in a project with the following structure:\r\n``` yaml\r\npackages:\r\n - tarball: \"https://github.com/calogica/dbt-expectations/archive/refs/tags/0.9.0.tar.gz\"\r\n name: \"dbt_expectations\"\r\n```\r\n2. run `dbt deps`\r\n\r\n\r\nrunning dbt deps will only install dbt_expectations:\r\n```\r\n20:08:55 Running with dbt=1.5.6\r\n20:08:55 Installing dbt_expectations\r\n20:08:56 Installed from tarball (url: https://github.com/calogica/dbt-expectations/archive/refs/tags/0.9.0.tar.gz)\r\n```\r\ncompare this to installing the same package from dbt hub, with the following `packages.yml`:\r\n``` yaml\r\npackages:\r\n - package: calogica/dbt_expectations\r\n version: \"0.9.0\"\r\n```\r\n```\r\n20:14:24 Running with dbt=1.5.6\r\n20:14:24 Installing calogica/dbt_expectations\r\n20:14:25 Installed from version 0.9.0\r\n20:14:25 Up to date!\r\n20:14:25 Installing calogica/dbt_date\r\n20:14:25 Installed from version 0.8.1\r\n20:14:25 Updated version available: 0.9.1\r\n20:14:25 \r\n20:14:25 Updates available for packages: ['calogica/dbt_date'] \r\nUpdate your versions in packages.yml, then run dbt deps\r\n```\r\n\r\n### Relevant log output\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS: Mac OS 13.5.2 (22G91)\r\n- Python: 3.9\r\n- dbt: 1.5.6\r\n```\r\n\r\n\r\n### Which database adapter are you using with dbt?\r\n\r\nsnowflake\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "from typing import Dict\n\nfrom dbt.contracts.project import RegistryPackageMetadata, TarballPackage\nfrom dbt.deps.base import PinnedPackage, UnpinnedPackage\n\n\nclass TarballPackageMixin:\n def __init__(self, tarball: str) -> None:\n super().__init__()\n self.tarball = tarball\n\n @property\n def name(self):\n return self.tarball\n\n def source_type(self) -> str:\n return \"tarball\"\n\n\nclass TarballPinnedPackage(TarballPackageMixin, PinnedPackage):\n def __init__(self, tarball: str, package: str) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @property\n def name(self):\n return self.package\n\n def to_dict(self) -> Dict[str, str]:\n return {\n \"tarball\": self.tarball,\n \"version\": self.version,\n \"package\": self.package,\n }\n\n def get_version(self):\n return self.version\n\n def nice_version_name(self):\n return f\"tarball (url: {self.tarball})\"\n\n def _fetch_metadata(self, project, renderer):\n 
\"\"\"\n recycle RegistryPackageMetadata so that we can use the install and\n download_and_untar from RegistryPinnedPackage next.\n build RegistryPackageMetadata from info passed via packages.yml since no\n 'metadata' service exists in this case.\n \"\"\"\n\n dct = {\n \"name\": self.package,\n \"packages\": [], # note: required by RegistryPackageMetadata\n \"downloads\": {\"tarball\": self.tarball},\n }\n\n return RegistryPackageMetadata.from_dict(dct)\n\n def install(self, project, renderer):\n self._install(project, renderer)\n\n\nclass TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):\n def __init__(\n self,\n tarball: str,\n package: str,\n ) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @classmethod\n def from_contract(cls, contract: TarballPackage) -> \"TarballUnpinnedPackage\":\n return cls(tarball=contract.tarball, package=contract.name)\n\n def incorporate(self, other: \"TarballUnpinnedPackage\") -> \"TarballUnpinnedPackage\":\n return TarballUnpinnedPackage(tarball=self.tarball, package=self.package)\n\n def resolved(self) -> TarballPinnedPackage:\n return TarballPinnedPackage(tarball=self.tarball, package=self.package)\n", "path": "core/dbt/deps/tarball.py"}]}
2013
945
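The dbt patch above is the substantive one in this batch: instead of fabricating registry metadata for a tarball package, it downloads and untars the archive, checks that it expands to exactly one top-level folder, and reads the real project metadata from disk, which is what finally makes the package's own `packages.yml` (and hence nested dependencies) visible to the resolver. The layout check is easy to mirror in isolation (the paths and the exception type here are illustrative, not dbt's):

```python
import os
import tarfile
import tempfile


def untarred_project_root(tar_path: str) -> str:
    """Extract a package tarball and return its single top-level folder,
    mirroring the structure check added in TarballPinnedPackage."""
    dest = tempfile.mkdtemp(suffix="_untarred")
    with tarfile.open(tar_path) as tar:
        tar.extractall(dest)
    contents = os.listdir(dest)
    if len(contents) != 1:
        raise ValueError(
            f"Incorrect structure for package extracted from {tar_path}: "
            "expected a single <package>/<package_contents> folder."
        )
    return os.path.join(dest, contents[0])
```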
gh_patches_debug_27699
rasdani/github-patches
git_diff
cowrie__cowrie-638
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> csirtg plugin no longer working I'm not sure exactly when this happened, but just happend to check the logs today, and noticed the csirtg plugin has some errors. ``` 2017-11-02T17:05:41-0400 [cowrie.telnet.transport.HoneyPotTelnetFactory] New connection: 45.32.221.61:59776 (x.x.x.x:23) [session: TT0] 2017-11-02T17:05:41-0400 [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.csirtg.Output object at 0x7f3a5ce9bb50>>) due to exception: [Failure instance: Traceback: <type 'exceptions.TypeError'>: string indices must be integers /home/cowrie/cowrie/cowrie/telnet/transport.py:218:connectionMade /usr/local/lib/python2.7/dist-packages/twisted/python/threadable.py:53:sync /usr/local/lib/python2.7/dist-packages/twisted/python/log.py:286:msg /usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py:154:publishToNewObserver --- <exception caught here> --- /usr/local/lib/python2.7/dist-packages/twisted/logger/_observer.py:131:__call__ /usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py:93:__call__ /home/cowrie/cowrie/cowrie/core/output.py:190:emit /home/cowrie/cowrie/cowrie/output/csirtg.py:82:write ] Traceback (most recent call last): File "/home/cowrie/cowrie/cowrie/telnet/transport.py", line 218, in connectionMade session=self.transportId, sessionno='T'+str(sessionno)) File "/usr/local/lib/python2.7/dist-packages/twisted/python/threadable.py", line 53, in sync return function(self, *args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/twisted/python/log.py", line 286, in msg _publishNew(self._publishPublisher, actualEventDict, textFromEventDict) File "/usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py", line 154, in publishToNewObserver observer(eventDict) --- <exception caught here> --- File "/usr/local/lib/python2.7/dist-packages/twisted/logger/_observer.py", line 131, in __call__ observer(event) File "/usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py", line 93, in __call__ self.legacyObserver(event) File "/home/cowrie/cowrie/cowrie/core/output.py", line 190, in emit self.write(ev) File "/home/cowrie/cowrie/cowrie/output/csirtg.py", line 82, in write logger.info('logged to csirtg %s ' % ret['indicator']['location']) exceptions.TypeError: string indices must be integers ``` </issue> <code> [start of cowrie/output/csirtg.py] 1 from __future__ import division, absolute_import 2 3 import cowrie.core.output 4 5 from csirtgsdk.indicator import Indicator 6 from csirtgsdk.client import Client 7 from datetime import datetime 8 import logging 9 import os 10 11 logger = logging.getLogger(__name__) 12 13 USERNAME = os.environ.get('CSIRTG_USER') 14 FEED = os.environ.get('CSIRTG_FEED') 15 TOKEN = os.environ.get('CSIRG_TOKEN') 16 DESCRIPTION = os.environ.get('CSIRTG_DESCRIPTION', 'random scanning activity') 17 18 19 class Output(cowrie.core.output.Output): 20 def __init__(self, cfg): 21 cowrie.core.output.Output.__init__(self, cfg) 22 self.user = cfg.get('output_csirtg', 'username') or USERNAME 23 self.feed = cfg.get('output_csirtg', 'feed') or FEED 24 self.token = cfg.get('output_csirtg', 'token') or TOKEN 25 try: 26 self.description = cfg.get('output_csirtg', 'description') 27 except Exception: 28 self.description = DESCRIPTION 29 self.context = {} 30 self.client = Client(token=self.token) 31 32 def start(self,): 33 pass 34 35 def stop(self): 36 pass 37 38 def 
write(self, e): 39 sid = e['session'] 40 peerIP = e['src_ip'] 41 ts = e['timestamp'] 42 system = e['system'] 43 44 if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']: 45 logger.debug('skipping {}'.format(system)) 46 return 47 48 today = str(datetime.now().date()) 49 50 if not self.context.get(today): 51 logger.debug('resetting context for %s' % today) 52 self.context = {} 53 self.context[today] = set() 54 55 key = ','.join([peerIP, system]) 56 57 if key in self.context[today]: 58 logger.debug('skipping {}'.format(key)) 59 return 60 61 self.context[today].add(key) 62 63 tags = 'scanner,ssh' 64 port = 22 65 if e['system'] == 'cowrie.telnet.transport.HoneyPotTelnetFactory': 66 tags = 'scanner,telnet' 67 port = 23 68 69 i = { 70 'user': self.user, 71 'feed': self.feed, 72 'indicator': peerIP, 73 'portlist': port, 74 'protocol': 'tcp', 75 'tags': tags, 76 'firsttime': ts, 77 'lasttime': ts, 78 'description': self.description 79 } 80 81 ret = Indicator(self.client, i).submit() 82 logger.info('logged to csirtg %s ' % ret['indicator']['location']) 83 84 [end of cowrie/output/csirtg.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cowrie/output/csirtg.py b/cowrie/output/csirtg.py --- a/cowrie/output/csirtg.py +++ b/cowrie/output/csirtg.py @@ -7,8 +7,7 @@ from datetime import datetime import logging import os - -logger = logging.getLogger(__name__) +from twisted.python import log USERNAME = os.environ.get('CSIRTG_USER') FEED = os.environ.get('CSIRTG_FEED') @@ -42,20 +41,17 @@ system = e['system'] if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']: - logger.debug('skipping {}'.format(system)) return today = str(datetime.now().date()) if not self.context.get(today): - logger.debug('resetting context for %s' % today) self.context = {} self.context[today] = set() key = ','.join([peerIP, system]) if key in self.context[today]: - logger.debug('skipping {}'.format(key)) return self.context[today].add(key) @@ -79,5 +75,5 @@ } ret = Indicator(self.client, i).submit() - logger.info('logged to csirtg %s ' % ret['indicator']['location']) + log.msg('logged to csirtg %s ' % ret['location'])
{"golden_diff": "diff --git a/cowrie/output/csirtg.py b/cowrie/output/csirtg.py\n--- a/cowrie/output/csirtg.py\n+++ b/cowrie/output/csirtg.py\n@@ -7,8 +7,7 @@\n from datetime import datetime\n import logging\n import os\n-\n-logger = logging.getLogger(__name__)\n+from twisted.python import log\n \n USERNAME = os.environ.get('CSIRTG_USER')\n FEED = os.environ.get('CSIRTG_FEED')\n@@ -42,20 +41,17 @@\n system = e['system']\n \n if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']:\n- logger.debug('skipping {}'.format(system))\n return\n \n today = str(datetime.now().date())\n \n if not self.context.get(today):\n- logger.debug('resetting context for %s' % today)\n self.context = {}\n self.context[today] = set()\n \n key = ','.join([peerIP, system])\n \n if key in self.context[today]:\n- logger.debug('skipping {}'.format(key))\n return\n \n self.context[today].add(key)\n@@ -79,5 +75,5 @@\n }\n \n ret = Indicator(self.client, i).submit()\n- logger.info('logged to csirtg %s ' % ret['indicator']['location'])\n+ log.msg('logged to csirtg %s ' % ret['location'])\n", "issue": "csirtg plugin no longer working\nI'm not sure exactly when this happened, but just happend to check the logs today, and noticed the csirtg plugin has some errors.\r\n\r\n```\r\n2017-11-02T17:05:41-0400 [cowrie.telnet.transport.HoneyPotTelnetFactory] New connection: 45.32.221.61:59776 (x.x.x.x:23) [session: TT0]\r\n2017-11-02T17:05:41-0400 [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.csirtg.Output object at 0x7f3a5ce9bb50>>) due to exception: [Failure instance: Traceback: <type 'exceptions.TypeError'>: string indices must be integers\r\n\t/home/cowrie/cowrie/cowrie/telnet/transport.py:218:connectionMade\r\n\t/usr/local/lib/python2.7/dist-packages/twisted/python/threadable.py:53:sync\r\n\t/usr/local/lib/python2.7/dist-packages/twisted/python/log.py:286:msg\r\n\t/usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py:154:publishToNewObserver\r\n\t--- <exception caught here> ---\r\n\t/usr/local/lib/python2.7/dist-packages/twisted/logger/_observer.py:131:__call__\r\n\t/usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py:93:__call__\r\n\t/home/cowrie/cowrie/cowrie/core/output.py:190:emit\r\n\t/home/cowrie/cowrie/cowrie/output/csirtg.py:82:write\r\n\t]\r\n\tTraceback (most recent call last):\r\n\t File \"/home/cowrie/cowrie/cowrie/telnet/transport.py\", line 218, in connectionMade\r\n\t session=self.transportId, sessionno='T'+str(sessionno))\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/python/threadable.py\", line 53, in sync\r\n\t return function(self, *args, **kwargs)\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/python/log.py\", line 286, in msg\r\n\t _publishNew(self._publishPublisher, actualEventDict, textFromEventDict)\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py\", line 154, in publishToNewObserver\r\n\t observer(eventDict)\r\n\t--- <exception caught here> ---\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/logger/_observer.py\", line 131, in __call__\r\n\t observer(event)\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py\", line 93, in __call__\r\n\t self.legacyObserver(event)\r\n\t File \"/home/cowrie/cowrie/cowrie/core/output.py\", line 190, in emit\r\n\t self.write(ev)\r\n\t File \"/home/cowrie/cowrie/cowrie/output/csirtg.py\", line 82, in write\r\n\t 
logger.info('logged to csirtg %s ' % ret['indicator']['location'])\r\n\texceptions.TypeError: string indices must be integers\r\n```\n", "before_files": [{"content": "from __future__ import division, absolute_import\n\nimport cowrie.core.output\n\nfrom csirtgsdk.indicator import Indicator\nfrom csirtgsdk.client import Client\nfrom datetime import datetime\nimport logging\nimport os\n\nlogger = logging.getLogger(__name__)\n\nUSERNAME = os.environ.get('CSIRTG_USER')\nFEED = os.environ.get('CSIRTG_FEED')\nTOKEN = os.environ.get('CSIRG_TOKEN')\nDESCRIPTION = os.environ.get('CSIRTG_DESCRIPTION', 'random scanning activity')\n\n\nclass Output(cowrie.core.output.Output):\n def __init__(self, cfg):\n cowrie.core.output.Output.__init__(self, cfg)\n self.user = cfg.get('output_csirtg', 'username') or USERNAME\n self.feed = cfg.get('output_csirtg', 'feed') or FEED\n self.token = cfg.get('output_csirtg', 'token') or TOKEN\n try:\n self.description = cfg.get('output_csirtg', 'description')\n except Exception:\n self.description = DESCRIPTION\n self.context = {}\n self.client = Client(token=self.token)\n\n def start(self,):\n pass\n\n def stop(self):\n pass\n\n def write(self, e):\n sid = e['session']\n peerIP = e['src_ip']\n ts = e['timestamp']\n system = e['system']\n\n if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']:\n logger.debug('skipping {}'.format(system))\n return\n\n today = str(datetime.now().date())\n\n if not self.context.get(today):\n logger.debug('resetting context for %s' % today)\n self.context = {}\n self.context[today] = set()\n\n key = ','.join([peerIP, system])\n\n if key in self.context[today]:\n logger.debug('skipping {}'.format(key))\n return\n\n self.context[today].add(key)\n\n tags = 'scanner,ssh'\n port = 22\n if e['system'] == 'cowrie.telnet.transport.HoneyPotTelnetFactory':\n tags = 'scanner,telnet'\n port = 23\n\n i = {\n 'user': self.user,\n 'feed': self.feed,\n 'indicator': peerIP,\n 'portlist': port,\n 'protocol': 'tcp',\n 'tags': tags,\n 'firsttime': ts,\n 'lasttime': ts,\n 'description': self.description\n }\n\n ret = Indicator(self.client, i).submit()\n logger.info('logged to csirtg %s ' % ret['indicator']['location'])\n\n", "path": "cowrie/output/csirtg.py"}]}
2038
327
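The cowrie traceback above is a shape mismatch rather than a logic bug: the plugin assumed `Indicator(...).submit()` returned a nested mapping and wrote `ret['indicator']['location']`, but the value reached under `'indicator'` was a plain string, so the second lookup indexed a `str` with a `str`. The golden diff reads `ret['location']` instead (and moves logging over to Twisted's `log.msg`). A tiny reproduction of the failure mode (the dict below is a stand-in, not real csirtgsdk output):

```python
ret = {"indicator": "198.51.100.7", "location": "https://csirtg.io/users/x/feeds/y"}

try:
    ret["indicator"]["location"]  # indexing a str with a str
except TypeError as exc:
    print(exc)  # e.g. "string indices must be integers"

print(ret["location"])  # the lookup the golden diff uses instead
```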
gh_patches_debug_18933
rasdani/github-patches
git_diff
Qiskit__qiskit-1720
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BackendConfiguration fails validation if backend supports pulse <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Informations - **Qiskit Terra version**:0.8.0 - **Python version**3.6.6 - **Operating system**:OSX ### What is the current behavior? If a backend sets `open_pulse=true` in its configuration Qiskit will raise a validation error when creating a `BackendConfigurationSchema` ### Steps to reproduce the problem Create a backend with `open_pulse=true` set in its configuration. ### What is the expected behavior? Should not fail. ### Suggested solutions Allow `open_pulse=true` to be valid. </issue> <code> [start of qiskit/providers/models/backendconfiguration.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright 2018, IBM. 4 # 5 # This source code is licensed under the Apache License, Version 2.0 found in 6 # the LICENSE.txt file in the root directory of this source tree. 7 8 """Model and schema for backend configuration.""" 9 10 from marshmallow.validate import Equal, Length, OneOf, Range, Regexp 11 12 from qiskit.validation import BaseModel, BaseSchema, bind_schema 13 from qiskit.validation.fields import Boolean, DateTime, Integer, List, Nested, String 14 15 16 class GateConfigSchema(BaseSchema): 17 """Schema for GateConfig.""" 18 19 # Required properties. 20 name = String(required=True) 21 parameters = List(String(), required=True) 22 qasm_def = String(required=True) 23 24 # Optional properties. 25 coupling_map = List(List(Integer(), 26 validate=Length(min=1)), 27 validate=Length(min=1)) 28 latency_map = List(List(Integer(validate=OneOf([0, 1])), 29 validate=Length(min=1)), 30 validate=Length(min=1)) 31 conditional = Boolean() 32 description = String() 33 34 35 class BackendConfigurationSchema(BaseSchema): 36 """Schema for BackendConfiguration.""" 37 38 # Required properties. 39 backend_name = String(required=True) 40 backend_version = String(required=True, 41 validate=Regexp("[0-9]+.[0-9]+.[0-9]+$")) 42 n_qubits = Integer(required=True, validate=Range(min=1)) 43 basis_gates = List(String(), required=True, 44 validate=Length(min=1)) 45 gates = Nested(GateConfigSchema, required=True, many=True, 46 validate=Length(min=1)) 47 local = Boolean(required=True) 48 simulator = Boolean(required=True) 49 conditional = Boolean(required=True) 50 open_pulse = Boolean(required=True, validate=Equal(False)) 51 memory = Boolean(required=True) 52 max_shots = Integer(required=True, validate=Range(min=1)) 53 54 # Optional properties. 55 max_experiments = Integer(validate=Range(min=1)) 56 sample_name = String() 57 coupling_map = List(List(Integer(), 58 validate=Length(min=1)), 59 validate=Length(min=1)) 60 n_registers = Integer(validate=Range(min=1)) 61 register_map = List(List(Integer(validate=OneOf([0, 1])), 62 validate=Length(min=1)), 63 validate=Length(min=1)) 64 configurable = Boolean() 65 credits_required = Boolean() 66 online_date = DateTime() 67 display_name = String() 68 description = String() 69 tags = List(String()) 70 71 72 @bind_schema(GateConfigSchema) 73 class GateConfig(BaseModel): 74 """Model for GateConfig. 75 76 Please note that this class only describes the required fields. For the 77 full description of the model, please check ``GateConfigSchema``. 78 79 Attributes: 80 name (str): the gate name as it will be referred to in QASM. 
81 parameters (list[str]): variable names for the gate parameters (if any). 82 qasm_def (str): definition of this gate in terms of QASM primitives U 83 and CX. 84 """ 85 86 def __init__(self, name, parameters, qasm_def, **kwargs): 87 self.name = name 88 self.parameters = parameters 89 self.qasm_def = qasm_def 90 91 super().__init__(**kwargs) 92 93 94 @bind_schema(BackendConfigurationSchema) 95 class BackendConfiguration(BaseModel): 96 """Model for BackendConfiguration. 97 98 Please note that this class only describes the required fields. For the 99 full description of the model, please check ``BackendConfigurationSchema``. 100 Attributes: 101 backend_name (str): backend name. 102 backend_version (str): backend version in the form X.Y.Z. 103 n_qubits (int): number of qubits. 104 basis_gates (list[str]): list of basis gates names on the backend. 105 gates (GateConfig): list of basis gates on the backend. 106 local (bool): backend is local or remote. 107 simulator (bool): backend is a simulator. 108 conditional (bool): backend supports conditional operations. 109 open_pulse (bool): backend supports open pulse. 110 memory (bool): backend supports memory. 111 max_shots (int): maximum number of shots supported. 112 """ 113 114 def __init__(self, backend_name, backend_version, n_qubits, basis_gates, 115 gates, local, simulator, conditional, open_pulse, memory, 116 max_shots, **kwargs): 117 self.backend_name = backend_name 118 self.backend_version = backend_version 119 self.n_qubits = n_qubits 120 self.basis_gates = basis_gates 121 self.gates = gates 122 self.local = local 123 self.simulator = simulator 124 self.conditional = conditional 125 self.open_pulse = open_pulse 126 self.memory = memory 127 self.max_shots = max_shots 128 129 super().__init__(**kwargs) 130 [end of qiskit/providers/models/backendconfiguration.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qiskit/providers/models/backendconfiguration.py b/qiskit/providers/models/backendconfiguration.py --- a/qiskit/providers/models/backendconfiguration.py +++ b/qiskit/providers/models/backendconfiguration.py @@ -7,7 +7,7 @@ """Model and schema for backend configuration.""" -from marshmallow.validate import Equal, Length, OneOf, Range, Regexp +from marshmallow.validate import Length, OneOf, Range, Regexp from qiskit.validation import BaseModel, BaseSchema, bind_schema from qiskit.validation.fields import Boolean, DateTime, Integer, List, Nested, String @@ -47,7 +47,7 @@ local = Boolean(required=True) simulator = Boolean(required=True) conditional = Boolean(required=True) - open_pulse = Boolean(required=True, validate=Equal(False)) + open_pulse = Boolean(required=True) memory = Boolean(required=True) max_shots = Integer(required=True, validate=Range(min=1))
{"golden_diff": "diff --git a/qiskit/providers/models/backendconfiguration.py b/qiskit/providers/models/backendconfiguration.py\n--- a/qiskit/providers/models/backendconfiguration.py\n+++ b/qiskit/providers/models/backendconfiguration.py\n@@ -7,7 +7,7 @@\n \n \"\"\"Model and schema for backend configuration.\"\"\"\n \n-from marshmallow.validate import Equal, Length, OneOf, Range, Regexp\n+from marshmallow.validate import Length, OneOf, Range, Regexp\n \n from qiskit.validation import BaseModel, BaseSchema, bind_schema\n from qiskit.validation.fields import Boolean, DateTime, Integer, List, Nested, String\n@@ -47,7 +47,7 @@\n local = Boolean(required=True)\n simulator = Boolean(required=True)\n conditional = Boolean(required=True)\n- open_pulse = Boolean(required=True, validate=Equal(False))\n+ open_pulse = Boolean(required=True)\n memory = Boolean(required=True)\n max_shots = Integer(required=True, validate=Range(min=1))\n", "issue": "BackendConfiguration fails validation if backend supports pulse\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Informations\r\n\r\n- **Qiskit Terra version**:0.8.0\r\n- **Python version**3.6.6\r\n- **Operating system**:OSX\r\n\r\n### What is the current behavior?\r\nIf a backend sets `open_pulse=true` in its configuration Qiskit will raise a validation error when creating a `BackendConfigurationSchema`\r\n\r\n\r\n### Steps to reproduce the problem\r\nCreate a backend with `open_pulse=true` set in its configuration.\r\n\r\n\r\n### What is the expected behavior?\r\nShould not fail.\r\n\r\n\r\n### Suggested solutions\r\nAllow `open_pulse=true` to be valid.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"Model and schema for backend configuration.\"\"\"\n\nfrom marshmallow.validate import Equal, Length, OneOf, Range, Regexp\n\nfrom qiskit.validation import BaseModel, BaseSchema, bind_schema\nfrom qiskit.validation.fields import Boolean, DateTime, Integer, List, Nested, String\n\n\nclass GateConfigSchema(BaseSchema):\n \"\"\"Schema for GateConfig.\"\"\"\n\n # Required properties.\n name = String(required=True)\n parameters = List(String(), required=True)\n qasm_def = String(required=True)\n\n # Optional properties.\n coupling_map = List(List(Integer(),\n validate=Length(min=1)),\n validate=Length(min=1))\n latency_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n validate=Length(min=1))\n conditional = Boolean()\n description = String()\n\n\nclass BackendConfigurationSchema(BaseSchema):\n \"\"\"Schema for BackendConfiguration.\"\"\"\n\n # Required properties.\n backend_name = String(required=True)\n backend_version = String(required=True,\n validate=Regexp(\"[0-9]+.[0-9]+.[0-9]+$\"))\n n_qubits = Integer(required=True, validate=Range(min=1))\n basis_gates = List(String(), required=True,\n validate=Length(min=1))\n gates = Nested(GateConfigSchema, required=True, many=True,\n validate=Length(min=1))\n local = Boolean(required=True)\n simulator = Boolean(required=True)\n conditional = Boolean(required=True)\n open_pulse = Boolean(required=True, validate=Equal(False))\n memory = Boolean(required=True)\n max_shots = Integer(required=True, validate=Range(min=1))\n\n # Optional properties.\n max_experiments = 
Integer(validate=Range(min=1))\n sample_name = String()\n coupling_map = List(List(Integer(),\n validate=Length(min=1)),\n validate=Length(min=1))\n n_registers = Integer(validate=Range(min=1))\n register_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n validate=Length(min=1))\n configurable = Boolean()\n credits_required = Boolean()\n online_date = DateTime()\n display_name = String()\n description = String()\n tags = List(String())\n\n\n@bind_schema(GateConfigSchema)\nclass GateConfig(BaseModel):\n \"\"\"Model for GateConfig.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``GateConfigSchema``.\n\n Attributes:\n name (str): the gate name as it will be referred to in QASM.\n parameters (list[str]): variable names for the gate parameters (if any).\n qasm_def (str): definition of this gate in terms of QASM primitives U\n and CX.\n \"\"\"\n\n def __init__(self, name, parameters, qasm_def, **kwargs):\n self.name = name\n self.parameters = parameters\n self.qasm_def = qasm_def\n\n super().__init__(**kwargs)\n\n\n@bind_schema(BackendConfigurationSchema)\nclass BackendConfiguration(BaseModel):\n \"\"\"Model for BackendConfiguration.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``BackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, **kwargs):\n self.backend_name = backend_name\n self.backend_version = backend_version\n self.n_qubits = n_qubits\n self.basis_gates = basis_gates\n self.gates = gates\n self.local = local\n self.simulator = simulator\n self.conditional = conditional\n self.open_pulse = open_pulse\n self.memory = memory\n self.max_shots = max_shots\n\n super().__init__(**kwargs)\n", "path": "qiskit/providers/models/backendconfiguration.py"}]}
2025
210
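The Qiskit change is a pure schema relaxation: `marshmallow.validate.Equal(False)` made any configuration with `open_pulse: true` fail validation, so the fix simply drops the validator while keeping the field required. The before/after behavior in isolation, using plain marshmallow rather than Terra's `BaseSchema` (a standalone snippet assuming marshmallow 3, where `Schema.validate` returns an error dict):

```python
from marshmallow import Schema, fields, validate


class Before(Schema):
    open_pulse = fields.Boolean(required=True, validate=validate.Equal(False))


class After(Schema):
    open_pulse = fields.Boolean(required=True)


print(Before().validate({"open_pulse": True}))  # {'open_pulse': ['Must be equal to False.']}
print(After().validate({"open_pulse": True}))   # {} -- pulse-capable backends now pass
```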
gh_patches_debug_16021
rasdani/github-patches
git_diff
wagtail__wagtail-8270
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ThumbnailMixin does not display in header the value defined under thumb_col_header_text <!-- Found a bug? Please fill out the sections below. 👍 --> ### Issue Summary When adding ThumbnailMixin to a ModelAdmin, and giving it the `thumb_col_header_text` attribute, should display that on the list header for the thumbnail. but it always uses the default defined 'image' ![image](https://user-images.githubusercontent.com/4082254/161403611-d75291b5-6db8-413a-82fa-447aab5fe69a.png) ![image](https://user-images.githubusercontent.com/4082254/161403594-1b2bd39b-2332-40a8-b539-cae4d72a57e6.png) ### Steps to Reproduce 1. (for example) Start a new project with `wagtail start myproject` 2. in models.py add a new model (non page) with a forignkey to wagtailimages.Image 3. add model admin definition in wagtail_hooks.py 4. add ThumbnailMixin to model admin super classes 5. add some value to thumb_col_header_text 6. register new model admin 7. load app 8. add new instance of your new model with an image 9. in list header for your image it will say 'image' not what you defined in thumb_col_header_text Any other relevant information. For example, why do you consider this a bug and what did you expect to happen instead? * I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: (yes) * i already know why this is happening and will submit a pull request shortly ### Technical details * Python version: 3.9.7 * Django version: 4.0.3 * Wagtail version: 2.16.1 * Browser version: Chrome Version 100.0.4896.60 (Official Build) (x86_64) </issue> <code> [start of wagtail/contrib/modeladmin/mixins.py] 1 from django.conf import settings 2 from django.core.exceptions import ImproperlyConfigured 3 from django.forms.utils import flatatt 4 from django.utils.safestring import mark_safe 5 from django.utils.translation import gettext_lazy as _ 6 7 8 class ThumbnailMixin: 9 """ 10 Mixin class to help display thumbnail images in ModelAdmin listing results. 11 `thumb_image_field_name` must be overridden to name a ForeignKey field on 12 your model, linking to `wagtailimages.Image`. 13 """ 14 15 thumb_image_field_name = "image" 16 thumb_image_filter_spec = "fill-100x100" 17 thumb_image_width = 50 18 thumb_classname = "admin-thumb" 19 thumb_col_header_text = _("image") 20 thumb_default = None 21 22 def __init__(self, *args, **kwargs): 23 if "wagtail.images" not in settings.INSTALLED_APPS: 24 raise ImproperlyConfigured( 25 "The `wagtail.images` app must be installed in order " 26 "to use the `ThumbnailMixin` class." 27 ) 28 super().__init__(*args, **kwargs) 29 30 def admin_thumb(self, obj): 31 try: 32 image = getattr(obj, self.thumb_image_field_name, None) 33 except AttributeError: 34 raise ImproperlyConfigured( 35 "The `thumb_image_field_name` attribute on your `%s` class " 36 "must name a field on your model." 
% self.__class__.__name__ 37 ) 38 39 img_attrs = { 40 "src": self.thumb_default, 41 "width": self.thumb_image_width, 42 "class": self.thumb_classname, 43 } 44 if not image: 45 if self.thumb_default: 46 return mark_safe("<img{}>".format(flatatt(img_attrs))) 47 return "" 48 49 # try to get a rendition of the image to use 50 from wagtail.images.shortcuts import get_rendition_or_not_found 51 52 spec = self.thumb_image_filter_spec 53 rendition = get_rendition_or_not_found(image, spec) 54 img_attrs.update({"src": rendition.url}) 55 return mark_safe("<img{}>".format(flatatt(img_attrs))) 56 57 admin_thumb.short_description = thumb_col_header_text 58 [end of wagtail/contrib/modeladmin/mixins.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/contrib/modeladmin/mixins.py b/wagtail/contrib/modeladmin/mixins.py --- a/wagtail/contrib/modeladmin/mixins.py +++ b/wagtail/contrib/modeladmin/mixins.py @@ -25,6 +25,7 @@ "The `wagtail.images` app must be installed in order " "to use the `ThumbnailMixin` class." ) + self.__class__.admin_thumb.short_description = self.thumb_col_header_text super().__init__(*args, **kwargs) def admin_thumb(self, obj): @@ -53,5 +54,3 @@ rendition = get_rendition_or_not_found(image, spec) img_attrs.update({"src": rendition.url}) return mark_safe("<img{}>".format(flatatt(img_attrs))) - - admin_thumb.short_description = thumb_col_header_text
{"golden_diff": "diff --git a/wagtail/contrib/modeladmin/mixins.py b/wagtail/contrib/modeladmin/mixins.py\n--- a/wagtail/contrib/modeladmin/mixins.py\n+++ b/wagtail/contrib/modeladmin/mixins.py\n@@ -25,6 +25,7 @@\n \"The `wagtail.images` app must be installed in order \"\n \"to use the `ThumbnailMixin` class.\"\n )\n+ self.__class__.admin_thumb.short_description = self.thumb_col_header_text\n super().__init__(*args, **kwargs)\n \n def admin_thumb(self, obj):\n@@ -53,5 +54,3 @@\n rendition = get_rendition_or_not_found(image, spec)\n img_attrs.update({\"src\": rendition.url})\n return mark_safe(\"<img{}>\".format(flatatt(img_attrs)))\n-\n- admin_thumb.short_description = thumb_col_header_text\n", "issue": "ThumbnailMixin does not display in header the value defined under thumb_col_header_text \n<!--\r\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n-->\r\n\r\n### Issue Summary\r\n\r\nWhen adding ThumbnailMixin to a ModelAdmin, and giving it the `thumb_col_header_text` attribute, should display that on the list header for the thumbnail. but it always uses the default defined 'image' \r\n\r\n![image](https://user-images.githubusercontent.com/4082254/161403611-d75291b5-6db8-413a-82fa-447aab5fe69a.png)\r\n\r\n\r\n![image](https://user-images.githubusercontent.com/4082254/161403594-1b2bd39b-2332-40a8-b539-cae4d72a57e6.png)\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. (for example) Start a new project with `wagtail start myproject`\r\n2. in models.py add a new model (non page) with a forignkey to wagtailimages.Image \r\n3. add model admin definition in wagtail_hooks.py\r\n4. add ThumbnailMixin to model admin super classes\r\n5. add some value to thumb_col_header_text\r\n6. register new model admin\r\n7. load app\r\n8. add new instance of your new model with an image\r\n9. in list header for your image it will say 'image' not what you defined in thumb_col_header_text\r\n\r\nAny other relevant information. 
For example, why do you consider this a bug and what did you expect to happen instead?\r\n\r\n* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: (yes)\r\n* i already know why this is happening and will submit a pull request shortly\r\n\r\n\r\n### Technical details\r\n\r\n* Python version: 3.9.7\r\n* Django version: 4.0.3\r\n* Wagtail version: 2.16.1\r\n* Browser version: Chrome Version 100.0.4896.60 (Official Build) (x86_64)\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.forms.utils import flatatt\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass ThumbnailMixin:\n \"\"\"\n Mixin class to help display thumbnail images in ModelAdmin listing results.\n `thumb_image_field_name` must be overridden to name a ForeignKey field on\n your model, linking to `wagtailimages.Image`.\n \"\"\"\n\n thumb_image_field_name = \"image\"\n thumb_image_filter_spec = \"fill-100x100\"\n thumb_image_width = 50\n thumb_classname = \"admin-thumb\"\n thumb_col_header_text = _(\"image\")\n thumb_default = None\n\n def __init__(self, *args, **kwargs):\n if \"wagtail.images\" not in settings.INSTALLED_APPS:\n raise ImproperlyConfigured(\n \"The `wagtail.images` app must be installed in order \"\n \"to use the `ThumbnailMixin` class.\"\n )\n super().__init__(*args, **kwargs)\n\n def admin_thumb(self, obj):\n try:\n image = getattr(obj, self.thumb_image_field_name, None)\n except AttributeError:\n raise ImproperlyConfigured(\n \"The `thumb_image_field_name` attribute on your `%s` class \"\n \"must name a field on your model.\" % self.__class__.__name__\n )\n\n img_attrs = {\n \"src\": self.thumb_default,\n \"width\": self.thumb_image_width,\n \"class\": self.thumb_classname,\n }\n if not image:\n if self.thumb_default:\n return mark_safe(\"<img{}>\".format(flatatt(img_attrs)))\n return \"\"\n\n # try to get a rendition of the image to use\n from wagtail.images.shortcuts import get_rendition_or_not_found\n\n spec = self.thumb_image_filter_spec\n rendition = get_rendition_or_not_found(image, spec)\n img_attrs.update({\"src\": rendition.url})\n return mark_safe(\"<img{}>\".format(flatatt(img_attrs)))\n\n admin_thumb.short_description = thumb_col_header_text\n", "path": "wagtail/contrib/modeladmin/mixins.py"}]}
1,588
198
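The golden diff above works because of evaluation timing: `admin_thumb.short_description = thumb_col_header_text` runs exactly once, while the mixin's class body is being executed, long before any subclass can override `thumb_col_header_text`. Moving the assignment into `__init__` resolves the attribute on the actual class at instantiation time. A framework-free sketch makes the timing visible; the `BookAdmin` subclass here is illustrative, not from Wagtail:

```python
class ThumbnailMixin:
    thumb_col_header_text = "image"

    def __init__(self):
        # The fix: look the attribute up on the *instantiated* class, so a
        # subclass override of thumb_col_header_text is honoured.
        self.__class__.admin_thumb.short_description = self.thumb_col_header_text

    def admin_thumb(self, obj):
        return obj

    # The bug: this line runs once, during execution of the mixin's class
    # body, so it can only ever see the default value "image".
    admin_thumb.short_description = thumb_col_header_text


class BookAdmin(ThumbnailMixin):
    thumb_col_header_text = "cover"


assert BookAdmin.admin_thumb.short_description == "image"  # stale default
BookAdmin()  # __init__ applies the corrected assignment
assert BookAdmin.admin_thumb.short_description == "cover"
```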
gh_patches_debug_39253
rasdani/github-patches
git_diff
lightly-ai__lightly-1531
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug in `GatherLayer.backward` Hi, We've been implementing a model at [cellarium-ml](https://github.com/cellarium-ai/cellarium-ml) using your `NTXentLoss`. Comparing the model training with a single GPU and two GPUs we noticed that they do not match. By investigating it we found an apparent bug in the `GatherLayer.backward` where gradients are not sum-reduced over GPUs. Here is our fixed version (https://github.com/cellarium-ai/cellarium-ml/blob/main/cellarium/ml/distributed/gather.py#L17-L21): ```py @staticmethod def backward(ctx, *grads) -> torch.Tensor: grad_out = grads[dist.get_rank()].contiguous() dist.all_reduce(grad_out, op=dist.ReduceOp.SUM) return grad_out ``` and the [test](https://github.com/cellarium-ai/cellarium-ml/blob/main/tests/distributed/test_gather.py) we wrote. Would you agree that this is indeed a bug? I would be happy to contribute a PR with the fix. </issue> <code> [start of lightly/utils/dist.py] 1 from typing import Optional, Tuple 2 3 import torch 4 import torch.distributed as dist 5 6 7 class GatherLayer(torch.autograd.Function): 8 """Gather tensors from all processes, supporting backward propagation. 9 10 This code was taken and adapted from here: 11 https://github.com/Spijkervet/SimCLR 12 13 """ 14 15 @staticmethod 16 def forward(ctx, input: torch.Tensor) -> Tuple[torch.Tensor, ...]: 17 ctx.save_for_backward(input) 18 output = [torch.empty_like(input) for _ in range(dist.get_world_size())] 19 dist.all_gather(output, input) 20 return tuple(output) 21 22 @staticmethod 23 def backward(ctx, *grads: torch.Tensor) -> torch.Tensor: 24 (input,) = ctx.saved_tensors 25 grad_out = torch.empty_like(input) 26 grad_out[:] = grads[dist.get_rank()] 27 return grad_out 28 29 30 def rank() -> int: 31 """Returns the rank of the current process.""" 32 return dist.get_rank() if dist.is_initialized() else 0 33 34 35 def world_size() -> int: 36 """Returns the current world size (number of distributed processes).""" 37 return dist.get_world_size() if dist.is_initialized() else 1 38 39 40 def gather(input: torch.Tensor) -> Tuple[torch.Tensor]: 41 """Gathers this tensor from all processes. Supports backprop.""" 42 return GatherLayer.apply(input) 43 44 45 def eye_rank(n: int, device: Optional[torch.device] = None) -> torch.Tensor: 46 """Returns an (n, n * world_size) zero matrix with the diagonal for the rank 47 of this process set to 1. 48 49 Example output where n=3, the current process has rank 1, and there are 50 4 processes in total: 51 52 rank0 rank1 rank2 rank3 53 0 0 0 | 1 0 0 | 0 0 0 | 0 0 0 54 0 0 0 | 0 1 0 | 0 0 0 | 0 0 0 55 0 0 0 | 0 0 1 | 0 0 0 | 0 0 0 56 57 Equivalent to torch.eye for undistributed settings or if world size == 1. 58 59 Args: 60 n: 61 Size of the square matrix on a single process. 62 device: 63 Device on which the matrix should be created. 64 65 """ 66 rows = torch.arange(n, device=device, dtype=torch.long) 67 cols = rows + rank() * n 68 diag_mask = torch.zeros((n, n * world_size()), dtype=torch.bool) 69 diag_mask[(rows, cols)] = True 70 return diag_mask 71 72 73 def rank_zero_only(fn): 74 """Decorator that only runs the function on the process with rank 0. 
75 76 Example: 77 >>> @rank_zero_only 78 >>> def print_rank_zero(message: str): 79 >>> print(message) 80 >>> 81 >>> print_rank_zero("Hello from rank 0!") 82 83 """ 84 85 def wrapped(*args, **kwargs): 86 if rank() == 0: 87 return fn(*args, **kwargs) 88 89 return wrapped 90 91 92 @rank_zero_only 93 def print_rank_zero(*args, **kwargs) -> None: 94 """Equivalent to print, but only runs on the process with rank 0.""" 95 print(*args, **kwargs) 96 [end of lightly/utils/dist.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lightly/utils/dist.py b/lightly/utils/dist.py --- a/lightly/utils/dist.py +++ b/lightly/utils/dist.py @@ -1,29 +1,29 @@ -from typing import Optional, Tuple +from typing import Any, Callable, Optional, Tuple, TypeVar import torch import torch.distributed as dist +from torch.autograd.function import FunctionCtx class GatherLayer(torch.autograd.Function): """Gather tensors from all processes, supporting backward propagation. This code was taken and adapted from here: - https://github.com/Spijkervet/SimCLR + https://github.com/vturrisi/solo-learn/blob/b69b4bd27472593919956d9ac58902a301537a4d/solo/utils/misc.py#L187 """ @staticmethod - def forward(ctx, input: torch.Tensor) -> Tuple[torch.Tensor, ...]: - ctx.save_for_backward(input) + def forward(ctx, input: torch.Tensor) -> Tuple[torch.Tensor, ...]: # type: ignore output = [torch.empty_like(input) for _ in range(dist.get_world_size())] dist.all_gather(output, input) return tuple(output) @staticmethod - def backward(ctx, *grads: torch.Tensor) -> torch.Tensor: - (input,) = ctx.saved_tensors - grad_out = torch.empty_like(input) - grad_out[:] = grads[dist.get_rank()] + def backward(ctx, *grads) -> torch.Tensor: # type: ignore + all_gradients = torch.stack(grads) + dist.all_reduce(all_gradients) + grad_out = all_gradients[dist.get_rank()] return grad_out @@ -39,7 +39,7 @@ def gather(input: torch.Tensor) -> Tuple[torch.Tensor]: """Gathers this tensor from all processes. Supports backprop.""" - return GatherLayer.apply(input) + return GatherLayer.apply(input) # type: ignore[no-any-return] def eye_rank(n: int, device: Optional[torch.device] = None) -> torch.Tensor: @@ -70,7 +70,10 @@ return diag_mask -def rank_zero_only(fn): +R = TypeVar("R") + + +def rank_zero_only(fn: Callable[..., R]) -> Callable[..., Optional[R]]: """Decorator that only runs the function on the process with rank 0. Example: @@ -79,17 +82,17 @@ >>> print(message) >>> >>> print_rank_zero("Hello from rank 0!") - """ - def wrapped(*args, **kwargs): + def wrapped(*args: Any, **kwargs: Any) -> Optional[R]: if rank() == 0: return fn(*args, **kwargs) + return None return wrapped @rank_zero_only -def print_rank_zero(*args, **kwargs) -> None: +def print_rank_zero(*args: Any, **kwargs: Any) -> None: # type: ignore[misc] """Equivalent to print, but only runs on the process with rank 0.""" print(*args, **kwargs)
{"golden_diff": "diff --git a/lightly/utils/dist.py b/lightly/utils/dist.py\n--- a/lightly/utils/dist.py\n+++ b/lightly/utils/dist.py\n@@ -1,29 +1,29 @@\n-from typing import Optional, Tuple\n+from typing import Any, Callable, Optional, Tuple, TypeVar\n \n import torch\n import torch.distributed as dist\n+from torch.autograd.function import FunctionCtx\n \n \n class GatherLayer(torch.autograd.Function):\n \"\"\"Gather tensors from all processes, supporting backward propagation.\n \n This code was taken and adapted from here:\n- https://github.com/Spijkervet/SimCLR\n+ https://github.com/vturrisi/solo-learn/blob/b69b4bd27472593919956d9ac58902a301537a4d/solo/utils/misc.py#L187\n \n \"\"\"\n \n @staticmethod\n- def forward(ctx, input: torch.Tensor) -> Tuple[torch.Tensor, ...]:\n- ctx.save_for_backward(input)\n+ def forward(ctx, input: torch.Tensor) -> Tuple[torch.Tensor, ...]: # type: ignore\n output = [torch.empty_like(input) for _ in range(dist.get_world_size())]\n dist.all_gather(output, input)\n return tuple(output)\n \n @staticmethod\n- def backward(ctx, *grads: torch.Tensor) -> torch.Tensor:\n- (input,) = ctx.saved_tensors\n- grad_out = torch.empty_like(input)\n- grad_out[:] = grads[dist.get_rank()]\n+ def backward(ctx, *grads) -> torch.Tensor: # type: ignore\n+ all_gradients = torch.stack(grads)\n+ dist.all_reduce(all_gradients)\n+ grad_out = all_gradients[dist.get_rank()]\n return grad_out\n \n \n@@ -39,7 +39,7 @@\n \n def gather(input: torch.Tensor) -> Tuple[torch.Tensor]:\n \"\"\"Gathers this tensor from all processes. Supports backprop.\"\"\"\n- return GatherLayer.apply(input)\n+ return GatherLayer.apply(input) # type: ignore[no-any-return]\n \n \n def eye_rank(n: int, device: Optional[torch.device] = None) -> torch.Tensor:\n@@ -70,7 +70,10 @@\n return diag_mask\n \n \n-def rank_zero_only(fn):\n+R = TypeVar(\"R\")\n+\n+\n+def rank_zero_only(fn: Callable[..., R]) -> Callable[..., Optional[R]]:\n \"\"\"Decorator that only runs the function on the process with rank 0.\n \n Example:\n@@ -79,17 +82,17 @@\n >>> print(message)\n >>>\n >>> print_rank_zero(\"Hello from rank 0!\")\n-\n \"\"\"\n \n- def wrapped(*args, **kwargs):\n+ def wrapped(*args: Any, **kwargs: Any) -> Optional[R]:\n if rank() == 0:\n return fn(*args, **kwargs)\n+ return None\n \n return wrapped\n \n \n @rank_zero_only\n-def print_rank_zero(*args, **kwargs) -> None:\n+def print_rank_zero(*args: Any, **kwargs: Any) -> None: # type: ignore[misc]\n \"\"\"Equivalent to print, but only runs on the process with rank 0.\"\"\"\n print(*args, **kwargs)\n", "issue": "Bug in `GatherLayer.backward`\nHi,\r\n\r\nWe've been implementing a model at [cellarium-ml](https://github.com/cellarium-ai/cellarium-ml) using your `NTXentLoss`. Comparing the model training with a single GPU and two GPUs we noticed that they do not match. By investigating it we found an apparent bug in the `GatherLayer.backward` where gradients are not sum-reduced over GPUs. Here is our fixed version (https://github.com/cellarium-ai/cellarium-ml/blob/main/cellarium/ml/distributed/gather.py#L17-L21):\r\n\r\n```py\r\n @staticmethod\r\n def backward(ctx, *grads) -> torch.Tensor:\r\n grad_out = grads[dist.get_rank()].contiguous()\r\n dist.all_reduce(grad_out, op=dist.ReduceOp.SUM)\r\n return grad_out\r\n```\r\n\r\nand the [test](https://github.com/cellarium-ai/cellarium-ml/blob/main/tests/distributed/test_gather.py) we wrote. Would you agree that this is indeed a bug? 
I would be happy to contribute a PR with the fix.\n", "before_files": [{"content": "from typing import Optional, Tuple\n\nimport torch\nimport torch.distributed as dist\n\n\nclass GatherLayer(torch.autograd.Function):\n \"\"\"Gather tensors from all processes, supporting backward propagation.\n\n This code was taken and adapted from here:\n https://github.com/Spijkervet/SimCLR\n\n \"\"\"\n\n @staticmethod\n def forward(ctx, input: torch.Tensor) -> Tuple[torch.Tensor, ...]:\n ctx.save_for_backward(input)\n output = [torch.empty_like(input) for _ in range(dist.get_world_size())]\n dist.all_gather(output, input)\n return tuple(output)\n\n @staticmethod\n def backward(ctx, *grads: torch.Tensor) -> torch.Tensor:\n (input,) = ctx.saved_tensors\n grad_out = torch.empty_like(input)\n grad_out[:] = grads[dist.get_rank()]\n return grad_out\n\n\ndef rank() -> int:\n \"\"\"Returns the rank of the current process.\"\"\"\n return dist.get_rank() if dist.is_initialized() else 0\n\n\ndef world_size() -> int:\n \"\"\"Returns the current world size (number of distributed processes).\"\"\"\n return dist.get_world_size() if dist.is_initialized() else 1\n\n\ndef gather(input: torch.Tensor) -> Tuple[torch.Tensor]:\n \"\"\"Gathers this tensor from all processes. Supports backprop.\"\"\"\n return GatherLayer.apply(input)\n\n\ndef eye_rank(n: int, device: Optional[torch.device] = None) -> torch.Tensor:\n \"\"\"Returns an (n, n * world_size) zero matrix with the diagonal for the rank\n of this process set to 1.\n\n Example output where n=3, the current process has rank 1, and there are\n 4 processes in total:\n\n rank0 rank1 rank2 rank3\n 0 0 0 | 1 0 0 | 0 0 0 | 0 0 0\n 0 0 0 | 0 1 0 | 0 0 0 | 0 0 0\n 0 0 0 | 0 0 1 | 0 0 0 | 0 0 0\n\n Equivalent to torch.eye for undistributed settings or if world size == 1.\n\n Args:\n n:\n Size of the square matrix on a single process.\n device:\n Device on which the matrix should be created.\n\n \"\"\"\n rows = torch.arange(n, device=device, dtype=torch.long)\n cols = rows + rank() * n\n diag_mask = torch.zeros((n, n * world_size()), dtype=torch.bool)\n diag_mask[(rows, cols)] = True\n return diag_mask\n\n\ndef rank_zero_only(fn):\n \"\"\"Decorator that only runs the function on the process with rank 0.\n\n Example:\n >>> @rank_zero_only\n >>> def print_rank_zero(message: str):\n >>> print(message)\n >>>\n >>> print_rank_zero(\"Hello from rank 0!\")\n\n \"\"\"\n\n def wrapped(*args, **kwargs):\n if rank() == 0:\n return fn(*args, **kwargs)\n\n return wrapped\n\n\n@rank_zero_only\ndef print_rank_zero(*args, **kwargs) -> None:\n \"\"\"Equivalent to print, but only runs on the process with rank 0.\"\"\"\n print(*args, **kwargs)\n", "path": "lightly/utils/dist.py"}]}
1,691
724
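The accepted diff above differs slightly in shape from the reporter's proposal but has the same effect: the gradients received for all gathered outputs are summed across processes before the local rank's slice is returned. A sketch of the corrected layer follows; it assumes `torch` is installed and needs an initialized process group to actually be exercised:

```python
import torch
import torch.distributed as dist


class GatherLayer(torch.autograd.Function):
    """all_gather whose backward pass sum-reduces gradients across ranks."""

    @staticmethod
    def forward(ctx, x):
        out = [torch.empty_like(x) for _ in range(dist.get_world_size())]
        dist.all_gather(out, x)
        return tuple(out)

    @staticmethod
    def backward(ctx, *grads):
        # Each rank holds gradients for *all* gathered outputs; without the
        # all_reduce below, every rank keeps only its local contribution and
        # multi-GPU training diverges from the single-GPU run.
        stacked = torch.stack(grads)
        dist.all_reduce(stacked)  # default reduction op is SUM
        return stacked[dist.get_rank()]
```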
gh_patches_debug_23382
rasdani/github-patches
git_diff
coala__coala-1290
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `DocstyleDefinition`: Accept a single marker set also Via the normal constructor or a class method. </issue> <code> [start of coalib/bearlib/languages/documentation/DocstyleDefinition.py] 1 import os.path 2 3 from coalib.misc.Decorators import generate_eq, generate_repr, enforce_signature 4 from coalib.parsing.ConfParser import ConfParser 5 6 7 @generate_repr() 8 @generate_eq("language", "docstyle", "markers") 9 class DocstyleDefinition: 10 """ 11 The DocstyleDefinition class holds values that identify a certain type of 12 documentation comment (for which language, documentation style/tool used 13 etc.). 14 """ 15 16 @enforce_signature 17 def __init__(self, language: str, docstyle: str, markers): 18 """ 19 Instantiates a new DocstyleDefinition. 20 21 :param language: The case insensitive programming language of the 22 documentation comment, e.g. `"CPP"` for C++ or 23 `"PYTHON3"`. 24 :param docstyle: The case insensitive documentation style/tool used 25 to document code, e.g. `"default"` or `"doxygen"`. 26 :param markers: An iterable of marker/delimiter string iterables that 27 identify a documentation comment. See `markers` 28 property for more details on markers. 29 """ 30 self._language = language.lower() 31 self._docstyle = docstyle.lower() 32 self._markers = tuple(tuple(marker_set) for marker_set in markers) 33 34 # Check marker set dimensions. 35 for marker_set in self._markers: 36 length = len(marker_set) 37 if length != 3: 38 raise ValueError("Length of a given marker set was not 3 (was " 39 "actually {}).".format(length)) 40 41 @property 42 def language(self): 43 """ 44 The programming language. 45 46 :return: A lower-case string defining the programming language (i.e. 47 "cpp" or "python"). 48 """ 49 return self._language 50 51 @property 52 def docstyle(self): 53 """ 54 The documentation style/tool used to document code. 55 56 :return: A lower-case string defining the docstyle (i.e. "default" or 57 "doxygen"). 58 """ 59 return self._docstyle 60 61 @property 62 def markers(self): 63 """ 64 A tuple of marker sets that identify a documentation comment. 65 66 Marker sets consist of 3 entries where the first is the start-marker, 67 the second one the each-line marker and the last one the end-marker. 68 For example a marker tuple with a single marker set 69 `(("/**", "*", "*/"),)` would match following documentation comment: 70 71 ``` 72 /** 73 * This is documentation. 74 */ 75 ``` 76 77 It's also possible to supply an empty each-line marker 78 (`("/**", "", "*/")`): 79 80 ``` 81 /** 82 This is more documentation. 83 */ 84 ``` 85 86 Markers are matched "greedy", that means it will match as many 87 each-line markers as possible. I.e. for `("///", "///", "///")`): 88 89 ``` 90 /// Brief documentation. 91 /// 92 /// Detailed documentation. 93 ``` 94 95 :return: A tuple of marker/delimiter string tuples that identify a 96 documentation comment. 97 """ 98 return self._markers 99 100 @classmethod 101 @enforce_signature 102 def load(cls, language: str, docstyle: str): 103 """ 104 Loads a `DocstyleDefinition` from the coala docstyle definition files. 105 106 This function considers all settings inside the according coalang-files 107 as markers. 108 109 :param language: The case insensitive programming language of 110 the documentation comment as a string. 111 :param docstyle: The case insensitive documentation 112 style/tool used to document code, e.g. 113 `"default"` or `"doxygen"`. 
114 :raises FileNotFoundError: Raised when the given docstyle was not 115 found. 116 :raises KeyError: Raised when the given language is not 117 defined for given docstyle. 118 :return: The `DocstyleDefinition` for given language 119 and docstyle. 120 """ 121 122 docstyle = docstyle.lower() 123 124 language_config_parser = ConfParser(remove_empty_iter_elements=False) 125 try: 126 docstyle_settings = language_config_parser.parse( 127 os.path.dirname(__file__) + "/" + docstyle + ".coalang") 128 except FileNotFoundError: 129 raise FileNotFoundError("Docstyle definition " + repr(docstyle) + 130 " not found.") 131 132 language = language.lower() 133 134 try: 135 docstyle_settings = docstyle_settings[language] 136 except KeyError: 137 raise KeyError("Language {} is not defined for docstyle {}." 138 .format(repr(language), repr(docstyle))) 139 140 marker_sets = (tuple(value) 141 for key, value in 142 filter(lambda kv: not kv[0].startswith("comment"), 143 docstyle_settings.contents.items())) 144 145 return cls(language, docstyle, marker_sets) 146 [end of coalib/bearlib/languages/documentation/DocstyleDefinition.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/coalib/bearlib/languages/documentation/DocstyleDefinition.py b/coalib/bearlib/languages/documentation/DocstyleDefinition.py --- a/coalib/bearlib/languages/documentation/DocstyleDefinition.py +++ b/coalib/bearlib/languages/documentation/DocstyleDefinition.py @@ -23,12 +23,19 @@ `"PYTHON3"`. :param docstyle: The case insensitive documentation style/tool used to document code, e.g. `"default"` or `"doxygen"`. - :param markers: An iterable of marker/delimiter string iterables that + :param markers: An iterable of marker/delimiter string iterables + or a single marker/delimiter string iterable that identify a documentation comment. See `markers` property for more details on markers. """ self._language = language.lower() self._docstyle = docstyle.lower() + + # Check and modify tuple if only one marker_set exists. + markers = tuple(markers) + if len(markers) == 3 and all(isinstance(x, str) for x in markers): + markers = (markers,) + self._markers = tuple(tuple(marker_set) for marker_set in markers) # Check marker set dimensions.
{"golden_diff": "diff --git a/coalib/bearlib/languages/documentation/DocstyleDefinition.py b/coalib/bearlib/languages/documentation/DocstyleDefinition.py\n--- a/coalib/bearlib/languages/documentation/DocstyleDefinition.py\n+++ b/coalib/bearlib/languages/documentation/DocstyleDefinition.py\n@@ -23,12 +23,19 @@\n `\"PYTHON3\"`.\n :param docstyle: The case insensitive documentation style/tool used\n to document code, e.g. `\"default\"` or `\"doxygen\"`.\n- :param markers: An iterable of marker/delimiter string iterables that\n+ :param markers: An iterable of marker/delimiter string iterables\n+ or a single marker/delimiter string iterable that\n identify a documentation comment. See `markers`\n property for more details on markers.\n \"\"\"\n self._language = language.lower()\n self._docstyle = docstyle.lower()\n+\n+ # Check and modify tuple if only one marker_set exists.\n+ markers = tuple(markers)\n+ if len(markers) == 3 and all(isinstance(x, str) for x in markers):\n+ markers = (markers,)\n+\n self._markers = tuple(tuple(marker_set) for marker_set in markers)\n \n # Check marker set dimensions.\n", "issue": "`DocstyleDefinition`: Accept a single marker set also\nVia the normal constructor or a class method.\n\n", "before_files": [{"content": "import os.path\n\nfrom coalib.misc.Decorators import generate_eq, generate_repr, enforce_signature\nfrom coalib.parsing.ConfParser import ConfParser\n\n\n@generate_repr()\n@generate_eq(\"language\", \"docstyle\", \"markers\")\nclass DocstyleDefinition:\n \"\"\"\n The DocstyleDefinition class holds values that identify a certain type of\n documentation comment (for which language, documentation style/tool used\n etc.).\n \"\"\"\n\n @enforce_signature\n def __init__(self, language: str, docstyle: str, markers):\n \"\"\"\n Instantiates a new DocstyleDefinition.\n\n :param language: The case insensitive programming language of the\n documentation comment, e.g. `\"CPP\"` for C++ or\n `\"PYTHON3\"`.\n :param docstyle: The case insensitive documentation style/tool used\n to document code, e.g. `\"default\"` or `\"doxygen\"`.\n :param markers: An iterable of marker/delimiter string iterables that\n identify a documentation comment. See `markers`\n property for more details on markers.\n \"\"\"\n self._language = language.lower()\n self._docstyle = docstyle.lower()\n self._markers = tuple(tuple(marker_set) for marker_set in markers)\n\n # Check marker set dimensions.\n for marker_set in self._markers:\n length = len(marker_set)\n if length != 3:\n raise ValueError(\"Length of a given marker set was not 3 (was \"\n \"actually {}).\".format(length))\n\n @property\n def language(self):\n \"\"\"\n The programming language.\n\n :return: A lower-case string defining the programming language (i.e.\n \"cpp\" or \"python\").\n \"\"\"\n return self._language\n\n @property\n def docstyle(self):\n \"\"\"\n The documentation style/tool used to document code.\n\n :return: A lower-case string defining the docstyle (i.e. 
\"default\" or\n \"doxygen\").\n \"\"\"\n return self._docstyle\n\n @property\n def markers(self):\n \"\"\"\n A tuple of marker sets that identify a documentation comment.\n\n Marker sets consist of 3 entries where the first is the start-marker,\n the second one the each-line marker and the last one the end-marker.\n For example a marker tuple with a single marker set\n `((\"/**\", \"*\", \"*/\"),)` would match following documentation comment:\n\n ```\n /**\n * This is documentation.\n */\n ```\n\n It's also possible to supply an empty each-line marker\n (`(\"/**\", \"\", \"*/\")`):\n\n ```\n /**\n This is more documentation.\n */\n ```\n\n Markers are matched \"greedy\", that means it will match as many\n each-line markers as possible. I.e. for `(\"///\", \"///\", \"///\")`):\n\n ```\n /// Brief documentation.\n ///\n /// Detailed documentation.\n ```\n\n :return: A tuple of marker/delimiter string tuples that identify a\n documentation comment.\n \"\"\"\n return self._markers\n\n @classmethod\n @enforce_signature\n def load(cls, language: str, docstyle: str):\n \"\"\"\n Loads a `DocstyleDefinition` from the coala docstyle definition files.\n\n This function considers all settings inside the according coalang-files\n as markers.\n\n :param language: The case insensitive programming language of\n the documentation comment as a string.\n :param docstyle: The case insensitive documentation\n style/tool used to document code, e.g.\n `\"default\"` or `\"doxygen\"`.\n :raises FileNotFoundError: Raised when the given docstyle was not\n found.\n :raises KeyError: Raised when the given language is not\n defined for given docstyle.\n :return: The `DocstyleDefinition` for given language\n and docstyle.\n \"\"\"\n\n docstyle = docstyle.lower()\n\n language_config_parser = ConfParser(remove_empty_iter_elements=False)\n try:\n docstyle_settings = language_config_parser.parse(\n os.path.dirname(__file__) + \"/\" + docstyle + \".coalang\")\n except FileNotFoundError:\n raise FileNotFoundError(\"Docstyle definition \" + repr(docstyle) +\n \" not found.\")\n\n language = language.lower()\n\n try:\n docstyle_settings = docstyle_settings[language]\n except KeyError:\n raise KeyError(\"Language {} is not defined for docstyle {}.\"\n .format(repr(language), repr(docstyle)))\n\n marker_sets = (tuple(value)\n for key, value in\n filter(lambda kv: not kv[0].startswith(\"comment\"),\n docstyle_settings.contents.items()))\n\n return cls(language, docstyle, marker_sets)\n", "path": "coalib/bearlib/languages/documentation/DocstyleDefinition.py"}]}
1,917
285
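The heart of the diff above is a small normalisation step: a bare 3-tuple of marker strings is promoted to a one-element tuple of marker sets, so both calling conventions reach the same internal shape. Extracted as a stand-alone function (the name `normalize_markers` is illustrative, not from the coala codebase):

```python
def normalize_markers(markers):
    # Mirrors the check added in the diff: exactly three strings means the
    # caller passed a single marker set rather than an iterable of sets.
    markers = tuple(markers)
    if len(markers) == 3 and all(isinstance(m, str) for m in markers):
        markers = (markers,)
    return tuple(tuple(marker_set) for marker_set in markers)


assert normalize_markers(("/**", "*", "*/")) == (("/**", "*", "*/"),)
assert normalize_markers([("/**", "*", "*/"), ("///", "///", "///")]) == (
    ("/**", "*", "*/"),
    ("///", "///", "///"),
)
```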
gh_patches_debug_4297
rasdani/github-patches
git_diff
NVIDIA-Merlin__NVTabular-1312
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Getting error when loading the TF4Rec PyTorch model to the TIS **Describe the bug** I am getting the following error when I load a trained TF4Rec PyTorch to TIS: ``` | t4r_pytorch_pt | 1 | UNAVAILABLE: Internal: ImportError: cannot import name '_convert_string2pytorch_dty | | | | pe' from 'nvtabular.inference.triton' (/nvtabular/nvtabular/inference/triton/__init | | | | __.py) | | | | | | | | At: | | | | /workspace/models/t4r_pytorch_pt/1/model.py(42): <module> | | | | <frozen importlib._bootstrap>(219): _call_with_frames_removed | | | | <frozen importlib._bootstrap_external>(848): exec_module | | | | <frozen importlib._bootstrap>(686): _load_unlocked | | | | <frozen importlib._bootstrap>(975): _find_and_load_unlocked | | | | <frozen importlib._bootstrap>(991): _find_and_load | +-----------------+---------+--------------------------------------------------------- ``` **Steps/Code to reproduce bug** Run the 02 and 03 notebooks Transformers4Rec tutorial [notebooks](https://github.com/NVIDIA-Merlin/Transformers4Rec/tree/main/examples/tutorial) to train the model. Then serve the model to TIS based on the instructions given on the [inference notebook](https://github.com/NVIDIA-Merlin/Transformers4Rec/blob/main/examples/tutorial/04-Inference-with-Triton.ipynb). `Oct-2019.parquet` Dataset can be downloaded from here: https://drive.google.com/drive/u/0/folders/1nTuG6UHWOEaZnBJj7YSIVvnphE1zGc1h **Expected behavior** Model should be loaded to the TIS without issue. **Environment details (please complete the following information):** - Environment location: [Bare-metal, Docker, Cloud(specify cloud provider)] : Docker - Method of NVTabular install: [conda, Docker, or from source]: Docker `merlin-inference:21.11` and `merlin-pytoch-training:21.11` ` Please do `git pull origin main` && `pip install -e .` to pull the latest main branch. - If method of install is [Docker], provide `docker pull` & `docker run` commands used This issue was also submitted by a user on TF4Rec GH repo- https://github.com/NVIDIA-Merlin/Transformers4Rec/issues/339 </issue> <code> [start of nvtabular/inference/triton/__init__.py] 1 # Copyright (c) 2021, NVIDIA CORPORATION. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 # 15 import json 16 import os 17 18 import pandas as pd 19 20 # this needs to be before any modules that import protobuf 21 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" 22 23 import tritonclient.grpc as grpcclient # noqa 24 from tritonclient.utils import np_to_triton_dtype # noqa 25 26 from nvtabular.dispatch import _is_list_dtype, _is_string_dtype, _make_df # noqa 27 from nvtabular.inference.triton.ensemble import ( # noqa 28 export_hugectr_ensemble, 29 export_pytorch_ensemble, 30 export_tensorflow_ensemble, 31 generate_hugectr_model, 32 generate_nvtabular_model, 33 ) 34 35 36 def convert_df_to_triton_input(column_names, batch, input_class=grpcclient.InferInput): 37 columns = [(col, batch[col]) for col in column_names] 38 inputs = [] 39 for i, (name, col) in enumerate(columns): 40 if _is_list_dtype(col): 41 if isinstance(col, pd.Series): 42 raise ValueError("this function doesn't support CPU list values yet") 43 inputs.append( 44 _convert_column_to_triton_input( 45 col._column.offsets.values_host.astype("int64"), name + "__nnzs", input_class 46 ) 47 ) 48 inputs.append( 49 _convert_column_to_triton_input( 50 col.list.leaves.values_host.astype("int64"), name + "__values", input_class 51 ) 52 ) 53 else: 54 values = col.values if isinstance(col, pd.Series) else col.values_host 55 inputs.append(_convert_column_to_triton_input(values, name, input_class)) 56 return inputs 57 58 59 def _convert_column_to_triton_input(col, name, input_class=grpcclient.InferInput): 60 col = col.reshape(len(col), 1) 61 input_tensor = input_class(name, col.shape, np_to_triton_dtype(col.dtype)) 62 input_tensor.set_data_from_numpy(col) 63 return input_tensor 64 65 66 def convert_triton_output_to_df(columns, response): 67 return _make_df({col: response.as_numpy(col) for col in columns}) 68 69 70 def get_column_types(path): 71 return json.load(open(os.path.join(path, "column_types.json"))) 72 73 74 def _convert_tensor(t): 75 out = t.as_numpy() 76 if len(out.shape) == 2: 77 out = out[:, 0] 78 # cudf doesn't seem to handle dtypes like |S15 or object that well 79 if _is_string_dtype(out.dtype): 80 out = out.astype("str") 81 return out 82 [end of nvtabular/inference/triton/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nvtabular/inference/triton/__init__.py b/nvtabular/inference/triton/__init__.py --- a/nvtabular/inference/triton/__init__.py +++ b/nvtabular/inference/triton/__init__.py @@ -25,6 +25,7 @@ from nvtabular.dispatch import _is_list_dtype, _is_string_dtype, _make_df # noqa from nvtabular.inference.triton.ensemble import ( # noqa + _convert_string2pytorch_dtype, export_hugectr_ensemble, export_pytorch_ensemble, export_tensorflow_ensemble,
{"golden_diff": "diff --git a/nvtabular/inference/triton/__init__.py b/nvtabular/inference/triton/__init__.py\n--- a/nvtabular/inference/triton/__init__.py\n+++ b/nvtabular/inference/triton/__init__.py\n@@ -25,6 +25,7 @@\n \n from nvtabular.dispatch import _is_list_dtype, _is_string_dtype, _make_df # noqa\n from nvtabular.inference.triton.ensemble import ( # noqa\n+ _convert_string2pytorch_dtype,\n export_hugectr_ensemble,\n export_pytorch_ensemble,\n export_tensorflow_ensemble,\n", "issue": "[BUG] Getting error when loading the TF4Rec PyTorch model to the TIS\n**Describe the bug**\r\nI am getting the following error when I load a trained TF4Rec PyTorch to TIS:\r\n\r\n```\r\n | t4r_pytorch_pt | 1 | UNAVAILABLE: Internal: ImportError: cannot import name '_convert_string2pytorch_dty |\r\n| | | pe' from 'nvtabular.inference.triton' (/nvtabular/nvtabular/inference/triton/__init |\r\n| | | __.py) |\r\n| | | |\r\n| | | At: |\r\n| | | /workspace/models/t4r_pytorch_pt/1/model.py(42): <module> |\r\n| | | <frozen importlib._bootstrap>(219): _call_with_frames_removed |\r\n| | | <frozen importlib._bootstrap_external>(848): exec_module |\r\n| | | <frozen importlib._bootstrap>(686): _load_unlocked |\r\n| | | <frozen importlib._bootstrap>(975): _find_and_load_unlocked |\r\n| | | <frozen importlib._bootstrap>(991): _find_and_load |\r\n+-----------------+---------+---------------------------------------------------------\r\n```\r\n\r\n**Steps/Code to reproduce bug**\r\n\r\nRun the 02 and 03 notebooks Transformers4Rec tutorial [notebooks](https://github.com/NVIDIA-Merlin/Transformers4Rec/tree/main/examples/tutorial) to train the model. Then serve the model to TIS based on the instructions given on the [inference notebook](https://github.com/NVIDIA-Merlin/Transformers4Rec/blob/main/examples/tutorial/04-Inference-with-Triton.ipynb).\r\n\r\n`Oct-2019.parquet` Dataset can be downloaded from here: https://drive.google.com/drive/u/0/folders/1nTuG6UHWOEaZnBJj7YSIVvnphE1zGc1h\r\n\r\n**Expected behavior**\r\nModel should be loaded to the TIS without issue.\r\n\r\n**Environment details (please complete the following information):**\r\n - Environment location: [Bare-metal, Docker, Cloud(specify cloud provider)] : Docker\r\n - Method of NVTabular install: [conda, Docker, or from source]: Docker `merlin-inference:21.11` and `merlin-pytoch-training:21.11` `\r\n Please do `git pull origin main` && `pip install -e .` to pull the latest main branch.\r\n - If method of install is [Docker], provide `docker pull` & `docker run` commands used\r\n \r\nThis issue was also submitted by a user on TF4Rec GH repo- https://github.com/NVIDIA-Merlin/Transformers4Rec/issues/339\r\n\n", "before_files": [{"content": "# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport json\nimport os\n\nimport pandas as pd\n\n# this needs to be before any modules that import protobuf\nos.environ[\"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION\"] = \"python\"\n\nimport tritonclient.grpc as grpcclient # noqa\nfrom 
tritonclient.utils import np_to_triton_dtype # noqa\n\nfrom nvtabular.dispatch import _is_list_dtype, _is_string_dtype, _make_df # noqa\nfrom nvtabular.inference.triton.ensemble import ( # noqa\n export_hugectr_ensemble,\n export_pytorch_ensemble,\n export_tensorflow_ensemble,\n generate_hugectr_model,\n generate_nvtabular_model,\n)\n\n\ndef convert_df_to_triton_input(column_names, batch, input_class=grpcclient.InferInput):\n columns = [(col, batch[col]) for col in column_names]\n inputs = []\n for i, (name, col) in enumerate(columns):\n if _is_list_dtype(col):\n if isinstance(col, pd.Series):\n raise ValueError(\"this function doesn't support CPU list values yet\")\n inputs.append(\n _convert_column_to_triton_input(\n col._column.offsets.values_host.astype(\"int64\"), name + \"__nnzs\", input_class\n )\n )\n inputs.append(\n _convert_column_to_triton_input(\n col.list.leaves.values_host.astype(\"int64\"), name + \"__values\", input_class\n )\n )\n else:\n values = col.values if isinstance(col, pd.Series) else col.values_host\n inputs.append(_convert_column_to_triton_input(values, name, input_class))\n return inputs\n\n\ndef _convert_column_to_triton_input(col, name, input_class=grpcclient.InferInput):\n col = col.reshape(len(col), 1)\n input_tensor = input_class(name, col.shape, np_to_triton_dtype(col.dtype))\n input_tensor.set_data_from_numpy(col)\n return input_tensor\n\n\ndef convert_triton_output_to_df(columns, response):\n return _make_df({col: response.as_numpy(col) for col in columns})\n\n\ndef get_column_types(path):\n return json.load(open(os.path.join(path, \"column_types.json\")))\n\n\ndef _convert_tensor(t):\n out = t.as_numpy()\n if len(out.shape) == 2:\n out = out[:, 0]\n # cudf doesn't seem to handle dtypes like |S15 or object that well\n if _is_string_dtype(out.dtype):\n out = out.astype(\"str\")\n return out\n", "path": "nvtabular/inference/triton/__init__.py"}]}
2,027
146
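The one-line diff above is a package-level re-export: the traceback in the issue shows Triton's generated `model.py` importing `_convert_string2pytorch_dtype` from `nvtabular.inference.triton`, which only succeeds if the package's `__init__` binds that name. A toy single-file reproduction of the rule, using throwaway module objects rather than the real packages:

```python
import types

# Stand-ins for nvtabular.inference.triton and its ensemble submodule.
ensemble = types.ModuleType("pkg.ensemble")


def _convert_string2pytorch_dtype(dtype_str):
    return dtype_str  # placeholder body; only the name binding matters here


ensemble._convert_string2pytorch_dtype = _convert_string2pytorch_dtype

pkg = types.ModuleType("pkg")
# Before the fix: the package namespace never binds the helper, so
# `from pkg import _convert_string2pytorch_dtype` raises ImportError.
assert not hasattr(pkg, "_convert_string2pytorch_dtype")
# After the fix: the __init__-level re-export makes the import resolve.
pkg._convert_string2pytorch_dtype = ensemble._convert_string2pytorch_dtype
assert hasattr(pkg, "_convert_string2pytorch_dtype")
```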
gh_patches_debug_28929
rasdani/github-patches
git_diff
iterative__dvc-7729
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dvc list: Error on empty directory. # Bug Report Got error message on an empty directory, shouldn't it show nothing? like ls command. <!-- ## Issue name Issue names must follow the pattern `command: description` where the command is the dvc command that you are trying to run. The description should describe the consequence of the bug. Example: `repro: doesn't detect input changes` --> ## Description ![image](https://user-images.githubusercontent.com/6745454/115115559-bfcb1d80-9fc7-11eb-82d4-bb04d0413795.png) Error when list a empty path, strange behavior. Might relate to https://github.com/iterative/dvc/blob/daf07451f8e8f3e76a791c696b0ea175e8ed3ac1/dvc/repo/ls.py#L40-L41 <!-- A clear and concise description of what the bug is. --> ### Reproduce 1. git init 2. dvc init 3. mkdir empty 4. dvc list . empty <!-- Step list of how to reproduce the bug --> <!-- Example: 1. dvc init 2. Copy dataset.zip to the directory 3. dvc add dataset.zip 4. dvc run -d dataset.zip -o model ./train.sh 5. modify dataset.zip 6. dvc repro --> ### Expected Show nothing like ls command ![image](https://user-images.githubusercontent.com/6745454/115115651-2a7c5900-9fc8-11eb-9a86-884626d240ea.png) <!-- A clear and concise description of what you expect to happen. --> ### Environment information DVC version: 2.0.17+7e4851 --------------------------------- Platform: Python 3.8.8 on macOS-10.16-x86_64-i386-64bit Supports: All remotes Cache types: <https://error.dvc.org/no-dvc-cache> Caches: local Remotes: None Workspace directory: apfs on /dev/disk3s1s1 Repo: dvc, git <!-- This is required to ensure that we can reproduce the bug. --> **Output of `dvc doctor`:** ```console $ dvc doctor ``` **Additional Information (if any):** <!-- Please check https://github.com/iterative/dvc/wiki/Debugging-DVC on ways to gather more information regarding the issue. If applicable, please also provide a `--verbose` output of the command, eg: `dvc add --verbose`. If the issue is regarding the performance, please attach the profiling information and the benchmark comparisons. --> </issue> <code> [start of dvc/repo/ls.py] 1 import os 2 from itertools import chain 3 4 from dvc.exceptions import PathMissingError 5 6 7 def ls(url, path=None, rev=None, recursive=None, dvc_only=False): 8 """Methods for getting files and outputs for the repo. 9 10 Args: 11 url (str): the repo url 12 path (str, optional): relative path into the repo 13 rev (str, optional): SHA commit, branch or tag name 14 recursive (bool, optional): recursively walk the repo 15 dvc_only (bool, optional): show only DVC-artifacts 16 17 Returns: 18 list of `entry` 19 20 Notes: 21 `entry` is a dictionary with structure 22 { 23 "path": str, 24 "isout": bool, 25 "isdir": bool, 26 "isexec": bool, 27 } 28 """ 29 from . 
import Repo 30 31 with Repo.open(url, rev=rev, subrepos=True, uninitialized=True) as repo: 32 path = path or "" 33 34 ret = _ls(repo.repo_fs, path, recursive, dvc_only) 35 36 if path and not ret: 37 raise PathMissingError(path, repo, dvc_only=dvc_only) 38 39 ret_list = [] 40 for path, info in ret.items(): 41 info["path"] = path 42 ret_list.append(info) 43 ret_list.sort(key=lambda f: f["path"]) 44 return ret_list 45 46 47 def _ls(fs, path, recursive=None, dvc_only=False): 48 fs_path = fs.from_os_path(path) 49 50 try: 51 fs_path = fs.info(fs_path)["name"] 52 except FileNotFoundError: 53 return {} 54 55 infos = {} 56 for root, dirs, files in fs.walk( 57 fs_path, dvcfiles=True, dvc_only=dvc_only 58 ): 59 entries = chain(files, dirs) if not recursive else files 60 61 for entry in entries: 62 entry_fs_path = fs.path.join(root, entry) 63 relparts = fs.path.relparts(entry_fs_path, fs_path) 64 name = os.path.join(*relparts) 65 infos[name] = fs.info(entry_fs_path) 66 67 if not recursive: 68 break 69 70 if not infos and fs.isfile(fs_path): 71 infos[os.path.basename(path)] = fs.info(fs_path) 72 73 ret = {} 74 for name, info in infos.items(): 75 dvc_info = info.get("dvc_info", {}) 76 if dvc_info.get("outs") or not dvc_only: 77 ret[name] = { 78 "isout": dvc_info.get("isout", False), 79 "isdir": info["type"] == "directory", 80 "isexec": info.get("isexec", False), 81 } 82 83 return ret 84 [end of dvc/repo/ls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/repo/ls.py b/dvc/repo/ls.py --- a/dvc/repo/ls.py +++ b/dvc/repo/ls.py @@ -1,10 +1,22 @@ import os from itertools import chain +from typing import TYPE_CHECKING, Optional from dvc.exceptions import PathMissingError +if TYPE_CHECKING: + from dvc.fs.repo import RepoFileSystem -def ls(url, path=None, rev=None, recursive=None, dvc_only=False): + from . import Repo + + +def ls( + url: str, + path: Optional[str] = None, + rev: str = None, + recursive: bool = None, + dvc_only: bool = False, +): """Methods for getting files and outputs for the repo. Args: @@ -31,10 +43,7 @@ with Repo.open(url, rev=rev, subrepos=True, uninitialized=True) as repo: path = path or "" - ret = _ls(repo.repo_fs, path, recursive, dvc_only) - - if path and not ret: - raise PathMissingError(path, repo, dvc_only=dvc_only) + ret = _ls(repo, path, recursive, dvc_only) ret_list = [] for path, info in ret.items(): @@ -44,13 +53,16 @@ return ret_list -def _ls(fs, path, recursive=None, dvc_only=False): +def _ls( + repo: "Repo", path: str, recursive: bool = None, dvc_only: bool = False +): + fs: "RepoFileSystem" = repo.repo_fs fs_path = fs.from_os_path(path) try: fs_path = fs.info(fs_path)["name"] except FileNotFoundError: - return {} + raise PathMissingError(path, repo, dvc_only=dvc_only) infos = {} for root, dirs, files in fs.walk(
{"golden_diff": "diff --git a/dvc/repo/ls.py b/dvc/repo/ls.py\n--- a/dvc/repo/ls.py\n+++ b/dvc/repo/ls.py\n@@ -1,10 +1,22 @@\n import os\n from itertools import chain\n+from typing import TYPE_CHECKING, Optional\n \n from dvc.exceptions import PathMissingError\n \n+if TYPE_CHECKING:\n+ from dvc.fs.repo import RepoFileSystem\n \n-def ls(url, path=None, rev=None, recursive=None, dvc_only=False):\n+ from . import Repo\n+\n+\n+def ls(\n+ url: str,\n+ path: Optional[str] = None,\n+ rev: str = None,\n+ recursive: bool = None,\n+ dvc_only: bool = False,\n+):\n \"\"\"Methods for getting files and outputs for the repo.\n \n Args:\n@@ -31,10 +43,7 @@\n with Repo.open(url, rev=rev, subrepos=True, uninitialized=True) as repo:\n path = path or \"\"\n \n- ret = _ls(repo.repo_fs, path, recursive, dvc_only)\n-\n- if path and not ret:\n- raise PathMissingError(path, repo, dvc_only=dvc_only)\n+ ret = _ls(repo, path, recursive, dvc_only)\n \n ret_list = []\n for path, info in ret.items():\n@@ -44,13 +53,16 @@\n return ret_list\n \n \n-def _ls(fs, path, recursive=None, dvc_only=False):\n+def _ls(\n+ repo: \"Repo\", path: str, recursive: bool = None, dvc_only: bool = False\n+):\n+ fs: \"RepoFileSystem\" = repo.repo_fs\n fs_path = fs.from_os_path(path)\n \n try:\n fs_path = fs.info(fs_path)[\"name\"]\n except FileNotFoundError:\n- return {}\n+ raise PathMissingError(path, repo, dvc_only=dvc_only)\n \n infos = {}\n for root, dirs, files in fs.walk(\n", "issue": "dvc list: Error on empty directory. \n# Bug Report\r\n\r\nGot error message on an empty directory, shouldn't it show nothing? like ls command.\r\n\r\n\r\n<!--\r\n## Issue name\r\n\r\nIssue names must follow the pattern `command: description` where the command is the dvc command that you are trying to run. The description should describe the consequence of the bug. \r\n\r\nExample: `repro: doesn't detect input changes`\r\n-->\r\n\r\n## Description\r\n![image](https://user-images.githubusercontent.com/6745454/115115559-bfcb1d80-9fc7-11eb-82d4-bb04d0413795.png)\r\nError when list a empty path, strange behavior.\r\nMight relate to https://github.com/iterative/dvc/blob/daf07451f8e8f3e76a791c696b0ea175e8ed3ac1/dvc/repo/ls.py#L40-L41\r\n\r\n<!--\r\nA clear and concise description of what the bug is.\r\n-->\r\n\r\n### Reproduce\r\n\r\n1. git init\r\n2. dvc init\r\n3. mkdir empty\r\n4. dvc list . empty\r\n\r\n<!--\r\nStep list of how to reproduce the bug\r\n-->\r\n\r\n<!--\r\nExample:\r\n\r\n1. dvc init\r\n2. Copy dataset.zip to the directory\r\n3. dvc add dataset.zip\r\n4. dvc run -d dataset.zip -o model ./train.sh\r\n5. modify dataset.zip\r\n6. 
dvc repro\r\n-->\r\n\r\n### Expected\r\nShow nothing like ls command\r\n![image](https://user-images.githubusercontent.com/6745454/115115651-2a7c5900-9fc8-11eb-9a86-884626d240ea.png)\r\n\r\n<!--\r\nA clear and concise description of what you expect to happen.\r\n-->\r\n\r\n### Environment information\r\nDVC version: 2.0.17+7e4851\r\n---------------------------------\r\nPlatform: Python 3.8.8 on macOS-10.16-x86_64-i386-64bit\r\nSupports: All remotes\r\nCache types: <https://error.dvc.org/no-dvc-cache>\r\nCaches: local\r\nRemotes: None\r\nWorkspace directory: apfs on /dev/disk3s1s1\r\nRepo: dvc, git\r\n<!--\r\nThis is required to ensure that we can reproduce the bug.\r\n-->\r\n\r\n**Output of `dvc doctor`:**\r\n\r\n```console\r\n$ dvc doctor\r\n```\r\n\r\n**Additional Information (if any):**\r\n\r\n<!--\r\nPlease check https://github.com/iterative/dvc/wiki/Debugging-DVC on ways to gather more information regarding the issue.\r\n\r\nIf applicable, please also provide a `--verbose` output of the command, eg: `dvc add --verbose`.\r\nIf the issue is regarding the performance, please attach the profiling information and the benchmark comparisons.\r\n-->\r\n\n", "before_files": [{"content": "import os\nfrom itertools import chain\n\nfrom dvc.exceptions import PathMissingError\n\n\ndef ls(url, path=None, rev=None, recursive=None, dvc_only=False):\n \"\"\"Methods for getting files and outputs for the repo.\n\n Args:\n url (str): the repo url\n path (str, optional): relative path into the repo\n rev (str, optional): SHA commit, branch or tag name\n recursive (bool, optional): recursively walk the repo\n dvc_only (bool, optional): show only DVC-artifacts\n\n Returns:\n list of `entry`\n\n Notes:\n `entry` is a dictionary with structure\n {\n \"path\": str,\n \"isout\": bool,\n \"isdir\": bool,\n \"isexec\": bool,\n }\n \"\"\"\n from . import Repo\n\n with Repo.open(url, rev=rev, subrepos=True, uninitialized=True) as repo:\n path = path or \"\"\n\n ret = _ls(repo.repo_fs, path, recursive, dvc_only)\n\n if path and not ret:\n raise PathMissingError(path, repo, dvc_only=dvc_only)\n\n ret_list = []\n for path, info in ret.items():\n info[\"path\"] = path\n ret_list.append(info)\n ret_list.sort(key=lambda f: f[\"path\"])\n return ret_list\n\n\ndef _ls(fs, path, recursive=None, dvc_only=False):\n fs_path = fs.from_os_path(path)\n\n try:\n fs_path = fs.info(fs_path)[\"name\"]\n except FileNotFoundError:\n return {}\n\n infos = {}\n for root, dirs, files in fs.walk(\n fs_path, dvcfiles=True, dvc_only=dvc_only\n ):\n entries = chain(files, dirs) if not recursive else files\n\n for entry in entries:\n entry_fs_path = fs.path.join(root, entry)\n relparts = fs.path.relparts(entry_fs_path, fs_path)\n name = os.path.join(*relparts)\n infos[name] = fs.info(entry_fs_path)\n\n if not recursive:\n break\n\n if not infos and fs.isfile(fs_path):\n infos[os.path.basename(path)] = fs.info(fs_path)\n\n ret = {}\n for name, info in infos.items():\n dvc_info = info.get(\"dvc_info\", {})\n if dvc_info.get(\"outs\") or not dvc_only:\n ret[name] = {\n \"isout\": dvc_info.get(\"isout\", False),\n \"isdir\": info[\"type\"] == \"directory\",\n \"isexec\": info.get(\"isexec\", False),\n }\n\n return ret\n", "path": "dvc/repo/ls.py"}]}
1,941
449
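The dvc record above turns an empty-directory listing from an error into an empty result by moving the existence check into `_ls`: a missing path raises, while an existing-but-empty directory yields nothing. A minimal sketch of that control flow, using a hypothetical in-memory stand-in for dvc's `RepoFileSystem` (the real API differs):

```python
class PathMissingError(Exception):
    """Raised when the requested path does not exist in the repo."""


def _ls(fs: dict, path: str) -> dict:
    # fs is a toy mapping of path -> list of child names; a missing key
    # plays the role of fs.info() raising FileNotFoundError.
    if path not in fs:
        raise PathMissingError(path)
    # An existing directory with no children is simply an empty listing.
    return {name: {"isdir": False} for name in fs[path]}


fs = {"": ["data.xml"], "empty": []}
assert _ls(fs, "empty") == {}   # `dvc list . empty` -> shows nothing, like ls
try:
    _ls(fs, "missing")          # a truly absent path still errors
except PathMissingError as exc:
    print(f"ERROR: {exc} does not exist")
```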
gh_patches_debug_14771
rasdani/github-patches
git_diff
litestar-org__litestar-992
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: Running `starlite run` after installing starlite[cli] gives error about missing cryptography package The error is here: ``` Traceback (most recent call last): File "C:\Users\hanne\Documents\Programme\analyze-wiktionary\.venv\lib\site-packages\starlite\middleware\session\cookie_backend.py", line 20, in <module> from cryptography.exceptions import InvalidTag ModuleNotFoundError: No module named 'cryptography' The above exception was the direct cause of the following exception: Traceback (most recent call last): File "C:\Python310\lib\runpy.py", line 196, in _run_module_as_main return _run_code(code, main_globals, None, File "C:\Python310\lib\runpy.py", line 86, in _run_code exec(code, run_globals) File "C:\Users\hanne\Documents\Programme\analyze-wiktionary\.venv\Scripts\starlite.exe\__main__.py", line 4, in <module> File "C:\Users\hanne\Documents\Programme\analyze-wiktionary\.venv\lib\site-packages\starlite\cli.py", line 41, in <module> from starlite.middleware.session import SessionMiddleware File "C:\Users\hanne\Documents\Programme\analyze-wiktionary\.venv\lib\site-packages\starlite\middleware\session\__init__.py", line 2, in <module> from .cookie_backend import ( File "C:\Users\hanne\Documents\Programme\analyze-wiktionary\.venv\lib\site-packages\starlite\middleware\session\cookie_backend.py", line 23, in <module> raise MissingDependencyException("cryptography is not installed") from e starlite.exceptions.base_exceptions.MissingDependencyException: cryptography is not installed ``` I thought it might be a good idea to install the package automatically with the CLI extra. (Or to update the [docs](https://starlite-api.github.io/starlite/usage/19-cli/?h=uvicorn) if I'm missing something). My versions: Windows, Python 3.10, starlite 1.46.0 PS: Thank you all for the great amount of effort you spend on this project! </issue> <code> [start of starlite/middleware/session/__init__.py] 1 from .base import SessionMiddleware 2 from .cookie_backend import ( 3 CookieBackendConfig as SessionCookieConfig, # backwards compatible export 4 ) 5 6 __all__ = [ 7 "SessionMiddleware", 8 "SessionCookieConfig", 9 ] 10 [end of starlite/middleware/session/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/starlite/middleware/session/__init__.py b/starlite/middleware/session/__init__.py --- a/starlite/middleware/session/__init__.py +++ b/starlite/middleware/session/__init__.py @@ -1,9 +1,27 @@ +from typing import Any + +from starlite.utils import warn_deprecation + from .base import SessionMiddleware -from .cookie_backend import ( - CookieBackendConfig as SessionCookieConfig, # backwards compatible export -) - -__all__ = [ - "SessionMiddleware", - "SessionCookieConfig", -] + + +def __getattr__(name: str) -> Any: + """Provide lazy importing as per https://peps.python.org/pep-0562/""" + + if name != "SessionCookieConfig": + raise AttributeError(f"Module {__package__} has no attribute {name}") + + from .cookie_backend import CookieBackendConfig + + warn_deprecation( + deprecated_name=f"{name} from {__package__}", + kind="import", + alternative="'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'", + version="1.47.0", + ) + + globals()[name] = CookieBackendConfig + return CookieBackendConfig + + +__all__ = ["SessionMiddleware"]
{"golden_diff": "diff --git a/starlite/middleware/session/__init__.py b/starlite/middleware/session/__init__.py\n--- a/starlite/middleware/session/__init__.py\n+++ b/starlite/middleware/session/__init__.py\n@@ -1,9 +1,27 @@\n+from typing import Any\n+\n+from starlite.utils import warn_deprecation\n+\n from .base import SessionMiddleware\n-from .cookie_backend import (\n- CookieBackendConfig as SessionCookieConfig, # backwards compatible export\n-)\n-\n-__all__ = [\n- \"SessionMiddleware\",\n- \"SessionCookieConfig\",\n-]\n+\n+\n+def __getattr__(name: str) -> Any:\n+ \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n+\n+ if name != \"SessionCookieConfig\":\n+ raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n+\n+ from .cookie_backend import CookieBackendConfig\n+\n+ warn_deprecation(\n+ deprecated_name=f\"{name} from {__package__}\",\n+ kind=\"import\",\n+ alternative=\"'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'\",\n+ version=\"1.47.0\",\n+ )\n+\n+ globals()[name] = CookieBackendConfig\n+ return CookieBackendConfig\n+\n+\n+__all__ = [\"SessionMiddleware\"]\n", "issue": "Bug: Running `starlite run` after installing starlite[cli] gives error about missing cryptography package\nThe error is here:\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\hanne\\Documents\\Programme\\analyze-wiktionary\\.venv\\lib\\site-packages\\starlite\\middleware\\session\\cookie_backend.py\", line 20, \r\nin <module>\r\n from cryptography.exceptions import InvalidTag\r\nModuleNotFoundError: No module named 'cryptography'\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Python310\\lib\\runpy.py\", line 196, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"C:\\Python310\\lib\\runpy.py\", line 86, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\hanne\\Documents\\Programme\\analyze-wiktionary\\.venv\\Scripts\\starlite.exe\\__main__.py\", line 4, in <module>\r\n File \"C:\\Users\\hanne\\Documents\\Programme\\analyze-wiktionary\\.venv\\lib\\site-packages\\starlite\\cli.py\", line 41, in <module>\r\n from starlite.middleware.session import SessionMiddleware\r\n File \"C:\\Users\\hanne\\Documents\\Programme\\analyze-wiktionary\\.venv\\lib\\site-packages\\starlite\\middleware\\session\\__init__.py\", line 2, in <module>\r\n from .cookie_backend import (\r\n File \"C:\\Users\\hanne\\Documents\\Programme\\analyze-wiktionary\\.venv\\lib\\site-packages\\starlite\\middleware\\session\\cookie_backend.py\", line 23, \r\nin <module>\r\n raise MissingDependencyException(\"cryptography is not installed\") from e\r\nstarlite.exceptions.base_exceptions.MissingDependencyException: cryptography is not installed\r\n```\r\n\r\nI thought it might be a good idea to install the package automatically with the CLI extra. (Or to update the [docs](https://starlite-api.github.io/starlite/usage/19-cli/?h=uvicorn) if I'm missing something).\r\n\r\nMy versions: Windows, Python 3.10, starlite 1.46.0 \r\n\r\nPS: Thank you all for the great amount of effort you spend on this project!\n", "before_files": [{"content": "from .base import SessionMiddleware\nfrom .cookie_backend import (\n CookieBackendConfig as SessionCookieConfig, # backwards compatible export\n)\n\n__all__ = [\n \"SessionMiddleware\",\n \"SessionCookieConfig\",\n]\n", "path": "starlite/middleware/session/__init__.py"}]}
1,129
297
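The starlite golden diff above leans on PEP 562's module-level `__getattr__` so that the import needing the optional `cryptography` dependency only runs when the deprecated name is actually requested. A generic standalone sketch of the pattern (hypothetical module and names, not starlite's own code):

```python
# mypkg/__init__.py -- lazy, deprecation-warning re-export via PEP 562
import warnings
from typing import Any

__all__ = ["new_function"]


def new_function() -> str:
    return "use me instead"


def __getattr__(name: str) -> Any:
    # Called only for names not found by normal lookup, so any heavy or
    # optional import placed here is deferred until first access.
    if name != "old_function":
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

    warnings.warn(
        "old_function is deprecated; import new_function instead",
        DeprecationWarning,
        stacklevel=2,
    )
    globals()[name] = new_function  # cache so __getattr__ fires only once
    return new_function
```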
gh_patches_debug_19426
rasdani/github-patches
git_diff
nautobot__nautobot-975
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `::1/128` is not a valid prefix <!-- NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED. This form is only for reporting reproducible bugs. If you need assistance with Nautobot installation, or if you have a general question, please start a discussion instead: https://github.com/nautobot/nautobot/discussions Please describe the environment in which you are running Nautobot. Be sure that you are running an unmodified instance of the latest stable release before submitting a bug report, and that any plugins have been disabled. --> ### Environment * Python version: 3.6 * Nautobot version: 1.1.3 <!-- Describe in detail the exact steps that someone else can take to reproduce this bug using the current stable release of Nautobot. Begin with the creation of any necessary database objects and call out every operation being performed explicitly. If reporting a bug in the REST API, be sure to reconstruct the raw HTTP request(s) being made: Don't rely on a client library such as pynautobot. --> When trying to create the prefix `::1/128` I get the following error: ```no-highlight <class 'netaddr.core.AddrFormatError'> invalid IPNetwork 0.0.0.1/128 ``` Both Python netaddr and ipaddress modules see this as a valid IPNetwork. ### Steps to Reproduce 1. Create a prefix or aggregate using the prefix `::1/128` <!-- What did you expect to happen? --> ### Expected Behavior Prefix created <!-- What happened instead? --> ### Observed Behavior ``` invalid IPNetwork 0.0.0.1/128 ``` </issue> <code> [start of nautobot/ipam/fields.py] 1 from django.core.exceptions import ValidationError 2 from django.db import models 3 from django.utils.datastructures import DictWrapper 4 import netaddr 5 6 from .formfields import IPNetworkFormField 7 8 9 class VarbinaryIPField(models.BinaryField): 10 """ 11 IP network address 12 """ 13 14 description = "IP network address" 15 16 def __init__(self, **kwargs): 17 super().__init__(**kwargs) 18 19 def db_type(self, connection): 20 """Returns the correct field type for a given database vendor.""" 21 22 # Use 'bytea' type for PostgreSQL. 23 if connection.vendor == "postgresql": 24 return "bytea" 25 26 # Or 'varbinary' for everyone else. 27 return "varbinary(16)" 28 29 def value_to_string(self, obj): 30 """IPField is serialized as str(IPAddress())""" 31 value = self.value_from_object(obj) 32 if not value: 33 return value 34 35 return str(self._parse_address(value)) 36 37 def _parse_address(self, value): 38 """ 39 Parse `str`, `bytes` (varbinary), or `netaddr.IPAddress to `netaddr.IPAddress`. 
40 """ 41 try: 42 value = int.from_bytes(value, "big") 43 except TypeError: 44 pass # It's a string 45 46 try: 47 return netaddr.IPAddress(value) 48 except netaddr.AddrFormatError: 49 raise ValidationError("Invalid IP address format: {}".format(value)) 50 except (TypeError, ValueError) as e: 51 raise ValidationError(e) 52 53 def from_db_value(self, value, expression, connection): 54 """Converts DB (varbinary) to Python (str).""" 55 return self.to_python(value) 56 57 def to_python(self, value): 58 """Converts `value` to Python (str).""" 59 if isinstance(value, netaddr.IPAddress): 60 return str(value) 61 62 if value is None: 63 return value 64 65 return str(self._parse_address(value)) 66 67 def get_db_prep_value(self, value, connection, prepared=False): 68 """Converts Python (str) to DB (varbinary).""" 69 if value is None: 70 return value 71 72 # Parse the address and then pack it to binary. 73 value = self._parse_address(value).packed 74 75 # Use defaults for PostgreSQL 76 if connection.vendor == "postgresql": 77 return super().get_db_prep_value(value, connection, prepared) 78 79 return value 80 81 def form_class(self): 82 return IPNetworkFormField 83 84 def formfield(self, **kwargs): 85 defaults = {"form_class": self.form_class()} 86 defaults.update(kwargs) 87 return super().formfield(**defaults) 88 [end of nautobot/ipam/fields.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nautobot/ipam/fields.py b/nautobot/ipam/fields.py --- a/nautobot/ipam/fields.py +++ b/nautobot/ipam/fields.py @@ -39,12 +39,17 @@ Parse `str`, `bytes` (varbinary), or `netaddr.IPAddress to `netaddr.IPAddress`. """ try: - value = int.from_bytes(value, "big") + int_value = int.from_bytes(value, "big") + # Distinguish between + # \x00\x00\x00\x01 (IPv4 0.0.0.1) and + # \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01 (IPv6 ::1), among other cases + version = 4 if len(value) == 4 else 6 + value = int_value except TypeError: - pass # It's a string + version = None # It's a string, IP version should be self-evident try: - return netaddr.IPAddress(value) + return netaddr.IPAddress(value, version=version) except netaddr.AddrFormatError: raise ValidationError("Invalid IP address format: {}".format(value)) except (TypeError, ValueError) as e:
{"golden_diff": "diff --git a/nautobot/ipam/fields.py b/nautobot/ipam/fields.py\n--- a/nautobot/ipam/fields.py\n+++ b/nautobot/ipam/fields.py\n@@ -39,12 +39,17 @@\n Parse `str`, `bytes` (varbinary), or `netaddr.IPAddress to `netaddr.IPAddress`.\n \"\"\"\n try:\n- value = int.from_bytes(value, \"big\")\n+ int_value = int.from_bytes(value, \"big\")\n+ # Distinguish between\n+ # \\x00\\x00\\x00\\x01 (IPv4 0.0.0.1) and\n+ # \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01 (IPv6 ::1), among other cases\n+ version = 4 if len(value) == 4 else 6\n+ value = int_value\n except TypeError:\n- pass # It's a string\n+ version = None # It's a string, IP version should be self-evident\n \n try:\n- return netaddr.IPAddress(value)\n+ return netaddr.IPAddress(value, version=version)\n except netaddr.AddrFormatError:\n raise ValidationError(\"Invalid IP address format: {}\".format(value))\n except (TypeError, ValueError) as e:\n", "issue": "`::1/128` is not a valid prefix\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: 3.6\r\n* Nautobot version: 1.1.3\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n\r\nWhen trying to create the prefix `::1/128` I get the following error:\r\n\r\n```no-highlight\r\n<class 'netaddr.core.AddrFormatError'>\r\n\r\ninvalid IPNetwork 0.0.0.1/128\r\n```\r\n\r\nBoth Python netaddr and ipaddress modules see this as a valid IPNetwork. \r\n\r\n### Steps to Reproduce\r\n1. Create a prefix or aggregate using the prefix `::1/128`\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\nPrefix created\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\n\r\n```\r\ninvalid IPNetwork 0.0.0.1/128\r\n```\n", "before_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils.datastructures import DictWrapper\nimport netaddr\n\nfrom .formfields import IPNetworkFormField\n\n\nclass VarbinaryIPField(models.BinaryField):\n \"\"\"\n IP network address\n \"\"\"\n\n description = \"IP network address\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def db_type(self, connection):\n \"\"\"Returns the correct field type for a given database vendor.\"\"\"\n\n # Use 'bytea' type for PostgreSQL.\n if connection.vendor == \"postgresql\":\n return \"bytea\"\n\n # Or 'varbinary' for everyone else.\n return \"varbinary(16)\"\n\n def value_to_string(self, obj):\n \"\"\"IPField is serialized as str(IPAddress())\"\"\"\n value = self.value_from_object(obj)\n if not value:\n return value\n\n return str(self._parse_address(value))\n\n def _parse_address(self, value):\n \"\"\"\n Parse `str`, `bytes` (varbinary), or `netaddr.IPAddress to `netaddr.IPAddress`.\n \"\"\"\n try:\n value = int.from_bytes(value, \"big\")\n except TypeError:\n pass # It's a string\n\n try:\n return netaddr.IPAddress(value)\n except netaddr.AddrFormatError:\n raise ValidationError(\"Invalid IP address format: {}\".format(value))\n except (TypeError, ValueError) as e:\n raise ValidationError(e)\n\n def from_db_value(self, value, expression, connection):\n \"\"\"Converts DB (varbinary) to Python (str).\"\"\"\n return self.to_python(value)\n\n def to_python(self, value):\n \"\"\"Converts `value` to Python (str).\"\"\"\n if isinstance(value, netaddr.IPAddress):\n return str(value)\n\n if value is None:\n return value\n\n return str(self._parse_address(value))\n\n def get_db_prep_value(self, value, connection, prepared=False):\n \"\"\"Converts Python (str) to DB (varbinary).\"\"\"\n if value is None:\n return value\n\n # Parse the address and then pack it to binary.\n value = self._parse_address(value).packed\n\n # Use defaults for PostgreSQL\n if connection.vendor == \"postgresql\":\n return super().get_db_prep_value(value, connection, prepared)\n\n return value\n\n def form_class(self):\n return IPNetworkFormField\n\n def formfield(self, **kwargs):\n defaults = {\"form_class\": self.form_class()}\n defaults.update(kwargs)\n return super().formfield(**defaults)\n", "path": "nautobot/ipam/fields.py"}]}
1,662
328
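The crux of the nautobot fix above is that once the packed bytes are collapsed to an integer, `::1` (fifteen zero bytes and a one) becomes indistinguishable from IPv4 `0.0.0.1`, so the byte length must pin the IP version. The same idea, shown with the stdlib `ipaddress` module instead of netaddr purely for illustration:

```python
import ipaddress

def parse_packed_ip(value: bytes):
    """Packed 4-byte values are IPv4; 16-byte values are IPv6.

    The bare integer 1 is ambiguous between the two families, so the
    byte length of the varbinary value carries the version information.
    """
    int_value = int.from_bytes(value, "big")
    if len(value) == 4:
        return ipaddress.IPv4Address(int_value)
    return ipaddress.IPv6Address(int_value)

assert str(parse_packed_ip(b"\x00" * 15 + b"\x01")) == "::1"
assert str(parse_packed_ip(b"\x00\x00\x00\x01")) == "0.0.0.1"
```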
gh_patches_debug_5419
rasdani/github-patches
git_diff
scrapy__scrapy-475
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ability to not send specific headers in HTTP requests
Some web servers behave differently when they are receive or don't receive specific headers.

For example FeedBurner (http://feeds.feedburner.com/someblog) sends out XML RSS feeds **only is you do not set the "Referer" header.**

The idea would be to use the `headers` dict with some keys with a `None` value, and skip these headers when sending the HTTP request.

Currently, for the "Referer" example:
- `headers={"Referer": None}` sends "Referer: None"
- `headers={"Referer": ""}` sends "Referer: " (which works for the FeedBurner case, but is not satisfactory)
- disable `RefererMiddleware` but that feels a bit heavy

(for this FeedBurner thing, apparently adding `?format=xml` also does the trick)

</issue>
<code>
[start of scrapy/http/headers.py]
1 from w3lib.http import headers_dict_to_raw
2 from scrapy.utils.datatypes import CaselessDict
3 
4 
5 class Headers(CaselessDict):
6     """Case insensitive http headers dictionary"""
7 
8     def __init__(self, seq=None, encoding='utf-8'):
9         self.encoding = encoding
10         super(Headers, self).__init__(seq)
11 
12     def normkey(self, key):
13         """Headers must not be unicode"""
14         if isinstance(key, unicode):
15             return key.title().encode(self.encoding)
16         return key.title()
17 
18     def normvalue(self, value):
19         """Headers must not be unicode"""
20         if not hasattr(value, '__iter__'):
21             value = [value]
22         return [x.encode(self.encoding) if isinstance(x, unicode) else x \
23             for x in value]
24 
25     def __getitem__(self, key):
26         try:
27             return super(Headers, self).__getitem__(key)[-1]
28         except IndexError:
29             return None
30 
31     def get(self, key, def_val=None):
32         try:
33             return super(Headers, self).get(key, def_val)[-1]
34         except IndexError:
35             return None
36 
37     def getlist(self, key, def_val=None):
38         try:
39             return super(Headers, self).__getitem__(key)
40         except KeyError:
41             if def_val is not None:
42                 return self.normvalue(def_val)
43             return []
44 
45     def setlist(self, key, list_):
46         self[key] = list_
47 
48     def setlistdefault(self, key, default_list=()):
49         return self.setdefault(key, default_list)
50 
51     def appendlist(self, key, value):
52         lst = self.getlist(key)
53         lst.extend(self.normvalue(value))
54         self[key] = lst
55 
56     def items(self):
57         return list(self.iteritems())
58 
59     def iteritems(self):
60         return ((k, self.getlist(k)) for k in self.keys())
61 
62     def values(self):
63         return [self[k] for k in self.keys()]
64 
65     def to_string(self):
66         return headers_dict_to_raw(self)
67 
68     def __copy__(self):
69         return self.__class__(self)
70     copy = __copy__
71 
72 
[end of scrapy/http/headers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/scrapy/http/headers.py b/scrapy/http/headers.py --- a/scrapy/http/headers.py +++ b/scrapy/http/headers.py @@ -17,7 +17,9 @@ def normvalue(self, value): """Headers must not be unicode""" - if not hasattr(value, '__iter__'): + if value is None: + value = [] + elif not hasattr(value, '__iter__'): value = [value] return [x.encode(self.encoding) if isinstance(x, unicode) else x \ for x in value]
{"golden_diff": "diff --git a/scrapy/http/headers.py b/scrapy/http/headers.py\n--- a/scrapy/http/headers.py\n+++ b/scrapy/http/headers.py\n@@ -17,7 +17,9 @@\n \n def normvalue(self, value):\n \"\"\"Headers must not be unicode\"\"\"\n- if not hasattr(value, '__iter__'):\n+ if value is None:\n+ value = []\n+ elif not hasattr(value, '__iter__'):\n value = [value]\n return [x.encode(self.encoding) if isinstance(x, unicode) else x \\\n for x in value]\n", "issue": "Ability to not send specific headers in HTTP requests\nSome web servers behave differently when they are receive or don't receive specific headers.\n\nFor example FeedBurner (http://feeds.feedburner.com/someblog) sends out XML RSS feeds **only is you do not set the \"Referer\" header.**\n\nThe idea would be to use the `headers` dict with some keys with a `None` value, and skip these headers when sending the HTTP request.\n\nCurrently, for the \"Referer\" example:\n- `headers={\"Referer\": None}` sends \"Referer: None\"\n- `headers={\"Referer\": \"\"}` sends \"Referer: \" (which works for the FeedBurner case, but is not satisfactory)\n- disable `RefererMiddleware` but that feels a bit heavy\n\n(for this FeedBurner thing, apparently adding `?format=xml` also does the trick)\n\n", "before_files": [{"content": "from w3lib.http import headers_dict_to_raw\nfrom scrapy.utils.datatypes import CaselessDict\n\n\nclass Headers(CaselessDict):\n \"\"\"Case insensitive http headers dictionary\"\"\"\n\n def __init__(self, seq=None, encoding='utf-8'):\n self.encoding = encoding\n super(Headers, self).__init__(seq)\n\n def normkey(self, key):\n \"\"\"Headers must not be unicode\"\"\"\n if isinstance(key, unicode):\n return key.title().encode(self.encoding)\n return key.title()\n\n def normvalue(self, value):\n \"\"\"Headers must not be unicode\"\"\"\n if not hasattr(value, '__iter__'):\n value = [value]\n return [x.encode(self.encoding) if isinstance(x, unicode) else x \\\n for x in value]\n\n def __getitem__(self, key):\n try:\n return super(Headers, self).__getitem__(key)[-1]\n except IndexError:\n return None\n\n def get(self, key, def_val=None):\n try:\n return super(Headers, self).get(key, def_val)[-1]\n except IndexError:\n return None\n\n def getlist(self, key, def_val=None):\n try:\n return super(Headers, self).__getitem__(key)\n except KeyError:\n if def_val is not None:\n return self.normvalue(def_val)\n return []\n\n def setlist(self, key, list_):\n self[key] = list_\n\n def setlistdefault(self, key, default_list=()):\n return self.setdefault(key, default_list)\n\n def appendlist(self, key, value):\n lst = self.getlist(key)\n lst.extend(self.normvalue(value))\n self[key] = lst\n\n def items(self):\n return list(self.iteritems())\n\n def iteritems(self):\n return ((k, self.getlist(k)) for k in self.keys())\n\n def values(self):\n return [self[k] for k in self.keys()]\n\n def to_string(self):\n return headers_dict_to_raw(self)\n\n def __copy__(self):\n return self.__class__(self)\n copy = __copy__\n\n\n", "path": "scrapy/http/headers.py"}]}
1,316
129
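The scrapy change above makes a `None` header value mean "omit this header entirely" by normalizing it to an empty value list, so serialization emits nothing for that key. A Python 3 re-sketch of that normalization (the original is Python 2; note that py3 `str` is iterable, hence the extra guard):

```python
def normvalue(value, encoding="utf-8"):
    if value is None:
        return []  # no values -> the header is skipped when serialized
    if isinstance(value, (str, bytes)) or not hasattr(value, "__iter__"):
        value = [value]
    return [v.encode(encoding) if isinstance(v, str) else v for v in value]


headers = {"Referer": normvalue(None), "Accept": normvalue("text/html")}
raw = "\r\n".join(
    f"{key}: {val.decode()}" for key, vals in headers.items() for val in vals
)
assert raw == "Accept: text/html"  # Referer is absent, not "Referer: None"
```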
gh_patches_debug_38762
rasdani/github-patches
git_diff
nilearn__nilearn-1225
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
remove examples/03_connectivity/plot_power_connectome.py ?
- Signal extraction from spheres placed on Power coordinates is already done in `examples/03_connectivity/plot_seed_based_connectome.py`
- Sparse inverse covariance estimation is already explained in `examples/03_connectivity/plot_inverse_covariance_connectome.py` for MSDL atlas. For me, it doesn't really make a difference estimating it on timeseries extracted from probabilistic maps or spheric ROIs.

</issue>
<code>
[start of examples/03_connectivity/plot_power_connectome.py]
1 """
2 Extracting signals and plotting a connectome for the Power-264 seed-region atlas
3 ================================================================================
4 
5 This example shows how to extract signals from spherical seed-regions based
6 on the Power-264 atlas (Power, 2011) and estimating a connectome using sparse
7 inverse covariance.
8 
9 Power, Jonathan D., et al. "Functional network organization of the
10 human brain." Neuron 72.4 (2011): 665-678.
11 
12 """
13 
14 import numpy as np
15 import matplotlib.pyplot as plt
16 from nilearn import datasets, connectome, plotting, input_data
17 
18 
19 ###############################################################################
20 # Atlas and dataset fetching
21 
22 # Fetch the coordinates of power atlas
23 power = datasets.fetch_coords_power_2011()
24 power_coords = np.vstack((
25     power.rois['x'],
26     power.rois['y'],
27     power.rois['z'],
28 )).T
29 
30 # Fetch the first subject of ADHD dataset
31 adhd = datasets.fetch_adhd(n_subjects=1)
32 
33 
34 ###############################################################################
35 # Masking: taking the signal in a sphere of radius 5mm around Power coords
36 
37 masker = input_data.NiftiSpheresMasker(seeds=power_coords,
38                                        smoothing_fwhm=4,
39                                        radius=5.,
40                                        standardize=True,
41                                        detrend=True,
42                                        low_pass=0.1,
43                                        high_pass=0.01,
44                                        t_r=2.5)
45 
46 timeseries = masker.fit_transform(adhd.func[0], confounds=adhd.confounds[0])
47 
48 ###############################################################################
49 # Extract and plot correlation matrix
50 
51 # calculate connectivity and plot Power-264 correlation matrix
52 connectivity = connectome.ConnectivityMeasure(kind='correlation')
53 corr_matrix = connectivity.fit_transform([timeseries])[0]
54 np.fill_diagonal(corr_matrix, 0)
55 plt.imshow(corr_matrix, vmin=-1., vmax=1., cmap='RdBu_r')
56 plt.colorbar()
57 plt.title('Power 264 Connectivity')
58 
59 # Plot the connectome
60 
61 plotting.plot_connectome(corr_matrix,
62                          power_coords,
63                          edge_threshold='99.8%',
64                          node_size=20)
65 
66 
67 ###############################################################################
68 # Extract and plot covariance and sparse covariance
69 
70 # Compute the sparse inverse covariance
71 from sklearn.covariance import GraphLassoCV
72 
73 estimator = GraphLassoCV()
74 estimator.fit(timeseries)
75 
76 # Display the covariance
77 plt.figure(figsize=(5, 5))
78 plt.imshow(estimator.covariance_, interpolation="nearest",
79            vmax=1, vmin=-1, cmap=plt.cm.RdBu_r)
80 plt.title('Covariance matrix')
81 
82 # display the corresponding graph
83 plotting.plot_connectome(estimator.covariance_,
84                          power_coords,
85                          title='Covariance connectome',
86                          edge_threshold='99.8%',
87                          node_size=20)
88 
89 # Display the sparse inverse covariance
90 plt.figure(figsize=(5, 5))
91 plt.imshow(estimator.precision_, interpolation="nearest",
92            vmax=1, vmin=-1, cmap=plt.cm.RdBu_r)
93 plt.title('Precision matrix')
94 
95 # And now display the corresponding graph
96 plotting.plot_connectome(estimator.precision_, power_coords,
97                          title='Precision connectome',
98                          edge_threshold="99.8%",
99                          node_size=20)
100 plotting.show()
[end of examples/03_connectivity/plot_power_connectome.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/examples/03_connectivity/plot_power_connectome.py b/examples/03_connectivity/plot_power_connectome.py deleted file mode 100644 --- a/examples/03_connectivity/plot_power_connectome.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -Extracting signals and plotting a connectome for the Power-264 seed-region atlas -================================================================================ - -This example shows how to extract signals from spherical seed-regions based -on the Power-264 atlas (Power, 2011) and estimating a connectome using sparse -inverse covariance. - -Power, Jonathan D., et al. "Functional network organization of the -human brain." Neuron 72.4 (2011): 665-678. - -""" - -import numpy as np -import matplotlib.pyplot as plt -from nilearn import datasets, connectome, plotting, input_data - - -############################################################################### -# Atlas and dataset fetching - -# Fetch the coordinates of power atlas -power = datasets.fetch_coords_power_2011() -power_coords = np.vstack(( - power.rois['x'], - power.rois['y'], - power.rois['z'], -)).T - -# Fetch the first subject of ADHD dataset -adhd = datasets.fetch_adhd(n_subjects=1) - - -############################################################################### -# Masking: taking the signal in a sphere of radius 5mm around Power coords - -masker = input_data.NiftiSpheresMasker(seeds=power_coords, - smoothing_fwhm=4, - radius=5., - standardize=True, - detrend=True, - low_pass=0.1, - high_pass=0.01, - t_r=2.5) - -timeseries = masker.fit_transform(adhd.func[0], confounds=adhd.confounds[0]) - -############################################################################### -# Extract and plot correlation matrix - -# calculate connectivity and plot Power-264 correlation matrix -connectivity = connectome.ConnectivityMeasure(kind='correlation') -corr_matrix = connectivity.fit_transform([timeseries])[0] -np.fill_diagonal(corr_matrix, 0) -plt.imshow(corr_matrix, vmin=-1., vmax=1., cmap='RdBu_r') -plt.colorbar() -plt.title('Power 264 Connectivity') - -# Plot the connectome - -plotting.plot_connectome(corr_matrix, - power_coords, - edge_threshold='99.8%', - node_size=20) - - -############################################################################### -# Extract and plot covariance and sparse covariance - -# Compute the sparse inverse covariance -from sklearn.covariance import GraphLassoCV - -estimator = GraphLassoCV() -estimator.fit(timeseries) - -# Display the covariance -plt.figure(figsize=(5, 5)) -plt.imshow(estimator.covariance_, interpolation="nearest", - vmax=1, vmin=-1, cmap=plt.cm.RdBu_r) -plt.title('Covariance matrix') - -# display the corresponding graph -plotting.plot_connectome(estimator.covariance_, - power_coords, - title='Covariance connectome', - edge_threshold='99.8%', - node_size=20) - -# Display the sparse inverse covariance -plt.figure(figsize=(5, 5)) -plt.imshow(estimator.precision_, interpolation="nearest", - vmax=1, vmin=-1, cmap=plt.cm.RdBu_r) -plt.title('Precision matrix') - -# And now display the corresponding graph -plotting.plot_connectome(estimator.precision_, power_coords, - title='Precision connectome', - edge_threshold="99.8%", - node_size=20) -plotting.show()
{"golden_diff": "diff --git a/examples/03_connectivity/plot_power_connectome.py b/examples/03_connectivity/plot_power_connectome.py\ndeleted file mode 100644\n--- a/examples/03_connectivity/plot_power_connectome.py\n+++ /dev/null\n@@ -1,100 +0,0 @@\n-\"\"\"\n-Extracting signals and plotting a connectome for the Power-264 seed-region atlas\n-================================================================================\n-\n-This example shows how to extract signals from spherical seed-regions based\n-on the Power-264 atlas (Power, 2011) and estimating a connectome using sparse\n-inverse covariance.\n-\n-Power, Jonathan D., et al. \"Functional network organization of the\n-human brain.\" Neuron 72.4 (2011): 665-678.\n-\n-\"\"\"\n-\n-import numpy as np\n-import matplotlib.pyplot as plt\n-from nilearn import datasets, connectome, plotting, input_data\n-\n-\n-###############################################################################\n-# Atlas and dataset fetching\n-\n-# Fetch the coordinates of power atlas\n-power = datasets.fetch_coords_power_2011()\n-power_coords = np.vstack((\n- power.rois['x'],\n- power.rois['y'],\n- power.rois['z'],\n-)).T\n-\n-# Fetch the first subject of ADHD dataset\n-adhd = datasets.fetch_adhd(n_subjects=1)\n-\n-\n-###############################################################################\n-# Masking: taking the signal in a sphere of radius 5mm around Power coords\n-\n-masker = input_data.NiftiSpheresMasker(seeds=power_coords,\n- smoothing_fwhm=4,\n- radius=5.,\n- standardize=True,\n- detrend=True,\n- low_pass=0.1,\n- high_pass=0.01,\n- t_r=2.5)\n-\n-timeseries = masker.fit_transform(adhd.func[0], confounds=adhd.confounds[0])\n-\n-###############################################################################\n-# Extract and plot correlation matrix\n-\n-# calculate connectivity and plot Power-264 correlation matrix\n-connectivity = connectome.ConnectivityMeasure(kind='correlation')\n-corr_matrix = connectivity.fit_transform([timeseries])[0]\n-np.fill_diagonal(corr_matrix, 0)\n-plt.imshow(corr_matrix, vmin=-1., vmax=1., cmap='RdBu_r')\n-plt.colorbar()\n-plt.title('Power 264 Connectivity')\n-\n-# Plot the connectome\n-\n-plotting.plot_connectome(corr_matrix,\n- power_coords,\n- edge_threshold='99.8%',\n- node_size=20)\n-\n-\n-###############################################################################\n-# Extract and plot covariance and sparse covariance\n-\n-# Compute the sparse inverse covariance\n-from sklearn.covariance import GraphLassoCV\n-\n-estimator = GraphLassoCV()\n-estimator.fit(timeseries)\n-\n-# Display the covariance\n-plt.figure(figsize=(5, 5))\n-plt.imshow(estimator.covariance_, interpolation=\"nearest\",\n- vmax=1, vmin=-1, cmap=plt.cm.RdBu_r)\n-plt.title('Covariance matrix')\n-\n-# display the corresponding graph\n-plotting.plot_connectome(estimator.covariance_,\n- power_coords,\n- title='Covariance connectome',\n- edge_threshold='99.8%',\n- node_size=20)\n-\n-# Display the sparse inverse covariance\n-plt.figure(figsize=(5, 5))\n-plt.imshow(estimator.precision_, interpolation=\"nearest\",\n- vmax=1, vmin=-1, cmap=plt.cm.RdBu_r)\n-plt.title('Precision matrix')\n-\n-# And now display the corresponding graph\n-plotting.plot_connectome(estimator.precision_, power_coords,\n- title='Precision connectome',\n- edge_threshold=\"99.8%\",\n- node_size=20)\n-plotting.show()\n", "issue": "remove examples/03_connectivity/plot_power_connectome.py ?\n- Signal extraction from spheres placed on Power coordinates is already done in 
`examples/03_connectivity/plot_seed_based_connectome.py`\n- Sparse inverse covariance estimation is already explained in `examples/03_connectivity/plot_inverse_covariance_connectome.py` for MSDL atlas. For me, it doesn't really make a difference estimating it on timeseries extracted from probabilistic maps or spheric ROIs.\n\n", "before_files": [{"content": "\"\"\"\nExtracting signals and plotting a connectome for the Power-264 seed-region atlas\n================================================================================\n\nThis example shows how to extract signals from spherical seed-regions based\non the Power-264 atlas (Power, 2011) and estimating a connectome using sparse\ninverse covariance.\n\nPower, Jonathan D., et al. \"Functional network organization of the\nhuman brain.\" Neuron 72.4 (2011): 665-678.\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom nilearn import datasets, connectome, plotting, input_data\n\n\n###############################################################################\n# Atlas and dataset fetching\n\n# Fetch the coordinates of power atlas\npower = datasets.fetch_coords_power_2011()\npower_coords = np.vstack((\n power.rois['x'],\n power.rois['y'],\n power.rois['z'],\n)).T\n\n# Fetch the first subject of ADHD dataset\nadhd = datasets.fetch_adhd(n_subjects=1)\n\n\n###############################################################################\n# Masking: taking the signal in a sphere of radius 5mm around Power coords\n\nmasker = input_data.NiftiSpheresMasker(seeds=power_coords,\n smoothing_fwhm=4,\n radius=5.,\n standardize=True,\n detrend=True,\n low_pass=0.1,\n high_pass=0.01,\n t_r=2.5)\n\ntimeseries = masker.fit_transform(adhd.func[0], confounds=adhd.confounds[0])\n\n###############################################################################\n# Extract and plot correlation matrix\n\n# calculate connectivity and plot Power-264 correlation matrix\nconnectivity = connectome.ConnectivityMeasure(kind='correlation')\ncorr_matrix = connectivity.fit_transform([timeseries])[0]\nnp.fill_diagonal(corr_matrix, 0)\nplt.imshow(corr_matrix, vmin=-1., vmax=1., cmap='RdBu_r')\nplt.colorbar()\nplt.title('Power 264 Connectivity')\n\n# Plot the connectome\n\nplotting.plot_connectome(corr_matrix,\n power_coords,\n edge_threshold='99.8%',\n node_size=20)\n\n\n###############################################################################\n# Extract and plot covariance and sparse covariance\n\n# Compute the sparse inverse covariance\nfrom sklearn.covariance import GraphLassoCV\n\nestimator = GraphLassoCV()\nestimator.fit(timeseries)\n\n# Display the covariance\nplt.figure(figsize=(5, 5))\nplt.imshow(estimator.covariance_, interpolation=\"nearest\",\n vmax=1, vmin=-1, cmap=plt.cm.RdBu_r)\nplt.title('Covariance matrix')\n\n# display the corresponding graph\nplotting.plot_connectome(estimator.covariance_,\n power_coords,\n title='Covariance connectome',\n edge_threshold='99.8%',\n node_size=20)\n\n# Display the sparse inverse covariance\nplt.figure(figsize=(5, 5))\nplt.imshow(estimator.precision_, interpolation=\"nearest\",\n vmax=1, vmin=-1, cmap=plt.cm.RdBu_r)\nplt.title('Precision matrix')\n\n# And now display the corresponding graph\nplotting.plot_connectome(estimator.precision_, power_coords,\n title='Precision connectome',\n edge_threshold=\"99.8%\",\n node_size=20)\nplotting.show()\n", "path": "examples/03_connectivity/plot_power_connectome.py"}]}
1,545
841
gh_patches_debug_30188
rasdani/github-patches
git_diff
internetarchive__openlibrary-8966
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support different seeds for random.hourly sort
These carousels are all sorted by random.hourly, but we want them to have a different random subset!

![image](https://github.com/internetarchive/openlibrary/assets/6251786/408dcd86-f96a-4534-bacf-46f437f9f444)

### Proposal & Constraints
Expand `random.hourly` sorting to support a custom seed like `random`

### Additional context
<!-- Add any other context or screenshots about the feature request here. -->

### Stakeholders
@RayBB
</issue>
<code>
[start of openlibrary/plugins/worksearch/schemes/__init__.py]
1 import logging
2 from collections.abc import Callable
3 
4 import luqum.tree
5 from luqum.exceptions import ParseError
6 from openlibrary.solr.query_utils import (
7     escape_unknown_fields,
8     fully_escape_query,
9     luqum_parser,
10 )
11 
12 logger = logging.getLogger("openlibrary.worksearch")
13 
14 
15 class SearchScheme:
16     # Set of queries that define the universe of this scheme
17     universe: list[str]
18     # All actual solr fields that can be in a user query
19     all_fields: set[str]
20     # These fields are fetched for facets and can also be url params
21     facet_fields: set[str]
22     # Mapping of user-only fields to solr fields
23     field_name_map: dict[str, str]
24     # Mapping of user sort to solr sort
25     sorts: dict[str, str | Callable[[], str]]
26     # Default
27     default_fetched_fields: set[str]
28     # Fields that should be rewritten
29     facet_rewrites: dict[tuple[str, str], str | Callable[[], str]]
30 
31     def is_search_field(self, field: str):
32         return field in self.all_fields or field in self.field_name_map
33 
34     def process_user_sort(self, user_sort: str) -> str:
35         """
36         Convert a user-provided sort to a solr sort
37 
38         >>> from openlibrary.plugins.worksearch.schemes.works import WorkSearchScheme
39         >>> scheme = WorkSearchScheme()
40         >>> scheme.process_user_sort('editions')
41         'edition_count desc'
42         >>> scheme.process_user_sort('editions, new')
43         'edition_count desc,first_publish_year desc'
44         >>> scheme.process_user_sort('random')
45         'random_1 asc'
46         >>> scheme.process_user_sort('random_custom_seed')
47         'random_custom_seed asc'
48         >>> scheme.process_user_sort('random_custom_seed desc')
49         'random_custom_seed desc'
50         >>> scheme.process_user_sort('random_custom_seed asc')
51         'random_custom_seed asc'
52         """
53 
54         def process_individual_sort(sort: str):
55             if sort.startswith('random_'):
56                 # Allow custom randoms; so anything random_* is allowed
57                 return sort if ' ' in sort else f'{sort} asc'
58             else:
59                 solr_sort = self.sorts[sort]
60                 return solr_sort() if callable(solr_sort) else solr_sort
61 
62         return ','.join(
63             process_individual_sort(s.strip()) for s in user_sort.split(',')
64         )
65 
66     def process_user_query(self, q_param: str) -> str:
67         if q_param == '*:*':
68             # This is a special solr syntax; don't process
69             return q_param
70 
71         try:
72             q_param = escape_unknown_fields(
73                 (
74                     # Solr 4+ has support for regexes (eg `key:/foo.*/`)! But for now,
75                     # let's not expose that and escape all '/'. Otherwise
76                     # `key:/works/OL1W` is interpreted as a regex.
77                     q_param.strip()
78                     .replace('/', '\\/')
79                     # Also escape unexposed lucene features
80                     .replace('?', '\\?')
81                     .replace('~', '\\~')
82                 ),
83                 self.is_search_field,
84                 lower=True,
85             )
86             q_tree = luqum_parser(q_param)
87         except ParseError:
88             # This isn't a syntactically valid lucene query
89             logger.warning("Invalid lucene query", exc_info=True)
90             # Escape everything we can
91             q_tree = luqum_parser(fully_escape_query(q_param))
92 
93         q_tree = self.transform_user_query(q_param, q_tree)
94         return str(q_tree)
95 
96     def transform_user_query(
97         self,
98         user_query: str,
99         q_tree: luqum.tree.Item,
100     ) -> luqum.tree.Item:
101         return q_tree
102 
103     def build_q_from_params(self, params: dict) -> str | None:
104         return None
105 
106     def q_to_solr_params(
107         self,
108         q: str,
109         solr_fields: set[str],
110         cur_solr_params: list[tuple[str, str]],
111     ) -> list[tuple[str, str]]:
112         return [('q', q)]
[end of openlibrary/plugins/worksearch/schemes/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/openlibrary/plugins/worksearch/schemes/__init__.py b/openlibrary/plugins/worksearch/schemes/__init__.py --- a/openlibrary/plugins/worksearch/schemes/__init__.py +++ b/openlibrary/plugins/worksearch/schemes/__init__.py @@ -44,17 +44,27 @@ >>> scheme.process_user_sort('random') 'random_1 asc' >>> scheme.process_user_sort('random_custom_seed') - 'random_custom_seed asc' + 'random_1_custom_seed asc' >>> scheme.process_user_sort('random_custom_seed desc') - 'random_custom_seed desc' + 'random_1_custom_seed desc' >>> scheme.process_user_sort('random_custom_seed asc') - 'random_custom_seed asc' + 'random_1_custom_seed asc' """ - def process_individual_sort(sort: str): - if sort.startswith('random_'): + def process_individual_sort(sort: str) -> str: + if sort.startswith(('random_', 'random.hourly_', 'random.daily_')): # Allow custom randoms; so anything random_* is allowed - return sort if ' ' in sort else f'{sort} asc' + # Also Allow custom time randoms to allow carousels with overlapping + # books to have a fresh ordering when on the same collection + sort_order: str | None = None + if ' ' in sort: + sort, sort_order = sort.split(' ', 1) + random_type, random_seed = sort.split('_', 1) + solr_sort = self.sorts[random_type] + solr_sort_str = solr_sort() if callable(solr_sort) else solr_sort + solr_sort_field, solr_sort_order = solr_sort_str.split(' ', 1) + sort_order = sort_order or solr_sort_order + return f'{solr_sort_field}_{random_seed} {sort_order}' else: solr_sort = self.sorts[sort] return solr_sort() if callable(solr_sort) else solr_sort
{"golden_diff": "diff --git a/openlibrary/plugins/worksearch/schemes/__init__.py b/openlibrary/plugins/worksearch/schemes/__init__.py\n--- a/openlibrary/plugins/worksearch/schemes/__init__.py\n+++ b/openlibrary/plugins/worksearch/schemes/__init__.py\n@@ -44,17 +44,27 @@\n >>> scheme.process_user_sort('random')\n 'random_1 asc'\n >>> scheme.process_user_sort('random_custom_seed')\n- 'random_custom_seed asc'\n+ 'random_1_custom_seed asc'\n >>> scheme.process_user_sort('random_custom_seed desc')\n- 'random_custom_seed desc'\n+ 'random_1_custom_seed desc'\n >>> scheme.process_user_sort('random_custom_seed asc')\n- 'random_custom_seed asc'\n+ 'random_1_custom_seed asc'\n \"\"\"\n \n- def process_individual_sort(sort: str):\n- if sort.startswith('random_'):\n+ def process_individual_sort(sort: str) -> str:\n+ if sort.startswith(('random_', 'random.hourly_', 'random.daily_')):\n # Allow custom randoms; so anything random_* is allowed\n- return sort if ' ' in sort else f'{sort} asc'\n+ # Also Allow custom time randoms to allow carousels with overlapping\n+ # books to have a fresh ordering when on the same collection\n+ sort_order: str | None = None\n+ if ' ' in sort:\n+ sort, sort_order = sort.split(' ', 1)\n+ random_type, random_seed = sort.split('_', 1)\n+ solr_sort = self.sorts[random_type]\n+ solr_sort_str = solr_sort() if callable(solr_sort) else solr_sort\n+ solr_sort_field, solr_sort_order = solr_sort_str.split(' ', 1)\n+ sort_order = sort_order or solr_sort_order\n+ return f'{solr_sort_field}_{random_seed} {sort_order}'\n else:\n solr_sort = self.sorts[sort]\n return solr_sort() if callable(solr_sort) else solr_sort\n", "issue": "Support different seeds for random.hourly sort\nThese carousels are all sorted by random.hourly, but we want them to have a different random subset!\r\n\r\n![image](https://github.com/internetarchive/openlibrary/assets/6251786/408dcd86-f96a-4534-bacf-46f437f9f444)\r\n\r\n\r\n### Proposal & Constraints\r\nExpand `random.hourly` sorting to support a custom seed like `random`\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\n\r\n### Stakeholders\r\n@RayBB \n", "before_files": [{"content": "import logging\nfrom collections.abc import Callable\n\nimport luqum.tree\nfrom luqum.exceptions import ParseError\nfrom openlibrary.solr.query_utils import (\n escape_unknown_fields,\n fully_escape_query,\n luqum_parser,\n)\n\nlogger = logging.getLogger(\"openlibrary.worksearch\")\n\n\nclass SearchScheme:\n # Set of queries that define the universe of this scheme\n universe: list[str]\n # All actual solr fields that can be in a user query\n all_fields: set[str]\n # These fields are fetched for facets and can also be url params\n facet_fields: set[str]\n # Mapping of user-only fields to solr fields\n field_name_map: dict[str, str]\n # Mapping of user sort to solr sort\n sorts: dict[str, str | Callable[[], str]]\n # Default\n default_fetched_fields: set[str]\n # Fields that should be rewritten\n facet_rewrites: dict[tuple[str, str], str | Callable[[], str]]\n\n def is_search_field(self, field: str):\n return field in self.all_fields or field in self.field_name_map\n\n def process_user_sort(self, user_sort: str) -> str:\n \"\"\"\n Convert a user-provided sort to a solr sort\n\n >>> from openlibrary.plugins.worksearch.schemes.works import WorkSearchScheme\n >>> scheme = WorkSearchScheme()\n >>> scheme.process_user_sort('editions')\n 'edition_count desc'\n >>> scheme.process_user_sort('editions, new')\n 'edition_count desc,first_publish_year desc'\n >>> scheme.process_user_sort('random')\n 'random_1 asc'\n >>> scheme.process_user_sort('random_custom_seed')\n 'random_custom_seed asc'\n >>> scheme.process_user_sort('random_custom_seed desc')\n 'random_custom_seed desc'\n >>> scheme.process_user_sort('random_custom_seed asc')\n 'random_custom_seed asc'\n \"\"\"\n\n def process_individual_sort(sort: str):\n if sort.startswith('random_'):\n # Allow custom randoms; so anything random_* is allowed\n return sort if ' ' in sort else f'{sort} asc'\n else:\n solr_sort = self.sorts[sort]\n return solr_sort() if callable(solr_sort) else solr_sort\n\n return ','.join(\n process_individual_sort(s.strip()) for s in user_sort.split(',')\n )\n\n def process_user_query(self, q_param: str) -> str:\n if q_param == '*:*':\n # This is a special solr syntax; don't process\n return q_param\n\n try:\n q_param = escape_unknown_fields(\n (\n # Solr 4+ has support for regexes (eg `key:/foo.*/`)! But for now,\n # let's not expose that and escape all '/'. Otherwise\n # `key:/works/OL1W` is interpreted as a regex.\n q_param.strip()\n .replace('/', '\\\\/')\n # Also escape unexposed lucene features\n .replace('?', '\\\\?')\n .replace('~', '\\\\~')\n ),\n self.is_search_field,\n lower=True,\n )\n q_tree = luqum_parser(q_param)\n except ParseError:\n # This isn't a syntactically valid lucene query\n logger.warning(\"Invalid lucene query\", exc_info=True)\n # Escape everything we can\n q_tree = luqum_parser(fully_escape_query(q_param))\n\n q_tree = self.transform_user_query(q_param, q_tree)\n return str(q_tree)\n\n def transform_user_query(\n self,\n user_query: str,\n q_tree: luqum.tree.Item,\n ) -> luqum.tree.Item:\n return q_tree\n\n def build_q_from_params(self, params: dict) -> str | None:\n return None\n\n def q_to_solr_params(\n self,\n q: str,\n solr_fields: set[str],\n cur_solr_params: list[tuple[str, str]],\n ) -> list[tuple[str, str]]:\n return [('q', q)]\n", "path": "openlibrary/plugins/worksearch/schemes/__init__.py"}]}
1,798
455
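The openlibrary diff above splices a caller-supplied seed into the solr field name of a time-rotating random sort, so two carousels both sorted `random.hourly` can shuffle differently. A reduced sketch of that parsing (the hourly seed function here is an assumed stand-in, not openlibrary's actual implementation):

```python
import time

SORTS = {
    "random": "random_1 asc",
    # assumed stand-in: rotate the solr random field once per hour
    "random.hourly": lambda: f"random_{int(time.time() // 3600)} asc",
}


def process_individual_sort(sort: str) -> str:
    """Map e.g. 'random.hourly_shelfA desc' -> 'random_<hour>_shelfA desc'."""
    order = None
    if " " in sort:
        sort, order = sort.split(" ", 1)
    if sort.startswith(("random_", "random.hourly_")):
        base, seed = sort.split("_", 1)          # ('random.hourly', 'shelfA')
        solr = SORTS[base]
        solr = solr() if callable(solr) else solr
        field, default_order = solr.split(" ", 1)
        return f"{field}_{seed} {order or default_order}"
    solr = SORTS[sort]
    return solr() if callable(solr) else solr


print(process_individual_sort("random.hourly_shelfA"))  # random_<hr>_shelfA asc
print(process_individual_sort("random_custom desc"))    # random_1_custom desc
```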
gh_patches_debug_11300
rasdani/github-patches
git_diff
pypa__setuptools-1986
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecated distutils bdist_wininst is going to be removed
I proposed to remove the bdist_winstinst command from distutils in Python 3.9:

* https://bugs.python.org/issue39541
* https://discuss.python.org/t/remove-distutils-bdist-wininst-command/3115
* https://github.com/python/cpython/pull/18329

Problem: setuptools always uses it on all platforms at: setuptools/command/install_scripts.py, line 35:

```
    bw_cmd = self.get_finalized_command("bdist_wininst")
```

See #857 which is a closed duplicated which proposed different options to fix the issue.
</issue>
<code>
[start of setuptools/command/install_scripts.py]
1 from distutils import log
2 import distutils.command.install_scripts as orig
3 import os
4 import sys
5 
6 from pkg_resources import Distribution, PathMetadata, ensure_directory
7 
8 
9 class install_scripts(orig.install_scripts):
10     """Do normal script install, plus any egg_info wrapper scripts"""
11 
12     def initialize_options(self):
13         orig.install_scripts.initialize_options(self)
14         self.no_ep = False
15 
16     def run(self):
17         import setuptools.command.easy_install as ei
18 
19         self.run_command("egg_info")
20         if self.distribution.scripts:
21             orig.install_scripts.run(self)  # run first to set up self.outfiles
22         else:
23             self.outfiles = []
24         if self.no_ep:
25             # don't install entry point scripts into .egg file!
26             return
27 
28         ei_cmd = self.get_finalized_command("egg_info")
29         dist = Distribution(
30             ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
31             ei_cmd.egg_name, ei_cmd.egg_version,
32         )
33         bs_cmd = self.get_finalized_command('build_scripts')
34         exec_param = getattr(bs_cmd, 'executable', None)
35         bw_cmd = self.get_finalized_command("bdist_wininst")
36         is_wininst = getattr(bw_cmd, '_is_running', False)
37         writer = ei.ScriptWriter
38         if is_wininst:
39             exec_param = "python.exe"
40             writer = ei.WindowsScriptWriter
41         if exec_param == sys.executable:
42             # In case the path to the Python executable contains a space, wrap
43             # it so it's not split up.
44             exec_param = [exec_param]
45         # resolve the writer to the environment
46         writer = writer.best()
47         cmd = writer.command_spec_class.best().from_param(exec_param)
48         for args in writer.get_args(dist, cmd.as_header()):
49             self.write_script(*args)
50 
51     def write_script(self, script_name, contents, mode="t", *ignored):
52         """Write an executable file to the scripts directory"""
53         from setuptools.command.easy_install import chmod, current_umask
54 
55         log.info("Installing %s script to %s", script_name, self.install_dir)
56         target = os.path.join(self.install_dir, script_name)
57         self.outfiles.append(target)
58 
59         mask = current_umask()
60         if not self.dry_run:
61             ensure_directory(target)
62             f = open(target, "w" + mode)
63             f.write(contents)
64             f.close()
65             chmod(target, 0o777 - mask)
[end of setuptools/command/install_scripts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/setuptools/command/install_scripts.py b/setuptools/command/install_scripts.py --- a/setuptools/command/install_scripts.py +++ b/setuptools/command/install_scripts.py @@ -32,8 +32,11 @@ ) bs_cmd = self.get_finalized_command('build_scripts') exec_param = getattr(bs_cmd, 'executable', None) - bw_cmd = self.get_finalized_command("bdist_wininst") - is_wininst = getattr(bw_cmd, '_is_running', False) + try: + bw_cmd = self.get_finalized_command("bdist_wininst") + is_wininst = getattr(bw_cmd, '_is_running', False) + except ImportError: + is_wininst = False writer = ei.ScriptWriter if is_wininst: exec_param = "python.exe"
{"golden_diff": "diff --git a/setuptools/command/install_scripts.py b/setuptools/command/install_scripts.py\n--- a/setuptools/command/install_scripts.py\n+++ b/setuptools/command/install_scripts.py\n@@ -32,8 +32,11 @@\n )\n bs_cmd = self.get_finalized_command('build_scripts')\n exec_param = getattr(bs_cmd, 'executable', None)\n- bw_cmd = self.get_finalized_command(\"bdist_wininst\")\n- is_wininst = getattr(bw_cmd, '_is_running', False)\n+ try:\n+ bw_cmd = self.get_finalized_command(\"bdist_wininst\")\n+ is_wininst = getattr(bw_cmd, '_is_running', False)\n+ except ImportError:\n+ is_wininst = False\n writer = ei.ScriptWriter\n if is_wininst:\n exec_param = \"python.exe\"\n", "issue": "Deprecated distutils bdist_wininst is going to be removed\nI proposed to remove the bdist_winstinst command from distutils in Python 3.9:\r\n\r\n* https://bugs.python.org/issue39541\r\n* https://discuss.python.org/t/remove-distutils-bdist-wininst-command/3115\r\n* https://github.com/python/cpython/pull/18329\r\n\r\nProblem: setuptools always uses it on all platforms at: setuptools/command/install_scripts.py, line 35:\r\n\r\n```\r\n bw_cmd = self.get_finalized_command(\"bdist_wininst\")\r\n```\r\n\r\nSee #857 which is a closed duplicated which proposed different options to fix the issue.\n", "before_files": [{"content": "from distutils import log\nimport distutils.command.install_scripts as orig\nimport os\nimport sys\n\nfrom pkg_resources import Distribution, PathMetadata, ensure_directory\n\n\nclass install_scripts(orig.install_scripts):\n \"\"\"Do normal script install, plus any egg_info wrapper scripts\"\"\"\n\n def initialize_options(self):\n orig.install_scripts.initialize_options(self)\n self.no_ep = False\n\n def run(self):\n import setuptools.command.easy_install as ei\n\n self.run_command(\"egg_info\")\n if self.distribution.scripts:\n orig.install_scripts.run(self) # run first to set up self.outfiles\n else:\n self.outfiles = []\n if self.no_ep:\n # don't install entry point scripts into .egg file!\n return\n\n ei_cmd = self.get_finalized_command(\"egg_info\")\n dist = Distribution(\n ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),\n ei_cmd.egg_name, ei_cmd.egg_version,\n )\n bs_cmd = self.get_finalized_command('build_scripts')\n exec_param = getattr(bs_cmd, 'executable', None)\n bw_cmd = self.get_finalized_command(\"bdist_wininst\")\n is_wininst = getattr(bw_cmd, '_is_running', False)\n writer = ei.ScriptWriter\n if is_wininst:\n exec_param = \"python.exe\"\n writer = ei.WindowsScriptWriter\n if exec_param == sys.executable:\n # In case the path to the Python executable contains a space, wrap\n # it so it's not split up.\n exec_param = [exec_param]\n # resolve the writer to the environment\n writer = writer.best()\n cmd = writer.command_spec_class.best().from_param(exec_param)\n for args in writer.get_args(dist, cmd.as_header()):\n self.write_script(*args)\n\n def write_script(self, script_name, contents, mode=\"t\", *ignored):\n \"\"\"Write an executable file to the scripts directory\"\"\"\n from setuptools.command.easy_install import chmod, current_umask\n\n log.info(\"Installing %s script to %s\", script_name, self.install_dir)\n target = os.path.join(self.install_dir, script_name)\n self.outfiles.append(target)\n\n mask = current_umask()\n if not self.dry_run:\n ensure_directory(target)\n f = open(target, \"w\" + mode)\n f.write(contents)\n f.close()\n chmod(target, 0o777 - mask)\n", "path": "setuptools/command/install_scripts.py"}]}
1,338
182
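The setuptools record above fixes the crash by probing for the dying command instead of assuming it exists: `bdist_wininst` was deprecated in Python 3.8 and later removed from distutils outright, so importing it must be allowed to fail. A condensed sketch of the guard the golden diff adds — `running_under_wininst` and its `cmd` parameter are invented names for illustration, not setuptools API:

```python
# Minimal sketch of the defensive pattern from the golden diff above.
def running_under_wininst(cmd) -> bool:
    try:
        bw_cmd = cmd.get_finalized_command("bdist_wininst")
    except ImportError:
        # distutils no longer ships the command on modern Pythons,
        # so a wininst build cannot be in progress.
        return False
    return getattr(bw_cmd, "_is_running", False)
```

The design choice is graceful degradation: rather than gating on the Python version, the code treats a missing command module as proof that no wininst build is running.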
gh_patches_debug_39724
rasdani/github-patches
git_diff
ephios-dev__ephios-178
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Event creation mails do not include event description </issue> <code> [start of ephios/event_management/mail.py] 1 from django.core import mail 2 from django.core.mail import EmailMultiAlternatives 3 from django.template.loader import render_to_string 4 from django.utils.translation import gettext as _ 5 from guardian.shortcuts import get_users_with_perms 6 7 from ephios.event_management.models import AbstractParticipation 8 from ephios.extra.permissions import get_groups_with_perms 9 from ephios.settings import SITE_URL 10 from ephios.user_management.models import UserProfile 11 12 13 def new_event(event): 14 messages = [] 15 users = UserProfile.objects.filter( 16 groups__in=get_groups_with_perms(event, only_with_perms_in=["view_event"]), is_active=True 17 ).distinct() 18 responsible_users = get_users_with_perms(event, only_with_perms_in=["change_event"]).distinct() 19 responsible_persons_mails = list(responsible_users.values_list("email", flat=True)) 20 21 subject = _("New {type}: {title}").format(type=event.type, title=event.title) 22 text_content = _( 23 "A new {type} ({title}) has been added. \n You can view it here: {link}" 24 ).format(type=event.type, title=event.title, link=event.get_absolute_url()) 25 html_content = render_to_string( 26 "event_management/mails/new_event.html", {"event": event, "site_url": SITE_URL} 27 ) 28 29 for user in users: 30 message = EmailMultiAlternatives( 31 to=[user.email], subject=subject, body=text_content, reply_to=responsible_persons_mails 32 ) 33 message.attach_alternative(html_content, "text/html") 34 messages.append(message) 35 mail.get_connection().send_messages(messages) 36 37 38 def participation_state_changed(participation: AbstractParticipation): 39 if participation.state != AbstractParticipation.States.USER_DECLINED: 40 messages = [] 41 42 # send mail to the participant whose participation has been changed 43 if participation.participant.email is not None: 44 text_content = _( 45 "The status for your participation for {shift} has changed. It is now {status}." 46 ).format(shift=participation.shift, status=participation.get_state_display()) 47 html_content = render_to_string("email_base.html", {"message_text": text_content}) 48 message = EmailMultiAlternatives( 49 to=[participation.participant.email], 50 subject=_("Your participation state changed"), 51 body=text_content, 52 ) 53 message.attach_alternative(html_content, "text/html") 54 messages.append(message) 55 56 # send mail to responsible users 57 responsible_users = get_users_with_perms( 58 participation.shift.event, only_with_perms_in=["change_event"] 59 ).distinct() 60 subject = _("Participation was changed for your event") 61 text_content = _( 62 "The participation of {participant} for {shift} was changed. 
The status is now {status}" 63 ).format( 64 participant=participation.participant, 65 shift=participation.shift, 66 status=participation.get_state_display(), 67 ) 68 html_content = render_to_string("email_base.html", {"message_text": text_content}) 69 for user in responsible_users: 70 message = EmailMultiAlternatives(to=[user.email], subject=subject, body=text_content) 71 message.attach_alternative(html_content, "text/html") 72 messages.append(message) 73 74 mail.get_connection().send_messages(messages) 75 [end of ephios/event_management/mail.py] [start of ephios/user_management/mail.py] 1 from django.contrib.auth.tokens import default_token_generator 2 from django.core.mail import EmailMultiAlternatives 3 from django.template.loader import render_to_string 4 from django.urls import reverse 5 from django.utils.encoding import force_bytes 6 from django.utils.http import urlsafe_base64_encode 7 from django.utils.translation import gettext as _ 8 9 from ephios.settings import SITE_URL 10 11 12 def send_account_creation_info(userprofile): 13 subject = _("Welcome to ephios!") 14 uid = urlsafe_base64_encode(force_bytes(userprofile.id)) 15 token = default_token_generator.make_token(userprofile) 16 reset_link = reverse("password_reset_confirm", kwargs={"uidb64": uid, "token": token}) 17 text_content = _( 18 "You're receiving this email because a new account has been created for you at ephios.\n" 19 "Please go to the following page and choose a password: {url}{reset_link}\n" 20 "Your username is your email address: {email}\n" 21 ).format(url=SITE_URL, reset_link=reset_link, email=userprofile.email) 22 23 html_content = render_to_string( 24 "user_management/new_account_email.html", 25 {"uid": uid, "token": token, "site_url": SITE_URL, "email": userprofile.email}, 26 ) 27 message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content) 28 message.attach_alternative(html_content, "text/html") 29 message.send() 30 31 32 def send_account_update_info(userprofile): 33 subject = _("ephios account updated") 34 url = reverse("user_management:profile") 35 text_content = _( 36 "You're receiving this email because your account at ephios has been updated.\n" 37 "You can see the changes in your profile: {site_url}{url}\n" 38 "Your username is your email address: {email}\n" 39 ).format(site_url=SITE_URL, url=url, email=userprofile.email) 40 41 html_content = render_to_string( 42 "user_management/account_updated_email.html", 43 {"site_url": SITE_URL, "url": url, "email": userprofile.email}, 44 ) 45 message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content) 46 message.attach_alternative(html_content, "text/html") 47 message.send() 48 [end of ephios/user_management/mail.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ephios/event_management/mail.py b/ephios/event_management/mail.py --- a/ephios/event_management/mail.py +++ b/ephios/event_management/mail.py @@ -1,3 +1,5 @@ +from urllib.parse import urljoin + from django.core import mail from django.core.mail import EmailMultiAlternatives from django.template.loader import render_to_string @@ -20,8 +22,16 @@ subject = _("New {type}: {title}").format(type=event.type, title=event.title) text_content = _( - "A new {type} ({title}) has been added. \n You can view it here: {link}" - ).format(type=event.type, title=event.title, link=event.get_absolute_url()) + "A new {type} ({title}, {location}) has been added.\n" + "Further information: {description}\n" + "You can view the event here: {url}" + ).format( + type=event.type, + title=event.title, + location=event.location, + description=event.description, + url=urljoin(SITE_URL, event.get_absolute_url()), + ) html_content = render_to_string( "event_management/mails/new_event.html", {"event": event, "site_url": SITE_URL} ) diff --git a/ephios/user_management/mail.py b/ephios/user_management/mail.py --- a/ephios/user_management/mail.py +++ b/ephios/user_management/mail.py @@ -1,3 +1,5 @@ +from urllib.parse import urljoin + from django.contrib.auth.tokens import default_token_generator from django.core.mail import EmailMultiAlternatives from django.template.loader import render_to_string @@ -16,9 +18,9 @@ reset_link = reverse("password_reset_confirm", kwargs={"uidb64": uid, "token": token}) text_content = _( "You're receiving this email because a new account has been created for you at ephios.\n" - "Please go to the following page and choose a password: {url}{reset_link}\n" + "Please go to the following page and choose a password: {url}\n" "Your username is your email address: {email}\n" - ).format(url=SITE_URL, reset_link=reset_link, email=userprofile.email) + ).format(url=urljoin(SITE_URL, reset_link), email=userprofile.email) html_content = render_to_string( "user_management/new_account_email.html", @@ -34,9 +36,9 @@ url = reverse("user_management:profile") text_content = _( "You're receiving this email because your account at ephios has been updated.\n" - "You can see the changes in your profile: {site_url}{url}\n" + "You can see the changes in your profile: {url}\n" "Your username is your email address: {email}\n" - ).format(site_url=SITE_URL, url=url, email=userprofile.email) + ).format(url=urljoin(SITE_URL, url), email=userprofile.email) html_content = render_to_string( "user_management/account_updated_email.html",
{"golden_diff": "diff --git a/ephios/event_management/mail.py b/ephios/event_management/mail.py\n--- a/ephios/event_management/mail.py\n+++ b/ephios/event_management/mail.py\n@@ -1,3 +1,5 @@\n+from urllib.parse import urljoin\n+\n from django.core import mail\n from django.core.mail import EmailMultiAlternatives\n from django.template.loader import render_to_string\n@@ -20,8 +22,16 @@\n \n subject = _(\"New {type}: {title}\").format(type=event.type, title=event.title)\n text_content = _(\n- \"A new {type} ({title}) has been added. \\n You can view it here: {link}\"\n- ).format(type=event.type, title=event.title, link=event.get_absolute_url())\n+ \"A new {type} ({title}, {location}) has been added.\\n\"\n+ \"Further information: {description}\\n\"\n+ \"You can view the event here: {url}\"\n+ ).format(\n+ type=event.type,\n+ title=event.title,\n+ location=event.location,\n+ description=event.description,\n+ url=urljoin(SITE_URL, event.get_absolute_url()),\n+ )\n html_content = render_to_string(\n \"event_management/mails/new_event.html\", {\"event\": event, \"site_url\": SITE_URL}\n )\ndiff --git a/ephios/user_management/mail.py b/ephios/user_management/mail.py\n--- a/ephios/user_management/mail.py\n+++ b/ephios/user_management/mail.py\n@@ -1,3 +1,5 @@\n+from urllib.parse import urljoin\n+\n from django.contrib.auth.tokens import default_token_generator\n from django.core.mail import EmailMultiAlternatives\n from django.template.loader import render_to_string\n@@ -16,9 +18,9 @@\n reset_link = reverse(\"password_reset_confirm\", kwargs={\"uidb64\": uid, \"token\": token})\n text_content = _(\n \"You're receiving this email because a new account has been created for you at ephios.\\n\"\n- \"Please go to the following page and choose a password: {url}{reset_link}\\n\"\n+ \"Please go to the following page and choose a password: {url}\\n\"\n \"Your username is your email address: {email}\\n\"\n- ).format(url=SITE_URL, reset_link=reset_link, email=userprofile.email)\n+ ).format(url=urljoin(SITE_URL, reset_link), email=userprofile.email)\n \n html_content = render_to_string(\n \"user_management/new_account_email.html\",\n@@ -34,9 +36,9 @@\n url = reverse(\"user_management:profile\")\n text_content = _(\n \"You're receiving this email because your account at ephios has been updated.\\n\"\n- \"You can see the changes in your profile: {site_url}{url}\\n\"\n+ \"You can see the changes in your profile: {url}\\n\"\n \"Your username is your email address: {email}\\n\"\n- ).format(site_url=SITE_URL, url=url, email=userprofile.email)\n+ ).format(url=urljoin(SITE_URL, url), email=userprofile.email)\n \n html_content = render_to_string(\n \"user_management/account_updated_email.html\",\n", "issue": "Event creation mails do not include event description\n\n", "before_files": [{"content": "from django.core import mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext as _\nfrom guardian.shortcuts import get_users_with_perms\n\nfrom ephios.event_management.models import AbstractParticipation\nfrom ephios.extra.permissions import get_groups_with_perms\nfrom ephios.settings import SITE_URL\nfrom ephios.user_management.models import UserProfile\n\n\ndef new_event(event):\n messages = []\n users = UserProfile.objects.filter(\n groups__in=get_groups_with_perms(event, only_with_perms_in=[\"view_event\"]), is_active=True\n ).distinct()\n responsible_users = get_users_with_perms(event, 
only_with_perms_in=[\"change_event\"]).distinct()\n responsible_persons_mails = list(responsible_users.values_list(\"email\", flat=True))\n\n subject = _(\"New {type}: {title}\").format(type=event.type, title=event.title)\n text_content = _(\n \"A new {type} ({title}) has been added. \\n You can view it here: {link}\"\n ).format(type=event.type, title=event.title, link=event.get_absolute_url())\n html_content = render_to_string(\n \"event_management/mails/new_event.html\", {\"event\": event, \"site_url\": SITE_URL}\n )\n\n for user in users:\n message = EmailMultiAlternatives(\n to=[user.email], subject=subject, body=text_content, reply_to=responsible_persons_mails\n )\n message.attach_alternative(html_content, \"text/html\")\n messages.append(message)\n mail.get_connection().send_messages(messages)\n\n\ndef participation_state_changed(participation: AbstractParticipation):\n if participation.state != AbstractParticipation.States.USER_DECLINED:\n messages = []\n\n # send mail to the participant whose participation has been changed\n if participation.participant.email is not None:\n text_content = _(\n \"The status for your participation for {shift} has changed. It is now {status}.\"\n ).format(shift=participation.shift, status=participation.get_state_display())\n html_content = render_to_string(\"email_base.html\", {\"message_text\": text_content})\n message = EmailMultiAlternatives(\n to=[participation.participant.email],\n subject=_(\"Your participation state changed\"),\n body=text_content,\n )\n message.attach_alternative(html_content, \"text/html\")\n messages.append(message)\n\n # send mail to responsible users\n responsible_users = get_users_with_perms(\n participation.shift.event, only_with_perms_in=[\"change_event\"]\n ).distinct()\n subject = _(\"Participation was changed for your event\")\n text_content = _(\n \"The participation of {participant} for {shift} was changed. 
The status is now {status}\"\n ).format(\n participant=participation.participant,\n shift=participation.shift,\n status=participation.get_state_display(),\n )\n html_content = render_to_string(\"email_base.html\", {\"message_text\": text_content})\n for user in responsible_users:\n message = EmailMultiAlternatives(to=[user.email], subject=subject, body=text_content)\n message.attach_alternative(html_content, \"text/html\")\n messages.append(message)\n\n mail.get_connection().send_messages(messages)\n", "path": "ephios/event_management/mail.py"}, {"content": "from django.contrib.auth.tokens import default_token_generator\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.utils.translation import gettext as _\n\nfrom ephios.settings import SITE_URL\n\n\ndef send_account_creation_info(userprofile):\n subject = _(\"Welcome to ephios!\")\n uid = urlsafe_base64_encode(force_bytes(userprofile.id))\n token = default_token_generator.make_token(userprofile)\n reset_link = reverse(\"password_reset_confirm\", kwargs={\"uidb64\": uid, \"token\": token})\n text_content = _(\n \"You're receiving this email because a new account has been created for you at ephios.\\n\"\n \"Please go to the following page and choose a password: {url}{reset_link}\\n\"\n \"Your username is your email address: {email}\\n\"\n ).format(url=SITE_URL, reset_link=reset_link, email=userprofile.email)\n\n html_content = render_to_string(\n \"user_management/new_account_email.html\",\n {\"uid\": uid, \"token\": token, \"site_url\": SITE_URL, \"email\": userprofile.email},\n )\n message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content)\n message.attach_alternative(html_content, \"text/html\")\n message.send()\n\n\ndef send_account_update_info(userprofile):\n subject = _(\"ephios account updated\")\n url = reverse(\"user_management:profile\")\n text_content = _(\n \"You're receiving this email because your account at ephios has been updated.\\n\"\n \"You can see the changes in your profile: {site_url}{url}\\n\"\n \"Your username is your email address: {email}\\n\"\n ).format(site_url=SITE_URL, url=url, email=userprofile.email)\n\n html_content = render_to_string(\n \"user_management/account_updated_email.html\",\n {\"site_url\": SITE_URL, \"url\": url, \"email\": userprofile.email},\n )\n message = EmailMultiAlternatives(to=[userprofile.email], subject=subject, body=text_content)\n message.attach_alternative(html_content, \"text/html\")\n message.send()\n", "path": "ephios/user_management/mail.py"}]}
1,970
710
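Beyond adding the event description to the mail body, the ephios diff above replaces `SITE_URL + path` concatenation with `urllib.parse.urljoin`. A quick demonstration of why, with invented values — `https://ephios.example/` is a placeholder, not a real deployment setting:

```python
from urllib.parse import urljoin

site_url = "https://ephios.example/"   # trailing slash, as deployments often configure
path = "/events/42/"                    # shape of get_absolute_url() output

print(site_url + path)          # https://ephios.example//events/42/  -- double slash
print(urljoin(site_url, path))  # https://ephios.example/events/42/
```

`urljoin` does the slash bookkeeping regardless of how the base URL is configured, which is exactly the failure mode naive concatenation invites.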
gh_patches_debug_535
rasdani/github-patches
git_diff
neptune-ai__neptune-client-155
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> create_experiment() fails on windows 10 Hi there, I enjoy neptune very much and on my macbook everything works fine. But when I run the same code on my Windows 10 machine, I get an error when calling create_experiment(). `Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\ProgramData\Anaconda3\envs\rl_insurance\lib\site-packages\neptune\__init__.py", line 177, in create_experiment notebook_id=notebook_id File "C:\ProgramData\Anaconda3\envs\rl_insurance\lib\site-packages\neptune\projects.py", line 400, in create_experiment click.echo(str(experiment.id)) File "C:\ProgramData\Anaconda3\envs\rl_insurance\lib\site-packages\click\utils.py", line 218, in echo file = _default_text_stdout() File "C:\ProgramData\Anaconda3\envs\rl_insurance\lib\site-packages\click\_compat.py", line 675, in func rv = wrapper_func() File "C:\ProgramData\Anaconda3\envs\rl_insurance\lib\site-packages\click\_compat.py", line 436, in get_text_stdout rv = _get_windows_console_stream(sys.stdout, encoding, errors) File "C:\ProgramData\Anaconda3\envs\rl_insurance\lib\site-packages\click\_winconsole.py", line 295, in _get_windows_console_stream func = _stream_factories.get(f.fileno()) AttributeError: 'StdOutWithUpload' object has no attribute 'fileno'` It happens when I run: `import neptune ` `import cfg` `neptune.init(api_token=cfg.neptune_token, project_qualified_name=cfg.neptune_project_name) ` `neptune.create_experiment()` I run it in conda environments both times. </issue> <code> [start of neptune/internal/streams/stdstream_uploader.py] 1 # 2 # Copyright (c) 2019, Neptune Labs Sp. z o.o. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License.
15 # 16 import sys 17 18 from neptune.internal.channels.channels import ChannelNamespace 19 from neptune.internal.streams.channel_writer import ChannelWriter 20 21 22 class StdStreamWithUpload(object): 23 24 def __init__(self, experiment, channel_name, stream): 25 # pylint:disable=protected-access 26 self._channel = experiment._get_channel(channel_name, 'text', ChannelNamespace.SYSTEM) 27 self._channel_writer = ChannelWriter(experiment, channel_name, ChannelNamespace.SYSTEM) 28 self._stream = stream 29 30 def write(self, data): 31 self._stream.write(data) 32 try: 33 self._channel_writer.write(data) 34 # pylint:disable=bare-except 35 except: 36 pass 37 38 def isatty(self): 39 return hasattr(self._stream, 'isatty') and self._stream.isatty() 40 41 def flush(self): 42 self._stream.flush() 43 44 45 class StdOutWithUpload(StdStreamWithUpload): 46 47 def __init__(self, experiment): 48 super(StdOutWithUpload, self).__init__(experiment, 'stdout', sys.__stdout__) 49 sys.stdout = self 50 51 def close(self): 52 sys.stdout = sys.__stdout__ 53 54 55 class StdErrWithUpload(StdStreamWithUpload): 56 57 def __init__(self, experiment): 58 super(StdErrWithUpload, self).__init__(experiment, 'stderr', sys.__stderr__) 59 sys.stderr = self 60 61 def close(self): 62 sys.stderr = sys.__stderr__ 63 [end of neptune/internal/streams/stdstream_uploader.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/neptune/internal/streams/stdstream_uploader.py b/neptune/internal/streams/stdstream_uploader.py --- a/neptune/internal/streams/stdstream_uploader.py +++ b/neptune/internal/streams/stdstream_uploader.py @@ -41,6 +41,9 @@ def flush(self): self._stream.flush() + def fileno(self): + return self._stream.fileno() + class StdOutWithUpload(StdStreamWithUpload):
{"golden_diff": "diff --git a/neptune/internal/streams/stdstream_uploader.py b/neptune/internal/streams/stdstream_uploader.py\n--- a/neptune/internal/streams/stdstream_uploader.py\n+++ b/neptune/internal/streams/stdstream_uploader.py\n@@ -41,6 +41,9 @@\n def flush(self):\n self._stream.flush()\n \n+ def fileno(self):\n+ return self._stream.fileno()\n+\n \n class StdOutWithUpload(StdStreamWithUpload):\n", "issue": "create_experiment() fails on windows 10\nHi there, \r\n\r\nI enjoy neptune very much and on my macbook everything works fine. But when I run the same code on my Windows 10 machine, I get an error when calling create_experiment().\r\n\r\n`Traceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\rl_insurance\\lib\\site-packages\\neptune\\__init__.py\", line 177, in create_experiment\r\n notebook_id=notebook_id\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\rl_insurance\\lib\\site-packages\\neptune\\projects.py\", line 400, in create_experiment\r\n click.echo(str(experiment.id))\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\rl_insurance\\lib\\site-packages\\click\\utils.py\", line 218, in echo\r\n file = _default_text_stdout()\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\rl_insurance\\lib\\site-packages\\click\\_compat.py\", line 675, in func\r\n rv = wrapper_func()\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\rl_insurance\\lib\\site-packages\\click\\_compat.py\", line 436, in get_text_stdout\r\n rv = _get_windows_console_stream(sys.stdout, encoding, errors)\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\rl_insurance\\lib\\site-packages\\click\\_winconsole.py\", line 295, in _get_windows_console_stream\r\n func = _stream_factories.get(f.fileno())\r\nAttributeError: 'StdOutWithUpload' object has no attribute 'fileno'`\r\n\r\nIt happens when I run:\r\n\r\n`import neptune `\r\n`import cfg`\r\n`neptune.init(api_token=cfg.neptune_token, project_qualified_name=cfg.neptune_project_name) `\r\n`neptune.create_experiment()`\r\n\r\nI run it in conda environments both times.\r\n\n", "before_files": [{"content": "#\n# Copyright (c) 2019, Neptune Labs Sp. 
z o.o.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport sys\n\nfrom neptune.internal.channels.channels import ChannelNamespace\nfrom neptune.internal.streams.channel_writer import ChannelWriter\n\n\nclass StdStreamWithUpload(object):\n\n def __init__(self, experiment, channel_name, stream):\n # pylint:disable=protected-access\n self._channel = experiment._get_channel(channel_name, 'text', ChannelNamespace.SYSTEM)\n self._channel_writer = ChannelWriter(experiment, channel_name, ChannelNamespace.SYSTEM)\n self._stream = stream\n\n def write(self, data):\n self._stream.write(data)\n try:\n self._channel_writer.write(data)\n # pylint:disable=bare-except\n except:\n pass\n\n def isatty(self):\n return hasattr(self._stream, 'isatty') and self._stream.isatty()\n\n def flush(self):\n self._stream.flush()\n\n\nclass StdOutWithUpload(StdStreamWithUpload):\n\n def __init__(self, experiment):\n super(StdOutWithUpload, self).__init__(experiment, 'stdout', sys.__stdout__)\n sys.stdout = self\n\n def close(self):\n sys.stdout = sys.__stdout__\n\n\nclass StdErrWithUpload(StdStreamWithUpload):\n\n def __init__(self, experiment):\n super(StdErrWithUpload, self).__init__(experiment, 'stderr', sys.__stderr__)\n sys.stderr = self\n\n def close(self):\n sys.stderr = sys.__stderr__\n", "path": "neptune/internal/streams/stdstream_uploader.py"}]}
1,565
106
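The neptune fix above is a single delegated method, but it illustrates a general rule: anything assigned to `sys.stdout` must implement enough of the file protocol for downstream probes — here, click's Windows console detection calling `fileno()`. A toy wrapper sketching the same shape; `TeeToList` is an invented name, not neptune code:

```python
import sys

class TeeToList:
    """Toy stand-in for neptune's StdStreamWithUpload: mirrors writes to a
    sink while delegating the rest of the file protocol to real stdout."""

    def __init__(self, sink):
        self._stream = sys.__stdout__
        self._sink = sink

    def write(self, data):
        self._stream.write(data)
        self._sink.append(data)

    def flush(self):
        self._stream.flush()

    def isatty(self):
        return hasattr(self._stream, "isatty") and self._stream.isatty()

    def fileno(self):
        # The one-line fix from the golden diff: forward to the wrapped stream.
        return self._stream.fileno()
```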
gh_patches_debug_32741
rasdani/github-patches
git_diff
WeblateOrg__weblate-9260
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Document weblate migrate command ### Describe the problem `weblate migrate` command is mentioned in the docs, but not actually documented. It is also used inconsistently: 1. https://docs.weblate.org/en/latest/admin/languages.html#built-in-language-definitions 2. https://docs.weblate.org/en/latest/admin/install.html#filling-up-the-database ### Describe the solution you'd like document the usage and link it in mentioned occurrences. ### Describe alternatives you've considered _No response_ ### Screenshots _No response_ ### Additional context _No response_ </issue> <code> [start of docs/_ext/djangodocs.py] 1 """Sphinx plugins for Weblate documentation.""" 2 import re 3 4 from sphinx import addnodes 5 from sphinx.domains.std import Cmdoption 6 7 # RE for option descriptions without a '--' prefix 8 simple_option_desc_re = re.compile(r"([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)") 9 10 11 def setup(app): 12 app.add_crossref_type( 13 directivename="setting", rolename="setting", indextemplate="pair: %s; setting" 14 ) 15 app.add_object_type( 16 directivename="django-admin", 17 rolename="djadmin", 18 indextemplate="pair: %s; weblate admin command", 19 parse_node=parse_django_admin_node, 20 ) 21 app.add_directive("django-admin-option", Cmdoption) 22 23 24 def parse_django_admin_node(env, sig, signode): 25 command = sig.split(" ")[0] 26 env.ref_context["std:program"] = command 27 title = f"weblate {sig}" 28 signode += addnodes.desc_name(title, title) 29 return command 30 [end of docs/_ext/djangodocs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/_ext/djangodocs.py b/docs/_ext/djangodocs.py --- a/docs/_ext/djangodocs.py +++ b/docs/_ext/djangodocs.py @@ -1,6 +1,7 @@ """Sphinx plugins for Weblate documentation.""" import re +from docutils.nodes import literal from sphinx import addnodes from sphinx.domains.std import Cmdoption @@ -8,22 +9,45 @@ simple_option_desc_re = re.compile(r"([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)") +class WeblateCommandLiteral(literal): + def __init__(self, rawsource="", text="", *children, **attributes): + if not text: + text = "weblate " + super().__init__(rawsource, text, *children, **attributes) + + def setup(app): app.add_crossref_type( directivename="setting", rolename="setting", indextemplate="pair: %s; setting" ) + app.add_object_type( + directivename="weblate-admin", + rolename="wladmin", + indextemplate="pair: %s; weblate admin command", + parse_node=parse_weblate_admin_node, + ref_nodeclass=WeblateCommandLiteral, + ) + app.add_directive("weblate-admin-option", Cmdoption) app.add_object_type( directivename="django-admin", rolename="djadmin", - indextemplate="pair: %s; weblate admin command", + indextemplate="pair: %s; django-admin command", parse_node=parse_django_admin_node, ) - app.add_directive("django-admin-option", Cmdoption) -def parse_django_admin_node(env, sig, signode): +def parse_weblate_admin_node(env, sig, signode): command = sig.split(" ")[0] + # Context for options env.ref_context["std:program"] = command title = f"weblate {sig}" signode += addnodes.desc_name(title, title) return command + + +def parse_django_admin_node(env, sig, signode): + command = sig.split(" ")[0] + env.ref_context["std:program"] = command + title = "django-admin %s" % sig + signode += addnodes.desc_name(title, title) + return command
{"golden_diff": "diff --git a/docs/_ext/djangodocs.py b/docs/_ext/djangodocs.py\n--- a/docs/_ext/djangodocs.py\n+++ b/docs/_ext/djangodocs.py\n@@ -1,6 +1,7 @@\n \"\"\"Sphinx plugins for Weblate documentation.\"\"\"\n import re\n \n+from docutils.nodes import literal\n from sphinx import addnodes\n from sphinx.domains.std import Cmdoption\n \n@@ -8,22 +9,45 @@\n simple_option_desc_re = re.compile(r\"([-_a-zA-Z0-9]+)(\\s*.*?)(?=,\\s+(?:/|-|--)|$)\")\n \n \n+class WeblateCommandLiteral(literal):\n+ def __init__(self, rawsource=\"\", text=\"\", *children, **attributes):\n+ if not text:\n+ text = \"weblate \"\n+ super().__init__(rawsource, text, *children, **attributes)\n+\n+\n def setup(app):\n app.add_crossref_type(\n directivename=\"setting\", rolename=\"setting\", indextemplate=\"pair: %s; setting\"\n )\n+ app.add_object_type(\n+ directivename=\"weblate-admin\",\n+ rolename=\"wladmin\",\n+ indextemplate=\"pair: %s; weblate admin command\",\n+ parse_node=parse_weblate_admin_node,\n+ ref_nodeclass=WeblateCommandLiteral,\n+ )\n+ app.add_directive(\"weblate-admin-option\", Cmdoption)\n app.add_object_type(\n directivename=\"django-admin\",\n rolename=\"djadmin\",\n- indextemplate=\"pair: %s; weblate admin command\",\n+ indextemplate=\"pair: %s; django-admin command\",\n parse_node=parse_django_admin_node,\n )\n- app.add_directive(\"django-admin-option\", Cmdoption)\n \n \n-def parse_django_admin_node(env, sig, signode):\n+def parse_weblate_admin_node(env, sig, signode):\n command = sig.split(\" \")[0]\n+ # Context for options\n env.ref_context[\"std:program\"] = command\n title = f\"weblate {sig}\"\n signode += addnodes.desc_name(title, title)\n return command\n+\n+\n+def parse_django_admin_node(env, sig, signode):\n+ command = sig.split(\" \")[0]\n+ env.ref_context[\"std:program\"] = command\n+ title = \"django-admin %s\" % sig\n+ signode += addnodes.desc_name(title, title)\n+ return command\n", "issue": "Document weblate migrate command\n### Describe the problem\n\n`weblate migrate` command is mentioned in the docs, but not actually documented. \r\n\r\nIt is also used inconsistently:\r\n1. https://docs.weblate.org/en/latest/admin/languages.html#built-in-language-definitions\r\n2. https://docs.weblate.org/en/latest/admin/install.html#filling-up-the-database\n\n### Describe the solution you'd like\n\ndocument the usage and link it in mentioned occurrences.\n\n### Describe alternatives you've considered\n\n_No response_\n\n### Screenshots\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "\"\"\"Sphinx plugins for Weblate documentation.\"\"\"\nimport re\n\nfrom sphinx import addnodes\nfrom sphinx.domains.std import Cmdoption\n\n# RE for option descriptions without a '--' prefix\nsimple_option_desc_re = re.compile(r\"([-_a-zA-Z0-9]+)(\\s*.*?)(?=,\\s+(?:/|-|--)|$)\")\n\n\ndef setup(app):\n app.add_crossref_type(\n directivename=\"setting\", rolename=\"setting\", indextemplate=\"pair: %s; setting\"\n )\n app.add_object_type(\n directivename=\"django-admin\",\n rolename=\"djadmin\",\n indextemplate=\"pair: %s; weblate admin command\",\n parse_node=parse_django_admin_node,\n )\n app.add_directive(\"django-admin-option\", Cmdoption)\n\n\ndef parse_django_admin_node(env, sig, signode):\n command = sig.split(\" \")[0]\n env.ref_context[\"std:program\"] = command\n title = f\"weblate {sig}\"\n signode += addnodes.desc_name(title, title)\n return command\n", "path": "docs/_ext/djangodocs.py"}]}
959
560
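The weblate diff above registers a dedicated Sphinx object type for admin commands. Given that registration, a documentation page would declare a command with a `.. weblate-admin:: migrate` directive and cross-reference it via the `:wladmin:` role — that reST usage is inferred from the `add_object_type` call, not quoted from Weblate's docs. The heart of the change is the `parse_node` hook:

```python
# Core of the new object type from the golden diff, reduced to the parse_node
# hook; the surrounding setup() registration is omitted here.
from sphinx import addnodes

def parse_weblate_admin_node(env, sig, signode):
    command = sig.split(" ")[0]                 # "migrate --fake" -> "migrate"
    env.ref_context["std:program"] = command    # scopes weblate-admin-option directives
    title = f"weblate {sig}"                    # index/display as the real invocation
    signode += addnodes.desc_name(title, title)
    return command                              # becomes the cross-reference target
```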
gh_patches_debug_32632
rasdani/github-patches
git_diff
docker__docker-py-727
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> split_port() does not properly handle ":80" or "127.0.0.1:" properly Initially reported as https://github.com/docker/compose/issues/1887 Example: ``` python def test_port_only_with_colon(self): self.assertRaises(ValueError, lambda: split_port(":80")) def test_host_only_with_colon(self): self.assertRaises(ValueError, lambda: split_port("localhost:")) ``` Results: ``` ====================================================================== ERROR: test_host_only_with_colon (__main__.UtilsTest) ---------------------------------------------------------------------- Traceback (most recent call last): File "tests/utils_test.py", line 428, in test_host_only_with_colon lambda: split_port("localhost:")) File "/usr/lib/python2.7/unittest/case.py", line 473, in assertRaises callableObj(*args, **kwargs) File "tests/utils_test.py", line 428, in <lambda> lambda: split_port("localhost:")) File "/home/mark/Projects/docker-py/docker/utils/ports/ports.py", line 77, in split_port if len(internal_range) != len(external_range): TypeError: object of type 'NoneType' has no len() ====================================================================== ERROR: test_port_only_with_colon (__main__.UtilsTest) ---------------------------------------------------------------------- Traceback (most recent call last): File "tests/utils_test.py", line 424, in test_port_only_with_colon lambda: split_port(":80")) File "/usr/lib/python2.7/unittest/case.py", line 473, in assertRaises callableObj(*args, **kwargs) File "tests/utils_test.py", line 424, in <lambda> lambda: split_port(":80")) File "/home/mark/Projects/docker-py/docker/utils/ports/ports.py", line 77, in split_port if len(internal_range) != len(external_range): TypeError: object of type 'NoneType' has no len() ``` </issue> <code> [start of docker/utils/ports/ports.py] 1 2 3 def add_port_mapping(port_bindings, internal_port, external): 4 if internal_port in port_bindings: 5 port_bindings[internal_port].append(external) 6 else: 7 port_bindings[internal_port] = [external] 8 9 10 def add_port(port_bindings, internal_port_range, external_range): 11 if external_range is None: 12 for internal_port in internal_port_range: 13 add_port_mapping(port_bindings, internal_port, None) 14 else: 15 ports = zip(internal_port_range, external_range) 16 for internal_port, external_port in ports: 17 add_port_mapping(port_bindings, internal_port, external_port) 18 19 20 def build_port_bindings(ports): 21 port_bindings = {} 22 for port in ports: 23 internal_port_range, external_range = split_port(port) 24 add_port(port_bindings, internal_port_range, external_range) 25 return port_bindings 26 27 28 def to_port_range(port): 29 if not port: 30 return None 31 32 protocol = "" 33 if "/" in port: 34 parts = port.split("/") 35 if len(parts) != 2: 36 raise ValueError('Invalid port "%s", should be ' 37 '[[remote_ip:]remote_port[-remote_port]:]' 38 'port[/protocol]' % port) 39 port, protocol = parts 40 protocol = "/" + protocol 41 42 parts = str(port).split('-') 43 44 if len(parts) == 1: 45 return ["%s%s" % (port, protocol)] 46 47 if len(parts) == 2: 48 full_port_range = range(int(parts[0]), int(parts[1]) + 1) 49 return ["%s%s" % (p, protocol) for p in full_port_range] 50 51 raise ValueError('Invalid port range "%s", should be ' 52 'port or startport-endport' % port) 53 54 55 def split_port(port): 56 parts = str(port).split(':') 57 if not 1 <= len(parts) <= 3: 58 raise ValueError('Invalid port "%s", 
should be ' 59 '[[remote_ip:]remote_port:]port[/protocol]' % port) 60 61 if len(parts) == 1: 62 internal_port, = parts 63 return to_port_range(internal_port), None 64 if len(parts) == 2: 65 external_port, internal_port = parts 66 67 internal_range = to_port_range(internal_port) 68 external_range = to_port_range(external_port) 69 if len(internal_range) != len(external_range): 70 raise ValueError('Port ranges don\'t match in length') 71 72 return internal_range, external_range 73 74 external_ip, external_port, internal_port = parts 75 internal_range = to_port_range(internal_port) 76 external_range = to_port_range(external_port) 77 if not external_range: 78 external_range = [None] * len(internal_range) 79 80 if len(internal_range) != len(external_range): 81 raise ValueError('Port ranges don\'t match in length') 82 83 return internal_range, [(external_ip, ex_port or None) 84 for ex_port in external_range] 85 [end of docker/utils/ports/ports.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/utils/ports/ports.py b/docker/utils/ports/ports.py --- a/docker/utils/ports/ports.py +++ b/docker/utils/ports/ports.py @@ -1,5 +1,4 @@ - def add_port_mapping(port_bindings, internal_port, external): if internal_port in port_bindings: port_bindings[internal_port].append(external) @@ -33,9 +32,8 @@ if "/" in port: parts = port.split("/") if len(parts) != 2: - raise ValueError('Invalid port "%s", should be ' - '[[remote_ip:]remote_port[-remote_port]:]' - 'port[/protocol]' % port) + _raise_invalid_port(port) + port, protocol = parts protocol = "/" + protocol @@ -52,11 +50,17 @@ 'port or startport-endport' % port) +def _raise_invalid_port(port): + raise ValueError('Invalid port "%s", should be ' + '[[remote_ip:]remote_port[-remote_port]:]' + 'port[/protocol]' % port) + + def split_port(port): parts = str(port).split(':') + if not 1 <= len(parts) <= 3: - raise ValueError('Invalid port "%s", should be ' - '[[remote_ip:]remote_port:]port[/protocol]' % port) + _raise_invalid_port(port) if len(parts) == 1: internal_port, = parts @@ -66,6 +70,10 @@ internal_range = to_port_range(internal_port) external_range = to_port_range(external_port) + + if internal_range is None or external_range is None: + _raise_invalid_port(port) + if len(internal_range) != len(external_range): raise ValueError('Port ranges don\'t match in length')
{"golden_diff": "diff --git a/docker/utils/ports/ports.py b/docker/utils/ports/ports.py\n--- a/docker/utils/ports/ports.py\n+++ b/docker/utils/ports/ports.py\n@@ -1,5 +1,4 @@\n \n-\n def add_port_mapping(port_bindings, internal_port, external):\n if internal_port in port_bindings:\n port_bindings[internal_port].append(external)\n@@ -33,9 +32,8 @@\n if \"/\" in port:\n parts = port.split(\"/\")\n if len(parts) != 2:\n- raise ValueError('Invalid port \"%s\", should be '\n- '[[remote_ip:]remote_port[-remote_port]:]'\n- 'port[/protocol]' % port)\n+ _raise_invalid_port(port)\n+\n port, protocol = parts\n protocol = \"/\" + protocol\n \n@@ -52,11 +50,17 @@\n 'port or startport-endport' % port)\n \n \n+def _raise_invalid_port(port):\n+ raise ValueError('Invalid port \"%s\", should be '\n+ '[[remote_ip:]remote_port[-remote_port]:]'\n+ 'port[/protocol]' % port)\n+\n+\n def split_port(port):\n parts = str(port).split(':')\n+\n if not 1 <= len(parts) <= 3:\n- raise ValueError('Invalid port \"%s\", should be '\n- '[[remote_ip:]remote_port:]port[/protocol]' % port)\n+ _raise_invalid_port(port)\n \n if len(parts) == 1:\n internal_port, = parts\n@@ -66,6 +70,10 @@\n \n internal_range = to_port_range(internal_port)\n external_range = to_port_range(external_port)\n+\n+ if internal_range is None or external_range is None:\n+ _raise_invalid_port(port)\n+\n if len(internal_range) != len(external_range):\n raise ValueError('Port ranges don\\'t match in length')\n", "issue": "split_port() does not properly handle \":80\" or \"127.0.0.1:\" properly\nInitially reported as https://github.com/docker/compose/issues/1887 \n\nExample:\n\n``` python\n def test_port_only_with_colon(self):\n self.assertRaises(ValueError,\n lambda: split_port(\":80\"))\n\n def test_host_only_with_colon(self):\n self.assertRaises(ValueError,\n lambda: split_port(\"localhost:\"))\n```\n\nResults:\n\n```\n======================================================================\nERROR: test_host_only_with_colon (__main__.UtilsTest)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"tests/utils_test.py\", line 428, in test_host_only_with_colon\n lambda: split_port(\"localhost:\"))\n File \"/usr/lib/python2.7/unittest/case.py\", line 473, in assertRaises\n callableObj(*args, **kwargs)\n File \"tests/utils_test.py\", line 428, in <lambda>\n lambda: split_port(\"localhost:\"))\n File \"/home/mark/Projects/docker-py/docker/utils/ports/ports.py\", line 77, in split_port\n if len(internal_range) != len(external_range):\nTypeError: object of type 'NoneType' has no len()\n\n======================================================================\nERROR: test_port_only_with_colon (__main__.UtilsTest)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"tests/utils_test.py\", line 424, in test_port_only_with_colon\n lambda: split_port(\":80\"))\n File \"/usr/lib/python2.7/unittest/case.py\", line 473, in assertRaises\n callableObj(*args, **kwargs)\n File \"tests/utils_test.py\", line 424, in <lambda>\n lambda: split_port(\":80\"))\n File \"/home/mark/Projects/docker-py/docker/utils/ports/ports.py\", line 77, in split_port\n if len(internal_range) != len(external_range):\nTypeError: object of type 'NoneType' has no len()\n```\n\n", "before_files": [{"content": "\n\ndef add_port_mapping(port_bindings, internal_port, external):\n if internal_port in port_bindings:\n port_bindings[internal_port].append(external)\n else:\n 
port_bindings[internal_port] = [external]\n\n\ndef add_port(port_bindings, internal_port_range, external_range):\n if external_range is None:\n for internal_port in internal_port_range:\n add_port_mapping(port_bindings, internal_port, None)\n else:\n ports = zip(internal_port_range, external_range)\n for internal_port, external_port in ports:\n add_port_mapping(port_bindings, internal_port, external_port)\n\n\ndef build_port_bindings(ports):\n port_bindings = {}\n for port in ports:\n internal_port_range, external_range = split_port(port)\n add_port(port_bindings, internal_port_range, external_range)\n return port_bindings\n\n\ndef to_port_range(port):\n if not port:\n return None\n\n protocol = \"\"\n if \"/\" in port:\n parts = port.split(\"/\")\n if len(parts) != 2:\n raise ValueError('Invalid port \"%s\", should be '\n '[[remote_ip:]remote_port[-remote_port]:]'\n 'port[/protocol]' % port)\n port, protocol = parts\n protocol = \"/\" + protocol\n\n parts = str(port).split('-')\n\n if len(parts) == 1:\n return [\"%s%s\" % (port, protocol)]\n\n if len(parts) == 2:\n full_port_range = range(int(parts[0]), int(parts[1]) + 1)\n return [\"%s%s\" % (p, protocol) for p in full_port_range]\n\n raise ValueError('Invalid port range \"%s\", should be '\n 'port or startport-endport' % port)\n\n\ndef split_port(port):\n parts = str(port).split(':')\n if not 1 <= len(parts) <= 3:\n raise ValueError('Invalid port \"%s\", should be '\n '[[remote_ip:]remote_port:]port[/protocol]' % port)\n\n if len(parts) == 1:\n internal_port, = parts\n return to_port_range(internal_port), None\n if len(parts) == 2:\n external_port, internal_port = parts\n\n internal_range = to_port_range(internal_port)\n external_range = to_port_range(external_port)\n if len(internal_range) != len(external_range):\n raise ValueError('Port ranges don\\'t match in length')\n\n return internal_range, external_range\n\n external_ip, external_port, internal_port = parts\n internal_range = to_port_range(internal_port)\n external_range = to_port_range(external_port)\n if not external_range:\n external_range = [None] * len(internal_range)\n\n if len(internal_range) != len(external_range):\n raise ValueError('Port ranges don\\'t match in length')\n\n return internal_range, [(external_ip, ex_port or None)\n for ex_port in external_range]\n", "path": "docker/utils/ports/ports.py"}]}
1,778
417
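The docker-py record above includes the reporter's own failing tests; after the patch they pass, because `split_port(":80")` and `split_port("localhost:")` now raise `ValueError` instead of crashing with `TypeError` on the `None` range. Expressed as a runnable pytest module — the import path is assumed from the `docker/utils/ports/ports.py` layout shown in the record:

```python
# The issue's two regression cases, checked against the patched helper.
import pytest
from docker.utils.ports.ports import split_port

def test_port_only_with_colon():
    with pytest.raises(ValueError):   # was TypeError before the fix
        split_port(":80")

def test_host_only_with_colon():
    with pytest.raises(ValueError):   # was TypeError before the fix
        split_port("localhost:")
```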
gh_patches_debug_35677
rasdani/github-patches
git_diff
medtagger__MedTagger-519
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Log in user after registration ## Current Behavior User needs to log in after first registration. ## Expected Behavior User should be logged into MedTagger right after filling registration form. ## Steps to Reproduce the Problem 1. Register new user. 2. You will be redirected to the login page. 3. Type your login once again... </issue> <code> [start of backend/medtagger/api/auth/business.py] 1 """Module responsible for business logic in all Auth endpoint.""" 2 from medtagger.api import InvalidArgumentsException 3 from medtagger.api.security import hash_password, verify_user_password, generate_auth_token 4 from medtagger.database.models import User 5 from medtagger.repositories import roles as RolesRepository, users as UsersRepository 6 7 8 def create_user(email: str, password: str, first_name: str, last_name: str) -> int: 9 """Create user with the given user information. Password is being hashed. 10 11 :param email: user email in string format 12 :param password: user password in string format 13 :param first_name: user first name in string format 14 :param last_name: user last name in string format 15 16 :return: id of the new user 17 """ 18 user = UsersRepository.get_user_by_email(email) 19 if user: 20 raise InvalidArgumentsException('User with this email already exists') 21 password_hash = hash_password(password) 22 new_user = User(email, password_hash, first_name, last_name) 23 role = RolesRepository.get_role_with_name('volunteer') 24 if not role: 25 raise InvalidArgumentsException('Role does not exist.') 26 new_user.roles.append(role) 27 return UsersRepository.add_new_user(new_user) 28 29 30 def sign_in_user(email: str, password: str) -> str: 31 """Sign in user using given username and password.
32 33 :param email: user email in string format 34 :param password: user password in string format 35 36 :return: authentication token 37 """ 38 user = UsersRepository.get_user_by_email(email) 39 if not user: 40 raise InvalidArgumentsException('User does not exist.') 41 if not verify_user_password(user, password): 42 raise InvalidArgumentsException('Password does not match.') 43 return generate_auth_token(user) 44 [end of backend/medtagger/api/auth/business.py] [start of backend/medtagger/api/auth/service.py] 1 """Module responsible for definition of Auth service.""" 2 from typing import Any 3 4 from flask import request 5 from flask_restplus import Resource 6 7 from medtagger.api import api 8 from medtagger.api.auth.business import create_user, sign_in_user 9 from medtagger.api.auth import serializers 10 11 auth_ns = api.namespace('auth', 'Auth methods') 12 13 14 @auth_ns.route('/register') 15 class Register(Resource): 16 """Register user endpoint.""" 17 18 @staticmethod 19 @api.expect(serializers.new_user) 20 @api.doc(responses={201: 'User created', 400: 'Invalid arguments'}) 21 def post() -> Any: 22 """Register the user.""" 23 user = request.json 24 user_id = create_user(user['email'], user['password'], user['firstName'], user['lastName']) 25 return {'id': user_id}, 201 26 27 28 @auth_ns.route('/sign-in') 29 class SignIn(Resource): 30 """Sign in endpoint.""" 31 32 @staticmethod 33 @api.expect(serializers.sign_in) 34 @api.doc(responses={200: 'Signed in', 400: 'User does not exist or wrong password was provided'}) 35 def post() -> Any: 36 """Sign in the user.""" 37 sign_in = request.json 38 token = sign_in_user(sign_in['email'], sign_in['password']) 39 return {"token": token}, 200 40 [end of backend/medtagger/api/auth/service.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/medtagger/api/auth/business.py b/backend/medtagger/api/auth/business.py --- a/backend/medtagger/api/auth/business.py +++ b/backend/medtagger/api/auth/business.py @@ -1,11 +1,12 @@ """Module responsible for business logic in all Auth endpoint.""" +from typing import Tuple from medtagger.api import InvalidArgumentsException from medtagger.api.security import hash_password, verify_user_password, generate_auth_token from medtagger.database.models import User from medtagger.repositories import roles as RolesRepository, users as UsersRepository -def create_user(email: str, password: str, first_name: str, last_name: str) -> int: +def create_user(email: str, password: str, first_name: str, last_name: str) -> Tuple[int, str]: """Create user with the given user information. Password is being hashed. :param email: user email in string format @@ -13,7 +14,7 @@ :param first_name: user first name in string format :param last_name: user last name in string format - :return: id of the new user + :return: tuple with user id and authentication token """ user = UsersRepository.get_user_by_email(email) if user: @@ -24,7 +25,9 @@ if not role: raise InvalidArgumentsException('Role does not exist.') new_user.roles.append(role) - return UsersRepository.add_new_user(new_user) + user_id = UsersRepository.add_new_user(new_user) + user_token = generate_auth_token(new_user) + return user_id, user_token def sign_in_user(email: str, password: str) -> str: diff --git a/backend/medtagger/api/auth/service.py b/backend/medtagger/api/auth/service.py --- a/backend/medtagger/api/auth/service.py +++ b/backend/medtagger/api/auth/service.py @@ -21,8 +21,8 @@ def post() -> Any: """Register the user.""" user = request.json - user_id = create_user(user['email'], user['password'], user['firstName'], user['lastName']) - return {'id': user_id}, 201 + user_id, user_token = create_user(user['email'], user['password'], user['firstName'], user['lastName']) + return {'id': user_id, 'token': user_token}, 201 @auth_ns.route('/sign-in')
{"golden_diff": "diff --git a/backend/medtagger/api/auth/business.py b/backend/medtagger/api/auth/business.py\n--- a/backend/medtagger/api/auth/business.py\n+++ b/backend/medtagger/api/auth/business.py\n@@ -1,11 +1,12 @@\n \"\"\"Module responsible for business logic in all Auth endpoint.\"\"\"\n+from typing import Tuple\n from medtagger.api import InvalidArgumentsException\n from medtagger.api.security import hash_password, verify_user_password, generate_auth_token\n from medtagger.database.models import User\n from medtagger.repositories import roles as RolesRepository, users as UsersRepository\n \n \n-def create_user(email: str, password: str, first_name: str, last_name: str) -> int:\n+def create_user(email: str, password: str, first_name: str, last_name: str) -> Tuple[int, str]:\n \"\"\"Create user with the given user information. Password is being hashed.\n \n :param email: user email in string format\n@@ -13,7 +14,7 @@\n :param first_name: user first name in string format\n :param last_name: user last name in string format\n \n- :return: id of the new user\n+ :return: tuple with user id and authentication token\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if user:\n@@ -24,7 +25,9 @@\n if not role:\n raise InvalidArgumentsException('Role does not exist.')\n new_user.roles.append(role)\n- return UsersRepository.add_new_user(new_user)\n+ user_id = UsersRepository.add_new_user(new_user)\n+ user_token = generate_auth_token(new_user)\n+ return user_id, user_token\n \n \n def sign_in_user(email: str, password: str) -> str:\ndiff --git a/backend/medtagger/api/auth/service.py b/backend/medtagger/api/auth/service.py\n--- a/backend/medtagger/api/auth/service.py\n+++ b/backend/medtagger/api/auth/service.py\n@@ -21,8 +21,8 @@\n def post() -> Any:\n \"\"\"Register the user.\"\"\"\n user = request.json\n- user_id = create_user(user['email'], user['password'], user['firstName'], user['lastName'])\n- return {'id': user_id}, 201\n+ user_id, user_token = create_user(user['email'], user['password'], user['firstName'], user['lastName'])\n+ return {'id': user_id, 'token': user_token}, 201\n \n \n @auth_ns.route('/sign-in')\n", "issue": "Log in user after registration\n## Current Behavior\r\n\r\nUser needs to log in after first registration.\r\n\r\n## Expected Behavior\r\n\r\nUser should be logged into MedTagger right after filling registration form.\r\n\r\n## Steps to Reproduce the Problem\r\n\r\n 1. Register new user.\r\n 2. You will be redirected to the login page.\r\n 3. Type your login once again...\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for business logic in all Auth endpoint.\"\"\"\nfrom medtagger.api import InvalidArgumentsException\nfrom medtagger.api.security import hash_password, verify_user_password, generate_auth_token\nfrom medtagger.database.models import User\nfrom medtagger.repositories import roles as RolesRepository, users as UsersRepository\n\n\ndef create_user(email: str, password: str, first_name: str, last_name: str) -> int:\n \"\"\"Create user with the given user information. 
Password is being hashed.\n\n :param email: user email in string format\n :param password: user password in string format\n :param first_name: user first name in string format\n :param last_name: user last name in string format\n\n :return: id of the new user\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if user:\n raise InvalidArgumentsException('User with this email already exists')\n password_hash = hash_password(password)\n new_user = User(email, password_hash, first_name, last_name)\n role = RolesRepository.get_role_with_name('volunteer')\n if not role:\n raise InvalidArgumentsException('Role does not exist.')\n new_user.roles.append(role)\n return UsersRepository.add_new_user(new_user)\n\n\ndef sign_in_user(email: str, password: str) -> str:\n \"\"\"Sign in user using given username and password.\n\n :param email: user email in string format\n :param password: user password in string format\n\n :return: authentication token\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if not user:\n raise InvalidArgumentsException('User does not exist.')\n if not verify_user_password(user, password):\n raise InvalidArgumentsException('Password does not match.')\n return generate_auth_token(user)\n", "path": "backend/medtagger/api/auth/business.py"}, {"content": "\"\"\"Module responsible for definition of Auth service.\"\"\"\nfrom typing import Any\n\nfrom flask import request\nfrom flask_restplus import Resource\n\nfrom medtagger.api import api\nfrom medtagger.api.auth.business import create_user, sign_in_user\nfrom medtagger.api.auth import serializers\n\nauth_ns = api.namespace('auth', 'Auth methods')\n\n\n@auth_ns.route('/register')\nclass Register(Resource):\n \"\"\"Register user endpoint.\"\"\"\n\n @staticmethod\n @api.expect(serializers.new_user)\n @api.doc(responses={201: 'User created', 400: 'Invalid arguments'})\n def post() -> Any:\n \"\"\"Register the user.\"\"\"\n user = request.json\n user_id = create_user(user['email'], user['password'], user['firstName'], user['lastName'])\n return {'id': user_id}, 201\n\n\n@auth_ns.route('/sign-in')\nclass SignIn(Resource):\n \"\"\"Sign in endpoint.\"\"\"\n\n @staticmethod\n @api.expect(serializers.sign_in)\n @api.doc(responses={200: 'Signed in', 400: 'User does not exist or wrong password was provided'})\n def post() -> Any:\n \"\"\"Sign in the user.\"\"\"\n sign_in = request.json\n token = sign_in_user(sign_in['email'], sign_in['password'])\n return {\"token\": token}, 200\n", "path": "backend/medtagger/api/auth/service.py"}]}
1,474
556
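The MedTagger change above is a contract change more than new logic: `create_user` now returns a `(user_id, token)` pair instead of a bare id, and the register endpoint forwards both, so the client is signed in without a second call to `/sign-in`. A minimal standalone sketch of the new shape (the helper name and values are hypothetical, not MedTagger code):

```python
# Hypothetical sketch of the post-fix contract: create_user returns a tuple,
# and the register handler unpacks it into the 201 response body.
def register_response(create_user_result):
    user_id, user_token = create_user_result  # (id, token) after the fix
    return {"id": user_id, "token": user_token}, 201

# Pre-fix, create_user returned only the id, so there was no token to send.
assert register_response((7, "jwt-abc")) == ({"id": 7, "token": "jwt-abc"}, 201)
```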
gh_patches_debug_23773
rasdani/github-patches
git_diff
mirumee__ariadne-481
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unexpected Snake Case for Acronyms The snake case conversion of the `snake_case_fallback_resolvers` yields unexpected results for words with multiple uppercase letters in a row, e.g. - `getHTTPResponse` is converted to `get_h_t_t_p_response`, or - `externalID` is converted to "external_i_d`. These are unlikely names for python attributes and I would expect the resolver to look for `get_http_response` / `external_id` instead. Possible implementations for the camel to snake case conversions are discussed here: https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case Unexpected Snake Case for Acronyms The snake case conversion of the `snake_case_fallback_resolvers` yields unexpected results for words with multiple uppercase letters in a row, e.g. - `getHTTPResponse` is converted to `get_h_t_t_p_response`, or - `externalID` is converted to "external_i_d`. These are unlikely names for python attributes and I would expect the resolver to look for `get_http_response` / `external_id` instead. Possible implementations for the camel to snake case conversions are discussed here: https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case </issue> <code> [start of ariadne/utils.py] 1 import asyncio 2 from functools import wraps 3 from typing import Optional, Union, Callable, Dict, Any 4 5 from graphql import GraphQLError, parse 6 7 8 def convert_camel_case_to_snake(graphql_name: str) -> str: 9 python_name = "" 10 for i, c in enumerate(graphql_name.lower()): 11 if ( 12 i > 0 13 and ( 14 all( 15 ( 16 c != graphql_name[i], 17 graphql_name[i - 1] != "_", 18 graphql_name[i - 1] == python_name[-1], 19 ) 20 ) 21 ) 22 or all((c.isdigit(), graphql_name[i - 1].isdigit() is False)) 23 ): 24 python_name += "_" 25 python_name += c 26 return python_name 27 28 29 def gql(value: str) -> str: 30 parse(value) 31 return value 32 33 34 def unwrap_graphql_error( 35 error: Union[GraphQLError, Optional[Exception]] 36 ) -> Optional[Exception]: 37 if isinstance(error, GraphQLError): 38 return unwrap_graphql_error(error.original_error) 39 return error 40 41 42 def convert_kwargs_to_snake_case(func: Callable) -> Callable: 43 def convert_to_snake_case(d: Dict) -> Dict: 44 converted: Dict = {} 45 for k, v in d.items(): 46 if isinstance(v, dict): 47 v = convert_to_snake_case(v) 48 if isinstance(v, list): 49 v = [convert_to_snake_case(i) if isinstance(i, dict) else i for i in v] 50 converted[convert_camel_case_to_snake(k)] = v 51 return converted 52 53 if asyncio.iscoroutinefunction(func): 54 55 @wraps(func) 56 async def async_wrapper(*args: Any, **kwargs: Any) -> Any: 57 return await func(*args, **convert_to_snake_case(kwargs)) 58 59 return async_wrapper 60 61 @wraps(func) 62 def wrapper(*args: Any, **kwargs: Any) -> Any: 63 return func(*args, **convert_to_snake_case(kwargs)) 64 65 return wrapper 66 [end of ariadne/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ariadne/utils.py b/ariadne/utils.py --- a/ariadne/utils.py +++ b/ariadne/utils.py @@ -6,20 +6,29 @@ def convert_camel_case_to_snake(graphql_name: str) -> str: + # pylint: disable=too-many-boolean-expressions + max_index = len(graphql_name) - 1 + lowered_name = graphql_name.lower() + python_name = "" - for i, c in enumerate(graphql_name.lower()): - if ( - i > 0 - and ( - all( - ( - c != graphql_name[i], - graphql_name[i - 1] != "_", - graphql_name[i - 1] == python_name[-1], - ) - ) + for i, c in enumerate(lowered_name): + if i > 0 and ( + # testWord -> test_word + ( + c != graphql_name[i] + and graphql_name[i - 1] != "_" + and graphql_name[i - 1] == python_name[-1] + ) + # TESTWord -> test_word + or ( + i < max_index + and graphql_name[i] != lowered_name[i] + and graphql_name[i + 1] == lowered_name[i + 1] ) - or all((c.isdigit(), graphql_name[i - 1].isdigit() is False)) + # test134 -> test_134 + or (c.isdigit() and not graphql_name[i - 1].isdigit()) + # 134test -> 134_test + or (not c.isdigit() and graphql_name[i - 1].isdigit()) ): python_name += "_" python_name += c
{"golden_diff": "diff --git a/ariadne/utils.py b/ariadne/utils.py\n--- a/ariadne/utils.py\n+++ b/ariadne/utils.py\n@@ -6,20 +6,29 @@\n \n \n def convert_camel_case_to_snake(graphql_name: str) -> str:\n+ # pylint: disable=too-many-boolean-expressions\n+ max_index = len(graphql_name) - 1\n+ lowered_name = graphql_name.lower()\n+\n python_name = \"\"\n- for i, c in enumerate(graphql_name.lower()):\n- if (\n- i > 0\n- and (\n- all(\n- (\n- c != graphql_name[i],\n- graphql_name[i - 1] != \"_\",\n- graphql_name[i - 1] == python_name[-1],\n- )\n- )\n+ for i, c in enumerate(lowered_name):\n+ if i > 0 and (\n+ # testWord -> test_word\n+ (\n+ c != graphql_name[i]\n+ and graphql_name[i - 1] != \"_\"\n+ and graphql_name[i - 1] == python_name[-1]\n+ )\n+ # TESTWord -> test_word\n+ or (\n+ i < max_index\n+ and graphql_name[i] != lowered_name[i]\n+ and graphql_name[i + 1] == lowered_name[i + 1]\n )\n- or all((c.isdigit(), graphql_name[i - 1].isdigit() is False))\n+ # test134 -> test_134\n+ or (c.isdigit() and not graphql_name[i - 1].isdigit())\n+ # 134test -> 134_test\n+ or (not c.isdigit() and graphql_name[i - 1].isdigit())\n ):\n python_name += \"_\"\n python_name += c\n", "issue": "Unexpected Snake Case for Acronyms\nThe snake case conversion of the `snake_case_fallback_resolvers` yields unexpected results for words with multiple uppercase letters in a row, e.g.\r\n - `getHTTPResponse` is converted to `get_h_t_t_p_response`, or\r\n - `externalID` is converted to \"external_i_d`. \r\n\r\nThese are unlikely names for python attributes and I would expect the resolver to look for `get_http_response` / `external_id` instead.\r\n\r\nPossible implementations for the camel to snake case conversions are discussed here: https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case\nUnexpected Snake Case for Acronyms\nThe snake case conversion of the `snake_case_fallback_resolvers` yields unexpected results for words with multiple uppercase letters in a row, e.g.\r\n - `getHTTPResponse` is converted to `get_h_t_t_p_response`, or\r\n - `externalID` is converted to \"external_i_d`. 
\r\n\r\nThese are unlikely names for python attributes and I would expect the resolver to look for `get_http_response` / `external_id` instead.\r\n\r\nPossible implementations for the camel to snake case conversions are discussed here: https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case\n", "before_files": [{"content": "import asyncio\nfrom functools import wraps\nfrom typing import Optional, Union, Callable, Dict, Any\n\nfrom graphql import GraphQLError, parse\n\n\ndef convert_camel_case_to_snake(graphql_name: str) -> str:\n python_name = \"\"\n for i, c in enumerate(graphql_name.lower()):\n if (\n i > 0\n and (\n all(\n (\n c != graphql_name[i],\n graphql_name[i - 1] != \"_\",\n graphql_name[i - 1] == python_name[-1],\n )\n )\n )\n or all((c.isdigit(), graphql_name[i - 1].isdigit() is False))\n ):\n python_name += \"_\"\n python_name += c\n return python_name\n\n\ndef gql(value: str) -> str:\n parse(value)\n return value\n\n\ndef unwrap_graphql_error(\n error: Union[GraphQLError, Optional[Exception]]\n) -> Optional[Exception]:\n if isinstance(error, GraphQLError):\n return unwrap_graphql_error(error.original_error)\n return error\n\n\ndef convert_kwargs_to_snake_case(func: Callable) -> Callable:\n def convert_to_snake_case(d: Dict) -> Dict:\n converted: Dict = {}\n for k, v in d.items():\n if isinstance(v, dict):\n v = convert_to_snake_case(v)\n if isinstance(v, list):\n v = [convert_to_snake_case(i) if isinstance(i, dict) else i for i in v]\n converted[convert_camel_case_to_snake(k)] = v\n return converted\n\n if asyncio.iscoroutinefunction(func):\n\n @wraps(func)\n async def async_wrapper(*args: Any, **kwargs: Any) -> Any:\n return await func(*args, **convert_to_snake_case(kwargs))\n\n return async_wrapper\n\n @wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n return func(*args, **convert_to_snake_case(kwargs))\n\n return wrapper\n", "path": "ariadne/utils.py"}]}
1,378
410
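The issue links to the well-known regex recipes for camelCase-to-snake_case conversion, but the golden diff instead extends Ariadne's explicit character loop. For comparison, a sketch of the two-pass regex approach the issue references, checked against the names from the report:

```python
import re

def camel_to_snake(name: str) -> str:
    # Pass 1 splits an acronym run from a following word: "HTTPResponse" -> "HTTP_Response".
    name = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)
    # Pass 2 splits lower/digit-to-upper boundaries: "externalID" -> "external_ID".
    return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", name).lower()

assert camel_to_snake("getHTTPResponse") == "get_http_response"
assert camel_to_snake("externalID") == "external_id"
assert camel_to_snake("testWord") == "test_word"
```

Note one behavioral difference: the patched loop also separates digit runs (`test134` becomes `test_134`, per the comments in the diff), which this two-pass regex leaves untouched; that is a plausible reason the project kept the handwritten loop.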
gh_patches_debug_18920
rasdani/github-patches
git_diff
google__turbinia-1098
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set a default file size limit for PlasoTask hashers Currently, all PlasoTask instances will attempt to hash files of any size, potentially very large ones .This could lead to unusually long processing times. This is a small part of a larger effort to try to optimize how Turbinia configures Plaso tasks to better utilize inherent parallel processing capabilities. </issue> <code> [start of turbinia/workers/plaso.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2015 Google Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """Task for running Plaso.""" 16 17 from __future__ import unicode_literals 18 19 import os 20 import logging 21 22 from turbinia import config 23 from turbinia.evidence import EvidenceState as state 24 from turbinia.evidence import PlasoFile 25 from turbinia.workers import TurbiniaTask 26 from turbinia.lib import file_helpers 27 28 29 class PlasoTask(TurbiniaTask): 30 """Task to run Plaso (log2timeline).""" 31 32 # Plaso requires the Disk to be attached, but doesn't require it be mounted. 33 REQUIRED_STATES = [ 34 state.ATTACHED, state.DECOMPRESSED, state.CONTAINER_MOUNTED 35 ] 36 37 TASK_CONFIG = { 38 # 'none' as indicated in the options for status_view within 39 # the Plaso documentation 40 'status_view': 'none', 41 'hashers': 'all', 42 'partitions': 'all', 43 'vss_stores': 'none', 44 # artifact_filters and file_filter are mutually exclusive 45 # parameters and Plaso will error out if both parameters are used. 46 'artifact_filters': None, 47 'file_filter': None, 48 'custom_artifact_definitions': None, 49 'parsers': None, 50 'yara_rules': None 51 } 52 53 def build_plaso_command(self, base_command, conf): 54 """Builds a typical plaso command, contains logic specific to log2timeline. 55 56 Args: 57 base_command (str): Command to invoke log2timeline (e.g. log2timeline.py) 58 conf (dict): Dynamic config containing the parameters for the command. 59 60 Returns: 61 String for valid Log2timeline command. 
62 """ 63 self.result.log( 64 'Generating Plaso command line from arguments: {0!s}'.format(conf), 65 level=logging.DEBUG) 66 cmd = [base_command] 67 for k, v in conf.items(): 68 cli_args = [ 69 'status_view', 'hashers', 'partitions', 'vss_stores', 70 'custom_artifact_definitions', 'parsers', 'artifact_filters', 71 'file_filter', 'yara_rules' 72 ] 73 if (k not in cli_args or not v): 74 continue 75 prepend = '-' 76 if len(k) > 1: 77 prepend = '--' 78 if k == 'file_filter': 79 file_path = file_helpers.write_list_to_temp_file( 80 v, preferred_dir=self.tmp_dir) 81 cmd.extend(['-f', file_path]) 82 elif k == 'yara_rules': 83 file_path = file_helpers.write_str_to_temp_file( 84 v, preferred_dir=self.tmp_dir) 85 cmd.extend(['--yara_rules', file_path]) 86 elif isinstance(v, list): 87 cmd.extend([prepend + k, ','.join(v)]) 88 elif isinstance(v, bool): 89 cmd.append(prepend + k) 90 elif isinstance(v, str): 91 cmd.extend([prepend + k, v]) 92 return cmd 93 94 def run(self, evidence, result): 95 """Task that process data with Plaso. 96 97 Args: 98 evidence (Evidence object): The evidence we will process. 99 result (TurbiniaTaskResult): The object to place task results into. 100 101 Returns: 102 TurbiniaTaskResult object. 103 """ 104 105 config.LoadConfig() 106 107 # Write plaso file into tmp_dir because sqlite has issues with some shared 108 # filesystems (e.g NFS). 109 plaso_file = os.path.join(self.tmp_dir, '{0:s}.plaso'.format(self.id)) 110 plaso_evidence = PlasoFile(source_path=plaso_file) 111 plaso_log = os.path.join(self.output_dir, '{0:s}.log'.format(self.id)) 112 113 cmd = self.build_plaso_command('log2timeline.py', self.task_config) 114 115 if config.DEBUG_TASKS or self.task_config.get('debug_tasks'): 116 cmd.append('-d') 117 118 if evidence.credentials: 119 for credential_type, credential_data in evidence.credentials: 120 cmd.extend([ 121 '--credential', '{0:s}:{1:s}'.format( 122 credential_type, credential_data) 123 ]) 124 125 cmd.extend(['--temporary_directory', self.tmp_dir]) 126 cmd.extend(['--logfile', plaso_log]) 127 cmd.extend(['--unattended']) 128 cmd.extend(['--storage_file', plaso_file]) 129 cmd.extend([evidence.local_path]) 130 131 result.log('Running plaso as [{0:s}]'.format(' '.join(cmd))) 132 self.execute( 133 cmd, result, log_files=[plaso_log], new_evidence=[plaso_evidence], 134 close=True) 135 136 return result 137 [end of turbinia/workers/plaso.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/turbinia/workers/plaso.py b/turbinia/workers/plaso.py --- a/turbinia/workers/plaso.py +++ b/turbinia/workers/plaso.py @@ -39,6 +39,7 @@ # the Plaso documentation 'status_view': 'none', 'hashers': 'all', + 'hasher_file_size_limit': '1073741824', 'partitions': 'all', 'vss_stores': 'none', # artifact_filters and file_filter are mutually exclusive @@ -66,9 +67,9 @@ cmd = [base_command] for k, v in conf.items(): cli_args = [ - 'status_view', 'hashers', 'partitions', 'vss_stores', - 'custom_artifact_definitions', 'parsers', 'artifact_filters', - 'file_filter', 'yara_rules' + 'status_view', 'hashers', 'hasher_file_size_limit', 'partitions', + 'vss_stores', 'custom_artifact_definitions', 'parsers', + 'artifact_filters', 'file_filter', 'yara_rules' ] if (k not in cli_args or not v): continue
{"golden_diff": "diff --git a/turbinia/workers/plaso.py b/turbinia/workers/plaso.py\n--- a/turbinia/workers/plaso.py\n+++ b/turbinia/workers/plaso.py\n@@ -39,6 +39,7 @@\n # the Plaso documentation\n 'status_view': 'none',\n 'hashers': 'all',\n+ 'hasher_file_size_limit': '1073741824',\n 'partitions': 'all',\n 'vss_stores': 'none',\n # artifact_filters and file_filter are mutually exclusive\n@@ -66,9 +67,9 @@\n cmd = [base_command]\n for k, v in conf.items():\n cli_args = [\n- 'status_view', 'hashers', 'partitions', 'vss_stores',\n- 'custom_artifact_definitions', 'parsers', 'artifact_filters',\n- 'file_filter', 'yara_rules'\n+ 'status_view', 'hashers', 'hasher_file_size_limit', 'partitions',\n+ 'vss_stores', 'custom_artifact_definitions', 'parsers',\n+ 'artifact_filters', 'file_filter', 'yara_rules'\n ]\n if (k not in cli_args or not v):\n continue\n", "issue": "Set a default file size limit for PlasoTask hashers\nCurrently, all PlasoTask instances will attempt to hash files of any size, potentially very large ones .This could lead to unusually long processing times.\r\n\r\nThis is a small part of a larger effort to try to optimize how Turbinia configures Plaso tasks to better utilize inherent parallel processing capabilities.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for running Plaso.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport logging\n\nfrom turbinia import config\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import PlasoFile\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.lib import file_helpers\n\n\nclass PlasoTask(TurbiniaTask):\n \"\"\"Task to run Plaso (log2timeline).\"\"\"\n\n # Plaso requires the Disk to be attached, but doesn't require it be mounted.\n REQUIRED_STATES = [\n state.ATTACHED, state.DECOMPRESSED, state.CONTAINER_MOUNTED\n ]\n\n TASK_CONFIG = {\n # 'none' as indicated in the options for status_view within\n # the Plaso documentation\n 'status_view': 'none',\n 'hashers': 'all',\n 'partitions': 'all',\n 'vss_stores': 'none',\n # artifact_filters and file_filter are mutually exclusive\n # parameters and Plaso will error out if both parameters are used.\n 'artifact_filters': None,\n 'file_filter': None,\n 'custom_artifact_definitions': None,\n 'parsers': None,\n 'yara_rules': None\n }\n\n def build_plaso_command(self, base_command, conf):\n \"\"\"Builds a typical plaso command, contains logic specific to log2timeline.\n\n Args:\n base_command (str): Command to invoke log2timeline (e.g. 
log2timeline.py)\n conf (dict): Dynamic config containing the parameters for the command.\n\n Returns:\n String for valid Log2timeline command.\n \"\"\"\n self.result.log(\n 'Generating Plaso command line from arguments: {0!s}'.format(conf),\n level=logging.DEBUG)\n cmd = [base_command]\n for k, v in conf.items():\n cli_args = [\n 'status_view', 'hashers', 'partitions', 'vss_stores',\n 'custom_artifact_definitions', 'parsers', 'artifact_filters',\n 'file_filter', 'yara_rules'\n ]\n if (k not in cli_args or not v):\n continue\n prepend = '-'\n if len(k) > 1:\n prepend = '--'\n if k == 'file_filter':\n file_path = file_helpers.write_list_to_temp_file(\n v, preferred_dir=self.tmp_dir)\n cmd.extend(['-f', file_path])\n elif k == 'yara_rules':\n file_path = file_helpers.write_str_to_temp_file(\n v, preferred_dir=self.tmp_dir)\n cmd.extend(['--yara_rules', file_path])\n elif isinstance(v, list):\n cmd.extend([prepend + k, ','.join(v)])\n elif isinstance(v, bool):\n cmd.append(prepend + k)\n elif isinstance(v, str):\n cmd.extend([prepend + k, v])\n return cmd\n\n def run(self, evidence, result):\n \"\"\"Task that process data with Plaso.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n config.LoadConfig()\n\n # Write plaso file into tmp_dir because sqlite has issues with some shared\n # filesystems (e.g NFS).\n plaso_file = os.path.join(self.tmp_dir, '{0:s}.plaso'.format(self.id))\n plaso_evidence = PlasoFile(source_path=plaso_file)\n plaso_log = os.path.join(self.output_dir, '{0:s}.log'.format(self.id))\n\n cmd = self.build_plaso_command('log2timeline.py', self.task_config)\n\n if config.DEBUG_TASKS or self.task_config.get('debug_tasks'):\n cmd.append('-d')\n\n if evidence.credentials:\n for credential_type, credential_data in evidence.credentials:\n cmd.extend([\n '--credential', '{0:s}:{1:s}'.format(\n credential_type, credential_data)\n ])\n\n cmd.extend(['--temporary_directory', self.tmp_dir])\n cmd.extend(['--logfile', plaso_log])\n cmd.extend(['--unattended'])\n cmd.extend(['--storage_file', plaso_file])\n cmd.extend([evidence.local_path])\n\n result.log('Running plaso as [{0:s}]'.format(' '.join(cmd)))\n self.execute(\n cmd, result, log_files=[plaso_log], new_evidence=[plaso_evidence],\n close=True)\n\n return result\n", "path": "turbinia/workers/plaso.py"}]}
2,031
283
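A detail in the Turbinia patch that is easy to miss: adding `hasher_file_size_limit` to `TASK_CONFIG` alone would change nothing, because `build_plaso_command` drops any key absent from its `cli_args` allow-list, so the fix has to touch both. A standalone model of that emission loop, with hypothetical inputs:

```python
# Simplified re-statement of the loop in build_plaso_command above; the
# `k not in cli_args` guard is why the allow-list must also be updated.
def emit_flags(conf, cli_args):
    cmd = []
    for k, v in conf.items():
        if k not in cli_args or not v:
            continue
        prepend = "--" if len(k) > 1 else "-"
        cmd.extend([prepend + k, v])
    return cmd

conf = {"hashers": "all", "hasher_file_size_limit": "1073741824"}
allow = ["hashers", "hasher_file_size_limit"]
assert emit_flags(conf, allow) == [
    "--hashers", "all", "--hasher_file_size_limit", "1073741824",
]
```

The chosen default of 1073741824 bytes is exactly 1 GiB, in line with the issue's aim of skipping hashes for very large files.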
gh_patches_debug_49043
rasdani/github-patches
git_diff
arviz-devs__arviz-2032
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> plot_dot **Describe the bug** plotdot fig size doesn't behave the way I expect, in that when I set `figsize` in an axes that triple a previous one its not triple the size. There also are some minor bugs where the dots seem to be overlapping some **To Reproduce** ``` samples = stats.beta(2,2).rvs(100) width = 10 fig, ax = plt.subplots(figsize=(width, 10)) az.plot_dot(samples, ax=ax) ax.set_title(f"Width: {width}") ax.set_xlim(0,1) ``` Then try this, but see that figure is not three times the width ``` width = 30 fig, ax = plt.subplots(figsize=(width, 10)) az.plot_dot(samples, ax=ax) ax.set_title(f"Width: {width}") ax.set_xlim(0,1) ``` ![image](https://user-images.githubusercontent.com/7213793/168452674-b1a38bf9-db25-459e-85b0-cd61348283cd.png) ![image](https://user-images.githubusercontent.com/7213793/168452677-1410fd7b-7208-4045-ac8d-cbc52c0595dd.png) **Expected behavior** Figsize from `plt.subplots` is respected **Additional context** Arviz '0.12.0' </issue> <code> [start of arviz/plots/backends/matplotlib/dotplot.py] 1 """Matplotlib dotplot.""" 2 import math 3 import warnings 4 import numpy as np 5 import matplotlib.pyplot as plt 6 from matplotlib import _pylab_helpers 7 8 from ...plot_utils import _scale_fig_size 9 from . import backend_kwarg_defaults, create_axes_grid, backend_show 10 from ...plot_utils import plot_point_interval 11 from ...dotplot import wilkinson_algorithm, layout_stacks 12 13 14 def plot_dot( 15 values, 16 binwidth, 17 dotsize, 18 stackratio, 19 hdi_prob, 20 quartiles, 21 rotated, 22 dotcolor, 23 intervalcolor, 24 markersize, 25 markercolor, 26 marker, 27 figsize, 28 linewidth, 29 point_estimate, 30 nquantiles, 31 point_interval, 32 ax, 33 show, 34 backend_kwargs, 35 plot_kwargs, 36 ): 37 """Matplotlib dotplot.""" 38 if backend_kwargs is None: 39 backend_kwargs = {} 40 41 backend_kwargs = {**backend_kwarg_defaults(), **backend_kwargs} 42 43 backend_kwargs.setdefault("figsize", figsize) 44 backend_kwargs["squeeze"] = True 45 46 (figsize, _, _, _, auto_linewidth, auto_markersize) = _scale_fig_size(figsize, None) 47 48 if plot_kwargs is None: 49 plot_kwargs = {} 50 plot_kwargs.setdefault("color", dotcolor) 51 52 if linewidth is None: 53 linewidth = auto_linewidth 54 55 if markersize is None: 56 markersize = auto_markersize 57 58 if ax is None: 59 fig_manager = _pylab_helpers.Gcf.get_active() 60 if fig_manager is not None: 61 ax = fig_manager.canvas.figure.gca() 62 else: 63 _, ax = create_axes_grid( 64 1, 65 backend_kwargs=backend_kwargs, 66 ) 67 68 if point_interval: 69 ax = plot_point_interval( 70 ax, 71 values, 72 point_estimate, 73 hdi_prob, 74 quartiles, 75 linewidth, 76 markersize, 77 markercolor, 78 marker, 79 rotated, 80 intervalcolor, 81 "matplotlib", 82 ) 83 84 if nquantiles > values.shape[0]: 85 warnings.warn( 86 "nquantiles must be less than or equal to the number of data points", UserWarning 87 ) 88 nquantiles = values.shape[0] 89 else: 90 qlist = np.linspace(1 / (2 * nquantiles), 1 - 1 / (2 * nquantiles), nquantiles) 91 values = np.quantile(values, qlist) 92 93 if binwidth is None: 94 binwidth = math.sqrt((values[-1] - values[0] + 1) ** 2 / (2 * nquantiles * np.pi)) 95 96 ## Wilkinson's Algorithm 97 stack_locs, stack_count = wilkinson_algorithm(values, binwidth) 98 x, y = layout_stacks(stack_locs, stack_count, binwidth, stackratio, rotated) 99 100 for (x_i, y_i) in zip(x, y): 101 dot = plt.Circle((x_i, y_i), dotsize * binwidth / 2, **plot_kwargs) 
102 ax.add_patch(dot) 103 104 if rotated: 105 ax.tick_params(bottom=False, labelbottom=False) 106 else: 107 ax.tick_params(left=False, labelleft=False) 108 109 ax.set_aspect("equal", adjustable="box") 110 ax.autoscale() 111 112 if backend_show(show): 113 plt.show() 114 115 return ax 116 [end of arviz/plots/backends/matplotlib/dotplot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/arviz/plots/backends/matplotlib/dotplot.py b/arviz/plots/backends/matplotlib/dotplot.py --- a/arviz/plots/backends/matplotlib/dotplot.py +++ b/arviz/plots/backends/matplotlib/dotplot.py @@ -106,7 +106,7 @@ else: ax.tick_params(left=False, labelleft=False) - ax.set_aspect("equal", adjustable="box") + ax.set_aspect("equal", adjustable="datalim") ax.autoscale() if backend_show(show):
{"golden_diff": "diff --git a/arviz/plots/backends/matplotlib/dotplot.py b/arviz/plots/backends/matplotlib/dotplot.py\n--- a/arviz/plots/backends/matplotlib/dotplot.py\n+++ b/arviz/plots/backends/matplotlib/dotplot.py\n@@ -106,7 +106,7 @@\n else:\n ax.tick_params(left=False, labelleft=False)\n \n- ax.set_aspect(\"equal\", adjustable=\"box\")\n+ ax.set_aspect(\"equal\", adjustable=\"datalim\")\n ax.autoscale()\n \n if backend_show(show):\n", "issue": "plot_dot \n**Describe the bug**\r\nplotdot fig size doesn't behave the way I expect, in that when I set `figsize` in an axes that triple a previous one its not triple the size. There also are some minor bugs where the dots seem to be overlapping some\r\n\r\n**To Reproduce**\r\n```\r\nsamples = stats.beta(2,2).rvs(100)\r\n\r\nwidth = 10\r\nfig, ax = plt.subplots(figsize=(width, 10))\r\naz.plot_dot(samples, ax=ax)\r\nax.set_title(f\"Width: {width}\")\r\nax.set_xlim(0,1)\r\n```\r\n\r\nThen try this, but see that figure is not three times the width\r\n```\r\nwidth = 30\r\nfig, ax = plt.subplots(figsize=(width, 10))\r\naz.plot_dot(samples, ax=ax)\r\nax.set_title(f\"Width: {width}\")\r\nax.set_xlim(0,1)\r\n```\r\n\r\n![image](https://user-images.githubusercontent.com/7213793/168452674-b1a38bf9-db25-459e-85b0-cd61348283cd.png)\r\n![image](https://user-images.githubusercontent.com/7213793/168452677-1410fd7b-7208-4045-ac8d-cbc52c0595dd.png)\r\n\r\n\r\n**Expected behavior**\r\nFigsize from `plt.subplots` is respected\r\n\r\n**Additional context**\r\nArviz '0.12.0'\r\n\n", "before_files": [{"content": "\"\"\"Matplotlib dotplot.\"\"\"\nimport math\nimport warnings\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import _pylab_helpers\n\nfrom ...plot_utils import _scale_fig_size\nfrom . import backend_kwarg_defaults, create_axes_grid, backend_show\nfrom ...plot_utils import plot_point_interval\nfrom ...dotplot import wilkinson_algorithm, layout_stacks\n\n\ndef plot_dot(\n values,\n binwidth,\n dotsize,\n stackratio,\n hdi_prob,\n quartiles,\n rotated,\n dotcolor,\n intervalcolor,\n markersize,\n markercolor,\n marker,\n figsize,\n linewidth,\n point_estimate,\n nquantiles,\n point_interval,\n ax,\n show,\n backend_kwargs,\n plot_kwargs,\n):\n \"\"\"Matplotlib dotplot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {**backend_kwarg_defaults(), **backend_kwargs}\n\n backend_kwargs.setdefault(\"figsize\", figsize)\n backend_kwargs[\"squeeze\"] = True\n\n (figsize, _, _, _, auto_linewidth, auto_markersize) = _scale_fig_size(figsize, None)\n\n if plot_kwargs is None:\n plot_kwargs = {}\n plot_kwargs.setdefault(\"color\", dotcolor)\n\n if linewidth is None:\n linewidth = auto_linewidth\n\n if markersize is None:\n markersize = auto_markersize\n\n if ax is None:\n fig_manager = _pylab_helpers.Gcf.get_active()\n if fig_manager is not None:\n ax = fig_manager.canvas.figure.gca()\n else:\n _, ax = create_axes_grid(\n 1,\n backend_kwargs=backend_kwargs,\n )\n\n if point_interval:\n ax = plot_point_interval(\n ax,\n values,\n point_estimate,\n hdi_prob,\n quartiles,\n linewidth,\n markersize,\n markercolor,\n marker,\n rotated,\n intervalcolor,\n \"matplotlib\",\n )\n\n if nquantiles > values.shape[0]:\n warnings.warn(\n \"nquantiles must be less than or equal to the number of data points\", UserWarning\n )\n nquantiles = values.shape[0]\n else:\n qlist = np.linspace(1 / (2 * nquantiles), 1 - 1 / (2 * nquantiles), nquantiles)\n values = np.quantile(values, qlist)\n\n if binwidth is None:\n binwidth = 
math.sqrt((values[-1] - values[0] + 1) ** 2 / (2 * nquantiles * np.pi))\n\n ## Wilkinson's Algorithm\n stack_locs, stack_count = wilkinson_algorithm(values, binwidth)\n x, y = layout_stacks(stack_locs, stack_count, binwidth, stackratio, rotated)\n\n for (x_i, y_i) in zip(x, y):\n dot = plt.Circle((x_i, y_i), dotsize * binwidth / 2, **plot_kwargs)\n ax.add_patch(dot)\n\n if rotated:\n ax.tick_params(bottom=False, labelbottom=False)\n else:\n ax.tick_params(left=False, labelleft=False)\n\n ax.set_aspect(\"equal\", adjustable=\"box\")\n ax.autoscale()\n\n if backend_show(show):\n plt.show()\n\n return ax\n", "path": "arviz/plots/backends/matplotlib/dotplot.py"}]}
1,848
128
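The one-line arviz fix swaps how matplotlib reconciles an equal aspect ratio with the axes size: with `adjustable="box"` the axes box itself is shrunk to satisfy the aspect, so widening `figsize` leaves the drawn plot the same size, while `adjustable="datalim"` keeps the box filling the figure and rescales the data limits instead. A minimal way to see the difference (file names here are illustrative):

```python
# Render the same circle at two figure widths; with adjustable="datalim"
# (the post-fix setting) the axes actually grow with the figure.
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt

for width in (10, 30):
    fig, ax = plt.subplots(figsize=(width, 10))
    ax.set_aspect("equal", adjustable="datalim")  # was "box" before the fix
    ax.add_patch(plt.Circle((0.5, 0.5), 0.05))
    ax.autoscale()
    fig.savefig(f"dot_width_{width}.png")
```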
gh_patches_debug_9828
rasdani/github-patches
git_diff
secdev__scapy-3473
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> L2TP post_build is broken ### Brief description l2tp.py post_build is supposed to update the length. However, it only does this if current length is None, and the length field is initialized to 0, not None, resulting in the length never being updated. ### Scapy version 2.4.5 ### Python version 3.8 ### Operating system Ubuntu 20.04 ### Additional environment information _No response_ ### How to reproduce print( (L2TP(header=['control', 'length'], version=2) / 'blahblah').build() ) ### Actual result b'\xc0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00blahblah' ### Expected result b'\xc0\x02\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00blahblah' ### Related resources _No response_ </issue> <code> [start of scapy/layers/l2tp.py] 1 # This file is part of Scapy 2 # See http://www.secdev.org/projects/scapy for more information 3 # Copyright (C) Philippe Biondi <[email protected]> 4 # This program is published under a GPLv2 license 5 6 """ 7 L2TP (Layer 2 Tunneling Protocol) for VPNs. 8 9 [RFC 2661] 10 """ 11 12 import struct 13 14 from scapy.packet import Packet, bind_layers, bind_bottom_up 15 from scapy.fields import BitEnumField, ConditionalField, FlagsField, \ 16 PadField, ShortField 17 from scapy.layers.inet import UDP 18 from scapy.layers.ppp import PPP 19 20 21 class L2TP(Packet): 22 name = "L2TP" 23 fields_desc = [ 24 FlagsField("hdr", 0, 12, ['res00', 'res01', 'res02', 'res03', 'priority', 'offset', # noqa: E501 25 'res06', 'sequence', 'res08', 'res09', 'length', 'control']), # noqa: E501 26 BitEnumField("version", 2, 4, {2: 'L2TPv2'}), 27 28 ConditionalField(ShortField("len", 0), 29 lambda pkt: pkt.hdr & 'control+length'), 30 ShortField("tunnel_id", 0), 31 ShortField("session_id", 0), 32 ConditionalField(ShortField("ns", 0), 33 lambda pkt: pkt.hdr & 'sequence+control'), 34 ConditionalField(ShortField("nr", 0), 35 lambda pkt: pkt.hdr & 'sequence+control'), 36 ConditionalField( 37 PadField(ShortField("offset", 0), 4, b"\x00"), 38 lambda pkt: not (pkt.hdr & 'control') and pkt.hdr & 'offset' 39 ) 40 ] 41 42 def post_build(self, pkt, pay): 43 if self.len is None and self.hdr & 'control+length': 44 tmp_len = len(pkt) + len(pay) 45 pkt = pkt[:2] + struct.pack("!H", tmp_len) + pkt[4:] 46 return pkt + pay 47 48 49 bind_bottom_up(UDP, L2TP, dport=1701) 50 bind_bottom_up(UDP, L2TP, sport=1701) 51 bind_layers(UDP, L2TP, dport=1701, sport=1701) 52 bind_layers(L2TP, PPP,) 53 [end of scapy/layers/l2tp.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scapy/layers/l2tp.py b/scapy/layers/l2tp.py --- a/scapy/layers/l2tp.py +++ b/scapy/layers/l2tp.py @@ -25,7 +25,7 @@ 'res06', 'sequence', 'res08', 'res09', 'length', 'control']), # noqa: E501 BitEnumField("version", 2, 4, {2: 'L2TPv2'}), - ConditionalField(ShortField("len", 0), + ConditionalField(ShortField("len", None), lambda pkt: pkt.hdr & 'control+length'), ShortField("tunnel_id", 0), ShortField("session_id", 0),
{"golden_diff": "diff --git a/scapy/layers/l2tp.py b/scapy/layers/l2tp.py\n--- a/scapy/layers/l2tp.py\n+++ b/scapy/layers/l2tp.py\n@@ -25,7 +25,7 @@\n 'res06', 'sequence', 'res08', 'res09', 'length', 'control']), # noqa: E501\n BitEnumField(\"version\", 2, 4, {2: 'L2TPv2'}),\n \n- ConditionalField(ShortField(\"len\", 0),\n+ ConditionalField(ShortField(\"len\", None),\n lambda pkt: pkt.hdr & 'control+length'),\n ShortField(\"tunnel_id\", 0),\n ShortField(\"session_id\", 0),\n", "issue": "L2TP post_build is broken\n### Brief description\n\nl2tp.py post_build is supposed to update the length. However, it only does this if current length is None, and the length field is initialized to 0, not None, resulting in the length never being updated. \n\n### Scapy version\n\n2.4.5\n\n### Python version\n\n3.8\n\n### Operating system\n\nUbuntu 20.04\n\n### Additional environment information\n\n_No response_\n\n### How to reproduce\n\nprint( (L2TP(header=['control', 'length'], version=2) / 'blahblah').build() )\r\n\n\n### Actual result\n\nb'\\xc0\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00blahblah'\n\n### Expected result\n\nb'\\xc0\\x02\\x00\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00blahblah'\n\n### Related resources\n\n_No response_\n", "before_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nL2TP (Layer 2 Tunneling Protocol) for VPNs.\n\n[RFC 2661]\n\"\"\"\n\nimport struct\n\nfrom scapy.packet import Packet, bind_layers, bind_bottom_up\nfrom scapy.fields import BitEnumField, ConditionalField, FlagsField, \\\n PadField, ShortField\nfrom scapy.layers.inet import UDP\nfrom scapy.layers.ppp import PPP\n\n\nclass L2TP(Packet):\n name = \"L2TP\"\n fields_desc = [\n FlagsField(\"hdr\", 0, 12, ['res00', 'res01', 'res02', 'res03', 'priority', 'offset', # noqa: E501\n 'res06', 'sequence', 'res08', 'res09', 'length', 'control']), # noqa: E501\n BitEnumField(\"version\", 2, 4, {2: 'L2TPv2'}),\n\n ConditionalField(ShortField(\"len\", 0),\n lambda pkt: pkt.hdr & 'control+length'),\n ShortField(\"tunnel_id\", 0),\n ShortField(\"session_id\", 0),\n ConditionalField(ShortField(\"ns\", 0),\n lambda pkt: pkt.hdr & 'sequence+control'),\n ConditionalField(ShortField(\"nr\", 0),\n lambda pkt: pkt.hdr & 'sequence+control'),\n ConditionalField(\n PadField(ShortField(\"offset\", 0), 4, b\"\\x00\"),\n lambda pkt: not (pkt.hdr & 'control') and pkt.hdr & 'offset'\n )\n ]\n\n def post_build(self, pkt, pay):\n if self.len is None and self.hdr & 'control+length':\n tmp_len = len(pkt) + len(pay)\n pkt = pkt[:2] + struct.pack(\"!H\", tmp_len) + pkt[4:]\n return pkt + pay\n\n\nbind_bottom_up(UDP, L2TP, dport=1701)\nbind_bottom_up(UDP, L2TP, sport=1701)\nbind_layers(UDP, L2TP, dport=1701, sport=1701)\nbind_layers(L2TP, PPP,)\n", "path": "scapy/layers/l2tp.py"}]}
1,410
174
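The scapy bug is a classic sentinel mismatch: `post_build` recomputes the 16-bit length only while the field is still `None`, but the field was declared with a default of `0`, so the guard never fired. The guard in isolation, as plain Python rather than a scapy run:

```python
import struct

# Mirror of the post_build length logic: recompute only when unset (None).
def patch_length(pkt: bytes, pay: bytes, current_len):
    if current_len is None:  # default after the fix; 0 before, so never True
        pkt = pkt[:2] + struct.pack("!H", len(pkt) + len(pay)) + pkt[4:]
    return pkt + pay

hdr = b"\xc0\x02\x00\x00" + b"\x00" * 8            # 12-byte L2TP header
assert patch_length(hdr, b"blahblah", None)[2:4] == b"\x00\x14"  # 20 bytes
assert patch_length(hdr, b"blahblah", 0)[2:4] == b"\x00\x00"     # stale zero
```

The `b"\x00\x14"` result reproduces the issue's expected output byte for byte; the real method additionally checks `self.hdr & 'control+length'` before patching, which this sketch leaves out.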
gh_patches_debug_18028
rasdani/github-patches
git_diff
Mailu__Mailu-1316
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Rainloop Webmail - Authentication fails if you have a special character in your password In the admin interface, you can define a new password and you can put a special character like `è`. It works fine with admin interface but it doesn't work at all with the Rainloop webmail. If you try to log in, you will have a message to indicate that the authentication fails, see screenshoot in french: ![Capture d’écran de 2019-08-25 10-57-56](https://user-images.githubusercontent.com/21110485/63647853-904b0b80-c727-11e9-9938-d66a1e10ca4e.png) </issue> <code> [start of core/admin/mailu/internal/nginx.py] 1 from mailu import models 2 from flask import current_app as app 3 4 import re 5 import urllib 6 import ipaddress 7 import socket 8 import tenacity 9 10 11 SUPPORTED_AUTH_METHODS = ["none", "plain"] 12 13 14 STATUSES = { 15 "authentication": ("Authentication credentials invalid", { 16 "imap": "AUTHENTICATIONFAILED", 17 "smtp": "535 5.7.8", 18 "pop3": "-ERR Authentication failed" 19 }), 20 } 21 22 23 def handle_authentication(headers): 24 """ Handle an HTTP nginx authentication request 25 See: http://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#protocol 26 """ 27 method = headers["Auth-Method"] 28 protocol = headers["Auth-Protocol"] 29 # Incoming mail, no authentication 30 if method == "none" and protocol == "smtp": 31 server, port = get_server(headers["Auth-Protocol"], False) 32 return { 33 "Auth-Status": "OK", 34 "Auth-Server": server, 35 "Auth-Port": port 36 } 37 # Authenticated user 38 elif method == "plain": 39 server, port = get_server(headers["Auth-Protocol"], True) 40 user_email = urllib.parse.unquote(headers["Auth-User"]) 41 password = urllib.parse.unquote(headers["Auth-Pass"]) 42 ip = urllib.parse.unquote(headers["Client-Ip"]) 43 user = models.User.query.get(user_email) 44 status = False 45 if user: 46 for token in user.tokens: 47 if (token.check_password(password) and 48 (not token.ip or token.ip == ip)): 49 status = True 50 if user.check_password(password): 51 status = True 52 if status: 53 if protocol == "imap" and not user.enable_imap: 54 status = False 55 elif protocol == "pop3" and not user.enable_pop: 56 status = False 57 if status and user.enabled: 58 return { 59 "Auth-Status": "OK", 60 "Auth-Server": server, 61 "Auth-Port": port 62 } 63 else: 64 status, code = get_status(protocol, "authentication") 65 return { 66 "Auth-Status": status, 67 "Auth-Error-Code": code, 68 "Auth-Wait": 0 69 } 70 # Unexpected 71 return {} 72 73 74 def get_status(protocol, status): 75 """ Return the proper error code depending on the protocol 76 """ 77 status, codes = STATUSES[status] 78 return status, codes[protocol] 79 80 def extract_host_port(host_and_port, default_port): 81 host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups() 82 return host, int(port) if port else default_port 83 84 def get_server(protocol, authenticated=False): 85 if protocol == "imap": 86 hostname, port = extract_host_port(app.config['IMAP_ADDRESS'], 143) 87 elif protocol == "pop3": 88 hostname, port = extract_host_port(app.config['POP3_ADDRESS'], 110) 89 elif protocol == "smtp": 90 if authenticated: 91 hostname, port = extract_host_port(app.config['AUTHSMTP_ADDRESS'], 10025) 92 else: 93 hostname, port = extract_host_port(app.config['SMTP_ADDRESS'], 25) 94 try: 95 # test if hostname is already resolved to an ip adddress 96 ipaddress.ip_address(hostname) 97 except: 98 # hostname is not an ip address - 
so we need to resolve it 99 hostname = resolve_hostname(hostname) 100 return hostname, port 101 102 @tenacity.retry(stop=tenacity.stop_after_attempt(100), 103 wait=tenacity.wait_random(min=2, max=5)) 104 def resolve_hostname(hostname): 105 """ This function uses system DNS to resolve a hostname. 106 It is capable of retrying in case the host is not immediately available 107 """ 108 return socket.gethostbyname(hostname) 109 [end of core/admin/mailu/internal/nginx.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py --- a/core/admin/mailu/internal/nginx.py +++ b/core/admin/mailu/internal/nginx.py @@ -37,8 +37,14 @@ # Authenticated user elif method == "plain": server, port = get_server(headers["Auth-Protocol"], True) - user_email = urllib.parse.unquote(headers["Auth-User"]) - password = urllib.parse.unquote(headers["Auth-Pass"]) + # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should + # be ASCII and are generally considered ISO8859-1. However when passing + # the password, nginx does not transcode the input UTF string, thus + # we need to manually decode. + raw_user_email = urllib.parse.unquote(headers["Auth-User"]) + user_email = raw_user_email.encode("iso8859-1").decode("utf8") + raw_password = urllib.parse.unquote(headers["Auth-Pass"]) + password = raw_password.encode("iso8859-1").decode("utf8") ip = urllib.parse.unquote(headers["Client-Ip"]) user = models.User.query.get(user_email) status = False
{"golden_diff": "diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py\n--- a/core/admin/mailu/internal/nginx.py\n+++ b/core/admin/mailu/internal/nginx.py\n@@ -37,8 +37,14 @@\n # Authenticated user\n elif method == \"plain\":\n server, port = get_server(headers[\"Auth-Protocol\"], True)\n- user_email = urllib.parse.unquote(headers[\"Auth-User\"])\n- password = urllib.parse.unquote(headers[\"Auth-Pass\"])\n+ # According to RFC2616 section 3.7.1 and PEP 3333, HTTP headers should\n+ # be ASCII and are generally considered ISO8859-1. However when passing\n+ # the password, nginx does not transcode the input UTF string, thus\n+ # we need to manually decode.\n+ raw_user_email = urllib.parse.unquote(headers[\"Auth-User\"])\n+ user_email = raw_user_email.encode(\"iso8859-1\").decode(\"utf8\")\n+ raw_password = urllib.parse.unquote(headers[\"Auth-Pass\"])\n+ password = raw_password.encode(\"iso8859-1\").decode(\"utf8\")\n ip = urllib.parse.unquote(headers[\"Client-Ip\"])\n user = models.User.query.get(user_email)\n status = False\n", "issue": "Rainloop Webmail - Authentication fails if you have a special character in your password\nIn the admin interface, you can define a new password and you can put a special character like `\u00e8`.\r\n\r\nIt works fine with admin interface but it doesn't work at all with the Rainloop webmail. If you try to log in, you will have a message to indicate that the authentication fails, see screenshoot in french:\r\n\r\n![Capture d\u2019\u00e9cran de 2019-08-25 10-57-56](https://user-images.githubusercontent.com/21110485/63647853-904b0b80-c727-11e9-9938-d66a1e10ca4e.png)\r\n\n", "before_files": [{"content": "from mailu import models\nfrom flask import current_app as app\n\nimport re\nimport urllib\nimport ipaddress\nimport socket\nimport tenacity\n\n\nSUPPORTED_AUTH_METHODS = [\"none\", \"plain\"]\n\n\nSTATUSES = {\n \"authentication\": (\"Authentication credentials invalid\", {\n \"imap\": \"AUTHENTICATIONFAILED\",\n \"smtp\": \"535 5.7.8\",\n \"pop3\": \"-ERR Authentication failed\"\n }),\n}\n\n\ndef handle_authentication(headers):\n \"\"\" Handle an HTTP nginx authentication request\n See: http://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#protocol\n \"\"\"\n method = headers[\"Auth-Method\"]\n protocol = headers[\"Auth-Protocol\"]\n # Incoming mail, no authentication\n if method == \"none\" and protocol == \"smtp\":\n server, port = get_server(headers[\"Auth-Protocol\"], False)\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-Port\": port\n }\n # Authenticated user\n elif method == \"plain\":\n server, port = get_server(headers[\"Auth-Protocol\"], True)\n user_email = urllib.parse.unquote(headers[\"Auth-User\"])\n password = urllib.parse.unquote(headers[\"Auth-Pass\"])\n ip = urllib.parse.unquote(headers[\"Client-Ip\"])\n user = models.User.query.get(user_email)\n status = False\n if user:\n for token in user.tokens:\n if (token.check_password(password) and\n (not token.ip or token.ip == ip)):\n status = True\n if user.check_password(password):\n status = True\n if status:\n if protocol == \"imap\" and not user.enable_imap:\n status = False\n elif protocol == \"pop3\" and not user.enable_pop:\n status = False\n if status and user.enabled:\n return {\n \"Auth-Status\": \"OK\",\n \"Auth-Server\": server,\n \"Auth-Port\": port\n }\n else:\n status, code = get_status(protocol, \"authentication\")\n return {\n \"Auth-Status\": status,\n \"Auth-Error-Code\": code,\n \"Auth-Wait\": 0\n }\n # Unexpected\n 
return {}\n\n\ndef get_status(protocol, status):\n \"\"\" Return the proper error code depending on the protocol\n \"\"\"\n status, codes = STATUSES[status]\n return status, codes[protocol]\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\ndef get_server(protocol, authenticated=False):\n if protocol == \"imap\":\n hostname, port = extract_host_port(app.config['IMAP_ADDRESS'], 143)\n elif protocol == \"pop3\":\n hostname, port = extract_host_port(app.config['POP3_ADDRESS'], 110)\n elif protocol == \"smtp\":\n if authenticated:\n hostname, port = extract_host_port(app.config['AUTHSMTP_ADDRESS'], 10025)\n else:\n hostname, port = extract_host_port(app.config['SMTP_ADDRESS'], 25)\n try:\n # test if hostname is already resolved to an ip adddress\n ipaddress.ip_address(hostname)\n except:\n # hostname is not an ip address - so we need to resolve it\n hostname = resolve_hostname(hostname)\n return hostname, port\n\[email protected](stop=tenacity.stop_after_attempt(100),\n wait=tenacity.wait_random(min=2, max=5))\ndef resolve_hostname(hostname):\n \"\"\" This function uses system DNS to resolve a hostname.\n It is capable of retrying in case the host is not immediately available\n \"\"\"\n return socket.gethostbyname(hostname)\n", "path": "core/admin/mailu/internal/nginx.py"}]}
1,760
292
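The mailu fix in the record above hinges on an encoding round-trip: nginx forwards the Auth-User and Auth-Pass header bytes untranscoded, Python's HTTP layer exposes them as ISO-8859-1 text, so a UTF-8 password arrives as mojibake until it is re-encoded. A standalone sketch of that round-trip follows; the helper name and the test value are ours for illustration, not mailu's:

```python
import urllib.parse

def decode_auth_header(raw_header):
    # nginx passes the header bytes through as-is; the HTTP layer decodes
    # them as ISO-8859-1, so re-encode to recover the original UTF-8 bytes.
    unquoted = urllib.parse.unquote(raw_header)
    return unquoted.encode("iso8859-1").decode("utf8")

# Simulate what the app receives for the UTF-8 password "père":
received = "père".encode("utf8").decode("iso8859-1")  # looks like 'pÃ¨re'
assert decode_auth_header(received) == "père"
```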
gh_patches_debug_15801
rasdani/github-patches
git_diff
pyca__cryptography-1430
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> OpenSSL's HMAC Context isn't marked as implementing MACContext It ought to be. </issue> <code> [start of cryptography/hazmat/backends/commoncrypto/hmac.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 # implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from __future__ import absolute_import, division, print_function 15 16 from cryptography import utils 17 from cryptography.exceptions import UnsupportedAlgorithm, _Reasons 18 from cryptography.hazmat.primitives import interfaces 19 20 21 @utils.register_interface(interfaces.HashContext) 22 class _HMACContext(object): 23 def __init__(self, backend, key, algorithm, ctx=None): 24 self.algorithm = algorithm 25 self._backend = backend 26 if ctx is None: 27 ctx = self._backend._ffi.new("CCHmacContext *") 28 try: 29 alg = self._backend._supported_hmac_algorithms[algorithm.name] 30 except KeyError: 31 raise UnsupportedAlgorithm( 32 "{0} is not a supported HMAC hash on this backend.".format( 33 algorithm.name), 34 _Reasons.UNSUPPORTED_HASH 35 ) 36 37 self._backend._lib.CCHmacInit(ctx, alg, key, len(key)) 38 39 self._ctx = ctx 40 self._key = key 41 42 def copy(self): 43 copied_ctx = self._backend._ffi.new("CCHmacContext *") 44 # CommonCrypto has no APIs for copying HMACs, so we have to copy the 45 # underlying struct. 46 copied_ctx[0] = self._ctx[0] 47 return _HMACContext( 48 self._backend, self._key, self.algorithm, ctx=copied_ctx 49 ) 50 51 def update(self, data): 52 self._backend._lib.CCHmacUpdate(self._ctx, data, len(data)) 53 54 def finalize(self): 55 buf = self._backend._ffi.new("unsigned char[]", 56 self.algorithm.digest_size) 57 self._backend._lib.CCHmacFinal(self._ctx, buf) 58 return self._backend._ffi.buffer(buf)[:] 59 [end of cryptography/hazmat/backends/commoncrypto/hmac.py] [start of cryptography/hazmat/backends/openssl/hmac.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 # implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 
13 14 from __future__ import absolute_import, division, print_function 15 16 17 from cryptography import utils 18 from cryptography.exceptions import UnsupportedAlgorithm, _Reasons 19 from cryptography.hazmat.primitives import interfaces 20 21 22 @utils.register_interface(interfaces.HashContext) 23 class _HMACContext(object): 24 def __init__(self, backend, key, algorithm, ctx=None): 25 self.algorithm = algorithm 26 self._backend = backend 27 28 if ctx is None: 29 ctx = self._backend._ffi.new("HMAC_CTX *") 30 self._backend._lib.HMAC_CTX_init(ctx) 31 ctx = self._backend._ffi.gc( 32 ctx, self._backend._lib.HMAC_CTX_cleanup 33 ) 34 evp_md = self._backend._lib.EVP_get_digestbyname( 35 algorithm.name.encode('ascii')) 36 if evp_md == self._backend._ffi.NULL: 37 raise UnsupportedAlgorithm( 38 "{0} is not a supported hash on this backend.".format( 39 algorithm.name), 40 _Reasons.UNSUPPORTED_HASH 41 ) 42 res = self._backend._lib.Cryptography_HMAC_Init_ex( 43 ctx, key, len(key), evp_md, self._backend._ffi.NULL 44 ) 45 assert res != 0 46 47 self._ctx = ctx 48 self._key = key 49 50 def copy(self): 51 copied_ctx = self._backend._ffi.new("HMAC_CTX *") 52 self._backend._lib.HMAC_CTX_init(copied_ctx) 53 copied_ctx = self._backend._ffi.gc( 54 copied_ctx, self._backend._lib.HMAC_CTX_cleanup 55 ) 56 res = self._backend._lib.Cryptography_HMAC_CTX_copy( 57 copied_ctx, self._ctx 58 ) 59 assert res != 0 60 return _HMACContext( 61 self._backend, self._key, self.algorithm, ctx=copied_ctx 62 ) 63 64 def update(self, data): 65 res = self._backend._lib.Cryptography_HMAC_Update( 66 self._ctx, data, len(data) 67 ) 68 assert res != 0 69 70 def finalize(self): 71 buf = self._backend._ffi.new("unsigned char[]", 72 self._backend._lib.EVP_MAX_MD_SIZE) 73 outlen = self._backend._ffi.new("unsigned int *") 74 res = self._backend._lib.Cryptography_HMAC_Final( 75 self._ctx, buf, outlen 76 ) 77 assert res != 0 78 assert outlen[0] == self.algorithm.digest_size 79 self._backend._lib.HMAC_CTX_cleanup(self._ctx) 80 return self._backend._ffi.buffer(buf)[:outlen[0]] 81 [end of cryptography/hazmat/backends/openssl/hmac.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
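<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>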
diff --git a/cryptography/hazmat/backends/commoncrypto/hmac.py b/cryptography/hazmat/backends/commoncrypto/hmac.py
--- a/cryptography/hazmat/backends/commoncrypto/hmac.py
+++ b/cryptography/hazmat/backends/commoncrypto/hmac.py
@@ -18,6 +18,7 @@
 from cryptography.hazmat.primitives import interfaces
 
 
+@utils.register_interface(interfaces.MACContext)
 @utils.register_interface(interfaces.HashContext)
 class _HMACContext(object):
     def __init__(self, backend, key, algorithm, ctx=None):
diff --git a/cryptography/hazmat/backends/openssl/hmac.py b/cryptography/hazmat/backends/openssl/hmac.py
--- a/cryptography/hazmat/backends/openssl/hmac.py
+++ b/cryptography/hazmat/backends/openssl/hmac.py
@@ -19,6 +19,7 @@
 from cryptography.hazmat.primitives import interfaces
 
 
+@utils.register_interface(interfaces.MACContext)
 @utils.register_interface(interfaces.HashContext)
 class _HMACContext(object):
     def __init__(self, backend, key, algorithm, ctx=None):
{"golden_diff": "diff --git a/cryptography/hazmat/backends/commoncrypto/hmac.py b/cryptography/hazmat/backends/commoncrypto/hmac.py\n--- a/cryptography/hazmat/backends/commoncrypto/hmac.py\n+++ b/cryptography/hazmat/backends/commoncrypto/hmac.py\n@@ -18,6 +18,7 @@\n from cryptography.hazmat.primitives import interfaces\n \n \[email protected]_interface(interfaces.MACContext)\n @utils.register_interface(interfaces.HashContext)\n class _HMACContext(object):\n def __init__(self, backend, key, algorithm, ctx=None):\ndiff --git a/cryptography/hazmat/backends/openssl/hmac.py b/cryptography/hazmat/backends/openssl/hmac.py\n--- a/cryptography/hazmat/backends/openssl/hmac.py\n+++ b/cryptography/hazmat/backends/openssl/hmac.py\n@@ -19,6 +19,7 @@\n from cryptography.hazmat.primitives import interfaces\n \n \[email protected]_interface(interfaces.MACContext)\n @utils.register_interface(interfaces.HashContext)\n class _HMACContext(object):\n def __init__(self, backend, key, algorithm, ctx=None):\n", "issue": "OpenSSL's HMAC Context isn't marked as implementing MACContext\nIt ought to be.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.primitives import interfaces\n\n\[email protected]_interface(interfaces.HashContext)\nclass _HMACContext(object):\n def __init__(self, backend, key, algorithm, ctx=None):\n self.algorithm = algorithm\n self._backend = backend\n if ctx is None:\n ctx = self._backend._ffi.new(\"CCHmacContext *\")\n try:\n alg = self._backend._supported_hmac_algorithms[algorithm.name]\n except KeyError:\n raise UnsupportedAlgorithm(\n \"{0} is not a supported HMAC hash on this backend.\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n\n self._backend._lib.CCHmacInit(ctx, alg, key, len(key))\n\n self._ctx = ctx\n self._key = key\n\n def copy(self):\n copied_ctx = self._backend._ffi.new(\"CCHmacContext *\")\n # CommonCrypto has no APIs for copying HMACs, so we have to copy the\n # underlying struct.\n copied_ctx[0] = self._ctx[0]\n return _HMACContext(\n self._backend, self._key, self.algorithm, ctx=copied_ctx\n )\n\n def update(self, data):\n self._backend._lib.CCHmacUpdate(self._ctx, data, len(data))\n\n def finalize(self):\n buf = self._backend._ffi.new(\"unsigned char[]\",\n self.algorithm.digest_size)\n self._backend._lib.CCHmacFinal(self._ctx, buf)\n return self._backend._ffi.buffer(buf)[:]\n", "path": "cryptography/hazmat/backends/commoncrypto/hmac.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\n\nfrom cryptography import utils\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.primitives import interfaces\n\n\[email protected]_interface(interfaces.HashContext)\nclass _HMACContext(object):\n def __init__(self, backend, key, algorithm, ctx=None):\n self.algorithm = algorithm\n self._backend = backend\n\n if ctx is None:\n ctx = self._backend._ffi.new(\"HMAC_CTX *\")\n self._backend._lib.HMAC_CTX_init(ctx)\n ctx = self._backend._ffi.gc(\n ctx, self._backend._lib.HMAC_CTX_cleanup\n )\n evp_md = self._backend._lib.EVP_get_digestbyname(\n algorithm.name.encode('ascii'))\n if evp_md == self._backend._ffi.NULL:\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend.\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n res = self._backend._lib.Cryptography_HMAC_Init_ex(\n ctx, key, len(key), evp_md, self._backend._ffi.NULL\n )\n assert res != 0\n\n self._ctx = ctx\n self._key = key\n\n def copy(self):\n copied_ctx = self._backend._ffi.new(\"HMAC_CTX *\")\n self._backend._lib.HMAC_CTX_init(copied_ctx)\n copied_ctx = self._backend._ffi.gc(\n copied_ctx, self._backend._lib.HMAC_CTX_cleanup\n )\n res = self._backend._lib.Cryptography_HMAC_CTX_copy(\n copied_ctx, self._ctx\n )\n assert res != 0\n return _HMACContext(\n self._backend, self._key, self.algorithm, ctx=copied_ctx\n )\n\n def update(self, data):\n res = self._backend._lib.Cryptography_HMAC_Update(\n self._ctx, data, len(data)\n )\n assert res != 0\n\n def finalize(self):\n buf = self._backend._ffi.new(\"unsigned char[]\",\n self._backend._lib.EVP_MAX_MD_SIZE)\n outlen = self._backend._ffi.new(\"unsigned int *\")\n res = self._backend._lib.Cryptography_HMAC_Final(\n self._ctx, buf, outlen\n )\n assert res != 0\n assert outlen[0] == self.algorithm.digest_size\n self._backend._lib.HMAC_CTX_cleanup(self._ctx)\n return self._backend._ffi.buffer(buf)[:outlen[0]]\n", "path": "cryptography/hazmat/backends/openssl/hmac.py"}]}
2,029
253
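The one-line fix in the record above works because the register_interface decorator in that era of cryptography essentially performs ABC virtual-subclass registration, which is what makes isinstance checks against MACContext succeed. A rough, self-contained sketch of the pattern, not the library's exact code:

```python
import abc

class MACContext(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def update(self, data):
        """Feed data into the MAC."""

    @abc.abstractmethod
    def finalize(self):
        """Return the MAC digest."""

def register_interface(iface):
    # Approximation of utils.register_interface: mark the class as a
    # virtual subclass so isinstance()/issubclass() checks pass.
    def decorator(cls):
        iface.register(cls)
        return cls
    return decorator

@register_interface(MACContext)
class _HMACContext:
    def update(self, data):
        pass

    def finalize(self):
        return b""

assert issubclass(_HMACContext, MACContext)
```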
gh_patches_debug_6549
rasdani/github-patches
git_diff
encode__uvicorn-754
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Lifespan scope is missing asgi dict the lifespan scope (https://asgi.readthedocs.io/en/latest/specs/lifespan.html#scope) is missing the asgi dict prompted by _Originally posted by @florimondmanca in https://github.com/encode/uvicorn/pull/751_ </issue> <code> [start of uvicorn/lifespan/on.py] 1 import asyncio 2 import logging 3 4 STATE_TRANSITION_ERROR = "Got invalid state transition on lifespan protocol." 5 6 7 class LifespanOn: 8 def __init__(self, config): 9 if not config.loaded: 10 config.load() 11 12 self.config = config 13 self.logger = logging.getLogger("uvicorn.error") 14 self.startup_event = asyncio.Event() 15 self.shutdown_event = asyncio.Event() 16 self.receive_queue = asyncio.Queue() 17 self.error_occured = False 18 self.startup_failed = False 19 self.should_exit = False 20 21 async def startup(self): 22 self.logger.info("Waiting for application startup.") 23 24 loop = asyncio.get_event_loop() 25 loop.create_task(self.main()) 26 27 await self.receive_queue.put({"type": "lifespan.startup"}) 28 await self.startup_event.wait() 29 30 if self.startup_failed or (self.error_occured and self.config.lifespan == "on"): 31 self.logger.error("Application startup failed. Exiting.") 32 self.should_exit = True 33 else: 34 self.logger.info("Application startup complete.") 35 36 async def shutdown(self): 37 if self.error_occured: 38 return 39 self.logger.info("Waiting for application shutdown.") 40 await self.receive_queue.put({"type": "lifespan.shutdown"}) 41 await self.shutdown_event.wait() 42 self.logger.info("Application shutdown complete.") 43 44 async def main(self): 45 try: 46 app = self.config.loaded_app 47 scope = {"type": "lifespan"} 48 await app(scope, self.receive, self.send) 49 except BaseException as exc: 50 self.asgi = None 51 self.error_occured = True 52 if self.startup_failed: 53 return 54 if self.config.lifespan == "auto": 55 msg = "ASGI 'lifespan' protocol appears unsupported." 56 self.logger.info(msg) 57 else: 58 msg = "Exception in 'lifespan' protocol\n" 59 self.logger.error(msg, exc_info=exc) 60 finally: 61 self.startup_event.set() 62 self.shutdown_event.set() 63 64 async def send(self, message): 65 assert message["type"] in ( 66 "lifespan.startup.complete", 67 "lifespan.startup.failed", 68 "lifespan.shutdown.complete", 69 ) 70 71 if message["type"] == "lifespan.startup.complete": 72 assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR 73 assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR 74 self.startup_event.set() 75 76 elif message["type"] == "lifespan.startup.failed": 77 assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR 78 assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR 79 self.startup_event.set() 80 self.startup_failed = True 81 if message.get("message"): 82 self.logger.error(message["message"]) 83 84 elif message["type"] == "lifespan.shutdown.complete": 85 assert self.startup_event.is_set(), STATE_TRANSITION_ERROR 86 assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR 87 self.shutdown_event.set() 88 89 async def receive(self): 90 return await self.receive_queue.get() 91 [end of uvicorn/lifespan/on.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/uvicorn/lifespan/on.py b/uvicorn/lifespan/on.py
--- a/uvicorn/lifespan/on.py
+++ b/uvicorn/lifespan/on.py
@@ -44,7 +44,10 @@
     async def main(self):
        try:
            app = self.config.loaded_app
-            scope = {"type": "lifespan"}
+            scope = {
+                "type": "lifespan",
+                "asgi": {"version": self.config.asgi_version, "spec_version": "2.0"},
+            }
            await app(scope, self.receive, self.send)
        except BaseException as exc:
            self.asgi = None
{"golden_diff": "diff --git a/uvicorn/lifespan/on.py b/uvicorn/lifespan/on.py\n--- a/uvicorn/lifespan/on.py\n+++ b/uvicorn/lifespan/on.py\n@@ -44,7 +44,10 @@\n async def main(self):\n try:\n app = self.config.loaded_app\n- scope = {\"type\": \"lifespan\"}\n+ scope = {\n+ \"type\": \"lifespan\",\n+ \"asgi\": {\"version\": self.config.asgi_version, \"spec_version\": \"2.0\"},\n+ }\n await app(scope, self.receive, self.send)\n except BaseException as exc:\n self.asgi = None\n", "issue": "Lifespan scope is missing asgi dict\nthe lifespan scope (https://asgi.readthedocs.io/en/latest/specs/lifespan.html#scope) is missing the asgi dict\r\n\r\nprompted by\r\n\r\n_Originally posted by @florimondmanca in https://github.com/encode/uvicorn/pull/751_\n", "before_files": [{"content": "import asyncio\nimport logging\n\nSTATE_TRANSITION_ERROR = \"Got invalid state transition on lifespan protocol.\"\n\n\nclass LifespanOn:\n def __init__(self, config):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.logger = logging.getLogger(\"uvicorn.error\")\n self.startup_event = asyncio.Event()\n self.shutdown_event = asyncio.Event()\n self.receive_queue = asyncio.Queue()\n self.error_occured = False\n self.startup_failed = False\n self.should_exit = False\n\n async def startup(self):\n self.logger.info(\"Waiting for application startup.\")\n\n loop = asyncio.get_event_loop()\n loop.create_task(self.main())\n\n await self.receive_queue.put({\"type\": \"lifespan.startup\"})\n await self.startup_event.wait()\n\n if self.startup_failed or (self.error_occured and self.config.lifespan == \"on\"):\n self.logger.error(\"Application startup failed. Exiting.\")\n self.should_exit = True\n else:\n self.logger.info(\"Application startup complete.\")\n\n async def shutdown(self):\n if self.error_occured:\n return\n self.logger.info(\"Waiting for application shutdown.\")\n await self.receive_queue.put({\"type\": \"lifespan.shutdown\"})\n await self.shutdown_event.wait()\n self.logger.info(\"Application shutdown complete.\")\n\n async def main(self):\n try:\n app = self.config.loaded_app\n scope = {\"type\": \"lifespan\"}\n await app(scope, self.receive, self.send)\n except BaseException as exc:\n self.asgi = None\n self.error_occured = True\n if self.startup_failed:\n return\n if self.config.lifespan == \"auto\":\n msg = \"ASGI 'lifespan' protocol appears unsupported.\"\n self.logger.info(msg)\n else:\n msg = \"Exception in 'lifespan' protocol\\n\"\n self.logger.error(msg, exc_info=exc)\n finally:\n self.startup_event.set()\n self.shutdown_event.set()\n\n async def send(self, message):\n assert message[\"type\"] in (\n \"lifespan.startup.complete\",\n \"lifespan.startup.failed\",\n \"lifespan.shutdown.complete\",\n )\n\n if message[\"type\"] == \"lifespan.startup.complete\":\n assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR\n assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR\n self.startup_event.set()\n\n elif message[\"type\"] == \"lifespan.startup.failed\":\n assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR\n assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR\n self.startup_event.set()\n self.startup_failed = True\n if message.get(\"message\"):\n self.logger.error(message[\"message\"])\n\n elif message[\"type\"] == \"lifespan.shutdown.complete\":\n assert self.startup_event.is_set(), STATE_TRANSITION_ERROR\n assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR\n self.shutdown_event.set()\n\n async def receive(self):\n return await 
self.receive_queue.get()\n", "path": "uvicorn/lifespan/on.py"}]}
1,452
154
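With the uvicorn fix above, lifespan apps can read scope["asgi"] instead of guessing what the server supports. A minimal lifespan-only ASGI app that consumes the new key defensively; the app itself is our illustration, not part of the patch:

```python
async def app(scope, receive, send):
    assert scope["type"] == "lifespan"
    # Populated by uvicorn after the fix; older servers may omit the key.
    asgi_info = scope.get("asgi", {})
    print("asgi version:", asgi_info.get("version"),
          "spec:", asgi_info.get("spec_version"))
    while True:
        message = await receive()
        if message["type"] == "lifespan.startup":
            await send({"type": "lifespan.startup.complete"})
        elif message["type"] == "lifespan.shutdown":
            await send({"type": "lifespan.shutdown.complete"})
            return
```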
gh_patches_debug_3306
rasdani/github-patches
git_diff
kivy__python-for-android-1815
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> packaged python is built with IPv6 disabled Is there any reason for IPv6 being disabled? https://github.com/kivy/python-for-android/blob/f16a66590c92bb57c69b214b2b69dfaa6dc1e0ed/pythonforandroid/recipes/python3/__init__.py#L37 </issue> <code> [start of pythonforandroid/recipes/python3/__init__.py] 1 import sh 2 from pythonforandroid.python import GuestPythonRecipe 3 from pythonforandroid.recipe import Recipe 4 5 6 class Python3Recipe(GuestPythonRecipe): 7 ''' 8 The python3's recipe. 9 10 .. note:: This recipe can be built only against API 21+. Also, in order to 11 build certain python modules, we need to add some extra recipes to our 12 build requirements: 13 14 - ctypes: you must add the recipe for ``libffi``. 15 16 .. versionchanged:: 0.6.0 17 Refactored into class 18 :class:`~pythonforandroid.python.GuestPythonRecipe` 19 ''' 20 21 version = '3.7.1' 22 url = 'https://www.python.org/ftp/python/{version}/Python-{version}.tgz' 23 name = 'python3' 24 25 patches = ["patches/fix-ctypes-util-find-library.patch"] 26 27 if sh.which('lld') is not None: 28 patches = patches + ["patches/remove-fix-cortex-a8.patch"] 29 30 depends = ['hostpython3', 'sqlite3', 'openssl', 'libffi'] 31 conflicts = ['python3crystax', 'python2', 'python2legacy'] 32 33 configure_args = ( 34 '--host={android_host}', 35 '--build={android_build}', 36 '--enable-shared', 37 '--disable-ipv6', 38 'ac_cv_file__dev_ptmx=yes', 39 'ac_cv_file__dev_ptc=no', 40 '--without-ensurepip', 41 'ac_cv_little_endian_double=yes', 42 '--prefix={prefix}', 43 '--exec-prefix={exec_prefix}') 44 45 def set_libs_flags(self, env, arch): 46 env = super(Python3Recipe, self).set_libs_flags(env, arch) 47 if 'openssl' in self.ctx.recipe_build_order: 48 recipe = Recipe.get_recipe('openssl', self.ctx) 49 self.configure_args += \ 50 ('--with-openssl=' + recipe.get_build_dir(arch.arch),) 51 return env 52 53 54 recipe = Python3Recipe() 55 [end of pythonforandroid/recipes/python3/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pythonforandroid/recipes/python3/__init__.py b/pythonforandroid/recipes/python3/__init__.py
--- a/pythonforandroid/recipes/python3/__init__.py
+++ b/pythonforandroid/recipes/python3/__init__.py
@@ -34,7 +34,7 @@
         '--host={android_host}',
         '--build={android_build}',
         '--enable-shared',
-        '--disable-ipv6',
+        '--enable-ipv6',
         'ac_cv_file__dev_ptmx=yes',
         'ac_cv_file__dev_ptc=no',
         '--without-ensurepip',
{"golden_diff": "diff --git a/pythonforandroid/recipes/python3/__init__.py b/pythonforandroid/recipes/python3/__init__.py\n--- a/pythonforandroid/recipes/python3/__init__.py\n+++ b/pythonforandroid/recipes/python3/__init__.py\n@@ -34,7 +34,7 @@\n '--host={android_host}',\n '--build={android_build}',\n '--enable-shared',\n- '--disable-ipv6',\n+ '--enable-ipv6',\n 'ac_cv_file__dev_ptmx=yes',\n 'ac_cv_file__dev_ptc=no',\n '--without-ensurepip',\n", "issue": "packaged python is built with IPv6 disabled\nIs there any reason for IPv6 being disabled?\r\nhttps://github.com/kivy/python-for-android/blob/f16a66590c92bb57c69b214b2b69dfaa6dc1e0ed/pythonforandroid/recipes/python3/__init__.py#L37\n", "before_files": [{"content": "import sh\nfrom pythonforandroid.python import GuestPythonRecipe\nfrom pythonforandroid.recipe import Recipe\n\n\nclass Python3Recipe(GuestPythonRecipe):\n '''\n The python3's recipe.\n\n .. note:: This recipe can be built only against API 21+. Also, in order to\n build certain python modules, we need to add some extra recipes to our\n build requirements:\n\n - ctypes: you must add the recipe for ``libffi``.\n\n .. versionchanged:: 0.6.0\n Refactored into class\n :class:`~pythonforandroid.python.GuestPythonRecipe`\n '''\n\n version = '3.7.1'\n url = 'https://www.python.org/ftp/python/{version}/Python-{version}.tgz'\n name = 'python3'\n\n patches = [\"patches/fix-ctypes-util-find-library.patch\"]\n\n if sh.which('lld') is not None:\n patches = patches + [\"patches/remove-fix-cortex-a8.patch\"]\n\n depends = ['hostpython3', 'sqlite3', 'openssl', 'libffi']\n conflicts = ['python3crystax', 'python2', 'python2legacy']\n\n configure_args = (\n '--host={android_host}',\n '--build={android_build}',\n '--enable-shared',\n '--disable-ipv6',\n 'ac_cv_file__dev_ptmx=yes',\n 'ac_cv_file__dev_ptc=no',\n '--without-ensurepip',\n 'ac_cv_little_endian_double=yes',\n '--prefix={prefix}',\n '--exec-prefix={exec_prefix}')\n\n def set_libs_flags(self, env, arch):\n env = super(Python3Recipe, self).set_libs_flags(env, arch)\n if 'openssl' in self.ctx.recipe_build_order:\n recipe = Recipe.get_recipe('openssl', self.ctx)\n self.configure_args += \\\n ('--with-openssl=' + recipe.get_build_dir(arch.arch),)\n return env\n\n\nrecipe = Python3Recipe()\n", "path": "pythonforandroid/recipes/python3/__init__.py"}]}
1,163
133
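The configure flag flipped in the record above only changes how CPython is compiled; the quickest way to confirm the result is to probe the socket module in the packaged interpreter. A sketch, assuming you can run Python code on the target device:

```python
import socket

# True only when the interpreter was compiled with IPv6 support enabled.
print("IPv6 compiled in:", socket.has_ipv6)

if socket.has_ipv6:
    # Creating an AF_INET6 socket is the practical smoke test.
    probe = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    probe.close()
```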
gh_patches_debug_596
rasdani/github-patches
git_diff
pex-tool__pex-1673
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.72 On the docket: + [x] Fix Locker to prune un-downloaded entries. (#1666) + [x] Fix venv creation to ignore ambient PEX env vars. #1669 + [x] Lockfiles: requirement might not be compatible with requested interpreter constraints #1667 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.71" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-__version__ = "2.1.71"
+__version__ = "2.1.72"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.71\"\n+__version__ = \"2.1.72\"\n", "issue": "Release 2.1.72\nOn the docket:\r\n+ [x] Fix Locker to prune un-downloaded entries. (#1666)\r\n+ [x] Fix venv creation to ignore ambient PEX env vars. #1669\r\n+ [x] Lockfiles: requirement might not be compatible with requested interpreter constraints #1667 \r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.71\"\n", "path": "pex/version.py"}]}
662
97
gh_patches_debug_361
rasdani/github-patches
git_diff
NVIDIA__NVFlare-1314
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Command "nvflare" not found Unfortunately I can't run nvflare on Ubuntu because it tells me that the command (`nvflare simulator -h`) is not found. Even if I use the Docker solution. Inside the docker it also shows "command not found". Thus unable to run local experiments. I tried the MWE from https://nvflare.readthedocs.io/en/2.2.1/getting_started.html Used OS: Ubuntu 18.04.5 LTS </issue> <code> [start of docs/conf.py] 1 # Copyright (c) 2021, NVIDIA CORPORATION. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # Configuration file for the Sphinx documentation builder. 16 # 17 # This file only contains a selection of the most common options. For a full 18 # list see the documentation: 19 # http://www.sphinx-doc.org/en/master/config 20 21 # -- Path setup -------------------------------------------------------------- 22 23 # If extensions (or modules to document with autodoc) are in another directory, 24 # add these directories to sys.path here. If the directory is relative to the 25 # documentation root, use os.path.abspath to make it absolute, like shown here. 26 # 27 import sphinx_rtd_theme 28 import os 29 import sys 30 from sphinx.domains.python import PythonDomain 31 import subprocess 32 33 34 class PatchedPythonDomain(PythonDomain): 35 def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): 36 if "refspecific" in node: 37 del node["refspecific"] 38 return super(PatchedPythonDomain, self).resolve_xref(env, fromdocname, builder, typ, target, node, contnode) 39 40 41 sys.path.insert(0, os.path.abspath("..")) 42 print(sys.path) 43 44 # -- Project information ----------------------------------------------------- 45 46 project = "NVIDIA FLARE" 47 copyright = "2022, NVIDIA" 48 author = "NVIDIA" 49 50 # The full version, including alpha/beta/rc tags 51 release = "2.2.1" 52 version = "2.2.1" 53 54 55 # -- General configuration --------------------------------------------------- 56 57 # Add any Sphinx extension module names here, as strings. They can be 58 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 59 # ones. 60 # Add napoleon to the extensions list 61 # source_parsers = {'.md': CommonMarkParser} 62 63 templates_path = ["templates"] 64 65 source_suffix = { 66 ".rst": "restructuredtext", 67 ".txt": "restructuredtext", 68 ".md": "markdown", 69 } 70 71 extensions = [ 72 "recommonmark", 73 "sphinx.ext.intersphinx", 74 "sphinx.ext.mathjax", 75 "sphinx.ext.napoleon", 76 "sphinx.ext.autodoc", 77 "sphinx.ext.viewcode", 78 "sphinx.ext.autosectionlabel", 79 ] 80 81 autoclass_content = "both" 82 add_module_names = False 83 autosectionlabel_prefix_document = True 84 85 # Add any paths that contain templates here, relative to this directory. 86 # templates_path = ['_templates'] 87 88 # List of patterns, relative to source directory, that match files and 89 # directories to ignore when looking for source files. 
90 # This pattern also affects html_static_path and html_extra_path. 91 exclude_patterns = [] 92 93 94 # -- Options for HTML output ------------------------------------------------- 95 96 # The theme to use for HTML and HTML Help pages. See the documentation for 97 # a list of builtin themes. 98 # 99 html_theme = "sphinx_rtd_theme" 100 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 101 html_theme_options = { 102 "collapse_navigation": True, 103 "display_version": True, 104 "navigation_depth": 5, 105 "sticky_navigation": True, # Set to False to disable the sticky nav while scrolling. 106 # 'logo_only': True, # if we have a html_logo below, this shows /only/ the logo with no title text 107 } 108 html_scaled_image_link = False 109 html_show_sourcelink = True 110 html_favicon = "favicon.ico" 111 112 # Add any paths that contain custom static files (such as style sheets) here, 113 # relative to this directory. They are copied after the builtin static files, 114 # so a file named "default.css" will overwrite the builtin "default.css". 115 html_static_path = ["_static"] 116 117 118 def generate_apidocs(*args): 119 """Generate API docs automatically by trawling the available modules""" 120 module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "nvflare")) 121 output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "apidocs")) 122 print(f"output_path {output_path}") 123 print(f"module_path {module_path}") 124 subprocess.check_call( 125 [sys.executable, "-m", "sphinx.ext.apidoc", "-f", "-e"] 126 + ["-o", output_path] 127 + [module_path] 128 + [os.path.join(module_path, p) for p in exclude_patterns] 129 ) 130 131 132 def setup(app): 133 app.connect("builder-inited", generate_apidocs) 134 app.add_domain(PatchedPythonDomain, override=True) 135 app.add_css_file("css/additions.css") 136 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -76,6 +76,7 @@
     "sphinx.ext.autodoc",
     "sphinx.ext.viewcode",
     "sphinx.ext.autosectionlabel",
+    "sphinx_copybutton",
 ]
 
 autoclass_content = "both"
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -76,6 +76,7 @@\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n+ \"sphinx_copybutton\",\n ]\n \n autoclass_content = \"both\"\n", "issue": "[BUG] Command \"nvflare\" not found \nUnfortunately I can't run nvflare on Ubuntu because it tells me that the command (`nvflare simulator -h`) is not found. Even if I use the Docker solution. Inside the docker it also shows \"command not found\". Thus unable to run local experiments.\r\n\r\nI tried the MWE from https://nvflare.readthedocs.io/en/2.2.1/getting_started.html\r\n\r\nUsed OS: Ubuntu 18.04.5 LTS\r\n\n", "before_files": [{"content": "# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport sphinx_rtd_theme\nimport os\nimport sys\nfrom sphinx.domains.python import PythonDomain\nimport subprocess\n\n\nclass PatchedPythonDomain(PythonDomain):\n def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):\n if \"refspecific\" in node:\n del node[\"refspecific\"]\n return super(PatchedPythonDomain, self).resolve_xref(env, fromdocname, builder, typ, target, node, contnode)\n\n\nsys.path.insert(0, os.path.abspath(\"..\"))\nprint(sys.path)\n\n# -- Project information -----------------------------------------------------\n\nproject = \"NVIDIA FLARE\"\ncopyright = \"2022, NVIDIA\"\nauthor = \"NVIDIA\"\n\n# The full version, including alpha/beta/rc tags\nrelease = \"2.2.1\"\nversion = \"2.2.1\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\n# Add napoleon to the extensions list\n# source_parsers = {'.md': CommonMarkParser}\n\ntemplates_path = [\"templates\"]\n\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".txt\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\n\nextensions = [\n \"recommonmark\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n]\n\nautoclass_content = \"both\"\nadd_module_names = False\nautosectionlabel_prefix_document = True\n\n# Add any paths that contain templates here, relative to this directory.\n# templates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_theme_options = {\n \"collapse_navigation\": True,\n \"display_version\": True,\n \"navigation_depth\": 5,\n \"sticky_navigation\": True, # Set to False to disable the sticky nav while scrolling.\n # 'logo_only': True, # if we have a html_logo below, this shows /only/ the logo with no title text\n}\nhtml_scaled_image_link = False\nhtml_show_sourcelink = True\nhtml_favicon = \"favicon.ico\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\ndef generate_apidocs(*args):\n \"\"\"Generate API docs automatically by trawling the available modules\"\"\"\n module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"nvflare\"))\n output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"apidocs\"))\n print(f\"output_path {output_path}\")\n print(f\"module_path {module_path}\")\n subprocess.check_call(\n [sys.executable, \"-m\", \"sphinx.ext.apidoc\", \"-f\", \"-e\"]\n + [\"-o\", output_path]\n + [module_path]\n + [os.path.join(module_path, p) for p in exclude_patterns]\n )\n\n\ndef setup(app):\n app.connect(\"builder-inited\", generate_apidocs)\n app.add_domain(PatchedPythonDomain, override=True)\n app.add_css_file(\"css/additions.css\")\n", "path": "docs/conf.py"}]}
2,035
81
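The docs change above only takes effect if the extension's package is importable when Sphinx runs, and sphinx_copybutton ships separately from Sphinx itself. A defensive conf.py variant for illustration, assuming the standard PyPI package name sphinx-copybutton; this is not what the repository actually does:

```python
# Guard the optional extension so doc builds degrade gracefully.
extensions = [
    "sphinx.ext.autodoc",
]

try:
    import sphinx_copybutton  # pip install sphinx-copybutton
    extensions.append("sphinx_copybutton")
except ImportError:
    pass
```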
gh_patches_debug_906
rasdani/github-patches
git_diff
dotkom__onlineweb4-501
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> UserResource in API should not display last login date publicly Somewhat sensitive information... </issue> <code> [start of apps/api/v0/authentication.py] 1 # -*- coding: utf-8 -*- 2 3 from tastypie import fields 4 from tastypie.resources import ModelResource 5 from tastypie.authorization import Authorization 6 7 from apps.authentication.models import OnlineUser as User 8 9 class UserResource(ModelResource): 10 11 class Meta: 12 queryset = User.objects.all() 13 resource_name = 'user' 14 fields = ['username', 'first_name', 'last_name', 'last_login', 'email', ] 15 [end of apps/api/v0/authentication.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/api/v0/authentication.py b/apps/api/v0/authentication.py
--- a/apps/api/v0/authentication.py
+++ b/apps/api/v0/authentication.py
@@ -11,4 +11,4 @@
     class Meta:
         queryset = User.objects.all()
         resource_name = 'user'
-        fields = ['username', 'first_name', 'last_name', 'last_login', 'email', ]
+        fields = ['username', 'first_name', 'last_name', 'email', ]
{"golden_diff": "diff --git a/apps/api/v0/authentication.py b/apps/api/v0/authentication.py\n--- a/apps/api/v0/authentication.py\n+++ b/apps/api/v0/authentication.py\n@@ -11,4 +11,4 @@\n class Meta:\n queryset = User.objects.all()\n resource_name = 'user'\n- fields = ['username', 'first_name', 'last_name', 'last_login', 'email', ]\n+ fields = ['username', 'first_name', 'last_name', 'email', ]\n", "issue": "UserResource in API should not display last login date publicly\nSomewhat sensitive information...\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom tastypie import fields\nfrom tastypie.resources import ModelResource\nfrom tastypie.authorization import Authorization\n\nfrom apps.authentication.models import OnlineUser as User\n\nclass UserResource(ModelResource):\n\n class Meta:\n queryset = User.objects.all()\n resource_name = 'user'\n fields = ['username', 'first_name', 'last_name', 'last_login', 'email', ]\n", "path": "apps/api/v0/authentication.py"}]}
668
109
gh_patches_debug_11661
rasdani/github-patches
git_diff
projectmesa__mesa-1439
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Seed and random should not be class attributes **Describe the bug** <!-- A clear and concise description the bug --> Every time a model instance is created, it updates the seed and random attribute of every other instance of the same class. **Expected behavior** <!-- A clear and concise description of what you expected to happen --> I would expect each instance to have their own attribute. This way, the behavior of each instance is independent and predictable. **To Reproduce** <!-- Steps to reproduce the bug, or a link to a project where the bug is visible --> ```pycon >>> from mesa import Model >>> class Ex(Model): ... def __init__(self, seed=2): ... pass ... >>> a = Ex(seed=1) >>> print(a._seed, a.random) 1 <random.Random object at 0x282a8d0> >>> b = Ex(seed=2) >>> print(a._seed, a.random) 2 <random.Random object at 0x282b2d0> ``` </issue> <code> [start of mesa/model.py] 1 """ 2 The model class for Mesa framework. 3 4 Core Objects: Model 5 6 """ 7 # Mypy; for the `|` operator purpose 8 # Remove this __future__ import once the oldest supported Python is 3.10 9 from __future__ import annotations 10 11 import random 12 13 from mesa.datacollection import DataCollector 14 15 # mypy 16 from typing import Any 17 18 19 class Model: 20 """Base class for models.""" 21 22 def __new__(cls, *args: Any, **kwargs: Any) -> Any: 23 """Create a new model object and instantiate its RNG automatically.""" 24 cls._seed = kwargs.get("seed", None) 25 cls.random = random.Random(cls._seed) 26 return object.__new__(cls) 27 28 def __init__(self, *args: Any, **kwargs: Any) -> None: 29 """Create a new model. Overload this method with the actual code to 30 start the model. 31 32 Attributes: 33 schedule: schedule object 34 running: a bool indicating if the model should continue running 35 36 """ 37 38 self.running = True 39 self.schedule = None 40 self.current_id = 0 41 42 def run_model(self) -> None: 43 """Run the model until the end condition is reached. Overload as 44 needed. 45 46 """ 47 while self.running: 48 self.step() 49 50 def step(self) -> None: 51 """A single step. Fill in here.""" 52 pass 53 54 def next_id(self) -> int: 55 """Return the next unique ID for agents, increment current_id""" 56 self.current_id += 1 57 return self.current_id 58 59 def reset_randomizer(self, seed: int | None = None) -> None: 60 """Reset the model random number generator. 61 62 Args: 63 seed: A new seed for the RNG; if None, reset using the current seed 64 """ 65 66 if seed is None: 67 seed = self._seed 68 self.random.seed(seed) 69 self._seed = seed 70 71 def initialize_data_collector( 72 self, model_reporters=None, agent_reporters=None, tables=None 73 ) -> None: 74 if not hasattr(self, "schedule") or self.schedule is None: 75 raise RuntimeError( 76 "You must initialize the scheduler (self.schedule) before initializing the data collector." 77 ) 78 if self.schedule.get_agent_count() == 0: 79 raise RuntimeError( 80 "You must add agents to the scheduler before initializing the data collector." 81 ) 82 self.datacollector = DataCollector( 83 model_reporters=model_reporters, 84 agent_reporters=agent_reporters, 85 tables=tables, 86 ) 87 # Collect data for the first time during initialization. 88 self.datacollector.collect(self) 89 [end of mesa/model.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format.

<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/mesa/model.py b/mesa/model.py
--- a/mesa/model.py
+++ b/mesa/model.py
@@ -21,9 +21,10 @@
 
     def __new__(cls, *args: Any, **kwargs: Any) -> Any:
         """Create a new model object and instantiate its RNG automatically."""
-        cls._seed = kwargs.get("seed", None)
-        cls.random = random.Random(cls._seed)
-        return object.__new__(cls)
+        obj = object.__new__(cls)
+        obj._seed = kwargs.get("seed", None)
+        obj.random = random.Random(obj._seed)
+        return obj
 
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         """Create a new model. Overload this method with the actual code to
{"golden_diff": "diff --git a/mesa/model.py b/mesa/model.py\n--- a/mesa/model.py\n+++ b/mesa/model.py\n@@ -21,9 +21,10 @@\n \n def __new__(cls, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Create a new model object and instantiate its RNG automatically.\"\"\"\n- cls._seed = kwargs.get(\"seed\", None)\n- cls.random = random.Random(cls._seed)\n- return object.__new__(cls)\n+ obj = object.__new__(cls)\n+ obj._seed = kwargs.get(\"seed\", None)\n+ obj.random = random.Random(obj._seed)\n+ return obj\n \n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Create a new model. Overload this method with the actual code to\n", "issue": "Seed and random should not be class attributes\n**Describe the bug**\r\n<!-- A clear and concise description the bug -->\r\n\r\nEvery time a model instance is created, it updates the seed and random attribute of every other instance of the same class.\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen -->\r\n\r\nI would expect each instance to have their own attribute. This way, the behavior of each instance is independent and predictable.\r\n\r\n**To Reproduce**\r\n<!-- Steps to reproduce the bug, or a link to a project where the bug is visible -->\r\n\r\n```pycon\r\n>>> from mesa import Model\r\n>>> class Ex(Model):\r\n... def __init__(self, seed=2):\r\n... pass\r\n...\r\n>>> a = Ex(seed=1)\r\n>>> print(a._seed, a.random)\r\n1 <random.Random object at 0x282a8d0>\r\n>>> b = Ex(seed=2)\r\n>>> print(a._seed, a.random)\r\n2 <random.Random object at 0x282b2d0>\r\n```\n", "before_files": [{"content": "\"\"\"\nThe model class for Mesa framework.\n\nCore Objects: Model\n\n\"\"\"\n# Mypy; for the `|` operator purpose\n# Remove this __future__ import once the oldest supported Python is 3.10\nfrom __future__ import annotations\n\nimport random\n\nfrom mesa.datacollection import DataCollector\n\n# mypy\nfrom typing import Any\n\n\nclass Model:\n \"\"\"Base class for models.\"\"\"\n\n def __new__(cls, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Create a new model object and instantiate its RNG automatically.\"\"\"\n cls._seed = kwargs.get(\"seed\", None)\n cls.random = random.Random(cls._seed)\n return object.__new__(cls)\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Create a new model. Overload this method with the actual code to\n start the model.\n\n Attributes:\n schedule: schedule object\n running: a bool indicating if the model should continue running\n\n \"\"\"\n\n self.running = True\n self.schedule = None\n self.current_id = 0\n\n def run_model(self) -> None:\n \"\"\"Run the model until the end condition is reached. Overload as\n needed.\n\n \"\"\"\n while self.running:\n self.step()\n\n def step(self) -> None:\n \"\"\"A single step. 
Fill in here.\"\"\"\n pass\n\n def next_id(self) -> int:\n \"\"\"Return the next unique ID for agents, increment current_id\"\"\"\n self.current_id += 1\n return self.current_id\n\n def reset_randomizer(self, seed: int | None = None) -> None:\n \"\"\"Reset the model random number generator.\n\n Args:\n seed: A new seed for the RNG; if None, reset using the current seed\n \"\"\"\n\n if seed is None:\n seed = self._seed\n self.random.seed(seed)\n self._seed = seed\n\n def initialize_data_collector(\n self, model_reporters=None, agent_reporters=None, tables=None\n ) -> None:\n if not hasattr(self, \"schedule\") or self.schedule is None:\n raise RuntimeError(\n \"You must initialize the scheduler (self.schedule) before initializing the data collector.\"\n )\n if self.schedule.get_agent_count() == 0:\n raise RuntimeError(\n \"You must add agents to the scheduler before initializing the data collector.\"\n )\n self.datacollector = DataCollector(\n model_reporters=model_reporters,\n agent_reporters=agent_reporters,\n tables=tables,\n )\n # Collect data for the first time during initialization.\n self.datacollector.collect(self)\n", "path": "mesa/model.py"}]}
1,489
185
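The mesa bug above is the classic class-attribute trap: assigning through cls inside __new__ mutates shared state on the class, while assigning onto the freshly created object keeps state per instance. A distilled reproduction, independent of mesa:

```python
class Broken:
    def __new__(cls, seed=None):
        cls._seed = seed              # stored on the class: shared
        return super().__new__(cls)

class Fixed:
    def __new__(cls, seed=None):
        obj = super().__new__(cls)
        obj._seed = seed              # stored on the instance: private
        return obj

a, b = Broken(1), Broken(2)
assert a._seed == 2                   # creating b silently changed a

c, d = Fixed(1), Fixed(2)
assert (c._seed, d._seed) == (1, 2)   # instances stay independent
```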
gh_patches_debug_11715
rasdani/github-patches
git_diff
safe-global__safe-config-service-361
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Gas price editable from the Chain model Please make the Gas Price editable directly via Chain configs. Like we did for Features and Wallets. Thanks! </issue> <code> [start of src/chains/admin.py] 1 from django.contrib import admin 2 from django.db.models import Model 3 4 from .models import Chain, Feature, GasPrice, Wallet 5 6 7 class FeatureInline(admin.TabularInline[Model]): 8 model = Feature.chains.through 9 extra = 0 10 verbose_name_plural = "Features enabled for this chain" 11 12 13 class WalletInline(admin.TabularInline[Model]): 14 model = Wallet.chains.through 15 extra = 0 16 verbose_name_plural = "Wallets enabled for this chain" 17 18 19 @admin.register(Chain) 20 class ChainAdmin(admin.ModelAdmin[Chain]): 21 list_display = ( 22 "id", 23 "name", 24 "rpc_uri", 25 "safe_apps_rpc_uri", 26 "relevance", 27 ) 28 search_fields = ("name", "id") 29 ordering = ( 30 "relevance", 31 "name", 32 ) 33 inlines = [FeatureInline, WalletInline] 34 35 36 @admin.register(GasPrice) 37 class GasPriceAdmin(admin.ModelAdmin[GasPrice]): 38 list_display = ( 39 "chain_id", 40 "oracle_uri", 41 "fixed_wei_value", 42 "rank", 43 ) 44 search_fields = ("chain_id", "oracle_uri") 45 ordering = ("rank",) 46 47 48 @admin.register(Wallet) 49 class WalletAdmin(admin.ModelAdmin[Wallet]): 50 list_display = ("key",) 51 52 53 @admin.register(Feature) 54 class FeatureAdmin(admin.ModelAdmin[Feature]): 55 list_display = ("key",) 56 [end of src/chains/admin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/chains/admin.py b/src/chains/admin.py --- a/src/chains/admin.py +++ b/src/chains/admin.py @@ -4,6 +4,12 @@ from .models import Chain, Feature, GasPrice, Wallet +class GasPriceInline(admin.TabularInline[Model]): + model = GasPrice + extra = 0 + verbose_name_plural = "Gas prices set for this chain" + + class FeatureInline(admin.TabularInline[Model]): model = Feature.chains.through extra = 0 @@ -30,7 +36,7 @@ "relevance", "name", ) - inlines = [FeatureInline, WalletInline] + inlines = [FeatureInline, GasPriceInline, WalletInline] @admin.register(GasPrice)
{"golden_diff": "diff --git a/src/chains/admin.py b/src/chains/admin.py\n--- a/src/chains/admin.py\n+++ b/src/chains/admin.py\n@@ -4,6 +4,12 @@\n from .models import Chain, Feature, GasPrice, Wallet\n \n \n+class GasPriceInline(admin.TabularInline[Model]):\n+ model = GasPrice\n+ extra = 0\n+ verbose_name_plural = \"Gas prices set for this chain\"\n+\n+\n class FeatureInline(admin.TabularInline[Model]):\n model = Feature.chains.through\n extra = 0\n@@ -30,7 +36,7 @@\n \"relevance\",\n \"name\",\n )\n- inlines = [FeatureInline, WalletInline]\n+ inlines = [FeatureInline, GasPriceInline, WalletInline]\n \n \n @admin.register(GasPrice)\n", "issue": "Gas price editable from the Chain model\nPlease make the Gas Price editable directly via Chain configs.\r\nLike we did for Features and Wallets.\r\nThanks!\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.db.models import Model\n\nfrom .models import Chain, Feature, GasPrice, Wallet\n\n\nclass FeatureInline(admin.TabularInline[Model]):\n model = Feature.chains.through\n extra = 0\n verbose_name_plural = \"Features enabled for this chain\"\n\n\nclass WalletInline(admin.TabularInline[Model]):\n model = Wallet.chains.through\n extra = 0\n verbose_name_plural = \"Wallets enabled for this chain\"\n\n\[email protected](Chain)\nclass ChainAdmin(admin.ModelAdmin[Chain]):\n list_display = (\n \"id\",\n \"name\",\n \"rpc_uri\",\n \"safe_apps_rpc_uri\",\n \"relevance\",\n )\n search_fields = (\"name\", \"id\")\n ordering = (\n \"relevance\",\n \"name\",\n )\n inlines = [FeatureInline, WalletInline]\n\n\[email protected](GasPrice)\nclass GasPriceAdmin(admin.ModelAdmin[GasPrice]):\n list_display = (\n \"chain_id\",\n \"oracle_uri\",\n \"fixed_wei_value\",\n \"rank\",\n )\n search_fields = (\"chain_id\", \"oracle_uri\")\n ordering = (\"rank\",)\n\n\[email protected](Wallet)\nclass WalletAdmin(admin.ModelAdmin[Wallet]):\n list_display = (\"key\",)\n\n\[email protected](Feature)\nclass FeatureAdmin(admin.ModelAdmin[Feature]):\n list_display = (\"key\",)\n", "path": "src/chains/admin.py"}]}
973
184
gh_patches_debug_18892
rasdani/github-patches
git_diff
python__peps-634
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pep2rss can raise UnicodeEncodeError on certain platforms This is due to the default encoding of [`open()`](https://docs.python.org/3/library/functions.html#open) being platform dependent. For example, on Windows (CP-1252), it can raise `UnicodeEncodeError: 'charmap' codec can't encode character '\u0141' in position 1545: character maps to <undefined>` when handling the character, 'Ł', in the name of the author of [PEP 569](https://www.python.org/dev/peps/pep-0569/). https://github.com/python/peps/blob/433faca4cf1bb8a0550d01b86b2b4581b3eda460/pep-0569.rst#L5 </issue> <code> [start of pep2rss.py] 1 #!/usr/bin/env python 2 3 # usage: pep-hook.py $REPOS $REV 4 # (standard post-commit args) 5 6 import os, glob, time, datetime, stat, re, sys 7 import codecs 8 import PyRSS2Gen as rssgen 9 10 RSS_PATH = os.path.join(sys.argv[1], 'peps.rss') 11 12 def firstline_startingwith(full_path, text): 13 for line in codecs.open(full_path, encoding="utf-8"): 14 if line.startswith(text): 15 return line[len(text):].strip() 16 return None 17 18 # get list of peps with creation time (from "Created:" string in pep .txt) 19 peps = glob.glob('pep-*.txt') 20 def pep_creation_dt(full_path): 21 created_str = firstline_startingwith(full_path, 'Created:') 22 # bleh, I was hoping to avoid re but some PEPs editorialize 23 # on the Created line 24 m = re.search(r'''(\d+-\w+-\d{4})''', created_str) 25 if not m: 26 # some older ones have an empty line, that's okay, if it's old 27 # we ipso facto don't care about it. 28 # "return None" would make the most sense but datetime objects 29 # refuse to compare with that. :-| 30 return datetime.datetime(*time.localtime(0)[:6]) 31 created_str = m.group(1) 32 try: 33 t = time.strptime(created_str, '%d-%b-%Y') 34 except ValueError: 35 t = time.strptime(created_str, '%d-%B-%Y') 36 return datetime.datetime(*t[:6]) 37 peps_with_dt = [(pep_creation_dt(full_path), full_path) for full_path in peps] 38 # sort peps by date, newest first 39 peps_with_dt.sort(reverse=True) 40 41 # generate rss items for 10 most recent peps 42 items = [] 43 for dt, full_path in peps_with_dt[:10]: 44 try: 45 n = int(full_path.split('-')[-1].split('.')[0]) 46 except ValueError: 47 pass 48 title = firstline_startingwith(full_path, 'Title:') 49 author = firstline_startingwith(full_path, 'Author:') 50 url = 'http://www.python.org/dev/peps/pep-%0.4d' % n 51 item = rssgen.RSSItem( 52 title = 'PEP %d: %s' % (n, title), 53 link = url, 54 description = 'Author: %s' % author, 55 guid = rssgen.Guid(url), 56 pubDate = dt) 57 items.append(item) 58 59 # the rss envelope 60 desc = """ 61 Newest Python Enhancement Proposals (PEPs) - Information on new 62 language features, and some meta-information like release 63 procedure and schedules 64 """.strip() 65 rss = rssgen.RSS2( 66 title = 'Newest Python PEPs', 67 link = 'http://www.python.org/dev/peps', 68 description = desc, 69 lastBuildDate = datetime.datetime.now(), 70 items = items) 71 72 with open(RSS_PATH, 'w') as fp: 73 fp.write(rss.to_xml()) 74 [end of pep2rss.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pep2rss.py b/pep2rss.py --- a/pep2rss.py +++ b/pep2rss.py @@ -1,16 +1,15 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # usage: pep-hook.py $REPOS $REV # (standard post-commit args) import os, glob, time, datetime, stat, re, sys -import codecs import PyRSS2Gen as rssgen RSS_PATH = os.path.join(sys.argv[1], 'peps.rss') def firstline_startingwith(full_path, text): - for line in codecs.open(full_path, encoding="utf-8"): + for line in open(full_path, encoding="utf-8"): if line.startswith(text): return line[len(text):].strip() return None @@ -69,5 +68,5 @@ lastBuildDate = datetime.datetime.now(), items = items) -with open(RSS_PATH, 'w') as fp: - fp.write(rss.to_xml()) +with open(RSS_PATH, 'w', encoding="utf-8") as fp: + fp.write(rss.to_xml(encoding="utf-8"))
{"golden_diff": "diff --git a/pep2rss.py b/pep2rss.py\n--- a/pep2rss.py\n+++ b/pep2rss.py\n@@ -1,16 +1,15 @@\n-#!/usr/bin/env python\n+#!/usr/bin/env python3\n \n # usage: pep-hook.py $REPOS $REV\n # (standard post-commit args)\n \n import os, glob, time, datetime, stat, re, sys\n-import codecs\n import PyRSS2Gen as rssgen\n \n RSS_PATH = os.path.join(sys.argv[1], 'peps.rss')\n \n def firstline_startingwith(full_path, text):\n- for line in codecs.open(full_path, encoding=\"utf-8\"):\n+ for line in open(full_path, encoding=\"utf-8\"):\n if line.startswith(text):\n return line[len(text):].strip()\n return None\n@@ -69,5 +68,5 @@\n lastBuildDate = datetime.datetime.now(),\n items = items)\n \n-with open(RSS_PATH, 'w') as fp:\n- fp.write(rss.to_xml())\n+with open(RSS_PATH, 'w', encoding=\"utf-8\") as fp:\n+ fp.write(rss.to_xml(encoding=\"utf-8\"))\n", "issue": "pep2rss can raise UnicodeEncodeError on certain platforms\nThis is due to the default encoding of [`open()`](https://docs.python.org/3/library/functions.html#open) being platform dependent.\r\n\r\nFor example, on Windows (CP-1252), it can raise\r\n`UnicodeEncodeError: 'charmap' codec can't encode character '\\u0141' in position 1545: character maps to <undefined>`\r\nwhen handling the character, '\u0141', in the name of the author of [PEP 569](https://www.python.org/dev/peps/pep-0569/).\r\nhttps://github.com/python/peps/blob/433faca4cf1bb8a0550d01b86b2b4581b3eda460/pep-0569.rst#L5\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# usage: pep-hook.py $REPOS $REV\n# (standard post-commit args)\n\nimport os, glob, time, datetime, stat, re, sys\nimport codecs\nimport PyRSS2Gen as rssgen\n\nRSS_PATH = os.path.join(sys.argv[1], 'peps.rss')\n\ndef firstline_startingwith(full_path, text):\n for line in codecs.open(full_path, encoding=\"utf-8\"):\n if line.startswith(text):\n return line[len(text):].strip()\n return None\n\n# get list of peps with creation time (from \"Created:\" string in pep .txt)\npeps = glob.glob('pep-*.txt')\ndef pep_creation_dt(full_path):\n created_str = firstline_startingwith(full_path, 'Created:')\n # bleh, I was hoping to avoid re but some PEPs editorialize\n # on the Created line\n m = re.search(r'''(\\d+-\\w+-\\d{4})''', created_str)\n if not m:\n # some older ones have an empty line, that's okay, if it's old\n # we ipso facto don't care about it.\n # \"return None\" would make the most sense but datetime objects\n # refuse to compare with that. 
:-|\n return datetime.datetime(*time.localtime(0)[:6])\n created_str = m.group(1)\n try:\n t = time.strptime(created_str, '%d-%b-%Y')\n except ValueError:\n t = time.strptime(created_str, '%d-%B-%Y')\n return datetime.datetime(*t[:6])\npeps_with_dt = [(pep_creation_dt(full_path), full_path) for full_path in peps]\n# sort peps by date, newest first\npeps_with_dt.sort(reverse=True)\n\n# generate rss items for 10 most recent peps\nitems = []\nfor dt, full_path in peps_with_dt[:10]:\n try:\n n = int(full_path.split('-')[-1].split('.')[0])\n except ValueError:\n pass\n title = firstline_startingwith(full_path, 'Title:')\n author = firstline_startingwith(full_path, 'Author:')\n url = 'http://www.python.org/dev/peps/pep-%0.4d' % n\n item = rssgen.RSSItem(\n title = 'PEP %d: %s' % (n, title),\n link = url,\n description = 'Author: %s' % author,\n guid = rssgen.Guid(url),\n pubDate = dt)\n items.append(item)\n\n# the rss envelope\ndesc = \"\"\"\nNewest Python Enhancement Proposals (PEPs) - Information on new\nlanguage features, and some meta-information like release\nprocedure and schedules\n\"\"\".strip()\nrss = rssgen.RSS2(\n title = 'Newest Python PEPs',\n link = 'http://www.python.org/dev/peps',\n description = desc,\n lastBuildDate = datetime.datetime.now(),\n items = items)\n\nwith open(RSS_PATH, 'w') as fp:\n fp.write(rss.to_xml())\n", "path": "pep2rss.py"}]}
1,546
271
gh_patches_debug_4567
rasdani/github-patches
git_diff
mozmeao__snippets-service-813
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> UnicodeEncodeError when Slack request.post UnicodeEncodeError: 'latin-1' codec can't encode characters in position 258-264: Body ('γιώργος') is not valid Latin-1. Use body.encode('utf-8') if you want to send it encoded in UTF-8. Sentry Issue: https://sentry.prod.mozaws.net/operations/snippets/issues/4891896/ </issue> <code> [start of snippets/base/slack.py] 1 import logging 2 3 from django.conf import settings 4 from django.template.loader import render_to_string 5 6 import requests 7 from raven.contrib.django.models import client as sentry_client 8 9 logger = logging.getLogger(__name__) 10 11 12 def send_slack(template_name, snippet): 13 data = render_to_string('slack/{}.jinja.json'.format(template_name), 14 context={'snippet': snippet}) 15 _send_slack(data) 16 17 18 def _send_slack(data): 19 if not (settings.SLACK_ENABLE and settings.SLACK_WEBHOOK): 20 logger.info('Slack is not enabled.') 21 return 22 23 try: 24 response = requests.post(settings.SLACK_WEBHOOK, data=data, 25 headers={'Content-Type': 'application/json'}, 26 timeout=4) 27 response.raise_for_status() 28 except requests.exceptions.RequestException: 29 sentry_client.captureException() 30 [end of snippets/base/slack.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/snippets/base/slack.py b/snippets/base/slack.py --- a/snippets/base/slack.py +++ b/snippets/base/slack.py @@ -21,7 +21,7 @@ return try: - response = requests.post(settings.SLACK_WEBHOOK, data=data, + response = requests.post(settings.SLACK_WEBHOOK, data=data.encode('utf-8'), headers={'Content-Type': 'application/json'}, timeout=4) response.raise_for_status()
{"golden_diff": "diff --git a/snippets/base/slack.py b/snippets/base/slack.py\n--- a/snippets/base/slack.py\n+++ b/snippets/base/slack.py\n@@ -21,7 +21,7 @@\n return\n \n try:\n- response = requests.post(settings.SLACK_WEBHOOK, data=data,\n+ response = requests.post(settings.SLACK_WEBHOOK, data=data.encode('utf-8'),\n headers={'Content-Type': 'application/json'},\n timeout=4)\n response.raise_for_status()\n", "issue": "UnicodeEncodeError when Slack request.post\nUnicodeEncodeError: 'latin-1' codec can't encode characters in position 258-264: Body ('\u03b3\u03b9\u03ce\u03c1\u03b3\u03bf\u03c2') is not valid Latin-1. Use body.encode('utf-8') if you want to send it encoded in UTF-8.\r\n\r\nSentry Issue: https://sentry.prod.mozaws.net/operations/snippets/issues/4891896/\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.template.loader import render_to_string\n\nimport requests\nfrom raven.contrib.django.models import client as sentry_client\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_slack(template_name, snippet):\n data = render_to_string('slack/{}.jinja.json'.format(template_name),\n context={'snippet': snippet})\n _send_slack(data)\n\n\ndef _send_slack(data):\n if not (settings.SLACK_ENABLE and settings.SLACK_WEBHOOK):\n logger.info('Slack is not enabled.')\n return\n\n try:\n response = requests.post(settings.SLACK_WEBHOOK, data=data,\n headers={'Content-Type': 'application/json'},\n timeout=4)\n response.raise_for_status()\n except requests.exceptions.RequestException:\n sentry_client.captureException()\n", "path": "snippets/base/slack.py"}]}
865
113
gh_patches_debug_11352
rasdani/github-patches
git_diff
netket__netket-1487
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error in fermi-hubbard example Hello, there. It seems that the definition of create and annihilation operators are defined reversely in fermi-hubbard model, and results are wrong. [link](https://github.com/netket/netket/blob/26372e3527ae12861cc7953c24654614e8679d87/Examples/Fermions/fermi_hubbard.py#L25C1-L26) </issue> <code> [start of Examples/Fermions/fermi_hubbard.py] 1 import netket as nk 2 import numpy as np 3 import matplotlib.pyplot as plt 4 import json 5 6 from netket import experimental as nkx 7 8 L = 2 # take a 2x2 lattice 9 D = 2 10 t = 1 # tunneling/hopping 11 U = 0.01 # coulomb 12 13 # create the graph our fermions can hop on 14 g = nk.graph.Hypercube(length=L, n_dim=D, pbc=True) 15 n_sites = g.n_nodes 16 17 # create a hilbert space with 2 up and 2 down spins 18 hi = nkx.hilbert.SpinOrbitalFermions(n_sites, s=1 / 2, n_fermions=(2, 2)) 19 20 21 # create an operator representing fermi hubbard interactions 22 # -t (i^ j + h.c.) + U (i^ i j^ j) 23 # we will create a helper function to abbreviate the creation, destruction and number operators 24 # each operator has a site and spin projection (sz) in order to find the right position in the hilbert space samples 25 def c(site, sz): 26 return nkx.operator.fermion.create(hi, site, sz=sz) 27 28 29 def cdag(site, sz): 30 return nkx.operator.fermion.destroy(hi, site, sz=sz) 31 32 33 def nc(site, sz): 34 return nkx.operator.fermion.number(hi, site, sz=sz) 35 36 37 up = +1 / 2 38 down = -1 / 2 39 ham = 0.0 40 for sz in (up, down): 41 for u, v in g.edges(): 42 ham += -t * cdag(u, sz) * c(v, sz) - t * cdag(v, sz) * c(u, sz) 43 for u in g.nodes(): 44 ham += U * nc(u, up) * nc(u, down) 45 46 print("Hamiltonian =", ham.operator_string()) 47 48 # metropolis exchange moves fermions around according to a graph 49 # the physical graph has LxL vertices, but the computational basis defined by the 50 # hilbert space contains (2s+1)*L*L occupation numbers 51 # by taking a disjoint copy of the lattice, we can 52 # move the fermions around independently for both spins 53 # and therefore conserve the number of fermions with up and down spin 54 55 # g.n_nodes == L*L --> disj_graph == 2*L*L 56 disj_graph = nk.graph.disjoint_union(g, g) 57 sa = nk.sampler.MetropolisExchange(hi, graph=disj_graph, n_chains=16) 58 59 # since the hilbert basis is a set of occupation numbers, we can take a general RBM 60 # we take complex parameters, since it learns sign structures more easily, and for even fermion number, the wave function might be complex 61 ma = nk.models.RBM(alpha=1, param_dtype=complex, use_visible_bias=False) 62 vs = nk.vqs.MCState(sa, ma, n_discard_per_chain=100, n_samples=512) 63 64 # we will use sgd with Stochastic Reconfiguration 65 opt = nk.optimizer.Sgd(learning_rate=0.01) 66 sr = nk.optimizer.SR(diag_shift=0.1) 67 68 gs = nk.driver.VMC(ham, opt, variational_state=vs, preconditioner=sr) 69 70 # now run the optimization 71 # first step will take longer in order to compile 72 exp_name = "fermions_test" 73 gs.run(500, out=exp_name) 74 75 ############## plot ################# 76 77 ed_energies = np.linalg.eigvalsh(ham.to_dense()) 78 79 with open(f"{exp_name}.log", "r") as f: 80 data = json.load(f) 81 82 x = data["Energy"]["iters"] 83 y = data["Energy"]["Mean"]["real"] 84 85 # plot the energy levels 86 plt.axhline(ed_energies[0], color="red", label="E0") 87 for e in ed_energies[1:]: 88 plt.axhline(e, color="black") 89 plt.plot(x, y, color="red", label="VMC") 90 plt.xlabel("step") 91 plt.ylabel("E") 92 plt.show() 93 [end of Examples/Fermions/fermi_hubbard.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Examples/Fermions/fermi_hubbard.py b/Examples/Fermions/fermi_hubbard.py --- a/Examples/Fermions/fermi_hubbard.py +++ b/Examples/Fermions/fermi_hubbard.py @@ -23,11 +23,11 @@ # we will create a helper function to abbreviate the creation, destruction and number operators # each operator has a site and spin projection (sz) in order to find the right position in the hilbert space samples def c(site, sz): - return nkx.operator.fermion.create(hi, site, sz=sz) + return nkx.operator.fermion.destroy(hi, site, sz=sz) def cdag(site, sz): - return nkx.operator.fermion.destroy(hi, site, sz=sz) + return nkx.operator.fermion.create(hi, site, sz=sz) def nc(site, sz):
{"golden_diff": "diff --git a/Examples/Fermions/fermi_hubbard.py b/Examples/Fermions/fermi_hubbard.py\n--- a/Examples/Fermions/fermi_hubbard.py\n+++ b/Examples/Fermions/fermi_hubbard.py\n@@ -23,11 +23,11 @@\n # we will create a helper function to abbreviate the creation, destruction and number operators\n # each operator has a site and spin projection (sz) in order to find the right position in the hilbert space samples\n def c(site, sz):\n- return nkx.operator.fermion.create(hi, site, sz=sz)\n+ return nkx.operator.fermion.destroy(hi, site, sz=sz)\n \n \n def cdag(site, sz):\n- return nkx.operator.fermion.destroy(hi, site, sz=sz)\n+ return nkx.operator.fermion.create(hi, site, sz=sz)\n \n \n def nc(site, sz):\n", "issue": "Error in fermi-hubbard example\nHello, there.\r\n\r\nIt seems that the definition of create and annihilation operators are defined reversely in fermi-hubbard model, and results are wrong. [link](https://github.com/netket/netket/blob/26372e3527ae12861cc7953c24654614e8679d87/Examples/Fermions/fermi_hubbard.py#L25C1-L26)\n", "before_files": [{"content": "import netket as nk\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\n\nfrom netket import experimental as nkx\n\nL = 2 # take a 2x2 lattice\nD = 2\nt = 1 # tunneling/hopping\nU = 0.01 # coulomb\n\n# create the graph our fermions can hop on\ng = nk.graph.Hypercube(length=L, n_dim=D, pbc=True)\nn_sites = g.n_nodes\n\n# create a hilbert space with 2 up and 2 down spins\nhi = nkx.hilbert.SpinOrbitalFermions(n_sites, s=1 / 2, n_fermions=(2, 2))\n\n\n# create an operator representing fermi hubbard interactions\n# -t (i^ j + h.c.) + U (i^ i j^ j)\n# we will create a helper function to abbreviate the creation, destruction and number operators\n# each operator has a site and spin projection (sz) in order to find the right position in the hilbert space samples\ndef c(site, sz):\n return nkx.operator.fermion.create(hi, site, sz=sz)\n\n\ndef cdag(site, sz):\n return nkx.operator.fermion.destroy(hi, site, sz=sz)\n\n\ndef nc(site, sz):\n return nkx.operator.fermion.number(hi, site, sz=sz)\n\n\nup = +1 / 2\ndown = -1 / 2\nham = 0.0\nfor sz in (up, down):\n for u, v in g.edges():\n ham += -t * cdag(u, sz) * c(v, sz) - t * cdag(v, sz) * c(u, sz)\nfor u in g.nodes():\n ham += U * nc(u, up) * nc(u, down)\n\nprint(\"Hamiltonian =\", ham.operator_string())\n\n# metropolis exchange moves fermions around according to a graph\n# the physical graph has LxL vertices, but the computational basis defined by the\n# hilbert space contains (2s+1)*L*L occupation numbers\n# by taking a disjoint copy of the lattice, we can\n# move the fermions around independently for both spins\n# and therefore conserve the number of fermions with up and down spin\n\n# g.n_nodes == L*L --> disj_graph == 2*L*L\ndisj_graph = nk.graph.disjoint_union(g, g)\nsa = nk.sampler.MetropolisExchange(hi, graph=disj_graph, n_chains=16)\n\n# since the hilbert basis is a set of occupation numbers, we can take a general RBM\n# we take complex parameters, since it learns sign structures more easily, and for even fermion number, the wave function might be complex\nma = nk.models.RBM(alpha=1, param_dtype=complex, use_visible_bias=False)\nvs = nk.vqs.MCState(sa, ma, n_discard_per_chain=100, n_samples=512)\n\n# we will use sgd with Stochastic Reconfiguration\nopt = nk.optimizer.Sgd(learning_rate=0.01)\nsr = nk.optimizer.SR(diag_shift=0.1)\n\ngs = nk.driver.VMC(ham, opt, variational_state=vs, preconditioner=sr)\n\n# now run the optimization\n# first step will take longer in 
order to compile\nexp_name = \"fermions_test\"\ngs.run(500, out=exp_name)\n\n############## plot #################\n\ned_energies = np.linalg.eigvalsh(ham.to_dense())\n\nwith open(f\"{exp_name}.log\", \"r\") as f:\n data = json.load(f)\n\nx = data[\"Energy\"][\"iters\"]\ny = data[\"Energy\"][\"Mean\"][\"real\"]\n\n# plot the energy levels\nplt.axhline(ed_energies[0], color=\"red\", label=\"E0\")\nfor e in ed_energies[1:]:\n plt.axhline(e, color=\"black\")\nplt.plot(x, y, color=\"red\", label=\"VMC\")\nplt.xlabel(\"step\")\nplt.ylabel(\"E\")\nplt.show()\n", "path": "Examples/Fermions/fermi_hubbard.py"}]}
1,755
220
gh_patches_debug_5242
rasdani/github-patches
git_diff
wemake-services__wemake-python-styleguide-884
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Wrong and correct examples of nested annotations mixed up in docs for Union type # Bug report <!-- Hi, thanks for submitting a bug. We appreciate that. But, we will need some information about what's wrong to help you. --> ## What's wrong From ``NestedAnnotationsViolation`` class docstring: ``` Example:: # Correct: Literal[1, 2, 3, "foo", 5, None] Union[Union[int, str], float] # Wrong: Literal[Literal[Literal[1, 2, 3], "foo"], 5, None] Union[int, str, float] ``` There is a mistake: correct and wrong examples for ``Union`` type are mixed up. If I run flake8 passing it a file which contains ``Union[int, str, float]`` annotation, no error will be raised. And for ``Union[Union[int, str], float]`` WPS702 is raised. Both are expected behaviour I guess, just a mismatch in documentation. ## How is that should be ``` Example:: # Correct: Literal[1, 2, 3, "foo", 5, None] Union[int, str, float] # Wrong: Literal[Literal[Literal[1, 2, 3], "foo"], 5, None] Union[Union[int, str], float] ``` <!-- Love wemake-python-styleguide? ❤️ Do you want your bugs to be solved faster? Please consider supporting our collective: 👉 https://opencollective.com/wemake-python-styleguide/donate --> </issue> <code> [start of wemake_python_styleguide/violations/annotations.py] 1 # -*- coding: utf-8 -*- 2 3 """ 4 These checks help to prevent incorrect usage of Python 3 typing annotations. 5 6 While they maybe of a great help in writing clear 7 and concise code, they still can be abused. 8 9 Once again, these rules are highly subjective. But, we love them. 10 11 .. currentmodule:: wemake_python_styleguide.violations.annotations 12 13 Summary 14 ------- 15 16 .. autosummary:: 17 :nosignatures: 18 19 LiteralNoneViolation 20 NestedAnnotationsViolation 21 22 Annotation checks 23 ------------------ 24 25 .. autoclass:: LiteralNoneViolation 26 .. autoclass:: NestedAnnotationsViolation 27 28 """ 29 30 from typing_extensions import final 31 32 from wemake_python_styleguide.violations.base import ASTViolation 33 34 35 @final 36 class LiteralNoneViolation(ASTViolation): 37 """ 38 Forbids to use ``Literal[None]`` typing annotation. 39 40 Reasoning: 41 Literal[None] is just the same as None. 42 There's no need to use the first version. 43 It is not type related, it is a consistency rule. 44 45 Solution: 46 Replace ``Literal[None]`` with ``None``. 47 48 Example:: 49 50 # Correct: 51 def func(empty: None): 52 '''Empty function.''' 53 54 # Wrong: 55 def func(empty: Literal[None]): 56 '''Empty function.''' 57 58 .. versionadded:: 0.13.0 59 60 """ 61 62 code = 701 63 error_template = 'Found useless `Literal[None]` typing annotation' 64 65 66 @final 67 class NestedAnnotationsViolation(ASTViolation): 68 """ 69 Forbids use of nested Literal and Union Annotation. 70 71 Reasoning: 72 There is no need to nest certain annotations of the same type. 73 They are exactly equivalent to the flattened version. 74 Use the flattened version for consistency. 75 76 Solution: 77 Flatten consecutively nested ``typing.Literal`` and ``typing.Union``. 78 79 Example:: 80 # Correct: 81 Literal[1, 2, 3, "foo", 5, None] 82 Union[Union[int, str], float] 83 84 # Wrong: 85 Literal[Literal[Literal[1, 2, 3], "foo"], 5, None] 86 Union[int, str, float] 87 88 .. versionadded:: 0.13.0 89 90 """ 91 92 error_template = 'Found redundant nested typing annotation' 93 code = 702 94 [end of wemake_python_styleguide/violations/annotations.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wemake_python_styleguide/violations/annotations.py b/wemake_python_styleguide/violations/annotations.py --- a/wemake_python_styleguide/violations/annotations.py +++ b/wemake_python_styleguide/violations/annotations.py @@ -79,11 +79,11 @@ Example:: # Correct: Literal[1, 2, 3, "foo", 5, None] - Union[Union[int, str], float] + Union[int, str, float] # Wrong: Literal[Literal[Literal[1, 2, 3], "foo"], 5, None] - Union[int, str, float] + Union[Union[int, str], float] .. versionadded:: 0.13.0
{"golden_diff": "diff --git a/wemake_python_styleguide/violations/annotations.py b/wemake_python_styleguide/violations/annotations.py\n--- a/wemake_python_styleguide/violations/annotations.py\n+++ b/wemake_python_styleguide/violations/annotations.py\n@@ -79,11 +79,11 @@\n Example::\n # Correct:\n Literal[1, 2, 3, \"foo\", 5, None]\n- Union[Union[int, str], float]\n+ Union[int, str, float]\n \n # Wrong:\n Literal[Literal[Literal[1, 2, 3], \"foo\"], 5, None]\n- Union[int, str, float]\n+ Union[Union[int, str], float]\n \n .. versionadded:: 0.13.0\n", "issue": "Wrong and correct examples of nested annotations mixed up in docs for Union type\n# Bug report\r\n\r\n<!--\r\nHi, thanks for submitting a bug. We appreciate that.\r\n\r\nBut, we will need some information about what's wrong to help you.\r\n-->\r\n\r\n## What's wrong\r\n\r\nFrom ``NestedAnnotationsViolation`` class docstring:\r\n```\r\nExample::\r\n # Correct:\r\n Literal[1, 2, 3, \"foo\", 5, None]\r\n Union[Union[int, str], float]\r\n\r\n # Wrong:\r\n Literal[Literal[Literal[1, 2, 3], \"foo\"], 5, None]\r\n Union[int, str, float]\r\n```\r\nThere is a mistake: correct and wrong examples for ``Union`` type are mixed up. If I run flake8 passing it a file which contains ``Union[int, str, float]`` annotation, no error will be raised. And for ``Union[Union[int, str], float]`` WPS702 is raised. Both are expected behaviour I guess, just a mismatch in documentation.\r\n## How is that should be\r\n\r\n```\r\nExample::\r\n # Correct:\r\n Literal[1, 2, 3, \"foo\", 5, None]\r\n Union[int, str, float]\r\n\r\n # Wrong:\r\n Literal[Literal[Literal[1, 2, 3], \"foo\"], 5, None]\r\n Union[Union[int, str], float]\r\n```\r\n<!--\r\n\r\n Love wemake-python-styleguide? \u2764\ufe0f\r\n Do you want your bugs to be solved faster?\r\n\r\n Please consider supporting our collective:\r\n\ud83d\udc49 https://opencollective.com/wemake-python-styleguide/donate\r\n\r\n-->\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThese checks help to prevent incorrect usage of Python 3 typing annotations.\n\nWhile they maybe of a great help in writing clear\nand concise code, they still can be abused.\n\nOnce again, these rules are highly subjective. But, we love them.\n\n.. currentmodule:: wemake_python_styleguide.violations.annotations\n\nSummary\n-------\n\n.. autosummary::\n :nosignatures:\n\n LiteralNoneViolation\n NestedAnnotationsViolation\n\nAnnotation checks\n------------------\n\n.. autoclass:: LiteralNoneViolation\n.. autoclass:: NestedAnnotationsViolation\n\n\"\"\"\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.base import ASTViolation\n\n\n@final\nclass LiteralNoneViolation(ASTViolation):\n \"\"\"\n Forbids to use ``Literal[None]`` typing annotation.\n\n Reasoning:\n Literal[None] is just the same as None.\n There's no need to use the first version.\n It is not type related, it is a consistency rule.\n\n Solution:\n Replace ``Literal[None]`` with ``None``.\n\n Example::\n\n # Correct:\n def func(empty: None):\n '''Empty function.'''\n\n # Wrong:\n def func(empty: Literal[None]):\n '''Empty function.'''\n\n .. 
versionadded:: 0.13.0\n\n \"\"\"\n\n code = 701\n error_template = 'Found useless `Literal[None]` typing annotation'\n\n\n@final\nclass NestedAnnotationsViolation(ASTViolation):\n \"\"\"\n Forbids use of nested Literal and Union Annotation.\n\n Reasoning:\n There is no need to nest certain annotations of the same type.\n They are exactly equivalent to the flattened version.\n Use the flattened version for consistency.\n\n Solution:\n Flatten consecutively nested ``typing.Literal`` and ``typing.Union``.\n\n Example::\n # Correct:\n Literal[1, 2, 3, \"foo\", 5, None]\n Union[Union[int, str], float]\n\n # Wrong:\n Literal[Literal[Literal[1, 2, 3], \"foo\"], 5, None]\n Union[int, str, float]\n\n .. versionadded:: 0.13.0\n\n \"\"\"\n\n error_template = 'Found redundant nested typing annotation'\n code = 702\n", "path": "wemake_python_styleguide/violations/annotations.py"}]}
1,606
183
gh_patches_debug_1372
rasdani/github-patches
git_diff
wright-group__WrightTools-361
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> collection.keys returns data objects should return names, but get objects </issue> <code> [start of WrightTools/collection/_collection.py] 1 """Collection.""" 2 3 4 # --- import -------------------------------------------------------------------------------------- 5 6 7 import os 8 import shutil 9 10 import numpy as np 11 12 import h5py 13 14 from .. import data as wt_data 15 from .._base import Group 16 17 18 # --- define -------------------------------------------------------------------------------------- 19 20 21 __all__ = ['Collection'] 22 23 24 # --- classes ------------------------------------------------------------------------------------- 25 26 27 class Collection(Group): 28 """Nestable Collection of Data objects.""" 29 class_name = 'Collection' 30 31 def __iter__(self): 32 self.__n = 0 33 return self 34 35 def __len__(self): 36 return len(self.item_names) 37 38 def __next__(self): 39 if self.__n < len(self): 40 out = self[self.__n] 41 self.__n += 1 42 else: 43 raise StopIteration 44 return out 45 46 def __repr__(self): 47 return '<WrightTools.Collection \'{0}\' {1} at {2}>'.format(self.natural_name, 48 self.item_names, 49 '::'.join([self.filepath, 50 self.name])) 51 52 def __getitem__(self, key): 53 if isinstance(key, int): 54 key = self.item_names[key] 55 out = h5py.Group.__getitem__(self, key) 56 if 'class' in out.attrs.keys(): 57 if out.attrs['class'] == 'Data': 58 return wt_data.Data(filepath=self.filepath, parent=self.name, name=key, 59 edit_local=True) 60 elif out.attrs['class'] == 'Collection': 61 return Collection(filepath=self.filepath, parent=self.name, name=key, 62 edit_local=True) 63 else: 64 return Group(filepath=self.filepath, parent=self.name, name=key, 65 edit_local=True) 66 else: 67 return out 68 69 def __setitem__(self, key, value): 70 raise NotImplementedError 71 72 @property 73 def item_names(self): 74 if 'item_names' not in self.attrs.keys(): 75 self.attrs['item_names'] = np.array([], dtype='S') 76 return [s.decode() for s in self.attrs['item_names']] 77 78 def create_collection(self, name='collection', position=None, **kwargs): 79 collection = Collection(filepath=self.filepath, parent=self.name, name=name, 80 edit_local=True, **kwargs) 81 if position is None: 82 self._items.append(collection) 83 self.attrs['item_names'] = np.append(self.attrs['item_names'], 84 collection.natural_name.encode()) 85 else: 86 self._items.insert(position, collection) 87 self.attrs['item_names'] = np.insert(self.attrs['item_names'], position, 88 collection.natural_name.encode()) 89 setattr(self, name, collection) 90 return collection 91 92 def create_data(self, name='data', position=None, **kwargs): 93 data = wt_data.Data(filepath=self.filepath, parent=self.name, name=name, edit_local=True, 94 **kwargs) 95 if position is None: 96 self._items.append(data) 97 self.attrs['item_names'] = np.append(self.attrs['item_names'], 98 data.natural_name.encode()) 99 else: 100 self._items.insert(position, data) 101 self.attrs['item_names'] = np.insert(self.attrs['item_names'], position, 102 data.natural_name.encode()) 103 setattr(self, name, data) 104 return data 105 106 def index(self): 107 raise NotImplementedError 108 109 def flush(self): 110 for item in self._items: 111 item.flush() 112 self.file.flush() 113 114 def save(self, filepath=None, verbose=True): 115 # TODO: documentation 116 self.flush() # ensure all changes are written to file 117 if filepath is None: 118 filepath = os.path.join(os.getcwd(), self.natural_name + '.wt5') 119 elif len(os.path.basename(filepath).split('.')) == 1: 120 filepath += '.wt5' 121 filepath = os.path.expanduser(filepath) 122 shutil.copyfile(src=self.filepath, dst=filepath) 123 if verbose: 124 print('file saved at', filepath) 125 return filepath 126 [end of WrightTools/collection/_collection.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py --- a/WrightTools/collection/_collection.py +++ b/WrightTools/collection/_collection.py @@ -37,7 +37,7 @@ def __next__(self): if self.__n < len(self): - out = self[self.__n] + out = self.item_names[self.__n] self.__n += 1 else: raise StopIteration
{"golden_diff": "diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py\n--- a/WrightTools/collection/_collection.py\n+++ b/WrightTools/collection/_collection.py\n@@ -37,7 +37,7 @@\n \n def __next__(self):\n if self.__n < len(self):\n- out = self[self.__n]\n+ out = self.item_names[self.__n]\n self.__n += 1\n else:\n raise StopIteration\n", "issue": "collection.keys returns data objects\nshould return names, but get objects\n", "before_files": [{"content": "\"\"\"Collection.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\nimport shutil\n\nimport numpy as np\n\nimport h5py\n\nfrom .. import data as wt_data\nfrom .._base import Group\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = ['Collection']\n\n\n# --- classes -------------------------------------------------------------------------------------\n\n\nclass Collection(Group):\n \"\"\"Nestable Collection of Data objects.\"\"\"\n class_name = 'Collection'\n\n def __iter__(self):\n self.__n = 0\n return self\n\n def __len__(self):\n return len(self.item_names)\n\n def __next__(self):\n if self.__n < len(self):\n out = self[self.__n]\n self.__n += 1\n else:\n raise StopIteration\n return out\n\n def __repr__(self):\n return '<WrightTools.Collection \\'{0}\\' {1} at {2}>'.format(self.natural_name,\n self.item_names,\n '::'.join([self.filepath,\n self.name]))\n\n def __getitem__(self, key):\n if isinstance(key, int):\n key = self.item_names[key]\n out = h5py.Group.__getitem__(self, key)\n if 'class' in out.attrs.keys():\n if out.attrs['class'] == 'Data':\n return wt_data.Data(filepath=self.filepath, parent=self.name, name=key,\n edit_local=True)\n elif out.attrs['class'] == 'Collection':\n return Collection(filepath=self.filepath, parent=self.name, name=key,\n edit_local=True)\n else:\n return Group(filepath=self.filepath, parent=self.name, name=key,\n edit_local=True)\n else:\n return out\n\n def __setitem__(self, key, value):\n raise NotImplementedError\n\n @property\n def item_names(self):\n if 'item_names' not in self.attrs.keys():\n self.attrs['item_names'] = np.array([], dtype='S')\n return [s.decode() for s in self.attrs['item_names']]\n\n def create_collection(self, name='collection', position=None, **kwargs):\n collection = Collection(filepath=self.filepath, parent=self.name, name=name,\n edit_local=True, **kwargs)\n if position is None:\n self._items.append(collection)\n self.attrs['item_names'] = np.append(self.attrs['item_names'],\n collection.natural_name.encode())\n else:\n self._items.insert(position, collection)\n self.attrs['item_names'] = np.insert(self.attrs['item_names'], position,\n collection.natural_name.encode())\n setattr(self, name, collection)\n return collection\n\n def create_data(self, name='data', position=None, **kwargs):\n data = wt_data.Data(filepath=self.filepath, parent=self.name, name=name, edit_local=True,\n **kwargs)\n if position is None:\n self._items.append(data)\n self.attrs['item_names'] = np.append(self.attrs['item_names'],\n data.natural_name.encode())\n else:\n self._items.insert(position, data)\n self.attrs['item_names'] = np.insert(self.attrs['item_names'], position,\n data.natural_name.encode())\n setattr(self, name, data)\n return data\n\n def index(self):\n raise NotImplementedError\n\n def flush(self):\n for item in self._items:\n item.flush()\n self.file.flush()\n\n def save(self, filepath=None, 
verbose=True):\n # TODO: documentation\n self.flush() # ensure all changes are written to file\n if filepath is None:\n filepath = os.path.join(os.getcwd(), self.natural_name + '.wt5')\n elif len(os.path.basename(filepath).split('.')) == 1:\n filepath += '.wt5'\n filepath = os.path.expanduser(filepath)\n shutil.copyfile(src=self.filepath, dst=filepath)\n if verbose:\n print('file saved at', filepath)\n return filepath\n", "path": "WrightTools/collection/_collection.py"}]}
1,665
110
gh_patches_debug_27078
rasdani/github-patches
git_diff
dotkom__onlineweb4-524
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> (User creation) Some fields are required that should not be Address, zip and phone should not be required. People will complain. </issue> <code> [start of apps/authentication/forms.py] 1 # -*- coding: utf-8 -*- 2 3 import datetime 4 import re 5 6 from django import forms 7 from django.contrib import auth 8 from django.utils.translation import ugettext as _ 9 10 from apps.authentication.models import OnlineUser as User, Email 11 12 class LoginForm(forms.Form): 13 username = forms.CharField(widget=forms.TextInput(), label=_("Brukernavn"), max_length=50) 14 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Passord")) 15 user = None 16 17 def clean(self): 18 if self._errors: 19 return 20 21 user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password']) 22 23 if user: 24 if user.is_active: 25 self.user = user 26 else: 27 self._errors['username'] = self.error_class([_(u"Din konto er ikke aktiv. Forsøk gjenoppretning av passord.")]) 28 else: 29 self._errors['username'] = self.error_class([_(u"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.")]) 30 return self.cleaned_data 31 32 def login(self, request): 33 try: 34 User.objects.get(username=request.POST['username']) 35 except: 36 return False 37 if self.is_valid(): 38 auth.login(request, self.user) 39 request.session.set_expiry(0) 40 return True 41 return False 42 43 class RegisterForm(forms.Form): 44 username = forms.CharField(label=_("Brukernavn"), max_length=20) 45 first_name = forms.CharField(label=_("Fornavn"), max_length=50) 46 last_name = forms.CharField(label=_("Etternavn"), max_length=50) 47 email = forms.EmailField(label=_("Epost"), max_length=50) 48 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Passord")) 49 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Gjenta passord")) 50 address = forms.CharField(label=_("Adresse"), max_length=50) 51 zip_code = forms.CharField(label=_("Postnummer"), max_length=4) 52 phone = forms.CharField(label=_("Telefon"), max_length=20) 53 54 def clean(self): 55 super(RegisterForm, self).clean() 56 if self.is_valid(): 57 cleaned_data = self.cleaned_data 58 59 # Check passwords 60 if cleaned_data['password'] != cleaned_data['repeat_password']: 61 self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")]) 62 63 # Check username 64 username = cleaned_data['username'] 65 if User.objects.filter(username=username).count() > 0: 66 self._errors['username'] = self.error_class([_(u"Brukernavnet er allerede registrert.")]) 67 if not re.match("^[a-zA-Z0-9_-]+$", username): 68 self._errors['username'] = self.error_class([_(u"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _")]) 69 70 # Check email 71 email = cleaned_data['email'] 72 if Email.objects.filter(email=email).count() > 0: 73 self._errors['email'] = self.error_class([_(u"Det fins allerede en bruker med denne epostadressen.")]) 74 75 # ZIP code digits only 76 zip_code = cleaned_data['zip_code'] 77 if len(zip_code) != 4 or not zip_code.isdigit(): 78 self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")]) 79 80 return cleaned_data 81 82 class RecoveryForm(forms.Form): 83 email = forms.EmailField(label="Email", max_length=50) 84 85 class ChangePasswordForm(forms.Form): 86 new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Nytt passord")) 87 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Gjenta passord")) 88 89 def clean(self): 90 super(ChangePasswordForm, self).clean() 91 if self.is_valid(): 92 cleaned_data = self.cleaned_data 93 94 # Check passwords 95 if cleaned_data['new_password'] != cleaned_data['repeat_password']: 96 self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")]) 97 98 return cleaned_data 99 100 101 class NewEmailForm(forms.Form): 102 new_email = forms.EmailField(_(u"ny epostadresse")) 103 [end of apps/authentication/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py --- a/apps/authentication/forms.py +++ b/apps/authentication/forms.py @@ -47,9 +47,9 @@ email = forms.EmailField(label=_("Epost"), max_length=50) password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Passord")) repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Gjenta passord")) - address = forms.CharField(label=_("Adresse"), max_length=50) - zip_code = forms.CharField(label=_("Postnummer"), max_length=4) - phone = forms.CharField(label=_("Telefon"), max_length=20) + address = forms.CharField(label=_("Adresse"), max_length=50, required=False) + zip_code = forms.CharField(label=_("Postnummer"), max_length=4, required=False) + phone = forms.CharField(label=_("Telefon"), max_length=20, required=False) def clean(self): super(RegisterForm, self).clean() @@ -74,8 +74,9 @@ # ZIP code digits only zip_code = cleaned_data['zip_code'] - if len(zip_code) != 4 or not zip_code.isdigit(): - self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")]) + if len(zip_code) != 0: + if len(zip_code) != 4 or len(zip_code) != 0 or not zip_code.isdigit(): + self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")]) return cleaned_data
{"golden_diff": "diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py\n--- a/apps/authentication/forms.py\n+++ b/apps/authentication/forms.py\n@@ -47,9 +47,9 @@\n email = forms.EmailField(label=_(\"Epost\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Gjenta passord\"))\n- address = forms.CharField(label=_(\"Adresse\"), max_length=50)\n- zip_code = forms.CharField(label=_(\"Postnummer\"), max_length=4)\n- phone = forms.CharField(label=_(\"Telefon\"), max_length=20)\n+ address = forms.CharField(label=_(\"Adresse\"), max_length=50, required=False)\n+ zip_code = forms.CharField(label=_(\"Postnummer\"), max_length=4, required=False)\n+ phone = forms.CharField(label=_(\"Telefon\"), max_length=20, required=False)\n \n def clean(self):\n super(RegisterForm, self).clean()\n@@ -74,8 +74,9 @@\n \n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n- if len(zip_code) != 4 or not zip_code.isdigit():\n- self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n+ if len(zip_code) != 0:\n+ if len(zip_code) != 4 or len(zip_code) != 0 or not zip_code.isdigit():\n+ self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n \n return cleaned_data\n", "issue": "(User creation) Some fields are required that should not be\nAddress, zip and phone should not be required. People will complain.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser as User, Email\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=_(\"Brukernavn\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Passord\"))\n user = None\n\n def clean(self):\n if self._errors:\n return\n \n user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])\n\n if user:\n if user.is_active:\n self.user = user\n else:\n self._errors['username'] = self.error_class([_(u\"Din konto er ikke aktiv. 
Fors\u00f8k gjenoppretning av passord.\")])\n else:\n self._errors['username'] = self.error_class([_(u\"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.\")])\n return self.cleaned_data\n\n def login(self, request):\n try:\n User.objects.get(username=request.POST['username'])\n except:\n return False\n if self.is_valid():\n auth.login(request, self.user)\n request.session.set_expiry(0)\n return True\n return False\n\nclass RegisterForm(forms.Form):\n username = forms.CharField(label=_(\"Brukernavn\"), max_length=20)\n first_name = forms.CharField(label=_(\"Fornavn\"), max_length=50)\n last_name = forms.CharField(label=_(\"Etternavn\"), max_length=50)\n email = forms.EmailField(label=_(\"Epost\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Gjenta passord\"))\n address = forms.CharField(label=_(\"Adresse\"), max_length=50)\n zip_code = forms.CharField(label=_(\"Postnummer\"), max_length=4)\n phone = forms.CharField(label=_(\"Telefon\"), max_length=20)\n \n def clean(self):\n super(RegisterForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n # Check username\n username = cleaned_data['username']\n if User.objects.filter(username=username).count() > 0:\n self._errors['username'] = self.error_class([_(u\"Brukernavnet er allerede registrert.\")])\n if not re.match(\"^[a-zA-Z0-9_-]+$\", username):\n self._errors['username'] = self.error_class([_(u\"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _\")])\n\n # Check email\n email = cleaned_data['email']\n if Email.objects.filter(email=email).count() > 0:\n self._errors['email'] = self.error_class([_(u\"Det fins allerede en bruker med denne epostadressen.\")])\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 4 or not zip_code.isdigit():\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data \n\nclass RecoveryForm(forms.Form):\n email = forms.EmailField(label=\"Email\", max_length=50)\n\nclass ChangePasswordForm(forms.Form):\n new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Nytt passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Gjenta passord\"))\n\n def clean(self):\n super(ChangePasswordForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['new_password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n return cleaned_data\n\n\nclass NewEmailForm(forms.Form):\n new_email = forms.EmailField(_(u\"ny epostadresse\"))\n", "path": "apps/authentication/forms.py"}]}
1,726
365
gh_patches_debug_27889
rasdani/github-patches
git_diff
bridgecrewio__checkov-5753
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Azure Function App Slots - Ensure Azure Function App Slots use at least TLS 1.2 **Describe the issue** It seems that there are no checks that ensure that the following resources to use at least TLS 1.2 : - azurerm_function_app_slot - azurerm_linux_function_app_slot - azurerm_windows_function_app_slot **Examples** ````hcl ````hcl resource "azurerm_function_app_slot" "example" { name = "test-azure-functions_slot" location = azurerm_resource_group.example.location resource_group_name = azurerm_resource_group.example.name app_service_plan_id = azurerm_app_service_plan.example.id function_app_name = azurerm_function_app.example.name storage_account_name = azurerm_storage_account.example.name storage_account_access_key = azurerm_storage_account.example.primary_access_key site_config { min_tls_version = 1.2 } } resource "azurerm_linux_function_app_slot" "example" { name = "example-linux-function-app-slot" function_app_id = azurerm_linux_function_app.example.id storage_account_name = azurerm_storage_account.example.name site_config { min_tls_version = 1.2 } } resource "azurerm_windows_function_app" "example" { name = "example-windows-function-app" resource_group_name = azurerm_resource_group.example.name location = azurerm_resource_group.example.location storage_account_name = azurerm_storage_account.example.name service_plan_id = azurerm_service_plan.example.id site_config { min_tls_version = 1.2 } } ```` **Version (please complete the following information):** - N/A **Additional context** N/A </issue> <code> [start of checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py] 1 from checkov.common.models.enums import CheckResult, CheckCategories 2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 3 4 5 class FunctionAppMinTLSVersion(BaseResourceValueCheck): 6 def __init__(self): 7 name = "Ensure Function app is using the latest version of TLS encryption" 8 id = "CKV_AZURE_145" 9 supported_resources = ['azurerm_function_app'] 10 categories = [CheckCategories.NETWORKING] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources, 12 missing_block_result=CheckResult.PASSED) 13 14 def get_inspected_key(self): 15 return "site_config/[0]/min_tls_version" 16 17 def get_expected_value(self): 18 return 1.2 19 20 def get_expected_values(self): 21 return ["1.2", 1.2] 22 23 24 check = FunctionAppMinTLSVersion() 25 [end of checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py b/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py --- a/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py +++ b/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py @@ -4,15 +4,27 @@ class FunctionAppMinTLSVersion(BaseResourceValueCheck): def __init__(self): + """ + The minimum supported TLS version for the function app. + Defaults to 1.2 for new function apps. + field name is: + - min_tls_version in azurerm_function_app, azurerm_function_app_slot. + - minimum_tls_version in newer resources (with linux/windows). + """ name = "Ensure Function app is using the latest version of TLS encryption" id = "CKV_AZURE_145" - supported_resources = ['azurerm_function_app'] + supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app', + 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot', + 'azurerm_windows_function_app_slot'] categories = [CheckCategories.NETWORKING] super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources, missing_block_result=CheckResult.PASSED) def get_inspected_key(self): - return "site_config/[0]/min_tls_version" + if self.entity_type in ("azurerm_function_app", "azurerm_function_app_slot"): + return "site_config/[0]/min_tls_version" + else: + return "site_config/[0]/minimum_tls_version" def get_expected_value(self): return 1.2
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py b/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py\n--- a/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py\n+++ b/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py\n@@ -4,15 +4,27 @@\n \n class FunctionAppMinTLSVersion(BaseResourceValueCheck):\n def __init__(self):\n+ \"\"\"\n+ The minimum supported TLS version for the function app.\n+ Defaults to 1.2 for new function apps.\n+ field name is:\n+ - min_tls_version in azurerm_function_app, azurerm_function_app_slot.\n+ - minimum_tls_version in newer resources (with linux/windows).\n+ \"\"\"\n name = \"Ensure Function app is using the latest version of TLS encryption\"\n id = \"CKV_AZURE_145\"\n- supported_resources = ['azurerm_function_app']\n+ supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app',\n+ 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot',\n+ 'azurerm_windows_function_app_slot']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,\n missing_block_result=CheckResult.PASSED)\n \n def get_inspected_key(self):\n- return \"site_config/[0]/min_tls_version\"\n+ if self.entity_type in (\"azurerm_function_app\", \"azurerm_function_app_slot\"):\n+ return \"site_config/[0]/min_tls_version\"\n+ else:\n+ return \"site_config/[0]/minimum_tls_version\"\n \n def get_expected_value(self):\n return 1.2\n", "issue": "Azure Function App Slots - Ensure Azure Function App Slots use at least TLS 1.2 \n**Describe the issue**\r\nIt seems that there are no checks that ensure that the following resources to use at least TLS 1.2 :\r\n\r\n- azurerm_function_app_slot\r\n- azurerm_linux_function_app_slot\r\n- azurerm_windows_function_app_slot\r\n\r\n**Examples**\r\n\r\n````hcl\r\n````hcl\r\nresource \"azurerm_function_app_slot\" \"example\" {\r\n name = \"test-azure-functions_slot\"\r\n location = azurerm_resource_group.example.location\r\n resource_group_name = azurerm_resource_group.example.name\r\n app_service_plan_id = azurerm_app_service_plan.example.id\r\n function_app_name = azurerm_function_app.example.name\r\n storage_account_name = azurerm_storage_account.example.name\r\n storage_account_access_key = azurerm_storage_account.example.primary_access_key\r\n\r\n site_config {\r\n min_tls_version = 1.2\r\n }\r\n}\r\n\r\nresource \"azurerm_linux_function_app_slot\" \"example\" {\r\n name = \"example-linux-function-app-slot\"\r\n function_app_id = azurerm_linux_function_app.example.id\r\n storage_account_name = azurerm_storage_account.example.name\r\n\r\n site_config {\r\n min_tls_version = 1.2\r\n }\r\n}\r\n\r\nresource \"azurerm_windows_function_app\" \"example\" {\r\n name = \"example-windows-function-app\"\r\n resource_group_name = azurerm_resource_group.example.name\r\n location = azurerm_resource_group.example.location\r\n storage_account_name = azurerm_storage_account.example.name\r\n service_plan_id = azurerm_service_plan.example.id\r\n\r\n site_config {\r\n min_tls_version = 1.2\r\n }\r\n}\r\n````\r\n\r\n**Version (please complete the following information):**\r\n - N/A\r\n \r\n**Additional context**\r\n\r\nN/A\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass 
FunctionAppMinTLSVersion(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure Function app is using the latest version of TLS encryption\"\n id = \"CKV_AZURE_145\"\n supported_resources = ['azurerm_function_app']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,\n missing_block_result=CheckResult.PASSED)\n\n def get_inspected_key(self):\n return \"site_config/[0]/min_tls_version\"\n\n def get_expected_value(self):\n return 1.2\n\n def get_expected_values(self):\n return [\"1.2\", 1.2]\n\n\ncheck = FunctionAppMinTLSVersion()\n", "path": "checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py"}]}
1,203
410
gh_patches_debug_9248
rasdani/github-patches
git_diff
hylang__hy-1813
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> REPL crashes when ~/.hy-history is inaccessible I've been working on updating the Hy package in Debian and in running the tests, I've noticed that the `test_bin` tests appear to expect to be able to do something with `~/.hy-history` (not sure if it's trying to read or write) but I had to set `HOME` to a real directory after which they passed. Looking at the tests, I imagine it's happening because they're interacting with the REPL via stdin so it's trying to load and/or use the history file? </issue> <code> [start of hy/completer.py] 1 # Copyright 2019 the authors. 2 # This file is part of Hy, which is free software licensed under the Expat 3 # license. See the LICENSE. 4 5 import contextlib 6 import os 7 import re 8 import sys 9 import builtins 10 11 import hy.macros 12 import hy.compiler 13 14 15 docomplete = True 16 17 try: 18 import readline 19 except ImportError: 20 try: 21 import pyreadline.rlmain 22 import pyreadline.unicode_helper # NOQA 23 import readline 24 except ImportError: 25 docomplete = False 26 27 if docomplete: 28 if sys.platform == 'darwin' and 'libedit' in readline.__doc__: 29 readline_bind = "bind ^I rl_complete" 30 else: 31 readline_bind = "tab: complete" 32 33 34 class Completer(object): 35 36 def __init__(self, namespace={}): 37 if not isinstance(namespace, dict): 38 raise TypeError('namespace must be a dictionary') 39 self.namespace = namespace 40 self.path = [hy.compiler._special_form_compilers, 41 builtins.__dict__, 42 namespace] 43 44 self.tag_path = [] 45 46 namespace.setdefault('__macros__', {}) 47 namespace.setdefault('__tags__', {}) 48 49 self.path.append(namespace['__macros__']) 50 self.tag_path.append(namespace['__tags__']) 51 52 def attr_matches(self, text): 53 # Borrowed from IPython's completer 54 m = re.match(r"(\S+(\.[\w-]+)*)\.([\w-]*)$", text) 55 56 if m: 57 expr, attr = m.group(1, 3) 58 attr = attr.replace("-", "_") 59 expr = expr.replace("-", "_") 60 else: 61 return [] 62 63 try: 64 obj = eval(expr, self.namespace) 65 words = dir(obj) 66 except Exception: 67 return [] 68 69 n = len(attr) 70 matches = [] 71 for w in words: 72 if w[:n] == attr: 73 matches.append("{}.{}".format( 74 expr.replace("_", "-"), w.replace("_", "-"))) 75 return matches 76 77 def global_matches(self, text): 78 matches = [] 79 for p in self.path: 80 for k in p.keys(): 81 if isinstance(k, str): 82 k = k.replace("_", "-") 83 if k.startswith(text): 84 matches.append(k) 85 return matches 86 87 def tag_matches(self, text): 88 text = text[1:] 89 matches = [] 90 for p in self.tag_path: 91 for k in p.keys(): 92 if isinstance(k, str): 93 if k.startswith(text): 94 matches.append("#{}".format(k)) 95 return matches 96 97 def complete(self, text, state): 98 if text.startswith("#"): 99 matches = self.tag_matches(text) 100 elif "." 
in text: 101 matches = self.attr_matches(text) 102 else: 103 matches = self.global_matches(text) 104 try: 105 return matches[state] 106 except IndexError: 107 return None 108 109 110 @contextlib.contextmanager 111 def completion(completer=None): 112 delims = "()[]{} " 113 if not completer: 114 completer = Completer() 115 116 if docomplete: 117 readline.set_completer(completer.complete) 118 readline.set_completer_delims(delims) 119 120 history = os.path.expanduser("~/.hy-history") 121 readline.parse_and_bind("set blink-matching-paren on") 122 123 try: 124 readline.read_history_file(history) 125 except IOError: 126 open(history, 'a').close() 127 128 readline.parse_and_bind(readline_bind) 129 130 try: 131 yield 132 finally: 133 if docomplete: 134 readline.write_history_file(history) 135 [end of hy/completer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hy/completer.py b/hy/completer.py --- a/hy/completer.py +++ b/hy/completer.py @@ -123,7 +123,7 @@ try: readline.read_history_file(history) except IOError: - open(history, 'a').close() + pass readline.parse_and_bind(readline_bind) @@ -131,4 +131,7 @@ yield finally: if docomplete: - readline.write_history_file(history) + try: + readline.write_history_file(history) + except IOError: + pass
{"golden_diff": "diff --git a/hy/completer.py b/hy/completer.py\n--- a/hy/completer.py\n+++ b/hy/completer.py\n@@ -123,7 +123,7 @@\n try:\n readline.read_history_file(history)\n except IOError:\n- open(history, 'a').close()\n+ pass\n \n readline.parse_and_bind(readline_bind)\n \n@@ -131,4 +131,7 @@\n yield\n finally:\n if docomplete:\n- readline.write_history_file(history)\n+ try:\n+ readline.write_history_file(history)\n+ except IOError:\n+ pass\n", "issue": "REPL crashes when ~/.hy-history is inaccessible\nI've been working on updating the Hy package in Debian and in running the tests, I've noticed that the `test_bin` tests appear to expect to be able to do something with `~/.hy-history` (not sure if it's trying to read or write) but I had to set `HOME` to a real directory after which they passed. Looking at the tests, I imagine it's happening because they're interacting with the REPL via stdin so it's trying to load and/or use the history file?\n", "before_files": [{"content": "# Copyright 2019 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport contextlib\nimport os\nimport re\nimport sys\nimport builtins\n\nimport hy.macros\nimport hy.compiler\n\n\ndocomplete = True\n\ntry:\n import readline\nexcept ImportError:\n try:\n import pyreadline.rlmain\n import pyreadline.unicode_helper # NOQA\n import readline\n except ImportError:\n docomplete = False\n\nif docomplete:\n if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n readline_bind = \"bind ^I rl_complete\"\n else:\n readline_bind = \"tab: complete\"\n\n\nclass Completer(object):\n\n def __init__(self, namespace={}):\n if not isinstance(namespace, dict):\n raise TypeError('namespace must be a dictionary')\n self.namespace = namespace\n self.path = [hy.compiler._special_form_compilers,\n builtins.__dict__,\n namespace]\n\n self.tag_path = []\n\n namespace.setdefault('__macros__', {})\n namespace.setdefault('__tags__', {})\n\n self.path.append(namespace['__macros__'])\n self.tag_path.append(namespace['__tags__'])\n\n def attr_matches(self, text):\n # Borrowed from IPython's completer\n m = re.match(r\"(\\S+(\\.[\\w-]+)*)\\.([\\w-]*)$\", text)\n\n if m:\n expr, attr = m.group(1, 3)\n attr = attr.replace(\"-\", \"_\")\n expr = expr.replace(\"-\", \"_\")\n else:\n return []\n\n try:\n obj = eval(expr, self.namespace)\n words = dir(obj)\n except Exception:\n return []\n\n n = len(attr)\n matches = []\n for w in words:\n if w[:n] == attr:\n matches.append(\"{}.{}\".format(\n expr.replace(\"_\", \"-\"), w.replace(\"_\", \"-\")))\n return matches\n\n def global_matches(self, text):\n matches = []\n for p in self.path:\n for k in p.keys():\n if isinstance(k, str):\n k = k.replace(\"_\", \"-\")\n if k.startswith(text):\n matches.append(k)\n return matches\n\n def tag_matches(self, text):\n text = text[1:]\n matches = []\n for p in self.tag_path:\n for k in p.keys():\n if isinstance(k, str):\n if k.startswith(text):\n matches.append(\"#{}\".format(k))\n return matches\n\n def complete(self, text, state):\n if text.startswith(\"#\"):\n matches = self.tag_matches(text)\n elif \".\" in text:\n matches = self.attr_matches(text)\n else:\n matches = self.global_matches(text)\n try:\n return matches[state]\n except IndexError:\n return None\n\n\[email protected]\ndef completion(completer=None):\n delims = \"()[]{} \"\n if not completer:\n completer = Completer()\n\n if docomplete:\n readline.set_completer(completer.complete)\n 
readline.set_completer_delims(delims)\n\n history = os.path.expanduser(\"~/.hy-history\")\n readline.parse_and_bind(\"set blink-matching-paren on\")\n\n try:\n readline.read_history_file(history)\n except IOError:\n open(history, 'a').close()\n\n readline.parse_and_bind(readline_bind)\n\n try:\n yield\n finally:\n if docomplete:\n readline.write_history_file(history)\n", "path": "hy/completer.py"}]}
1,716
144
gh_patches_debug_25027
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2810
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider planned_parenthood is broken During the global build at 2021-05-26-14-42-23, spider **planned_parenthood** failed with **598 features** and **598 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/planned_parenthood.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/planned_parenthood.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/planned_parenthood.geojson)) </issue> <code> [start of locations/spiders/planned_parenthood.py] 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import re 4 5 from locations.items import GeojsonPointItem 6 7 8 class PlannedParenthoodSpider(scrapy.Spider): 9 name = "planned_parenthood" 10 item_attributes = { 'brand': "Planned Parenthood" } 11 allowed_domains = ["www.plannedparenthood.org"] 12 start_urls = ( 13 'https://www.plannedparenthood.org/health-center', 14 ) 15 16 def parse(self, response): 17 state_urls = response.xpath('//ul[@class="quicklist-list"]/li/a/@href').extract() 18 for path in state_urls: 19 yield scrapy.Request( 20 response.urljoin(path), 21 callback=self.parse_state, 22 ) 23 24 def parse_state(self, response): 25 venue_urls = response.xpath('//ul[@class="quicklist-list"]/li/p/a/@href').extract() 26 for path in venue_urls: 27 yield scrapy.Request( 28 response.urljoin(path), 29 callback=self.parse_venue, 30 ) 31 32 def parse_venue(self, response): 33 properties = { 34 'addr_full': response.xpath('//*[@itemprop="streetAddress"]/text()').extract_first(), 35 'city': response.xpath('//*[@itemprop="addressLocality"]/text()').extract_first(), 36 'state': response.xpath('//*[@itemprop="addressRegion"]/text()').extract_first(), 37 'postcode': response.xpath('//*[@itemprop="postalCode"]/text()').extract_first(), 38 'phone': response.xpath('//a[@itemprop="telephone"][@data-link]/text()').extract_first(), 39 'ref': response.url, 40 'website': response.url, 41 } 42 43 map_image_url = response.xpath('//img[@class="address-map"]/@src').extract_first() 44 match = re.search(r"center=(.*?),(.*?)&zoom", map_image_url) 45 properties['lat'] = float(match.group(1)), 46 properties['lon'] = float(match.group(2)), 47 48 yield GeojsonPointItem(**properties) 49 [end of locations/spiders/planned_parenthood.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/planned_parenthood.py b/locations/spiders/planned_parenthood.py --- a/locations/spiders/planned_parenthood.py +++ b/locations/spiders/planned_parenthood.py @@ -27,9 +27,14 @@ yield scrapy.Request( response.urljoin(path), callback=self.parse_venue, + meta={'dont_redirect':True} ) def parse_venue(self, response): + if response is None: + # Ignoring redirects + return + properties = { 'addr_full': response.xpath('//*[@itemprop="streetAddress"]/text()').extract_first(), 'city': response.xpath('//*[@itemprop="addressLocality"]/text()').extract_first(), @@ -42,7 +47,7 @@ map_image_url = response.xpath('//img[@class="address-map"]/@src').extract_first() match = re.search(r"center=(.*?),(.*?)&zoom", map_image_url) - properties['lat'] = float(match.group(1)), - properties['lon'] = float(match.group(2)), + properties['lat'] = float(match.group(1)) + properties['lon'] = float(match.group(2)) yield GeojsonPointItem(**properties)
{"golden_diff": "diff --git a/locations/spiders/planned_parenthood.py b/locations/spiders/planned_parenthood.py\n--- a/locations/spiders/planned_parenthood.py\n+++ b/locations/spiders/planned_parenthood.py\n@@ -27,9 +27,14 @@\n yield scrapy.Request(\n response.urljoin(path),\n callback=self.parse_venue,\n+ meta={'dont_redirect':True}\n )\n \n def parse_venue(self, response):\n+ if response is None:\n+ # Ignoring redirects\n+ return\n+\n properties = {\n 'addr_full': response.xpath('//*[@itemprop=\"streetAddress\"]/text()').extract_first(),\n 'city': response.xpath('//*[@itemprop=\"addressLocality\"]/text()').extract_first(),\n@@ -42,7 +47,7 @@\n \n map_image_url = response.xpath('//img[@class=\"address-map\"]/@src').extract_first()\n match = re.search(r\"center=(.*?),(.*?)&zoom\", map_image_url)\n- properties['lat'] = float(match.group(1)),\n- properties['lon'] = float(match.group(2)),\n+ properties['lat'] = float(match.group(1))\n+ properties['lon'] = float(match.group(2))\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider planned_parenthood is broken\nDuring the global build at 2021-05-26-14-42-23, spider **planned_parenthood** failed with **598 features** and **598 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/planned_parenthood.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/planned_parenthood.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/planned_parenthood.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass PlannedParenthoodSpider(scrapy.Spider):\n name = \"planned_parenthood\"\n item_attributes = { 'brand': \"Planned Parenthood\" }\n allowed_domains = [\"www.plannedparenthood.org\"]\n start_urls = (\n 'https://www.plannedparenthood.org/health-center',\n )\n\n def parse(self, response):\n state_urls = response.xpath('//ul[@class=\"quicklist-list\"]/li/a/@href').extract()\n for path in state_urls:\n yield scrapy.Request(\n response.urljoin(path),\n callback=self.parse_state,\n )\n\n def parse_state(self, response):\n venue_urls = response.xpath('//ul[@class=\"quicklist-list\"]/li/p/a/@href').extract()\n for path in venue_urls:\n yield scrapy.Request(\n response.urljoin(path),\n callback=self.parse_venue,\n )\n\n def parse_venue(self, response):\n properties = {\n 'addr_full': response.xpath('//*[@itemprop=\"streetAddress\"]/text()').extract_first(),\n 'city': response.xpath('//*[@itemprop=\"addressLocality\"]/text()').extract_first(),\n 'state': response.xpath('//*[@itemprop=\"addressRegion\"]/text()').extract_first(),\n 'postcode': response.xpath('//*[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'phone': response.xpath('//a[@itemprop=\"telephone\"][@data-link]/text()').extract_first(),\n 'ref': response.url,\n 'website': response.url,\n }\n\n map_image_url = response.xpath('//img[@class=\"address-map\"]/@src').extract_first()\n match = re.search(r\"center=(.*?),(.*?)&zoom\", map_image_url)\n properties['lat'] = float(match.group(1)),\n properties['lon'] = float(match.group(2)),\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/planned_parenthood.py"}]}
1,247
284
gh_patches_debug_23474
rasdani/github-patches
git_diff
aws-powertools__powertools-lambda-python-186
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Case insensitive lookup of header values in http proxy events **Is your feature request related to a problem? Please describe.** <!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] --> Http headers are meant to be case insensitive, but lambda events use case sensitive dicts for the header keys. For example getting the `Authorization` header you will need to do a case insensitive look up to support if the header is `authorization`. So you end up doing something like this. ```python3 authorization = None for key, value in self.headers.items(): if name.lower() == 'authorization' authorization = value ``` **Describe the solution you'd like** <!-- A clear and concise description of what you want to happen. --> > Linked pull request : #185 Update `BaseProxyEvent.get_header_value` method to support case insensitive lookups by default ```python3 authorization = event.get_header_value("authorization") ``` ```python3 def get_header_value( self, name: str, default_value: Optional[str] = None, case_sensitive: Optional[bool] = False ) -> Optional[str]: """Get header value by name Parameters ---------- name: str Header name default_value: str, optional Default value if no value was found by name case_sensitive: bool Whether to use a case sensitive look up Returns ------- str, optional Header value """ if case_sensitive: return self.headers.get(name, default_value) return next((value for key, value in self.headers.items() if name.lower() == key.lower()), default_value) ``` **Questions** * Should we default to case insensitive look ups? This is what most people would want? * Just return the first match? Or support returning all matches? **Describe alternatives you've considered** <!-- A clear and concise description of any alternative solutions or features you've considered. --> Use the requests CaseInsensitiveDict or port it : * https://github.com/psf/requests/blob/333fa8d3f201f68b82d9e8f26be4987e869fd3bf/requests/structures.py#L15 **Additional context** <!-- Add any other context or screenshots about the feature request here. 
--> From the Http 1.1 spec, https headers are not supposed to be case sensitive: https://www.w3.org/Protocols/rfc2616/rfc2616.html </issue> <code> [start of aws_lambda_powertools/utilities/data_classes/common.py] 1 from typing import Any, Dict, Optional 2 3 4 class DictWrapper: 5 """Provides a single read only access to a wrapper dict""" 6 7 def __init__(self, data: Dict[str, Any]): 8 self._data = data 9 10 def __getitem__(self, key: str) -> Any: 11 return self._data[key] 12 13 def get(self, key: str) -> Optional[Any]: 14 return self._data.get(key) 15 16 17 class BaseProxyEvent(DictWrapper): 18 @property 19 def headers(self) -> Dict[str, str]: 20 return self["headers"] 21 22 @property 23 def query_string_parameters(self) -> Optional[Dict[str, str]]: 24 return self.get("queryStringParameters") 25 26 @property 27 def is_base64_encoded(self) -> bool: 28 return self.get("isBase64Encoded") 29 30 @property 31 def body(self) -> Optional[str]: 32 return self.get("body") 33 34 def get_query_string_value(self, name: str, default_value: Optional[str] = None) -> Optional[str]: 35 """Get query string value by name 36 37 Parameters 38 ---------- 39 name: str 40 Query string parameter name 41 default_value: str, optional 42 Default value if no value was found by name 43 Returns 44 ------- 45 str, optional 46 Query string parameter value 47 """ 48 params = self.query_string_parameters 49 return default_value if params is None else params.get(name, default_value) 50 51 def get_header_value(self, name: str, default_value: Optional[str] = None) -> Optional[str]: 52 """Get header value by name 53 54 Parameters 55 ---------- 56 name: str 57 Header name 58 default_value: str, optional 59 Default value if no value was found by name 60 Returns 61 ------- 62 str, optional 63 Header value 64 """ 65 return self.headers.get(name, default_value) 66 [end of aws_lambda_powertools/utilities/data_classes/common.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aws_lambda_powertools/utilities/data_classes/common.py b/aws_lambda_powertools/utilities/data_classes/common.py --- a/aws_lambda_powertools/utilities/data_classes/common.py +++ b/aws_lambda_powertools/utilities/data_classes/common.py @@ -48,7 +48,9 @@ params = self.query_string_parameters return default_value if params is None else params.get(name, default_value) - def get_header_value(self, name: str, default_value: Optional[str] = None) -> Optional[str]: + def get_header_value( + self, name: str, default_value: Optional[str] = None, case_sensitive: Optional[bool] = False + ) -> Optional[str]: """Get header value by name Parameters @@ -57,9 +59,14 @@ Header name default_value: str, optional Default value if no value was found by name + case_sensitive: bool + Whether to use a case sensitive look up Returns ------- str, optional Header value """ - return self.headers.get(name, default_value) + if case_sensitive: + return self.headers.get(name, default_value) + + return next((value for key, value in self.headers.items() if name.lower() == key.lower()), default_value)
{"golden_diff": "diff --git a/aws_lambda_powertools/utilities/data_classes/common.py b/aws_lambda_powertools/utilities/data_classes/common.py\n--- a/aws_lambda_powertools/utilities/data_classes/common.py\n+++ b/aws_lambda_powertools/utilities/data_classes/common.py\n@@ -48,7 +48,9 @@\n params = self.query_string_parameters\n return default_value if params is None else params.get(name, default_value)\n \n- def get_header_value(self, name: str, default_value: Optional[str] = None) -> Optional[str]:\n+ def get_header_value(\n+ self, name: str, default_value: Optional[str] = None, case_sensitive: Optional[bool] = False\n+ ) -> Optional[str]:\n \"\"\"Get header value by name\n \n Parameters\n@@ -57,9 +59,14 @@\n Header name\n default_value: str, optional\n Default value if no value was found by name\n+ case_sensitive: bool\n+ Whether to use a case sensitive look up\n Returns\n -------\n str, optional\n Header value\n \"\"\"\n- return self.headers.get(name, default_value)\n+ if case_sensitive:\n+ return self.headers.get(name, default_value)\n+\n+ return next((value for key, value in self.headers.items() if name.lower() == key.lower()), default_value)\n", "issue": "Case insensitive lookup of header values in http proxy events\n**Is your feature request related to a problem? Please describe.**\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\nHttp headers are meant to be case insensitive, but lambda events use case sensitive dicts for the header keys.\r\n\r\nFor example getting the `Authorization` header you will need to do a case insensitive look up to support if the header is `authorization`. So you end up doing something like this.\r\n\r\n```python3\r\nauthorization = None\r\nfor key, value in self.headers.items():\r\n if name.lower() == 'authorization'\r\n authorization = value\r\n```\r\n\r\n**Describe the solution you'd like**\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n> Linked pull request : #185\r\n\r\nUpdate `BaseProxyEvent.get_header_value` method to support case insensitive lookups by default\r\n\r\n```python3\r\nauthorization = event.get_header_value(\"authorization\")\r\n```\r\n\r\n```python3\r\n def get_header_value(\r\n self, name: str, default_value: Optional[str] = None, case_sensitive: Optional[bool] = False\r\n ) -> Optional[str]:\r\n \"\"\"Get header value by name\r\n\r\n Parameters\r\n ----------\r\n name: str\r\n Header name\r\n default_value: str, optional\r\n Default value if no value was found by name\r\n case_sensitive: bool\r\n Whether to use a case sensitive look up\r\n Returns\r\n -------\r\n str, optional\r\n Header value\r\n \"\"\"\r\n if case_sensitive:\r\n return self.headers.get(name, default_value)\r\n \r\n return next((value for key, value in self.headers.items() if name.lower() == key.lower()), default_value)\r\n```\r\n\r\n**Questions**\r\n\r\n* Should we default to case insensitive look ups? This is what most people would want?\r\n* Just return the first match? Or support returning all matches?\r\n\r\n**Describe alternatives you've considered**\r\n<!-- A clear and concise description of any alternative solutions or features you've considered. -->\r\nUse the requests CaseInsensitiveDict or port it :\r\n* https://github.com/psf/requests/blob/333fa8d3f201f68b82d9e8f26be4987e869fd3bf/requests/structures.py#L15\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\nFrom the Http 1.1 spec, https headers are not supposed to be case sensitive: https://www.w3.org/Protocols/rfc2616/rfc2616.html\n", "before_files": [{"content": "from typing import Any, Dict, Optional\n\n\nclass DictWrapper:\n \"\"\"Provides a single read only access to a wrapper dict\"\"\"\n\n def __init__(self, data: Dict[str, Any]):\n self._data = data\n\n def __getitem__(self, key: str) -> Any:\n return self._data[key]\n\n def get(self, key: str) -> Optional[Any]:\n return self._data.get(key)\n\n\nclass BaseProxyEvent(DictWrapper):\n @property\n def headers(self) -> Dict[str, str]:\n return self[\"headers\"]\n\n @property\n def query_string_parameters(self) -> Optional[Dict[str, str]]:\n return self.get(\"queryStringParameters\")\n\n @property\n def is_base64_encoded(self) -> bool:\n return self.get(\"isBase64Encoded\")\n\n @property\n def body(self) -> Optional[str]:\n return self.get(\"body\")\n\n def get_query_string_value(self, name: str, default_value: Optional[str] = None) -> Optional[str]:\n \"\"\"Get query string value by name\n\n Parameters\n ----------\n name: str\n Query string parameter name\n default_value: str, optional\n Default value if no value was found by name\n Returns\n -------\n str, optional\n Query string parameter value\n \"\"\"\n params = self.query_string_parameters\n return default_value if params is None else params.get(name, default_value)\n\n def get_header_value(self, name: str, default_value: Optional[str] = None) -> Optional[str]:\n \"\"\"Get header value by name\n\n Parameters\n ----------\n name: str\n Header name\n default_value: str, optional\n Default value if no value was found by name\n Returns\n -------\n str, optional\n Header value\n \"\"\"\n return self.headers.get(name, default_value)\n", "path": "aws_lambda_powertools/utilities/data_classes/common.py"}]}
1,626
292
gh_patches_debug_38040
rasdani/github-patches
git_diff
getsentry__sentry-16440
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> User-creation should not fail, when the account already exists I'm creating an Ansible role for installing sentry on our network. The software is installed via `pip` -- version 8.22.0, which seems to be the latest as of today. Not using Docker -- just a dedicated VM. One of the tasks is to create a Sentry-admin: ``` sentry createuser --superuser --no-input --password=FOO [email protected] ``` Unfortunately, the above command only works _the first time_. Trying to run it again fails: ``` django.db.utils.IntegrityError: IntegrityError('duplicate key value violates unique constraint \"auth_user_username_key\" DETAIL: Key (username)=([email protected]) already exists ``` I understand, why the same account can not be created again. But it should not be an _error_ -- simply update the existing record if necessary. Or, if it must be an error, there should be a specific exit-code for this case, which would allow systems like Ansible to move on. Currently, `sentry` exits with the same code 1 for all errors... At the very least, there needs to be a way to check, whether the account exists already... Ansible would then be able to perform the query before attempting to add the user. </issue> <code> [start of src/sentry/runner/commands/createuser.py] 1 from __future__ import absolute_import, print_function 2 3 import click 4 from sentry.runner.decorators import configuration 5 6 7 def _get_field(field_name): 8 from sentry.models import User 9 10 return User._meta.get_field(field_name) 11 12 13 def _get_email(): 14 from django.core.exceptions import ValidationError 15 16 rv = click.prompt("Email") 17 field = _get_field("email") 18 try: 19 return field.clean(rv, None) 20 except ValidationError as e: 21 raise click.ClickException("; ".join(e.messages)) 22 23 24 def _get_password(): 25 from django.core.exceptions import ValidationError 26 27 rv = click.prompt("Password", hide_input=True, confirmation_prompt=True) 28 field = _get_field("password") 29 try: 30 return field.clean(rv, None) 31 except ValidationError as e: 32 raise click.ClickException("; ".join(e.messages)) 33 34 35 def _get_superuser(): 36 return click.confirm("Should this user be a superuser?", default=False) 37 38 39 @click.command() 40 @click.option("--email") 41 @click.option("--password") 42 @click.option("--superuser/--no-superuser", default=None, is_flag=True) 43 @click.option("--no-password", default=False, is_flag=True) 44 @click.option("--no-input", default=False, is_flag=True) 45 @configuration 46 def createuser(email, password, superuser, no_password, no_input): 47 "Create a new user." 48 if not no_input: 49 if not email: 50 email = _get_email() 51 52 if not (password or no_password): 53 password = _get_password() 54 55 if superuser is None: 56 superuser = _get_superuser() 57 58 if superuser is None: 59 superuser = False 60 61 if not email: 62 raise click.ClickException("Invalid or missing email address.") 63 64 # TODO(mattrobenolt): Accept password over stdin? 
65 if not no_password and not password: 66 raise click.ClickException("No password set and --no-password not passed.") 67 68 from sentry import roles 69 from sentry.models import User 70 from django.conf import settings 71 72 user = User( 73 email=email, username=email, is_superuser=superuser, is_staff=superuser, is_active=True 74 ) 75 76 if password: 77 user.set_password(password) 78 79 user.save() 80 81 click.echo("User created: %s" % (email,)) 82 83 # TODO(dcramer): kill this when we improve flows 84 if settings.SENTRY_SINGLE_ORGANIZATION: 85 from sentry.models import Organization, OrganizationMember, OrganizationMemberTeam, Team 86 87 org = Organization.get_default() 88 if superuser: 89 role = roles.get_top_dog().id 90 else: 91 role = org.default_role 92 member = OrganizationMember.objects.create(organization=org, user=user, role=role) 93 94 # if we've only got a single team let's go ahead and give 95 # access to that team as its likely the desired outcome 96 teams = list(Team.objects.filter(organization=org)[0:2]) 97 if len(teams) == 1: 98 OrganizationMemberTeam.objects.create(team=teams[0], organizationmember=member) 99 click.echo("Added to organization: %s" % (org.slug,)) 100 [end of src/sentry/runner/commands/createuser.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/sentry/runner/commands/createuser.py b/src/sentry/runner/commands/createuser.py --- a/src/sentry/runner/commands/createuser.py +++ b/src/sentry/runner/commands/createuser.py @@ -1,6 +1,7 @@ from __future__ import absolute_import, print_function import click +import sys from sentry.runner.decorators import configuration @@ -42,8 +43,9 @@ @click.option("--superuser/--no-superuser", default=None, is_flag=True) @click.option("--no-password", default=False, is_flag=True) @click.option("--no-input", default=False, is_flag=True) [email protected]("--force-update", default=False, is_flag=True) @configuration -def createuser(email, password, superuser, no_password, no_input): +def createuser(email, password, superuser, no_password, no_input, force_update): "Create a new user." if not no_input: if not email: @@ -76,24 +78,31 @@ if password: user.set_password(password) - user.save() - - click.echo("User created: %s" % (email,)) - - # TODO(dcramer): kill this when we improve flows - if settings.SENTRY_SINGLE_ORGANIZATION: - from sentry.models import Organization, OrganizationMember, OrganizationMemberTeam, Team - - org = Organization.get_default() - if superuser: - role = roles.get_top_dog().id + if User.objects.filter(username=email).exists(): + if force_update: + user.save(force_update=force_update) + click.echo("User updated: %s" % (email,)) else: - role = org.default_role - member = OrganizationMember.objects.create(organization=org, user=user, role=role) - - # if we've only got a single team let's go ahead and give - # access to that team as its likely the desired outcome - teams = list(Team.objects.filter(organization=org)[0:2]) - if len(teams) == 1: - OrganizationMemberTeam.objects.create(team=teams[0], organizationmember=member) - click.echo("Added to organization: %s" % (org.slug,)) + click.echo("User: %s exists, use --force-update to force" % (email,)) + sys.exit(3) + else: + user.save() + click.echo("User created: %s" % (email,)) + + # TODO(dcramer): kill this when we improve flows + if settings.SENTRY_SINGLE_ORGANIZATION: + from sentry.models import Organization, OrganizationMember, OrganizationMemberTeam, Team + + org = Organization.get_default() + if superuser: + role = roles.get_top_dog().id + else: + role = org.default_role + member = OrganizationMember.objects.create(organization=org, user=user, role=role) + + # if we've only got a single team let's go ahead and give + # access to that team as its likely the desired outcome + teams = list(Team.objects.filter(organization=org)[0:2]) + if len(teams) == 1: + OrganizationMemberTeam.objects.create(team=teams[0], organizationmember=member) + click.echo("Added to organization: %s" % (org.slug,))
{"golden_diff": "diff --git a/src/sentry/runner/commands/createuser.py b/src/sentry/runner/commands/createuser.py\n--- a/src/sentry/runner/commands/createuser.py\n+++ b/src/sentry/runner/commands/createuser.py\n@@ -1,6 +1,7 @@\n from __future__ import absolute_import, print_function\n \n import click\n+import sys\n from sentry.runner.decorators import configuration\n \n \n@@ -42,8 +43,9 @@\n @click.option(\"--superuser/--no-superuser\", default=None, is_flag=True)\n @click.option(\"--no-password\", default=False, is_flag=True)\n @click.option(\"--no-input\", default=False, is_flag=True)\[email protected](\"--force-update\", default=False, is_flag=True)\n @configuration\n-def createuser(email, password, superuser, no_password, no_input):\n+def createuser(email, password, superuser, no_password, no_input, force_update):\n \"Create a new user.\"\n if not no_input:\n if not email:\n@@ -76,24 +78,31 @@\n if password:\n user.set_password(password)\n \n- user.save()\n-\n- click.echo(\"User created: %s\" % (email,))\n-\n- # TODO(dcramer): kill this when we improve flows\n- if settings.SENTRY_SINGLE_ORGANIZATION:\n- from sentry.models import Organization, OrganizationMember, OrganizationMemberTeam, Team\n-\n- org = Organization.get_default()\n- if superuser:\n- role = roles.get_top_dog().id\n+ if User.objects.filter(username=email).exists():\n+ if force_update:\n+ user.save(force_update=force_update)\n+ click.echo(\"User updated: %s\" % (email,))\n else:\n- role = org.default_role\n- member = OrganizationMember.objects.create(organization=org, user=user, role=role)\n-\n- # if we've only got a single team let's go ahead and give\n- # access to that team as its likely the desired outcome\n- teams = list(Team.objects.filter(organization=org)[0:2])\n- if len(teams) == 1:\n- OrganizationMemberTeam.objects.create(team=teams[0], organizationmember=member)\n- click.echo(\"Added to organization: %s\" % (org.slug,))\n+ click.echo(\"User: %s exists, use --force-update to force\" % (email,))\n+ sys.exit(3)\n+ else:\n+ user.save()\n+ click.echo(\"User created: %s\" % (email,))\n+\n+ # TODO(dcramer): kill this when we improve flows\n+ if settings.SENTRY_SINGLE_ORGANIZATION:\n+ from sentry.models import Organization, OrganizationMember, OrganizationMemberTeam, Team\n+\n+ org = Organization.get_default()\n+ if superuser:\n+ role = roles.get_top_dog().id\n+ else:\n+ role = org.default_role\n+ member = OrganizationMember.objects.create(organization=org, user=user, role=role)\n+\n+ # if we've only got a single team let's go ahead and give\n+ # access to that team as its likely the desired outcome\n+ teams = list(Team.objects.filter(organization=org)[0:2])\n+ if len(teams) == 1:\n+ OrganizationMemberTeam.objects.create(team=teams[0], organizationmember=member)\n+ click.echo(\"Added to organization: %s\" % (org.slug,))\n", "issue": "User-creation should not fail, when the account already exists\nI'm creating an Ansible role for installing sentry on our network. The software is installed via `pip` -- version 8.22.0, which seems to be the latest as of today. Not using Docker -- just a dedicated VM.\r\n\r\nOne of the tasks is to create a Sentry-admin:\r\n```\r\nsentry createuser --superuser --no-input --password=FOO [email protected]\r\n```\r\nUnfortunately, the above command only works _the first time_. 
Trying to run it again fails:\r\n```\r\ndjango.db.utils.IntegrityError: IntegrityError('duplicate key value violates unique constraint \\\"auth_user_username_key\\\"\r\nDETAIL: Key (username)=([email protected]) already exists\r\n```\r\nI understand, why the same account can not be created again. But it should not be an _error_ -- simply update the existing record if necessary. Or, if it must be an error, there should be a specific exit-code for this case, which would allow systems like Ansible to move on. Currently, `sentry` exits with the same code 1 for all errors...\r\n\r\nAt the very least, there needs to be a way to check, whether the account exists already... Ansible would then be able to perform the query before attempting to add the user.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport click\nfrom sentry.runner.decorators import configuration\n\n\ndef _get_field(field_name):\n from sentry.models import User\n\n return User._meta.get_field(field_name)\n\n\ndef _get_email():\n from django.core.exceptions import ValidationError\n\n rv = click.prompt(\"Email\")\n field = _get_field(\"email\")\n try:\n return field.clean(rv, None)\n except ValidationError as e:\n raise click.ClickException(\"; \".join(e.messages))\n\n\ndef _get_password():\n from django.core.exceptions import ValidationError\n\n rv = click.prompt(\"Password\", hide_input=True, confirmation_prompt=True)\n field = _get_field(\"password\")\n try:\n return field.clean(rv, None)\n except ValidationError as e:\n raise click.ClickException(\"; \".join(e.messages))\n\n\ndef _get_superuser():\n return click.confirm(\"Should this user be a superuser?\", default=False)\n\n\[email protected]()\[email protected](\"--email\")\[email protected](\"--password\")\[email protected](\"--superuser/--no-superuser\", default=None, is_flag=True)\[email protected](\"--no-password\", default=False, is_flag=True)\[email protected](\"--no-input\", default=False, is_flag=True)\n@configuration\ndef createuser(email, password, superuser, no_password, no_input):\n \"Create a new user.\"\n if not no_input:\n if not email:\n email = _get_email()\n\n if not (password or no_password):\n password = _get_password()\n\n if superuser is None:\n superuser = _get_superuser()\n\n if superuser is None:\n superuser = False\n\n if not email:\n raise click.ClickException(\"Invalid or missing email address.\")\n\n # TODO(mattrobenolt): Accept password over stdin?\n if not no_password and not password:\n raise click.ClickException(\"No password set and --no-password not passed.\")\n\n from sentry import roles\n from sentry.models import User\n from django.conf import settings\n\n user = User(\n email=email, username=email, is_superuser=superuser, is_staff=superuser, is_active=True\n )\n\n if password:\n user.set_password(password)\n\n user.save()\n\n click.echo(\"User created: %s\" % (email,))\n\n # TODO(dcramer): kill this when we improve flows\n if settings.SENTRY_SINGLE_ORGANIZATION:\n from sentry.models import Organization, OrganizationMember, OrganizationMemberTeam, Team\n\n org = Organization.get_default()\n if superuser:\n role = roles.get_top_dog().id\n else:\n role = org.default_role\n member = OrganizationMember.objects.create(organization=org, user=user, role=role)\n\n # if we've only got a single team let's go ahead and give\n # access to that team as its likely the desired outcome\n teams = list(Team.objects.filter(organization=org)[0:2])\n if len(teams) == 1:\n 
OrganizationMemberTeam.objects.create(team=teams[0], organizationmember=member)\n click.echo(\"Added to organization: %s\" % (org.slug,))\n", "path": "src/sentry/runner/commands/createuser.py"}]}
1,707
761
gh_patches_debug_6595
rasdani/github-patches
git_diff
vispy__vispy-712
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bump VisPy version? Should we bump the version to 0.4.0-dev or something? Currently it is 0.3.0, which doesn't seem right. </issue> <code> [start of vispy/__init__.py] 1 # -*- coding: utf-8 -*- 2 # Copyright (c) 2014, Vispy Development Team. 3 # Distributed under the (new) BSD License. See LICENSE.txt for more info. 4 5 """ 6 7 ===== 8 Vispy 9 ===== 10 11 Vispy is a **high-performance interactive 2D/3D data visualization 12 library**. Vispy leverages the computational power of modern **Graphics 13 Processing Units (GPUs)** through the **OpenGL** library to display very 14 large datasets. 15 16 For more information, see http://vispy.org. 17 18 """ 19 20 from __future__ import division 21 22 __all__ = ['use', 'sys_info', 'set_log_level', 'test'] 23 24 # Definition of the version number 25 version_info = 0, 3, 0, '' # major, minor, patch, extra 26 27 # Nice string for the version (mimic how IPython composes its version str) 28 __version__ = '-'.join(map(str, version_info)).replace('-', '.', 2).strip('-') 29 30 from .util import config, set_log_level, keys, sys_info # noqa 31 from .util.wrappers import use # noqa 32 from .testing import test # noqa 33 [end of vispy/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vispy/__init__.py b/vispy/__init__.py --- a/vispy/__init__.py +++ b/vispy/__init__.py @@ -22,7 +22,7 @@ __all__ = ['use', 'sys_info', 'set_log_level', 'test'] # Definition of the version number -version_info = 0, 3, 0, '' # major, minor, patch, extra +version_info = 0, 4, 0, 'dev' # major, minor, patch, extra # Nice string for the version (mimic how IPython composes its version str) __version__ = '-'.join(map(str, version_info)).replace('-', '.', 2).strip('-')
{"golden_diff": "diff --git a/vispy/__init__.py b/vispy/__init__.py\n--- a/vispy/__init__.py\n+++ b/vispy/__init__.py\n@@ -22,7 +22,7 @@\n __all__ = ['use', 'sys_info', 'set_log_level', 'test']\n \n # Definition of the version number\n-version_info = 0, 3, 0, '' # major, minor, patch, extra\n+version_info = 0, 4, 0, 'dev' # major, minor, patch, extra\n \n # Nice string for the version (mimic how IPython composes its version str)\n __version__ = '-'.join(map(str, version_info)).replace('-', '.', 2).strip('-')\n", "issue": "Bump VisPy version?\nShould we bump the version to 0.4.0-dev or something? Currently it is 0.3.0, which doesn't seem right.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\"\n\n=====\nVispy\n=====\n\nVispy is a **high-performance interactive 2D/3D data visualization\nlibrary**. Vispy leverages the computational power of modern **Graphics\nProcessing Units (GPUs)** through the **OpenGL** library to display very\nlarge datasets.\n\nFor more information, see http://vispy.org.\n\n\"\"\"\n\nfrom __future__ import division\n\n__all__ = ['use', 'sys_info', 'set_log_level', 'test']\n\n# Definition of the version number\nversion_info = 0, 3, 0, '' # major, minor, patch, extra\n\n# Nice string for the version (mimic how IPython composes its version str)\n__version__ = '-'.join(map(str, version_info)).replace('-', '.', 2).strip('-')\n\nfrom .util import config, set_log_level, keys, sys_info # noqa\nfrom .util.wrappers import use # noqa\nfrom .testing import test # noqa\n", "path": "vispy/__init__.py"}]}
890
170
gh_patches_debug_23769
rasdani/github-patches
git_diff
pyca__cryptography-1201
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make it harder to allow truncation on a GCM tag `GCM()` should take a `min_tag_length` parameter, which defaults to `GCM.MAX_TAG_LENGTH`, then we can make it error out on truncated tags by default, while still allowing truncation for use cases that need it. This is technically a backwards compatibility break, but I argue that it should be allowed as a security hardening measure. Thoughts? </issue> <code> [start of cryptography/__about__.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 # implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 from __future__ import absolute_import, division, print_function 14 15 __all__ = [ 16 "__title__", "__summary__", "__uri__", "__version__", "__author__", 17 "__email__", "__license__", "__copyright__", 18 ] 19 20 __title__ = "cryptography" 21 __summary__ = ("cryptography is a package which provides cryptographic recipes" 22 " and primitives to Python developers.") 23 __uri__ = "https://github.com/pyca/cryptography" 24 25 __version__ = "0.5.dev1" 26 27 __author__ = "The cryptography developers" 28 __email__ = "[email protected]" 29 30 __license__ = "Apache License, Version 2.0" 31 __copyright__ = "Copyright 2013-2014 %s" % __author__ 32 [end of cryptography/__about__.py] [start of cryptography/hazmat/primitives/ciphers/modes.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 # implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 
13 14 from __future__ import absolute_import, division, print_function 15 16 from cryptography import utils 17 from cryptography.hazmat.primitives import interfaces 18 19 20 def _check_iv_length(mode, algorithm): 21 if len(mode.initialization_vector) * 8 != algorithm.block_size: 22 raise ValueError("Invalid IV size ({0}) for {1}.".format( 23 len(mode.initialization_vector), mode.name 24 )) 25 26 27 @utils.register_interface(interfaces.Mode) 28 @utils.register_interface(interfaces.ModeWithInitializationVector) 29 class CBC(object): 30 name = "CBC" 31 32 def __init__(self, initialization_vector): 33 self.initialization_vector = initialization_vector 34 35 validate_for_algorithm = _check_iv_length 36 37 38 @utils.register_interface(interfaces.Mode) 39 class ECB(object): 40 name = "ECB" 41 42 def validate_for_algorithm(self, algorithm): 43 pass 44 45 46 @utils.register_interface(interfaces.Mode) 47 @utils.register_interface(interfaces.ModeWithInitializationVector) 48 class OFB(object): 49 name = "OFB" 50 51 def __init__(self, initialization_vector): 52 self.initialization_vector = initialization_vector 53 54 validate_for_algorithm = _check_iv_length 55 56 57 @utils.register_interface(interfaces.Mode) 58 @utils.register_interface(interfaces.ModeWithInitializationVector) 59 class CFB(object): 60 name = "CFB" 61 62 def __init__(self, initialization_vector): 63 self.initialization_vector = initialization_vector 64 65 validate_for_algorithm = _check_iv_length 66 67 68 @utils.register_interface(interfaces.Mode) 69 @utils.register_interface(interfaces.ModeWithInitializationVector) 70 class CFB8(object): 71 name = "CFB8" 72 73 def __init__(self, initialization_vector): 74 self.initialization_vector = initialization_vector 75 76 validate_for_algorithm = _check_iv_length 77 78 79 @utils.register_interface(interfaces.Mode) 80 @utils.register_interface(interfaces.ModeWithNonce) 81 class CTR(object): 82 name = "CTR" 83 84 def __init__(self, nonce): 85 self.nonce = nonce 86 87 def validate_for_algorithm(self, algorithm): 88 if len(self.nonce) * 8 != algorithm.block_size: 89 raise ValueError("Invalid nonce size ({0}) for {1}.".format( 90 len(self.nonce), self.name 91 )) 92 93 94 @utils.register_interface(interfaces.Mode) 95 @utils.register_interface(interfaces.ModeWithInitializationVector) 96 @utils.register_interface(interfaces.ModeWithAuthenticationTag) 97 class GCM(object): 98 name = "GCM" 99 100 def __init__(self, initialization_vector, tag=None): 101 # len(initialization_vector) must in [1, 2 ** 64), but it's impossible 102 # to actually construct a bytes object that large, so we don't check 103 # for it 104 if tag is not None and len(tag) < 4: 105 raise ValueError( 106 "Authentication tag must be 4 bytes or longer." 107 ) 108 109 self.initialization_vector = initialization_vector 110 self.tag = tag 111 112 def validate_for_algorithm(self, algorithm): 113 pass 114 [end of cryptography/hazmat/primitives/ciphers/modes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cryptography/__about__.py b/cryptography/__about__.py --- a/cryptography/__about__.py +++ b/cryptography/__about__.py @@ -28,4 +28,4 @@ __email__ = "[email protected]" __license__ = "Apache License, Version 2.0" -__copyright__ = "Copyright 2013-2014 %s" % __author__ +__copyright__ = "Copyright 2013-2014 {0}".format(__author__) diff --git a/cryptography/hazmat/primitives/ciphers/modes.py b/cryptography/hazmat/primitives/ciphers/modes.py --- a/cryptography/hazmat/primitives/ciphers/modes.py +++ b/cryptography/hazmat/primitives/ciphers/modes.py @@ -97,13 +97,16 @@ class GCM(object): name = "GCM" - def __init__(self, initialization_vector, tag=None): + def __init__(self, initialization_vector, tag=None, min_tag_length=16): # len(initialization_vector) must in [1, 2 ** 64), but it's impossible # to actually construct a bytes object that large, so we don't check # for it - if tag is not None and len(tag) < 4: + if min_tag_length < 4: + raise ValueError("min_tag_length must be >= 4") + if tag is not None and len(tag) < min_tag_length: raise ValueError( - "Authentication tag must be 4 bytes or longer." + "Authentication tag must be {0} bytes or longer.".format( + min_tag_length) ) self.initialization_vector = initialization_vector
{"golden_diff": "diff --git a/cryptography/__about__.py b/cryptography/__about__.py\n--- a/cryptography/__about__.py\n+++ b/cryptography/__about__.py\n@@ -28,4 +28,4 @@\n __email__ = \"[email protected]\"\n \n __license__ = \"Apache License, Version 2.0\"\n-__copyright__ = \"Copyright 2013-2014 %s\" % __author__\n+__copyright__ = \"Copyright 2013-2014 {0}\".format(__author__)\ndiff --git a/cryptography/hazmat/primitives/ciphers/modes.py b/cryptography/hazmat/primitives/ciphers/modes.py\n--- a/cryptography/hazmat/primitives/ciphers/modes.py\n+++ b/cryptography/hazmat/primitives/ciphers/modes.py\n@@ -97,13 +97,16 @@\n class GCM(object):\n name = \"GCM\"\n \n- def __init__(self, initialization_vector, tag=None):\n+ def __init__(self, initialization_vector, tag=None, min_tag_length=16):\n # len(initialization_vector) must in [1, 2 ** 64), but it's impossible\n # to actually construct a bytes object that large, so we don't check\n # for it\n- if tag is not None and len(tag) < 4:\n+ if min_tag_length < 4:\n+ raise ValueError(\"min_tag_length must be >= 4\")\n+ if tag is not None and len(tag) < min_tag_length:\n raise ValueError(\n- \"Authentication tag must be 4 bytes or longer.\"\n+ \"Authentication tag must be {0} bytes or longer.\".format(\n+ min_tag_length)\n )\n \n self.initialization_vector = initialization_vector\n", "issue": "Make it harder to allow truncation on a GCM tag\n`GCM()` should take a `min_tag_length` parameter, which defaults to `GCM.MAX_TAG_LENGTH`, then we can make it error out on truncated tags by default, while still allowing truncation for use cases that need it.\n\nThis is technically a backwards compatibility break, but I argue that it should be allowed as a security hardening measure. Thoughts?\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\")\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"0.5.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2014 %s\" % __author__\n", "path": "cryptography/__about__.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.hazmat.primitives import interfaces\n\n\ndef _check_iv_length(mode, algorithm):\n if len(mode.initialization_vector) * 8 != algorithm.block_size:\n raise ValueError(\"Invalid IV size ({0}) for {1}.\".format(\n len(mode.initialization_vector), mode.name\n ))\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\nclass CBC(object):\n name = \"CBC\"\n\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n\n validate_for_algorithm = _check_iv_length\n\n\[email protected]_interface(interfaces.Mode)\nclass ECB(object):\n name = \"ECB\"\n\n def validate_for_algorithm(self, algorithm):\n pass\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\nclass OFB(object):\n name = \"OFB\"\n\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n\n validate_for_algorithm = _check_iv_length\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\nclass CFB(object):\n name = \"CFB\"\n\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n\n validate_for_algorithm = _check_iv_length\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\nclass CFB8(object):\n name = \"CFB8\"\n\n def __init__(self, initialization_vector):\n self.initialization_vector = initialization_vector\n\n validate_for_algorithm = _check_iv_length\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithNonce)\nclass CTR(object):\n name = \"CTR\"\n\n def __init__(self, nonce):\n self.nonce = nonce\n\n def validate_for_algorithm(self, algorithm):\n if len(self.nonce) * 8 != algorithm.block_size:\n raise ValueError(\"Invalid nonce size ({0}) for {1}.\".format(\n len(self.nonce), self.name\n ))\n\n\[email protected]_interface(interfaces.Mode)\[email protected]_interface(interfaces.ModeWithInitializationVector)\[email protected]_interface(interfaces.ModeWithAuthenticationTag)\nclass GCM(object):\n name = \"GCM\"\n\n def __init__(self, initialization_vector, tag=None):\n # len(initialization_vector) must in [1, 2 ** 64), but it's impossible\n # to actually construct a bytes object that large, so we don't check\n # for it\n if tag is not None and len(tag) < 4:\n raise ValueError(\n \"Authentication tag must be 4 bytes or longer.\"\n )\n\n self.initialization_vector = initialization_vector\n self.tag = tag\n\n def validate_for_algorithm(self, algorithm):\n pass\n", "path": "cryptography/hazmat/primitives/ciphers/modes.py"}]}
num_tokens_prompt: 1,997
num_tokens_diff: 398
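For the GCM record above, the patched constructor signature is `GCM(initialization_vector, tag=None, min_tag_length=16)`. The sketch below shows the hardened behaviour from the caller's side; it assumes a `cryptography` build that includes this change, and the IV and tag bytes are arbitrary placeholders.

```python
# Post-patch behaviour sketch (assumes a cryptography build with this change).
from cryptography.hazmat.primitives.ciphers.modes import GCM

iv = b"\x00" * 12
short_tag = b"\x00" * 4   # a 4-byte, i.e. truncated, tag

try:
    GCM(iv, tag=short_tag)   # default min_tag_length=16 rejects truncation
except ValueError as exc:
    print(exc)               # Authentication tag must be 16 bytes or longer.

# Truncation now requires an explicit opt-in:
mode = GCM(iv, tag=short_tag, min_tag_length=4)
```

Defaulting `min_tag_length` to 16 makes truncation opt-in rather than silently accepted, which is exactly the backwards-incompatible hardening the issue argues for.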
problem_id: gh_patches_debug_29942
source: rasdani/github-patches
task_type: git_diff
in_source_id: sql-machine-learning__elasticdl-1818
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Exception calling application: dictionary changed size during iteration The exception occasionally happens when the worker reports gradients to ps. ![image](https://user-images.githubusercontent.com/18071380/76288063-d3925380-62e0-11ea-869f-64cccb4e7d37.png) </issue> <code> [start of elasticdl/python/ps/embedding_table.py] 1 import numpy as np 2 import tensorflow as tf 3 4 from elasticdl.proto.elasticdl_pb2 import EmbeddingTableInfo 5 from elasticdl.python.common.dtypes import dtype_numpy_to_tensor 6 7 8 class EmbeddingTable(object): 9 """ 10 EmbeddingTable is used to store embedding parameters of an embedding 11 layer. The name of an embedding table is actually the embedding layer 12 name. It uses a dictionary to store embedding vectors, the key is the 13 item id, the value is a 1-D numpy.ndarray. 14 15 Embedding vectors are lazily initialized in parameter server. 16 EmbeddingTable also has dim and initializer fields. Inside the get 17 interface of EmbeddingTable, if the id is not in the embedding_vectors 18 dictionary, the corresponding value will be initialized. 19 """ 20 21 def __init__(self, name, dim=None, initializer=None, is_slot=False): 22 """ 23 Args: 24 name: The embedding table name. 25 dim: The dimension of embeddings in this embedding table. 26 initializer: The initializer to initialize new embeddings. If this 27 embedding table is for slots, `initializer` is a float and this 28 table will initialize with constant initializer. Otherwise 29 `initializer` is the name of Keras initializer. 30 is_slot: A bool. True for storing slot variable, otherwise false. 31 """ 32 self.name = name 33 self.dim = dim 34 self.initializer_value = initializer 35 # set dtype to float32 36 self.dtype = np.dtype("float32") 37 if is_slot: 38 self.initializer = tf.keras.initializers.Constant( 39 float(self.initializer_value) 40 ) 41 else: 42 self.initializer = tf.keras.initializers.get( 43 self.initializer_value 44 ) 45 self.is_slot = is_slot 46 self.embedding_vectors = {} 47 48 def get(self, indices): 49 if len(indices) == 0: 50 return None 51 values = [] 52 for i in indices: 53 value = self.embedding_vectors.get(i, None) 54 if value is None: 55 value = self.initializer(shape=(self.dim,)).numpy() 56 self.embedding_vectors[i] = value 57 values.append(value) 58 return np.stack(values) 59 60 def set(self, indices, values): 61 # TODO(qijun) need to add a RWLock in Sync-SGD 62 for index, i in enumerate(indices): 63 embedding_vector = values[index] 64 self.embedding_vectors[i] = embedding_vector 65 66 def clear(self): 67 self.embedding_vectors.clear() 68 69 def to_indexed_slices(self): 70 indices = [] 71 embedding_vectors = [] 72 for id, embedding_vector in self.embedding_vectors.items(): 73 indices.append(id) 74 embedding_vectors.append(embedding_vector) 75 return tf.IndexedSlices( 76 values=np.array(embedding_vectors), indices=np.array(indices) 77 ) 78 79 def to_embedding_table_info_pb(self): 80 """Convert the embedding table information to a protobuf""" 81 embedding_pb = EmbeddingTableInfo() 82 embedding_pb.name = self.name 83 embedding_pb.dim = self.dim 84 embedding_pb.initializer = str(self.initializer_value) 85 embedding_pb.dtype = dtype_numpy_to_tensor(self.dtype) 86 return embedding_pb 87 88 def get_table_size(self): 89 """Get the element count of an embedding table""" 90 if len(self.embedding_vectors) > 0: 91 element_size = list(self.embedding_vectors.values())[0].itemsize 92 size = 
self.dim * len(self.embedding_vectors) * element_size 93 return size 94 return 0 95 96 def debug_info(self): 97 return ( 98 "Embedding param name: %s\n shape: [%d, %d]\n size: %d bytes\n" 99 % ( 100 self.name, 101 len(self.embedding_vectors), 102 self.dim, 103 self.get_table_size(), 104 ) 105 ) 106 107 108 # TODO(bug): create_embedding_table does not create EmbeddingTable correctly 109 # if it is a slot table. 110 def create_embedding_table(embedding_table_info_pb): 111 name = embedding_table_info_pb.name 112 dim = embedding_table_info_pb.dim 113 initializer = embedding_table_info_pb.initializer 114 return EmbeddingTable(name, dim, initializer) 115 116 117 def get_slot_table_name(embedding_name, slot_name): 118 return embedding_name + "-" + slot_name 119 [end of elasticdl/python/ps/embedding_table.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/elasticdl/python/ps/embedding_table.py b/elasticdl/python/ps/embedding_table.py --- a/elasticdl/python/ps/embedding_table.py +++ b/elasticdl/python/ps/embedding_table.py @@ -1,3 +1,5 @@ +import threading + import numpy as np import tensorflow as tf @@ -44,6 +46,7 @@ ) self.is_slot = is_slot self.embedding_vectors = {} + self._lock = threading.Lock() def get(self, indices): if len(indices) == 0: @@ -52,8 +55,9 @@ for i in indices: value = self.embedding_vectors.get(i, None) if value is None: - value = self.initializer(shape=(self.dim,)).numpy() - self.embedding_vectors[i] = value + with self._lock: + value = self.initializer(shape=(self.dim,)).numpy() + self.embedding_vectors[i] = value values.append(value) return np.stack(values) @@ -69,9 +73,10 @@ def to_indexed_slices(self): indices = [] embedding_vectors = [] - for id, embedding_vector in self.embedding_vectors.items(): - indices.append(id) - embedding_vectors.append(embedding_vector) + with self._lock: + for id, embedding_vector in self.embedding_vectors.items(): + indices.append(id) + embedding_vectors.append(embedding_vector) return tf.IndexedSlices( values=np.array(embedding_vectors), indices=np.array(indices) )
{"golden_diff": "diff --git a/elasticdl/python/ps/embedding_table.py b/elasticdl/python/ps/embedding_table.py\n--- a/elasticdl/python/ps/embedding_table.py\n+++ b/elasticdl/python/ps/embedding_table.py\n@@ -1,3 +1,5 @@\n+import threading\n+\n import numpy as np\n import tensorflow as tf\n \n@@ -44,6 +46,7 @@\n )\n self.is_slot = is_slot\n self.embedding_vectors = {}\n+ self._lock = threading.Lock()\n \n def get(self, indices):\n if len(indices) == 0:\n@@ -52,8 +55,9 @@\n for i in indices:\n value = self.embedding_vectors.get(i, None)\n if value is None:\n- value = self.initializer(shape=(self.dim,)).numpy()\n- self.embedding_vectors[i] = value\n+ with self._lock:\n+ value = self.initializer(shape=(self.dim,)).numpy()\n+ self.embedding_vectors[i] = value\n values.append(value)\n return np.stack(values)\n \n@@ -69,9 +73,10 @@\n def to_indexed_slices(self):\n indices = []\n embedding_vectors = []\n- for id, embedding_vector in self.embedding_vectors.items():\n- indices.append(id)\n- embedding_vectors.append(embedding_vector)\n+ with self._lock:\n+ for id, embedding_vector in self.embedding_vectors.items():\n+ indices.append(id)\n+ embedding_vectors.append(embedding_vector)\n return tf.IndexedSlices(\n values=np.array(embedding_vectors), indices=np.array(indices)\n )\n", "issue": "Exception calling application: dictionary changed size during iteration\nThe exception occasionally happens when the worker reports gradients to ps.\r\n![image](https://user-images.githubusercontent.com/18071380/76288063-d3925380-62e0-11ea-869f-64cccb4e7d37.png)\r\n\n", "before_files": [{"content": "import numpy as np\nimport tensorflow as tf\n\nfrom elasticdl.proto.elasticdl_pb2 import EmbeddingTableInfo\nfrom elasticdl.python.common.dtypes import dtype_numpy_to_tensor\n\n\nclass EmbeddingTable(object):\n \"\"\"\n EmbeddingTable is used to store embedding parameters of an embedding\n layer. The name of an embedding table is actually the embedding layer\n name. It uses a dictionary to store embedding vectors, the key is the\n item id, the value is a 1-D numpy.ndarray.\n\n Embedding vectors are lazily initialized in parameter server.\n EmbeddingTable also has dim and initializer fields. Inside the get\n interface of EmbeddingTable, if the id is not in the embedding_vectors\n dictionary, the corresponding value will be initialized.\n \"\"\"\n\n def __init__(self, name, dim=None, initializer=None, is_slot=False):\n \"\"\"\n Args:\n name: The embedding table name.\n dim: The dimension of embeddings in this embedding table.\n initializer: The initializer to initialize new embeddings. If this\n embedding table is for slots, `initializer` is a float and this\n table will initialize with constant initializer. Otherwise\n `initializer` is the name of Keras initializer.\n is_slot: A bool. 
True for storing slot variable, otherwise false.\n \"\"\"\n self.name = name\n self.dim = dim\n self.initializer_value = initializer\n # set dtype to float32\n self.dtype = np.dtype(\"float32\")\n if is_slot:\n self.initializer = tf.keras.initializers.Constant(\n float(self.initializer_value)\n )\n else:\n self.initializer = tf.keras.initializers.get(\n self.initializer_value\n )\n self.is_slot = is_slot\n self.embedding_vectors = {}\n\n def get(self, indices):\n if len(indices) == 0:\n return None\n values = []\n for i in indices:\n value = self.embedding_vectors.get(i, None)\n if value is None:\n value = self.initializer(shape=(self.dim,)).numpy()\n self.embedding_vectors[i] = value\n values.append(value)\n return np.stack(values)\n\n def set(self, indices, values):\n # TODO(qijun) need to add a RWLock in Sync-SGD\n for index, i in enumerate(indices):\n embedding_vector = values[index]\n self.embedding_vectors[i] = embedding_vector\n\n def clear(self):\n self.embedding_vectors.clear()\n\n def to_indexed_slices(self):\n indices = []\n embedding_vectors = []\n for id, embedding_vector in self.embedding_vectors.items():\n indices.append(id)\n embedding_vectors.append(embedding_vector)\n return tf.IndexedSlices(\n values=np.array(embedding_vectors), indices=np.array(indices)\n )\n\n def to_embedding_table_info_pb(self):\n \"\"\"Convert the embedding table information to a protobuf\"\"\"\n embedding_pb = EmbeddingTableInfo()\n embedding_pb.name = self.name\n embedding_pb.dim = self.dim\n embedding_pb.initializer = str(self.initializer_value)\n embedding_pb.dtype = dtype_numpy_to_tensor(self.dtype)\n return embedding_pb\n\n def get_table_size(self):\n \"\"\"Get the element count of an embedding table\"\"\"\n if len(self.embedding_vectors) > 0:\n element_size = list(self.embedding_vectors.values())[0].itemsize\n size = self.dim * len(self.embedding_vectors) * element_size\n return size\n return 0\n\n def debug_info(self):\n return (\n \"Embedding param name: %s\\n shape: [%d, %d]\\n size: %d bytes\\n\"\n % (\n self.name,\n len(self.embedding_vectors),\n self.dim,\n self.get_table_size(),\n )\n )\n\n\n# TODO(bug): create_embedding_table does not create EmbeddingTable correctly\n# if it is a slot table.\ndef create_embedding_table(embedding_table_info_pb):\n name = embedding_table_info_pb.name\n dim = embedding_table_info_pb.dim\n initializer = embedding_table_info_pb.initializer\n return EmbeddingTable(name, dim, initializer)\n\n\ndef get_slot_table_name(embedding_name, slot_name):\n return embedding_name + \"-\" + slot_name\n", "path": "elasticdl/python/ps/embedding_table.py"}]}
num_tokens_prompt: 1,783
num_tokens_diff: 352
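The ElasticDL record above fixes a classic race: one thread iterates `embedding_vectors` inside `to_indexed_slices()` while another lazily inserts vectors in `get()`, producing the `RuntimeError: dictionary changed size during iteration` from the report. Below is a minimal standalone sketch of the locking pattern the patch introduces; the class and method names are illustrative, not ElasticDL's.

```python
# Minimal sketch of the fix: writers and iterating readers share one lock.
import threading

class VectorTable:
    def __init__(self):
        self._vectors = {}
        self._lock = threading.Lock()

    def get_or_init(self, key, init_fn):
        with self._lock:                 # guards the lazy insert
            if key not in self._vectors:
                self._vectors[key] = init_fn()
            return self._vectors[key]

    def snapshot(self):
        with self._lock:                 # guards the full iteration
            return list(self._vectors.items())
```

Copying the items to a list while holding the lock keeps the critical section short; iterating the live dict without it is precisely what raises the RuntimeError under concurrent inserts.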
problem_id: gh_patches_debug_11089
source: rasdani/github-patches
task_type: git_diff
in_source_id: encode__starlette-1218
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Consider making `starlette.templating.Jinja2Templates.get_env` "protected" Hi, Thank you very much for your project! We are using it intensively through FastAPI. While I was exploring the code base and looking how to use `Jinja2Templates`, I noticed the method `get_env`. At first I thought it was a getter for the environment, but eventually realized it is only called in the constructor of the class. It would make much more sense to me if the method was called `_get_env`. This would signal that it should not be used outside of the class (or at least that the caller needs to know what she is doing). For example, if I want to add filters or globals to the environment, I should write: ``` templates = Jinja2Templates(...) templates.env.globals[...] = ... ``` instead of: ``` templates = Jinja2Templates(...) templates.get_env().globals[...] = ... ``` As of current implementation, the second snippet has no effect. Thank you very much for considering this request! </issue> <code> [start of starlette/templating.py] 1 import typing 2 3 from starlette.background import BackgroundTask 4 from starlette.responses import Response 5 from starlette.types import Receive, Scope, Send 6 7 try: 8 import jinja2 9 10 # @contextfunction renamed to @pass_context in Jinja 3.0, to be removed in 3.1 11 if hasattr(jinja2, "pass_context"): 12 pass_context = jinja2.pass_context 13 else: # pragma: nocover 14 pass_context = jinja2.contextfunction 15 except ImportError: # pragma: nocover 16 jinja2 = None # type: ignore 17 18 19 class _TemplateResponse(Response): 20 media_type = "text/html" 21 22 def __init__( 23 self, 24 template: typing.Any, 25 context: dict, 26 status_code: int = 200, 27 headers: dict = None, 28 media_type: str = None, 29 background: BackgroundTask = None, 30 ): 31 self.template = template 32 self.context = context 33 content = template.render(context) 34 super().__init__(content, status_code, headers, media_type, background) 35 36 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: 37 request = self.context.get("request", {}) 38 extensions = request.get("extensions", {}) 39 if "http.response.template" in extensions: 40 await send( 41 { 42 "type": "http.response.template", 43 "template": self.template, 44 "context": self.context, 45 } 46 ) 47 await super().__call__(scope, receive, send) 48 49 50 class Jinja2Templates: 51 """ 52 templates = Jinja2Templates("templates") 53 54 return templates.TemplateResponse("index.html", {"request": request}) 55 """ 56 57 def __init__(self, directory: str) -> None: 58 assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates" 59 self.env = self.get_env(directory) 60 61 def get_env(self, directory: str) -> "jinja2.Environment": 62 @pass_context 63 def url_for(context: dict, name: str, **path_params: typing.Any) -> str: 64 request = context["request"] 65 return request.url_for(name, **path_params) 66 67 loader = jinja2.FileSystemLoader(directory) 68 env = jinja2.Environment(loader=loader, autoescape=True) 69 env.globals["url_for"] = url_for 70 return env 71 72 def get_template(self, name: str) -> "jinja2.Template": 73 return self.env.get_template(name) 74 75 def TemplateResponse( 76 self, 77 name: str, 78 context: dict, 79 status_code: int = 200, 80 headers: dict = None, 81 media_type: str = None, 82 background: BackgroundTask = None, 83 ) -> _TemplateResponse: 84 if "request" not in context: 85 raise ValueError('context 
must include a "request" key') 86 template = self.get_template(name) 87 return _TemplateResponse( 88 template, 89 context, 90 status_code=status_code, 91 headers=headers, 92 media_type=media_type, 93 background=background, 94 ) 95 [end of starlette/templating.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/starlette/templating.py b/starlette/templating.py --- a/starlette/templating.py +++ b/starlette/templating.py @@ -56,9 +56,9 @@ def __init__(self, directory: str) -> None: assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates" - self.env = self.get_env(directory) + self.env = self._create_env(directory) - def get_env(self, directory: str) -> "jinja2.Environment": + def _create_env(self, directory: str) -> "jinja2.Environment": @pass_context def url_for(context: dict, name: str, **path_params: typing.Any) -> str: request = context["request"]
{"golden_diff": "diff --git a/starlette/templating.py b/starlette/templating.py\n--- a/starlette/templating.py\n+++ b/starlette/templating.py\n@@ -56,9 +56,9 @@\n \n def __init__(self, directory: str) -> None:\n assert jinja2 is not None, \"jinja2 must be installed to use Jinja2Templates\"\n- self.env = self.get_env(directory)\n+ self.env = self._create_env(directory)\n \n- def get_env(self, directory: str) -> \"jinja2.Environment\":\n+ def _create_env(self, directory: str) -> \"jinja2.Environment\":\n @pass_context\n def url_for(context: dict, name: str, **path_params: typing.Any) -> str:\n request = context[\"request\"]\n", "issue": "Consider making `starlette.templating.Jinja2Templates.get_env` \"protected\"\nHi,\r\nThank you very much for your project! We are using it intensively through FastAPI.\r\n\r\nWhile I was exploring the code base and looking how to use `Jinja2Templates`, I noticed the method `get_env`. At first I thought it was a getter for the environment, but eventually realized it is only called in the constructor of the class.\r\n\r\nIt would make much more sense to me if the method was called `_get_env`. This would signal that it should not be used outside of the class (or at least that the caller needs to know what she is doing).\r\n\r\nFor example, if I want to add filters or globals to the environment, I should write:\r\n\r\n```\r\ntemplates = Jinja2Templates(...)\r\ntemplates.env.globals[...] = ...\r\n```\r\n\r\ninstead of:\r\n\r\n```\r\ntemplates = Jinja2Templates(...)\r\ntemplates.get_env().globals[...] = ...\r\n```\r\n\r\nAs of current implementation, the second snippet has no effect.\r\n\r\nThank you very much for considering this request!\n", "before_files": [{"content": "import typing\n\nfrom starlette.background import BackgroundTask\nfrom starlette.responses import Response\nfrom starlette.types import Receive, Scope, Send\n\ntry:\n import jinja2\n\n # @contextfunction renamed to @pass_context in Jinja 3.0, to be removed in 3.1\n if hasattr(jinja2, \"pass_context\"):\n pass_context = jinja2.pass_context\n else: # pragma: nocover\n pass_context = jinja2.contextfunction\nexcept ImportError: # pragma: nocover\n jinja2 = None # type: ignore\n\n\nclass _TemplateResponse(Response):\n media_type = \"text/html\"\n\n def __init__(\n self,\n template: typing.Any,\n context: dict,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n ):\n self.template = template\n self.context = context\n content = template.render(context)\n super().__init__(content, status_code, headers, media_type, background)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n request = self.context.get(\"request\", {})\n extensions = request.get(\"extensions\", {})\n if \"http.response.template\" in extensions:\n await send(\n {\n \"type\": \"http.response.template\",\n \"template\": self.template,\n \"context\": self.context,\n }\n )\n await super().__call__(scope, receive, send)\n\n\nclass Jinja2Templates:\n \"\"\"\n templates = Jinja2Templates(\"templates\")\n\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n \"\"\"\n\n def __init__(self, directory: str) -> None:\n assert jinja2 is not None, \"jinja2 must be installed to use Jinja2Templates\"\n self.env = self.get_env(directory)\n\n def get_env(self, directory: str) -> \"jinja2.Environment\":\n @pass_context\n def url_for(context: dict, name: str, **path_params: typing.Any) -> str:\n request = context[\"request\"]\n return 
request.url_for(name, **path_params)\n\n loader = jinja2.FileSystemLoader(directory)\n env = jinja2.Environment(loader=loader, autoescape=True)\n env.globals[\"url_for\"] = url_for\n return env\n\n def get_template(self, name: str) -> \"jinja2.Template\":\n return self.env.get_template(name)\n\n def TemplateResponse(\n self,\n name: str,\n context: dict,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n ) -> _TemplateResponse:\n if \"request\" not in context:\n raise ValueError('context must include a \"request\" key')\n template = self.get_template(name)\n return _TemplateResponse(\n template,\n context,\n status_code=status_code,\n headers=headers,\n media_type=media_type,\n background=background,\n )\n", "path": "starlette/templating.py"}]}
num_tokens_prompt: 1,616
num_tokens_diff: 180
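After the Starlette patch above, the supported customization point is the public `env` attribute, matching the first snippet in the issue. A short usage sketch follows; the directory, global, and filter names are placeholders.

```python
# Customizing the Jinja environment after construction (post-patch usage).
from starlette.templating import Jinja2Templates

templates = Jinja2Templates(directory="templates")
templates.env.globals["app_name"] = "demo"            # extra template global
templates.env.filters["shout"] = lambda s: s.upper()  # extra filter
```

Renaming `get_env` to `_create_env` signals that the environment is built once in the constructor and should be mutated via `env`, not rebuilt by callers.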
problem_id: gh_patches_debug_15302
source: rasdani/github-patches
task_type: git_diff
in_source_id: dj-stripe__dj-stripe-975
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Document our plan to standardise on storing currency in cents - [x] Add note on field classes that StripeQuantumCurrencyAmountField should be used for new fields - [x] Update changelog about 3.0 migration to cents _Originally posted by @therefromhere in https://github.com/dj-stripe/dj-stripe/issues/955#issuecomment-527251976_ </issue> <code> [start of djstripe/fields.py] 1 """ 2 dj-stripe Custom Field Definitions 3 """ 4 import decimal 5 6 from django.core.validators import MaxValueValidator, MinValueValidator 7 from django.db import models 8 9 from .settings import USE_NATIVE_JSONFIELD 10 from .utils import convert_tstamp 11 12 if USE_NATIVE_JSONFIELD: 13 from django.contrib.postgres.fields import JSONField as BaseJSONField 14 else: 15 from jsonfield import JSONField as BaseJSONField 16 17 18 class PaymentMethodForeignKey(models.ForeignKey): 19 def __init__(self, **kwargs): 20 kwargs.setdefault("to", "DjstripePaymentMethod") 21 super().__init__(**kwargs) 22 23 24 class StripePercentField(models.DecimalField): 25 """A field used to define a percent according to djstripe logic.""" 26 27 def __init__(self, *args, **kwargs): 28 """Assign default args to this field.""" 29 defaults = { 30 "decimal_places": 2, 31 "max_digits": 5, 32 "validators": [MinValueValidator(1), MaxValueValidator(100)], 33 } 34 defaults.update(kwargs) 35 super().__init__(*args, **defaults) 36 37 38 class StripeCurrencyCodeField(models.CharField): 39 """ 40 A field used to store a three-letter currency code (eg. usd, eur, ...) 41 """ 42 43 def __init__(self, *args, **kwargs): 44 defaults = {"max_length": 3, "help_text": "Three-letter ISO currency code"} 45 defaults.update(kwargs) 46 super().__init__(*args, **defaults) 47 48 49 class StripeQuantumCurrencyAmountField(models.IntegerField): 50 pass 51 52 53 class StripeDecimalCurrencyAmountField(models.DecimalField): 54 """ 55 A field used to define currency according to djstripe logic. 56 57 Stripe is always in cents. djstripe stores everything in dollars. 58 """ 59 60 def __init__(self, *args, **kwargs): 61 """Assign default args to this field.""" 62 defaults = {"decimal_places": 2, "max_digits": 8} 63 defaults.update(kwargs) 64 super().__init__(*args, **defaults) 65 66 def stripe_to_db(self, data): 67 """Convert the raw value to decimal representation.""" 68 val = data.get(self.name) 69 70 # Note: 0 is a possible return value, which is 'falseish' 71 if val is not None: 72 return val / decimal.Decimal("100") 73 74 75 class StripeEnumField(models.CharField): 76 def __init__(self, enum, *args, **kwargs): 77 self.enum = enum 78 choices = enum.choices 79 defaults = {"choices": choices, "max_length": max(len(k) for k, v in choices)} 80 defaults.update(kwargs) 81 super().__init__(*args, **defaults) 82 83 def deconstruct(self): 84 name, path, args, kwargs = super().deconstruct() 85 kwargs["enum"] = self.enum 86 del kwargs["choices"] 87 return name, path, args, kwargs 88 89 90 class StripeIdField(models.CharField): 91 """A field with enough space to hold any stripe ID.""" 92 93 def __init__(self, *args, **kwargs): 94 """ 95 Assign default args to this field. 96 97 As per: https://stripe.com/docs/upgrades 98 You can safely assume object IDs we generate will never exceed 255 99 characters, but you should be able to handle IDs of up to that 100 length. 
101 """ 102 defaults = {"max_length": 255, "blank": False, "null": False} 103 defaults.update(kwargs) 104 super().__init__(*args, **defaults) 105 106 107 class StripeDateTimeField(models.DateTimeField): 108 """A field used to define a DateTimeField value according to djstripe logic.""" 109 110 def stripe_to_db(self, data): 111 """Convert the raw timestamp value to a DateTime representation.""" 112 val = data.get(self.name) 113 114 # Note: 0 is a possible return value, which is 'falseish' 115 if val is not None: 116 return convert_tstamp(val) 117 118 119 class JSONField(BaseJSONField): 120 """A field used to define a JSONField value according to djstripe logic.""" 121 122 pass 123 [end of djstripe/fields.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/djstripe/fields.py b/djstripe/fields.py --- a/djstripe/fields.py +++ b/djstripe/fields.py @@ -47,14 +47,22 @@ class StripeQuantumCurrencyAmountField(models.IntegerField): + """ + A field used to store currency amounts in cents (etc) as per stripe. + """ + pass class StripeDecimalCurrencyAmountField(models.DecimalField): """ - A field used to define currency according to djstripe logic. + A legacy field to store currency amounts in dollars (etc). + + Stripe is always in cents. Historically djstripe stored everything in dollars. - Stripe is always in cents. djstripe stores everything in dollars. + Note: Don't use this for new fields, use StripeQuantumCurrencyAmountField instead. + We're planning on migrating existing fields in dj-stripe 3.0, + see https://github.com/dj-stripe/dj-stripe/issues/955 """ def __init__(self, *args, **kwargs):
{"golden_diff": "diff --git a/djstripe/fields.py b/djstripe/fields.py\n--- a/djstripe/fields.py\n+++ b/djstripe/fields.py\n@@ -47,14 +47,22 @@\n \n \n class StripeQuantumCurrencyAmountField(models.IntegerField):\n+ \"\"\"\n+ A field used to store currency amounts in cents (etc) as per stripe.\n+ \"\"\"\n+\n pass\n \n \n class StripeDecimalCurrencyAmountField(models.DecimalField):\n \"\"\"\n- A field used to define currency according to djstripe logic.\n+ A legacy field to store currency amounts in dollars (etc).\n+\n+ Stripe is always in cents. Historically djstripe stored everything in dollars.\n \n- Stripe is always in cents. djstripe stores everything in dollars.\n+ Note: Don't use this for new fields, use StripeQuantumCurrencyAmountField instead.\n+ We're planning on migrating existing fields in dj-stripe 3.0,\n+ see https://github.com/dj-stripe/dj-stripe/issues/955\n \"\"\"\n \n def __init__(self, *args, **kwargs):\n", "issue": "Document our plan to standardise on storing currency in cents\n- [x] Add note on field classes that StripeQuantumCurrencyAmountField should be used for new fields\r\n- [x] Update changelog about 3.0 migration to cents\r\n\r\n_Originally posted by @therefromhere in https://github.com/dj-stripe/dj-stripe/issues/955#issuecomment-527251976_\n", "before_files": [{"content": "\"\"\"\ndj-stripe Custom Field Definitions\n\"\"\"\nimport decimal\n\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\nfrom .settings import USE_NATIVE_JSONFIELD\nfrom .utils import convert_tstamp\n\nif USE_NATIVE_JSONFIELD:\n from django.contrib.postgres.fields import JSONField as BaseJSONField\nelse:\n from jsonfield import JSONField as BaseJSONField\n\n\nclass PaymentMethodForeignKey(models.ForeignKey):\n def __init__(self, **kwargs):\n kwargs.setdefault(\"to\", \"DjstripePaymentMethod\")\n super().__init__(**kwargs)\n\n\nclass StripePercentField(models.DecimalField):\n \"\"\"A field used to define a percent according to djstripe logic.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Assign default args to this field.\"\"\"\n defaults = {\n \"decimal_places\": 2,\n \"max_digits\": 5,\n \"validators\": [MinValueValidator(1), MaxValueValidator(100)],\n }\n defaults.update(kwargs)\n super().__init__(*args, **defaults)\n\n\nclass StripeCurrencyCodeField(models.CharField):\n \"\"\"\n A field used to store a three-letter currency code (eg. usd, eur, ...)\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n defaults = {\"max_length\": 3, \"help_text\": \"Three-letter ISO currency code\"}\n defaults.update(kwargs)\n super().__init__(*args, **defaults)\n\n\nclass StripeQuantumCurrencyAmountField(models.IntegerField):\n pass\n\n\nclass StripeDecimalCurrencyAmountField(models.DecimalField):\n \"\"\"\n A field used to define currency according to djstripe logic.\n\n Stripe is always in cents. 
djstripe stores everything in dollars.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Assign default args to this field.\"\"\"\n defaults = {\"decimal_places\": 2, \"max_digits\": 8}\n defaults.update(kwargs)\n super().__init__(*args, **defaults)\n\n def stripe_to_db(self, data):\n \"\"\"Convert the raw value to decimal representation.\"\"\"\n val = data.get(self.name)\n\n # Note: 0 is a possible return value, which is 'falseish'\n if val is not None:\n return val / decimal.Decimal(\"100\")\n\n\nclass StripeEnumField(models.CharField):\n def __init__(self, enum, *args, **kwargs):\n self.enum = enum\n choices = enum.choices\n defaults = {\"choices\": choices, \"max_length\": max(len(k) for k, v in choices)}\n defaults.update(kwargs)\n super().__init__(*args, **defaults)\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n kwargs[\"enum\"] = self.enum\n del kwargs[\"choices\"]\n return name, path, args, kwargs\n\n\nclass StripeIdField(models.CharField):\n \"\"\"A field with enough space to hold any stripe ID.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Assign default args to this field.\n\n As per: https://stripe.com/docs/upgrades\n You can safely assume object IDs we generate will never exceed 255\n characters, but you should be able to handle IDs of up to that\n length.\n \"\"\"\n defaults = {\"max_length\": 255, \"blank\": False, \"null\": False}\n defaults.update(kwargs)\n super().__init__(*args, **defaults)\n\n\nclass StripeDateTimeField(models.DateTimeField):\n \"\"\"A field used to define a DateTimeField value according to djstripe logic.\"\"\"\n\n def stripe_to_db(self, data):\n \"\"\"Convert the raw timestamp value to a DateTime representation.\"\"\"\n val = data.get(self.name)\n\n # Note: 0 is a possible return value, which is 'falseish'\n if val is not None:\n return convert_tstamp(val)\n\n\nclass JSONField(BaseJSONField):\n \"\"\"A field used to define a JSONField value according to djstripe logic.\"\"\"\n\n pass\n", "path": "djstripe/fields.py"}]}
num_tokens_prompt: 1,753
num_tokens_diff: 240
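The dj-stripe record above documents the dollars-versus-cents split. The worked conversion below restates the legacy `stripe_to_db` arithmetic standalone (the amount is an arbitrary example); this is the conversion step that the quantum field avoids by storing Stripe's integer cents verbatim.

```python
# The legacy dollars conversion from StripeDecimalCurrencyAmountField:
import decimal

raw_cents = 1999                              # integer amount as Stripe sends it
dollars = raw_cents / decimal.Decimal("100")  # what the legacy field stores
print(dollars)                                # 19.99

# StripeQuantumCurrencyAmountField would simply store 1999 as an int.
```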
problem_id: gh_patches_debug_23652
source: rasdani/github-patches
task_type: git_diff
in_source_id: Mailu__Mailu-2150
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> fix key error in dictionary ## What type of PR? (Feature, enhancement, bug-fix, documentation) ## What does this PR do? ### Related issue(s) - Auto close an issue like: closes #2145 ## Prerequisites Before we can consider review and merge, please make sure the following list is done and checked. If an entry in not applicable, you can check it or remove it from the list. - [ ] In case of feature or enhancement: documentation updated accordingly - [ ] Unless it's docs or a minor change: add [changelog](https://mailu.io/master/contributors/workflow.html#changelog) entry file. </issue> <code> [start of core/admin/mailu/internal/views/auth.py] 1 from mailu import models, utils 2 from mailu.internal import internal, nginx 3 from flask import current_app as app 4 5 import flask 6 import flask_login 7 import base64 8 9 @internal.route("/auth/email") 10 def nginx_authentication(): 11 """ Main authentication endpoint for Nginx email server 12 """ 13 client_ip = flask.request.headers["Client-Ip"] 14 headers = flask.request.headers 15 if headers["Auth-Port"] == '25' and headers['Auth-Method'] == 'plain': 16 response = flask.Response() 17 response.headers['Auth-Status'] = 'AUTH not supported' 18 response.headers['Auth-Error-Code'] = '502 5.5.1' 19 utils.limiter.rate_limit_ip(client_ip) 20 return response 21 if utils.limiter.should_rate_limit_ip(client_ip): 22 status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit') 23 response = flask.Response() 24 response.headers['Auth-Status'] = status 25 response.headers['Auth-Error-Code'] = code 26 if int(flask.request.headers['Auth-Login-Attempt']) < 10: 27 response.headers['Auth-Wait'] = '3' 28 return response 29 headers = nginx.handle_authentication(flask.request.headers) 30 response = flask.Response() 31 for key, value in headers.items(): 32 response.headers[key] = str(value) 33 is_valid_user = False 34 is_from_webmail = headers['Auth-Port'] in ['10143', '10025'] 35 if response.headers.get("Auth-User-Exists"): 36 username = response.headers["Auth-User"] 37 if utils.limiter.should_rate_limit_user(username, client_ip): 38 # FIXME could be done before handle_authentication() 39 status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit') 40 response = flask.Response() 41 response.headers['Auth-Status'] = status 42 response.headers['Auth-Error-Code'] = code 43 if int(flask.request.headers['Auth-Login-Attempt']) < 10: 44 response.headers['Auth-Wait'] = '3' 45 return response 46 is_valid_user = True 47 if headers.get("Auth-Status") == "OK": 48 utils.limiter.exempt_ip_from_ratelimits(client_ip) 49 elif is_valid_user: 50 utils.limiter.rate_limit_user(username, client_ip) 51 elif not is_from_webmail: 52 utils.limiter.rate_limit_ip(client_ip) 53 return response 54 55 @internal.route("/auth/admin") 56 def admin_authentication(): 57 """ Fails if the user is not an authenticated admin. 58 """ 59 if (not flask_login.current_user.is_anonymous 60 and flask_login.current_user.global_admin 61 and flask_login.current_user.enabled): 62 return "" 63 return flask.abort(403) 64 65 @internal.route("/auth/user") 66 def user_authentication(): 67 """ Fails if the user is not authenticated. 
68 """ 69 if (not flask_login.current_user.is_anonymous 70 and flask_login.current_user.enabled): 71 response = flask.Response() 72 email = flask_login.current_user.get_id() 73 response.headers["X-User"] = models.IdnaEmail.process_bind_param(flask_login, email, "") 74 response.headers["X-User-Token"] = utils.gen_temp_token(email, flask.session) 75 return response 76 return flask.abort(403) 77 78 79 @internal.route("/auth/basic") 80 def basic_authentication(): 81 """ Tries to authenticate using the Authorization header. 82 """ 83 client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr) 84 if utils.limiter.should_rate_limit_ip(client_ip): 85 response = flask.Response(status=401) 86 response.headers["WWW-Authenticate"] = 'Basic realm="Authentication rate limit from one source exceeded"' 87 response.headers['Retry-After'] = '60' 88 return response 89 authorization = flask.request.headers.get("Authorization") 90 if authorization and authorization.startswith("Basic "): 91 encoded = authorization.replace("Basic ", "") 92 user_email, password = base64.b64decode(encoded).split(b":", 1) 93 user_email = user_email.decode("utf8") 94 if utils.limiter.should_rate_limit_user(user_email, client_ip): 95 response = flask.Response(status=401) 96 response.headers["WWW-Authenticate"] = 'Basic realm="Authentication rate limit for this username exceeded"' 97 response.headers['Retry-After'] = '60' 98 return response 99 user = models.User.query.get(user_email) 100 if user and nginx.check_credentials(user, password.decode('utf-8'), client_ip, "web"): 101 response = flask.Response() 102 response.headers["X-User"] = models.IdnaEmail.process_bind_param(flask_login, user.email, "") 103 utils.limiter.exempt_ip_from_ratelimits(client_ip) 104 return response 105 utils.limiter.rate_limit_user(user_email, client_ip) if user else utils.limiter.rate_limit_ip(client_ip) 106 response = flask.Response(status=401) 107 response.headers["WWW-Authenticate"] = 'Basic realm="Login Required"' 108 return response 109 [end of core/admin/mailu/internal/views/auth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py --- a/core/admin/mailu/internal/views/auth.py +++ b/core/admin/mailu/internal/views/auth.py @@ -18,7 +18,8 @@ response.headers['Auth-Error-Code'] = '502 5.5.1' utils.limiter.rate_limit_ip(client_ip) return response - if utils.limiter.should_rate_limit_ip(client_ip): + is_from_webmail = headers['Auth-Port'] in ['10143', '10025'] + if not is_from_webmail and utils.limiter.should_rate_limit_ip(client_ip): status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit') response = flask.Response() response.headers['Auth-Status'] = status @@ -31,7 +32,6 @@ for key, value in headers.items(): response.headers[key] = str(value) is_valid_user = False - is_from_webmail = headers['Auth-Port'] in ['10143', '10025'] if response.headers.get("Auth-User-Exists"): username = response.headers["Auth-User"] if utils.limiter.should_rate_limit_user(username, client_ip):
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py\n--- a/core/admin/mailu/internal/views/auth.py\n+++ b/core/admin/mailu/internal/views/auth.py\n@@ -18,7 +18,8 @@\n response.headers['Auth-Error-Code'] = '502 5.5.1'\n utils.limiter.rate_limit_ip(client_ip)\n return response\n- if utils.limiter.should_rate_limit_ip(client_ip):\n+ is_from_webmail = headers['Auth-Port'] in ['10143', '10025']\n+ if not is_from_webmail and utils.limiter.should_rate_limit_ip(client_ip):\n status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')\n response = flask.Response()\n response.headers['Auth-Status'] = status\n@@ -31,7 +32,6 @@\n for key, value in headers.items():\n response.headers[key] = str(value)\n is_valid_user = False\n- is_from_webmail = headers['Auth-Port'] in ['10143', '10025']\n if response.headers.get(\"Auth-User-Exists\"):\n username = response.headers[\"Auth-User\"]\n if utils.limiter.should_rate_limit_user(username, client_ip):\n", "issue": "fix key error in dictionary\n## What type of PR?\r\n\r\n(Feature, enhancement, bug-fix, documentation)\r\n\r\n## What does this PR do?\r\n\r\n### Related issue(s)\r\n- Auto close an issue like: closes #2145\r\n\r\n## Prerequisites\r\nBefore we can consider review and merge, please make sure the following list is done and checked.\r\nIf an entry in not applicable, you can check it or remove it from the list.\r\n\r\n- [ ] In case of feature or enhancement: documentation updated accordingly\r\n- [ ] Unless it's docs or a minor change: add [changelog](https://mailu.io/master/contributors/workflow.html#changelog) entry file.\r\n\n", "before_files": [{"content": "from mailu import models, utils\nfrom mailu.internal import internal, nginx\nfrom flask import current_app as app\n\nimport flask\nimport flask_login\nimport base64\n\[email protected](\"/auth/email\")\ndef nginx_authentication():\n \"\"\" Main authentication endpoint for Nginx email server\n \"\"\"\n client_ip = flask.request.headers[\"Client-Ip\"]\n headers = flask.request.headers\n if headers[\"Auth-Port\"] == '25' and headers['Auth-Method'] == 'plain':\n response = flask.Response()\n response.headers['Auth-Status'] = 'AUTH not supported'\n response.headers['Auth-Error-Code'] = '502 5.5.1'\n utils.limiter.rate_limit_ip(client_ip)\n return response\n if utils.limiter.should_rate_limit_ip(client_ip):\n status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')\n response = flask.Response()\n response.headers['Auth-Status'] = status\n response.headers['Auth-Error-Code'] = code\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n headers = nginx.handle_authentication(flask.request.headers)\n response = flask.Response()\n for key, value in headers.items():\n response.headers[key] = str(value)\n is_valid_user = False\n is_from_webmail = headers['Auth-Port'] in ['10143', '10025']\n if response.headers.get(\"Auth-User-Exists\"):\n username = response.headers[\"Auth-User\"]\n if utils.limiter.should_rate_limit_user(username, client_ip):\n # FIXME could be done before handle_authentication()\n status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')\n response = flask.Response()\n response.headers['Auth-Status'] = status\n response.headers['Auth-Error-Code'] = code\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n is_valid_user = True\n 
if headers.get(\"Auth-Status\") == \"OK\":\n utils.limiter.exempt_ip_from_ratelimits(client_ip)\n elif is_valid_user:\n utils.limiter.rate_limit_user(username, client_ip)\n elif not is_from_webmail:\n utils.limiter.rate_limit_ip(client_ip)\n return response\n\[email protected](\"/auth/admin\")\ndef admin_authentication():\n \"\"\" Fails if the user is not an authenticated admin.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.global_admin\n and flask_login.current_user.enabled):\n return \"\"\n return flask.abort(403)\n\[email protected](\"/auth/user\")\ndef user_authentication():\n \"\"\" Fails if the user is not authenticated.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.enabled):\n response = flask.Response()\n email = flask_login.current_user.get_id()\n response.headers[\"X-User\"] = models.IdnaEmail.process_bind_param(flask_login, email, \"\")\n response.headers[\"X-User-Token\"] = utils.gen_temp_token(email, flask.session)\n return response\n return flask.abort(403)\n\n\[email protected](\"/auth/basic\")\ndef basic_authentication():\n \"\"\" Tries to authenticate using the Authorization header.\n \"\"\"\n client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)\n if utils.limiter.should_rate_limit_ip(client_ip):\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Authentication rate limit from one source exceeded\"'\n response.headers['Retry-After'] = '60'\n return response\n authorization = flask.request.headers.get(\"Authorization\")\n if authorization and authorization.startswith(\"Basic \"):\n encoded = authorization.replace(\"Basic \", \"\")\n user_email, password = base64.b64decode(encoded).split(b\":\", 1)\n user_email = user_email.decode(\"utf8\")\n if utils.limiter.should_rate_limit_user(user_email, client_ip):\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Authentication rate limit for this username exceeded\"'\n response.headers['Retry-After'] = '60'\n return response\n user = models.User.query.get(user_email)\n if user and nginx.check_credentials(user, password.decode('utf-8'), client_ip, \"web\"):\n response = flask.Response()\n response.headers[\"X-User\"] = models.IdnaEmail.process_bind_param(flask_login, user.email, \"\")\n utils.limiter.exempt_ip_from_ratelimits(client_ip)\n return response\n utils.limiter.rate_limit_user(user_email, client_ip) if user else utils.limiter.rate_limit_ip(client_ip)\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Login Required\"'\n return response\n", "path": "core/admin/mailu/internal/views/auth.py"}]}
1,995
293
gh_patches_debug_6805
rasdani/github-patches
git_diff
Pylons__pyramid-2672
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> reify docstring doesn't render properly The testsetup section is omitted from the rendered docs at http://pyramid.readthedocs.io/en/latest/api/decorator.html#pyramid.decorator.reify Unfortunately this section is essential for understanding the example. </issue> <code> [start of pyramid/decorator.py] 1 from functools import update_wrapper 2 3 4 class reify(object): 5 """ Use as a class method decorator. It operates almost exactly like the 6 Python ``@property`` decorator, but it puts the result of the method it 7 decorates into the instance dict after the first call, effectively 8 replacing the function it decorates with an instance variable. It is, in 9 Python parlance, a non-data descriptor. An example: 10 11 .. testsetup:: 12 13 from pyramid.decorator import reify 14 15 class Foo(object): 16 @reify 17 def jammy(self): 18 print('jammy called') 19 return 1 20 21 And usage of Foo: 22 23 .. doctest:: 24 25 >>> f = Foo() 26 >>> v = f.jammy 27 jammy called 28 >>> print(v) 29 1 30 >>> f.jammy 31 1 32 >>> # jammy func not called the second time; it replaced itself with 1 33 >>> # Note: reassignment is possible 34 >>> f.jammy = 2 35 >>> f.jammy 36 2 37 """ 38 def __init__(self, wrapped): 39 self.wrapped = wrapped 40 update_wrapper(self, wrapped) 41 42 def __get__(self, inst, objtype=None): 43 if inst is None: 44 return self 45 val = self.wrapped(inst) 46 setattr(inst, self.wrapped.__name__, val) 47 return val 48 49 [end of pyramid/decorator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyramid/decorator.py b/pyramid/decorator.py --- a/pyramid/decorator.py +++ b/pyramid/decorator.py @@ -8,6 +8,16 @@ replacing the function it decorates with an instance variable. It is, in Python parlance, a non-data descriptor. An example: + .. code-block:: python + + from pyramid.decorator import reify + + class Foo(object): + @reify + def jammy(self): + print('jammy called') + return 1 + .. testsetup:: from pyramid.decorator import reify
{"golden_diff": "diff --git a/pyramid/decorator.py b/pyramid/decorator.py\n--- a/pyramid/decorator.py\n+++ b/pyramid/decorator.py\n@@ -8,6 +8,16 @@\n replacing the function it decorates with an instance variable. It is, in\n Python parlance, a non-data descriptor. An example:\n \n+ .. code-block:: python\n+\n+ from pyramid.decorator import reify\n+\n+ class Foo(object):\n+ @reify\n+ def jammy(self):\n+ print('jammy called')\n+ return 1\n+\n .. testsetup::\n \n from pyramid.decorator import reify\n", "issue": "reify docstring doesn't render properly\nThe testsetup section is omitted from the rendered docs at http://pyramid.readthedocs.io/en/latest/api/decorator.html#pyramid.decorator.reify\n\nUnfortunately this section is essential for understanding the example.\n\n", "before_files": [{"content": "from functools import update_wrapper\n\n\nclass reify(object):\n \"\"\" Use as a class method decorator. It operates almost exactly like the\n Python ``@property`` decorator, but it puts the result of the method it\n decorates into the instance dict after the first call, effectively\n replacing the function it decorates with an instance variable. It is, in\n Python parlance, a non-data descriptor. An example:\n\n .. testsetup::\n\n from pyramid.decorator import reify\n\n class Foo(object):\n @reify\n def jammy(self):\n print('jammy called')\n return 1\n\n And usage of Foo:\n\n .. doctest::\n\n >>> f = Foo()\n >>> v = f.jammy\n jammy called\n >>> print(v)\n 1\n >>> f.jammy\n 1\n >>> # jammy func not called the second time; it replaced itself with 1\n >>> # Note: reassignment is possible\n >>> f.jammy = 2\n >>> f.jammy\n 2\n \"\"\"\n def __init__(self, wrapped):\n self.wrapped = wrapped\n update_wrapper(self, wrapped)\n\n def __get__(self, inst, objtype=None):\n if inst is None:\n return self\n val = self.wrapped(inst)\n setattr(inst, self.wrapped.__name__, val)\n return val\n\n", "path": "pyramid/decorator.py"}]}
995
150
gh_patches_debug_17650
rasdani/github-patches
git_diff
conan-io__conan-4834
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conan export-pkg with --package-folder does not set the metadata info
When an export-pkg command is used with the ``--package-folder``, the *metadata.json* is not updated with the package ID and package revision.

Then, when the user tries to upload the package ir errors with:
```
ERROR: Binary package hello/1.0@user/testing:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 not found
```

Here is a test reproducing the issue:

```
    def upload_export_pkg_test(self):
        """
        Package metadata is created when doing an export-pkg and then uploading the package
        """
        server1 = TestServer([("*/*@*/*", "*")], [("*/*@*/*", "*")], users={"lasote": "mypass"})
        servers = OrderedDict()
        servers["server1"] = server1
        client = TestClient(servers=servers)
        client.save({"release/kk.lib": ""})
        client.run("user lasote -r server1 -p mypass")
        client.run("new hello/1.0 --header")
        client.run("export-pkg . user/testing -pf release")
        client.run("upload hello/1.0@user/testing --all -r server1")
```

The original issue comes from #4768

</issue>
<code>
[start of conans/client/cmd/export_pkg.py]
1 import os
2 
3 from conans.client import packager
4 from conans.client.graph.graph_manager import load_deps_info
5 from conans.errors import ConanException
6 from conans.model.manifest import FileTreeManifest
7 from conans.model.ref import PackageReference
8 from conans.util.files import rmdir
9 
10 
11 def export_pkg(cache, graph_manager, hook_manager, recorder, output,
12                ref, source_folder, build_folder, package_folder, install_folder,
13                graph_info, force):
14 
15     conan_file_path = cache.conanfile(ref)
16     if not os.path.exists(conan_file_path):
17         raise ConanException("Package recipe '%s' does not exist" % str(ref))
18 
19     # The graph has to be loaded with build_mode=[ref.name], so that node is not tried
20     # to be downloaded from remotes
21     deps_graph, _ = graph_manager.load_graph(ref, None, graph_info=graph_info, build_mode=[ref.name],
22                                              check_updates=False, update=False,
23                                              remote_name=None, recorder=recorder)
24     # this is a bit tricky, but works. The root (virtual), has only 1 neighbor,
25     # which is the exported pkg
26     nodes = deps_graph.root.neighbors()
27     conanfile = nodes[0].conanfile
28     from conans.client.conan_api import existing_info_files
29     if install_folder and existing_info_files(install_folder):
30         load_deps_info(install_folder, conanfile, required=True)
31     package_id = nodes[0].package_id
32     output.info("Packaging to %s" % package_id)
33     pref = PackageReference(ref, package_id)
34     dest_package_folder = cache.package(pref, short_paths=conanfile.short_paths)
35 
36     if os.path.exists(dest_package_folder):
37         if force:
38             rmdir(dest_package_folder)
39         else:
40             raise ConanException("Package already exists. Please use --force, -f to "
41                                  "overwrite it")
42 
43     recipe_hash = cache.package_layout(ref).recipe_manifest().summary_hash
44     conanfile.info.recipe_hash = recipe_hash
45     conanfile.develop = True
46     if package_folder:
47         packager.export_pkg(conanfile, package_id, package_folder, dest_package_folder,
48                             hook_manager, conan_file_path, ref)
49     else:
50         packager.create_package(conanfile, package_id, source_folder, build_folder,
51                                 dest_package_folder, install_folder, hook_manager, conan_file_path,
52                                 ref, local=True)
53     with cache.package_layout(ref).update_metadata() as metadata:
54         readed_manifest = FileTreeManifest.load(dest_package_folder)
55         metadata.packages[package_id].revision = readed_manifest.summary_hash
56         metadata.packages[package_id].recipe_revision = metadata.recipe.revision
57 
58     recorder.package_exported(pref)
[end of conans/client/cmd/export_pkg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/conans/client/cmd/export_pkg.py b/conans/client/cmd/export_pkg.py --- a/conans/client/cmd/export_pkg.py +++ b/conans/client/cmd/export_pkg.py @@ -50,9 +50,9 @@ packager.create_package(conanfile, package_id, source_folder, build_folder, dest_package_folder, install_folder, hook_manager, conan_file_path, ref, local=True) - with cache.package_layout(ref).update_metadata() as metadata: - readed_manifest = FileTreeManifest.load(dest_package_folder) - metadata.packages[package_id].revision = readed_manifest.summary_hash - metadata.packages[package_id].recipe_revision = metadata.recipe.revision + with cache.package_layout(ref).update_metadata() as metadata: + readed_manifest = FileTreeManifest.load(dest_package_folder) + metadata.packages[package_id].revision = readed_manifest.summary_hash + metadata.packages[package_id].recipe_revision = metadata.recipe.revision recorder.package_exported(pref)
{"golden_diff": "diff --git a/conans/client/cmd/export_pkg.py b/conans/client/cmd/export_pkg.py\n--- a/conans/client/cmd/export_pkg.py\n+++ b/conans/client/cmd/export_pkg.py\n@@ -50,9 +50,9 @@\n packager.create_package(conanfile, package_id, source_folder, build_folder,\n dest_package_folder, install_folder, hook_manager, conan_file_path,\n ref, local=True)\n- with cache.package_layout(ref).update_metadata() as metadata:\n- readed_manifest = FileTreeManifest.load(dest_package_folder)\n- metadata.packages[package_id].revision = readed_manifest.summary_hash\n- metadata.packages[package_id].recipe_revision = metadata.recipe.revision\n+ with cache.package_layout(ref).update_metadata() as metadata:\n+ readed_manifest = FileTreeManifest.load(dest_package_folder)\n+ metadata.packages[package_id].revision = readed_manifest.summary_hash\n+ metadata.packages[package_id].recipe_revision = metadata.recipe.revision\n \n recorder.package_exported(pref)\n", "issue": "conan export-pkg with --package-folder does not set the metadata info\nWhen an export-pkg command is used with the ``--package-folder``, the *metadata.json* is not updated with the package ID and package revision.\r\n\r\nThen, when the user tries to upload the package ir errors with:\r\n```\r\nERROR: Binary package hello/1.0@user/testing:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 not found\r\n```\r\n\r\nHere is a test reproducing the issue:\r\n\r\n```\r\n def upload_export_pkg_test(self):\r\n \"\"\"\r\n Package metadata is created when doing an export-pkg and then uploading the package\r\n \"\"\"\r\n server1 = TestServer([(\"*/*@*/*\", \"*\")], [(\"*/*@*/*\", \"*\")], users={\"lasote\": \"mypass\"})\r\n servers = OrderedDict()\r\n servers[\"server1\"] = server1\r\n client = TestClient(servers=servers)\r\n client.save({\"release/kk.lib\": \"\"})\r\n client.run(\"user lasote -r server1 -p mypass\")\r\n client.run(\"new hello/1.0 --header\")\r\n client.run(\"export-pkg . user/testing -pf release\")\r\n client.run(\"upload hello/1.0@user/testing --all -r server1\")\r\n```\r\n\r\nThe original issue comes from #4768\n", "before_files": [{"content": "import os\n\nfrom conans.client import packager\nfrom conans.client.graph.graph_manager import load_deps_info\nfrom conans.errors import ConanException\nfrom conans.model.manifest import FileTreeManifest\nfrom conans.model.ref import PackageReference\nfrom conans.util.files import rmdir\n\n\ndef export_pkg(cache, graph_manager, hook_manager, recorder, output,\n ref, source_folder, build_folder, package_folder, install_folder,\n graph_info, force):\n\n conan_file_path = cache.conanfile(ref)\n if not os.path.exists(conan_file_path):\n raise ConanException(\"Package recipe '%s' does not exist\" % str(ref))\n\n # The graph has to be loaded with build_mode=[ref.name], so that node is not tried\n # to be downloaded from remotes\n deps_graph, _ = graph_manager.load_graph(ref, None, graph_info=graph_info, build_mode=[ref.name],\n check_updates=False, update=False,\n remote_name=None, recorder=recorder)\n # this is a bit tricky, but works. 
The root (virtual), has only 1 neighbor,\n # which is the exported pkg\n nodes = deps_graph.root.neighbors()\n conanfile = nodes[0].conanfile\n from conans.client.conan_api import existing_info_files\n if install_folder and existing_info_files(install_folder):\n load_deps_info(install_folder, conanfile, required=True)\n package_id = nodes[0].package_id\n output.info(\"Packaging to %s\" % package_id)\n pref = PackageReference(ref, package_id)\n dest_package_folder = cache.package(pref, short_paths=conanfile.short_paths)\n\n if os.path.exists(dest_package_folder):\n if force:\n rmdir(dest_package_folder)\n else:\n raise ConanException(\"Package already exists. Please use --force, -f to \"\n \"overwrite it\")\n\n recipe_hash = cache.package_layout(ref).recipe_manifest().summary_hash\n conanfile.info.recipe_hash = recipe_hash\n conanfile.develop = True\n if package_folder:\n packager.export_pkg(conanfile, package_id, package_folder, dest_package_folder,\n hook_manager, conan_file_path, ref)\n else:\n packager.create_package(conanfile, package_id, source_folder, build_folder,\n dest_package_folder, install_folder, hook_manager, conan_file_path,\n ref, local=True)\n with cache.package_layout(ref).update_metadata() as metadata:\n readed_manifest = FileTreeManifest.load(dest_package_folder)\n metadata.packages[package_id].revision = readed_manifest.summary_hash\n metadata.packages[package_id].recipe_revision = metadata.recipe.revision\n\n recorder.package_exported(pref)\n", "path": "conans/client/cmd/export_pkg.py"}]}
1,538
224
gh_patches_debug_17967
rasdani/github-patches
git_diff
WeblateOrg__weblate-11641
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> No way to set default notification setting for watched projects ### Describe the issue Thanks for this great project. On the notifications part there is some room for improvements as when you come from transifex, you get a lot of useful notifications which encourage user to finish the translation of a language when stuff changes. Would it be possible to set some defaults for the "New translation component was created" and "New string is available for translation" settings. As IMO for these settings an instant notification should be sent out. Probably some more would be nice too. ### I already tried - [X] I've read and searched [the documentation](https://docs.weblate.org/). - [X] I've searched for similar issues in this repository. ### Steps to reproduce the behavior 1. Got to /accounts/profile/#notifications__1 2. Check the component and translation notifications ### Expected behavior Some default settings to adjust, either in the GUI or by configuration variable. ### Screenshots _No response_ ### Exception traceback _No response_ ### How do you run Weblate? Docker container ### Weblate versions _No response_ ### Weblate deploy checks _No response_ ### Additional context Kind of related to #5155. </issue> <code> [start of weblate/accounts/data.py] 1 # Copyright © Michal Čihař <[email protected]> 2 # 3 # SPDX-License-Identifier: GPL-3.0-or-later 4 5 from weblate.accounts.notifications import ( 6 FREQ_INSTANT, 7 SCOPE_ADMIN, 8 SCOPE_ALL, 9 SCOPE_WATCHED, 10 ) 11 12 DEFAULT_NOTIFICATIONS = [ 13 (SCOPE_ALL, FREQ_INSTANT, "MentionCommentNotificaton"), 14 (SCOPE_WATCHED, FREQ_INSTANT, "LastAuthorCommentNotificaton"), 15 (SCOPE_WATCHED, FREQ_INSTANT, "MentionCommentNotificaton"), 16 (SCOPE_WATCHED, FREQ_INSTANT, "NewAnnouncementNotificaton"), 17 (SCOPE_ADMIN, FREQ_INSTANT, "MergeFailureNotification"), 18 (SCOPE_ADMIN, FREQ_INSTANT, "ParseErrorNotification"), 19 (SCOPE_ADMIN, FREQ_INSTANT, "NewTranslationNotificaton"), 20 (SCOPE_ADMIN, FREQ_INSTANT, "NewAlertNotificaton"), 21 (SCOPE_ADMIN, FREQ_INSTANT, "NewAnnouncementNotificaton"), 22 ] 23 24 25 def create_default_notifications(user) -> None: 26 for scope, frequency, notification in DEFAULT_NOTIFICATIONS: 27 user.subscription_set.get_or_create( 28 scope=scope, notification=notification, defaults={"frequency": frequency} 29 ) 30 [end of weblate/accounts/data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/weblate/accounts/data.py b/weblate/accounts/data.py --- a/weblate/accounts/data.py +++ b/weblate/accounts/data.py @@ -4,6 +4,7 @@ from weblate.accounts.notifications import ( FREQ_INSTANT, + FREQ_WEEKLY, SCOPE_ADMIN, SCOPE_ALL, SCOPE_WATCHED, @@ -14,6 +15,7 @@ (SCOPE_WATCHED, FREQ_INSTANT, "LastAuthorCommentNotificaton"), (SCOPE_WATCHED, FREQ_INSTANT, "MentionCommentNotificaton"), (SCOPE_WATCHED, FREQ_INSTANT, "NewAnnouncementNotificaton"), + (SCOPE_WATCHED, FREQ_WEEKLY, "NewStringNotificaton"), (SCOPE_ADMIN, FREQ_INSTANT, "MergeFailureNotification"), (SCOPE_ADMIN, FREQ_INSTANT, "ParseErrorNotification"), (SCOPE_ADMIN, FREQ_INSTANT, "NewTranslationNotificaton"),
{"golden_diff": "diff --git a/weblate/accounts/data.py b/weblate/accounts/data.py\n--- a/weblate/accounts/data.py\n+++ b/weblate/accounts/data.py\n@@ -4,6 +4,7 @@\n \n from weblate.accounts.notifications import (\n FREQ_INSTANT,\n+ FREQ_WEEKLY,\n SCOPE_ADMIN,\n SCOPE_ALL,\n SCOPE_WATCHED,\n@@ -14,6 +15,7 @@\n (SCOPE_WATCHED, FREQ_INSTANT, \"LastAuthorCommentNotificaton\"),\n (SCOPE_WATCHED, FREQ_INSTANT, \"MentionCommentNotificaton\"),\n (SCOPE_WATCHED, FREQ_INSTANT, \"NewAnnouncementNotificaton\"),\n+ (SCOPE_WATCHED, FREQ_WEEKLY, \"NewStringNotificaton\"),\n (SCOPE_ADMIN, FREQ_INSTANT, \"MergeFailureNotification\"),\n (SCOPE_ADMIN, FREQ_INSTANT, \"ParseErrorNotification\"),\n (SCOPE_ADMIN, FREQ_INSTANT, \"NewTranslationNotificaton\"),\n", "issue": "No way to set default notification setting for watched projects\n### Describe the issue\n\nThanks for this great project. On the notifications part there is some room for improvements as when you come from transifex, you get a lot of useful notifications which encourage user to finish the translation of a language when stuff changes. Would it be possible to set some defaults for the \"New translation component was created\" and \"New string is available for translation\" settings. As IMO for these settings an instant notification should be sent out. Probably some more would be nice too.\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar issues in this repository.\n\n### Steps to reproduce the behavior\n\n1. Got to /accounts/profile/#notifications__1\r\n2. Check the component and translation notifications\n\n### Expected behavior\n\nSome default settings to adjust, either in the GUI or by configuration variable.\n\n### Screenshots\n\n_No response_\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nDocker container\n\n### Weblate versions\n\n_No response_\n\n### Weblate deploy checks\n\n_No response_\n\n### Additional context\n\nKind of related to #5155.\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom weblate.accounts.notifications import (\n FREQ_INSTANT,\n SCOPE_ADMIN,\n SCOPE_ALL,\n SCOPE_WATCHED,\n)\n\nDEFAULT_NOTIFICATIONS = [\n (SCOPE_ALL, FREQ_INSTANT, \"MentionCommentNotificaton\"),\n (SCOPE_WATCHED, FREQ_INSTANT, \"LastAuthorCommentNotificaton\"),\n (SCOPE_WATCHED, FREQ_INSTANT, \"MentionCommentNotificaton\"),\n (SCOPE_WATCHED, FREQ_INSTANT, \"NewAnnouncementNotificaton\"),\n (SCOPE_ADMIN, FREQ_INSTANT, \"MergeFailureNotification\"),\n (SCOPE_ADMIN, FREQ_INSTANT, \"ParseErrorNotification\"),\n (SCOPE_ADMIN, FREQ_INSTANT, \"NewTranslationNotificaton\"),\n (SCOPE_ADMIN, FREQ_INSTANT, \"NewAlertNotificaton\"),\n (SCOPE_ADMIN, FREQ_INSTANT, \"NewAnnouncementNotificaton\"),\n]\n\n\ndef create_default_notifications(user) -> None:\n for scope, frequency, notification in DEFAULT_NOTIFICATIONS:\n user.subscription_set.get_or_create(\n scope=scope, notification=notification, defaults={\"frequency\": frequency}\n )\n", "path": "weblate/accounts/data.py"}]}
1,136
227
gh_patches_debug_28982
rasdani/github-patches
git_diff
bokeh__bokeh-5327
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Examples with deprecation warnings
As of 0.12.3, the following examples have deprecation warnings
- plotting/file/hover.py
- plotting/file/tap.py

</issue>
<code>
[start of examples/plotting/file/tap.py]
1 import itertools
2 import numpy as np
3 
4 from bokeh.plotting import ColumnDataSource, figure, show, output_file
5 from bokeh.models import TapTool
6 
7 xx, yy = np.meshgrid(range(0,101,4), range(0,101,4))
8 x = xx.flatten()
9 y = yy.flatten()
10 N = len(x)
11 inds = [str(i) for i in np.arange(N)]
12 radii = np.random.random(size=N)*0.4 + 1.7
13 colors = [
14     "#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
15 ]
16 
17 source = ColumnDataSource(data=dict(
18     x=x,
19     y=y,
20     radius=radii,
21     colors=colors,
22     foo=list(itertools.permutations("abcdef"))[:N],
23     bar=np.random.normal(size=N),
24 ))
25 
26 TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,tap,save"
27 
28 p = figure(title="Tappy Scatter", tools=TOOLS)
29 
30 cr = p.circle(x, y, radius=radii, source=source,
31               fill_color=colors, fill_alpha=0.6, line_color=None)
32 
33 tr = p.text(x, y, text=inds, alpha=0.5, text_font_size="5pt",
34             text_baseline="middle", text_align="center")
35 
36 # in the browser console, you will see messages when circles are clicked
37 tool = p.select_one(TapTool).renderers = [cr]
38 
39 output_file("tap.html", title="tap.py example")
40 
41 show(p)  # open a browser
[end of examples/plotting/file/tap.py]
[start of examples/plotting/file/hover.py]
1 import itertools
2 
3 import numpy as np
4 
5 from bokeh.plotting import ColumnDataSource, figure, show, output_file
6 from bokeh.models import HoverTool
7 
8 TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,hover,save"
9 
10 xx, yy = np.meshgrid(range(0,101,4), range(0,101,4))
11 x = xx.flatten()
12 y = yy.flatten()
13 N = len(x)
14 inds = [str(i) for i in np.arange(N)]
15 radii = np.random.random(size=N)*0.4 + 1.7
16 colors = [
17     "#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
18 ]
19 
20 source = ColumnDataSource(data=dict(
21     x=x,
22     y=y,
23     radius=radii,
24     colors=colors,
25     foo=list(itertools.permutations("abcdef"))[:N],
26     bar=np.random.normal(size=N),
27 ))
28 
29 p = figure(title="Hoverful Scatter", tools=TOOLS)
30 
31 p.circle(x, y, radius=radii, source=source,
32          fill_color=colors, fill_alpha=0.6, line_color=None)
33 
34 p.text(x, y, text=inds, alpha=0.5, text_font_size="5pt",
35        text_baseline="middle", text_align="center")
36 
37 hover = p.select_one(HoverTool).tooltips = [
38     ("index", "$index"),
39     ("(x,y)", "($x, $y)"),
40     ("radius", "@radius"),
41     ("fill color", "$color[hex, swatch]:fill_color"),
42     ("foo", "@foo"),
43     ("bar", "@bar"),
44 ]
45 
46 output_file("hover.html", title="hover.py example")
47 
48 show(p)  # open a browser
[end of examples/plotting/file/hover.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/examples/plotting/file/hover.py b/examples/plotting/file/hover.py --- a/examples/plotting/file/hover.py +++ b/examples/plotting/file/hover.py @@ -28,8 +28,8 @@ p = figure(title="Hoverful Scatter", tools=TOOLS) -p.circle(x, y, radius=radii, source=source, - fill_color=colors, fill_alpha=0.6, line_color=None) +p.circle(x='x', y='y', radius='radius', source=source, + fill_color='colors', fill_alpha=0.6, line_color=None) p.text(x, y, text=inds, alpha=0.5, text_font_size="5pt", text_baseline="middle", text_align="center") diff --git a/examples/plotting/file/tap.py b/examples/plotting/file/tap.py --- a/examples/plotting/file/tap.py +++ b/examples/plotting/file/tap.py @@ -1,7 +1,6 @@ -import itertools import numpy as np -from bokeh.plotting import ColumnDataSource, figure, show, output_file +from bokeh.plotting import figure, show, output_file from bokeh.models import TapTool xx, yy = np.meshgrid(range(0,101,4), range(0,101,4)) @@ -14,20 +13,11 @@ "#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y) ] -source = ColumnDataSource(data=dict( - x=x, - y=y, - radius=radii, - colors=colors, - foo=list(itertools.permutations("abcdef"))[:N], - bar=np.random.normal(size=N), -)) - TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,tap,save" p = figure(title="Tappy Scatter", tools=TOOLS) -cr = p.circle(x, y, radius=radii, source=source, +cr = p.circle(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None) tr = p.text(x, y, text=inds, alpha=0.5, text_font_size="5pt",
{"golden_diff": "diff --git a/examples/plotting/file/hover.py b/examples/plotting/file/hover.py\n--- a/examples/plotting/file/hover.py\n+++ b/examples/plotting/file/hover.py\n@@ -28,8 +28,8 @@\n \n p = figure(title=\"Hoverful Scatter\", tools=TOOLS)\n \n-p.circle(x, y, radius=radii, source=source,\n- fill_color=colors, fill_alpha=0.6, line_color=None)\n+p.circle(x='x', y='y', radius='radius', source=source,\n+ fill_color='colors', fill_alpha=0.6, line_color=None)\n \n p.text(x, y, text=inds, alpha=0.5, text_font_size=\"5pt\",\n text_baseline=\"middle\", text_align=\"center\")\ndiff --git a/examples/plotting/file/tap.py b/examples/plotting/file/tap.py\n--- a/examples/plotting/file/tap.py\n+++ b/examples/plotting/file/tap.py\n@@ -1,7 +1,6 @@\n-import itertools\n import numpy as np\n \n-from bokeh.plotting import ColumnDataSource, figure, show, output_file\n+from bokeh.plotting import figure, show, output_file\n from bokeh.models import TapTool\n \n xx, yy = np.meshgrid(range(0,101,4), range(0,101,4))\n@@ -14,20 +13,11 @@\n \"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)\n ]\n \n-source = ColumnDataSource(data=dict(\n- x=x,\n- y=y,\n- radius=radii,\n- colors=colors,\n- foo=list(itertools.permutations(\"abcdef\"))[:N],\n- bar=np.random.normal(size=N),\n-))\n-\n TOOLS=\"crosshair,pan,wheel_zoom,box_zoom,reset,tap,save\"\n \n p = figure(title=\"Tappy Scatter\", tools=TOOLS)\n \n-cr = p.circle(x, y, radius=radii, source=source,\n+cr = p.circle(x, y, radius=radii,\n fill_color=colors, fill_alpha=0.6, line_color=None)\n \n tr = p.text(x, y, text=inds, alpha=0.5, text_font_size=\"5pt\",\n", "issue": "Examples with deprecation warnings\nAs of 0.12.3, the following examples have deprecation warnings\n- plotting/file/hover.py\n- plotting/file/tap.py\n\n", "before_files": [{"content": "import itertools\nimport numpy as np\n\nfrom bokeh.plotting import ColumnDataSource, figure, show, output_file\nfrom bokeh.models import TapTool\n\nxx, yy = np.meshgrid(range(0,101,4), range(0,101,4))\nx = xx.flatten()\ny = yy.flatten()\nN = len(x)\ninds = [str(i) for i in np.arange(N)]\nradii = np.random.random(size=N)*0.4 + 1.7\ncolors = [\n \"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)\n]\n\nsource = ColumnDataSource(data=dict(\n x=x,\n y=y,\n radius=radii,\n colors=colors,\n foo=list(itertools.permutations(\"abcdef\"))[:N],\n bar=np.random.normal(size=N),\n))\n\nTOOLS=\"crosshair,pan,wheel_zoom,box_zoom,reset,tap,save\"\n\np = figure(title=\"Tappy Scatter\", tools=TOOLS)\n\ncr = p.circle(x, y, radius=radii, source=source,\n fill_color=colors, fill_alpha=0.6, line_color=None)\n\ntr = p.text(x, y, text=inds, alpha=0.5, text_font_size=\"5pt\",\n text_baseline=\"middle\", text_align=\"center\")\n\n# in the browser console, you will see messages when circles are clicked\ntool = p.select_one(TapTool).renderers = [cr]\n\noutput_file(\"tap.html\", title=\"tap.py example\")\n\nshow(p) # open a browser\n", "path": "examples/plotting/file/tap.py"}, {"content": "import itertools\n\nimport numpy as np\n\nfrom bokeh.plotting import ColumnDataSource, figure, show, output_file\nfrom bokeh.models import HoverTool\n\nTOOLS=\"crosshair,pan,wheel_zoom,box_zoom,reset,hover,save\"\n\nxx, yy = np.meshgrid(range(0,101,4), range(0,101,4))\nx = xx.flatten()\ny = yy.flatten()\nN = len(x)\ninds = [str(i) for i in np.arange(N)]\nradii = np.random.random(size=N)*0.4 + 1.7\ncolors = [\n \"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)\n]\n\nsource = 
ColumnDataSource(data=dict(\n x=x,\n y=y,\n radius=radii,\n colors=colors,\n foo=list(itertools.permutations(\"abcdef\"))[:N],\n bar=np.random.normal(size=N),\n))\n\np = figure(title=\"Hoverful Scatter\", tools=TOOLS)\n\np.circle(x, y, radius=radii, source=source,\n fill_color=colors, fill_alpha=0.6, line_color=None)\n\np.text(x, y, text=inds, alpha=0.5, text_font_size=\"5pt\",\n text_baseline=\"middle\", text_align=\"center\")\n\nhover = p.select_one(HoverTool).tooltips = [\n (\"index\", \"$index\"),\n (\"(x,y)\", \"($x, $y)\"),\n (\"radius\", \"@radius\"),\n (\"fill color\", \"$color[hex, swatch]:fill_color\"),\n (\"foo\", \"@foo\"),\n (\"bar\", \"@bar\"),\n]\n\noutput_file(\"hover.html\", title=\"hover.py example\")\n\nshow(p) # open a browser\n", "path": "examples/plotting/file/hover.py"}]}
1,545
532
gh_patches_debug_96
rasdani/github-patches
git_diff
pymodbus-dev__pymodbus-1422
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> py.typed missing in pip install ### Versions - Python: 3.11 - OS: macOS - Pymodbus: 3.2.0 (pip install) - Modbus Hardware (if used): ### Description running mypy on my project complains about missing ``` artisanlib/modbusport.py:68: error: Skipping analyzing "pymodbus.constants": module is installed, but missing library stubs or py.typed marker [import] artisanlib/modbusport.py:69: error: Skipping analyzing "pymodbus.payload": module is installed, but missing library stubs or py.typed marker [import] artisanlib/modbusport.py:241: error: Skipping analyzing "pymodbus.client": module is installed, but missing library stubs or py.typed marker [import] artisanlib/modbusport.py:385: error: Skipping analyzing "pymodbus.pdu": module is installed, but missing library stubs or py.typed marker [import] ``` despite all your nice work on adding type annotations. The file py.typed id not get installed along via ``` # sudo -H python3 -m pip install pymodbus --upgrade ``` ``` # cd /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/pymodbus # ls ./py.typed ls: ./py.typed: No such file or directory ``` I added it as follows and the mypy errors went away. ``` # cd /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/pymodbus # sudo touch py.typed ``` </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 """Installs pymodbus using setuptools.""" 3 4 5 # --------------------------------------------------------------------------- # 6 # initialization 7 # --------------------------------------------------------------------------- # 8 from setuptools import setup 9 10 11 dependencies = {} 12 with open("requirements.txt") as reqs: 13 option = None 14 for line in reqs.read().split("\n"): 15 if line == "": 16 option = None 17 elif line.startswith("# install:"): 18 option = line.split(":")[1] 19 dependencies[option] = [] 20 elif not line.startswith("#") and option: 21 dependencies[option].append(line) 22 23 install_req = dependencies["required"] 24 del dependencies["required"] 25 26 27 # --------------------------------------------------------------------------- # 28 # configuration 29 # --------------------------------------------------------------------------- # 30 setup( 31 install_requires=install_req, 32 extras_require=dependencies, 33 ) 34 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -30,4 +30,5 @@ setup( install_requires=install_req, extras_require=dependencies, + package_data={"pymodbus": ["py.typed"]}, )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,4 +30,5 @@\n setup(\n install_requires=install_req,\n extras_require=dependencies,\n+ package_data={\"pymodbus\": [\"py.typed\"]},\n )\n", "issue": "py.typed missing in pip install\n\r\n### Versions\r\n\r\n- Python: 3.11\r\n- OS: macOS\r\n- Pymodbus: 3.2.0 (pip install)\r\n- Modbus Hardware (if used):\r\n\r\n### Description\r\n\r\nrunning mypy on my project complains about missing \r\n\r\n```\r\nartisanlib/modbusport.py:68: error: Skipping analyzing \"pymodbus.constants\": module is installed, but missing library stubs or py.typed marker [import]\r\nartisanlib/modbusport.py:69: error: Skipping analyzing \"pymodbus.payload\": module is installed, but missing library stubs or py.typed marker [import]\r\nartisanlib/modbusport.py:241: error: Skipping analyzing \"pymodbus.client\": module is installed, but missing library stubs or py.typed marker [import]\r\nartisanlib/modbusport.py:385: error: Skipping analyzing \"pymodbus.pdu\": module is installed, but missing library stubs or py.typed marker [import]\r\n```\r\n\r\ndespite all your nice work on adding type annotations. The file py.typed id not get installed along via\r\n\r\n```\r\n# sudo -H python3 -m pip install pymodbus --upgrade\r\n```\r\n\r\n```\r\n# cd /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/pymodbus\r\n# ls ./py.typed\r\nls: ./py.typed: No such file or directory\r\n```\r\n\r\nI added it as follows and the mypy errors went away.\r\n\r\n```\r\n# cd /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/pymodbus\r\n# sudo touch py.typed\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Installs pymodbus using setuptools.\"\"\"\n\n\n# --------------------------------------------------------------------------- #\n# initialization\n# --------------------------------------------------------------------------- #\nfrom setuptools import setup\n\n\ndependencies = {}\nwith open(\"requirements.txt\") as reqs:\n option = None\n for line in reqs.read().split(\"\\n\"):\n if line == \"\":\n option = None\n elif line.startswith(\"# install:\"):\n option = line.split(\":\")[1]\n dependencies[option] = []\n elif not line.startswith(\"#\") and option:\n dependencies[option].append(line)\n\ninstall_req = dependencies[\"required\"]\ndel dependencies[\"required\"]\n\n\n# --------------------------------------------------------------------------- #\n# configuration\n# --------------------------------------------------------------------------- #\nsetup(\n install_requires=install_req,\n extras_require=dependencies,\n)\n", "path": "setup.py"}]}
1,121
65
gh_patches_debug_23502
rasdani/github-patches
git_diff
uccser__cs-unplugged-255
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create example test for templatetags Create example test for templatetags </issue> <code> [start of csunplugged/general/templatetags/render_html_field.py] 1 """Module for the custom render_html_field template tag.""" 2 3 from django import template 4 from django.template import Template, Variable, TemplateSyntaxError 5 6 7 class RenderHTMLFieldNode(template.Node): 8 """Class used for the custom render_html_field template tag.""" 9 10 def __init__(self, item_to_be_rendered): 11 """Create the RenderHTMLFieldNode object.""" 12 self.item_to_be_rendered = Variable(item_to_be_rendered) 13 14 def render(self, context): 15 """Render the text with the static template tag. 16 17 Returns: 18 Rendered string of text, or an empty string if the render 19 fails to convert. 20 """ 21 try: 22 actual_item = '{% load static %}\n' + self.item_to_be_rendered.resolve(context) 23 return Template(actual_item).render(context) 24 except template.VariableDoesNotExist: 25 return '' 26 27 28 def render_html_field(parser, token): 29 """Run when the render_html_field template tag is used. 30 31 Returns: 32 Rendered string of text, or an empty string if the render 33 fails to convert. 34 """ 35 bits = token.split_contents() 36 if len(bits) != 2: 37 raise TemplateSyntaxError("'%s' takes only one argument" 38 " (a variable representing a template to render)" % bits[0]) 39 return RenderHTMLFieldNode(bits[1]) 40 41 42 register = template.Library() 43 render_html_field = register.tag(render_html_field) 44 [end of csunplugged/general/templatetags/render_html_field.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/csunplugged/general/templatetags/render_html_field.py b/csunplugged/general/templatetags/render_html_field.py --- a/csunplugged/general/templatetags/render_html_field.py +++ b/csunplugged/general/templatetags/render_html_field.py @@ -3,6 +3,12 @@ from django import template from django.template import Template, Variable, TemplateSyntaxError +INVALID_ATTRIBUTE_MESSAGE = "The 'render_html_field' tag was given an " \ + "attribute that could not be converted to a string." + +MISSING_ATTRIBUTE_MESSAGE = "The 'render_html_field' tag was given an " \ + "attribute that does not exist." + class RenderHTMLFieldNode(template.Node): """Class used for the custom render_html_field template tag.""" @@ -21,8 +27,10 @@ try: actual_item = '{% load static %}\n' + self.item_to_be_rendered.resolve(context) return Template(actual_item).render(context) + except TypeError: + raise TemplateSyntaxError(INVALID_ATTRIBUTE_MESSAGE) except template.VariableDoesNotExist: - return '' + raise TemplateSyntaxError(MISSING_ATTRIBUTE_MESSAGE) def render_html_field(parser, token):
{"golden_diff": "diff --git a/csunplugged/general/templatetags/render_html_field.py b/csunplugged/general/templatetags/render_html_field.py\n--- a/csunplugged/general/templatetags/render_html_field.py\n+++ b/csunplugged/general/templatetags/render_html_field.py\n@@ -3,6 +3,12 @@\n from django import template\n from django.template import Template, Variable, TemplateSyntaxError\n \n+INVALID_ATTRIBUTE_MESSAGE = \"The 'render_html_field' tag was given an \" \\\n+ \"attribute that could not be converted to a string.\"\n+\n+MISSING_ATTRIBUTE_MESSAGE = \"The 'render_html_field' tag was given an \" \\\n+ \"attribute that does not exist.\"\n+\n \n class RenderHTMLFieldNode(template.Node):\n \"\"\"Class used for the custom render_html_field template tag.\"\"\"\n@@ -21,8 +27,10 @@\n try:\n actual_item = '{% load static %}\\n' + self.item_to_be_rendered.resolve(context)\n return Template(actual_item).render(context)\n+ except TypeError:\n+ raise TemplateSyntaxError(INVALID_ATTRIBUTE_MESSAGE)\n except template.VariableDoesNotExist:\n- return ''\n+ raise TemplateSyntaxError(MISSING_ATTRIBUTE_MESSAGE)\n \n \n def render_html_field(parser, token):\n", "issue": "Create example test for templatetags\n\nCreate example test for templatetags\n\n", "before_files": [{"content": "\"\"\"Module for the custom render_html_field template tag.\"\"\"\n\nfrom django import template\nfrom django.template import Template, Variable, TemplateSyntaxError\n\n\nclass RenderHTMLFieldNode(template.Node):\n \"\"\"Class used for the custom render_html_field template tag.\"\"\"\n\n def __init__(self, item_to_be_rendered):\n \"\"\"Create the RenderHTMLFieldNode object.\"\"\"\n self.item_to_be_rendered = Variable(item_to_be_rendered)\n\n def render(self, context):\n \"\"\"Render the text with the static template tag.\n\n Returns:\n Rendered string of text, or an empty string if the render\n fails to convert.\n \"\"\"\n try:\n actual_item = '{% load static %}\\n' + self.item_to_be_rendered.resolve(context)\n return Template(actual_item).render(context)\n except template.VariableDoesNotExist:\n return ''\n\n\ndef render_html_field(parser, token):\n \"\"\"Run when the render_html_field template tag is used.\n\n Returns:\n Rendered string of text, or an empty string if the render\n fails to convert.\n \"\"\"\n bits = token.split_contents()\n if len(bits) != 2:\n raise TemplateSyntaxError(\"'%s' takes only one argument\"\n \" (a variable representing a template to render)\" % bits[0])\n return RenderHTMLFieldNode(bits[1])\n\n\nregister = template.Library()\nrender_html_field = register.tag(render_html_field)\n", "path": "csunplugged/general/templatetags/render_html_field.py"}]}
953
275
gh_patches_debug_10305
rasdani/github-patches
git_diff
scikit-hep__pyhf-1242
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add pyhf.writexml and pyhf.readxml to public API docs # Description As first brought up in PR #1125, `pyhf.writexml` and `pyhf.readxml` are not currently documented in the public Python API docs. This should get fixed. </issue> <code> [start of setup.py] 1 from setuptools import setup 2 3 extras_require = { 4 'shellcomplete': ['click_completion'], 5 'tensorflow': [ 6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major 7 'tensorflow-probability~=0.10.0', 8 ], 9 'torch': ['torch~=1.2'], 10 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'], 11 'xmlio': [ 12 'uproot3~=3.14', 13 'uproot~=4.0', 14 ], # uproot3 required until writing to ROOT supported in uproot4 15 'minuit': ['iminuit~=2.1'], 16 } 17 extras_require['backends'] = sorted( 18 set( 19 extras_require['tensorflow'] 20 + extras_require['torch'] 21 + extras_require['jax'] 22 + extras_require['minuit'] 23 ) 24 ) 25 extras_require['contrib'] = sorted({'matplotlib', 'requests'}) 26 extras_require['lint'] = sorted({'flake8', 'black'}) 27 28 extras_require['test'] = sorted( 29 set( 30 extras_require['backends'] 31 + extras_require['xmlio'] 32 + extras_require['contrib'] 33 + extras_require['shellcomplete'] 34 + [ 35 'pytest~=6.0', 36 'pytest-cov>=2.5.1', 37 'pytest-mock', 38 'pytest-benchmark[histogram]', 39 'pytest-console-scripts', 40 'pytest-mpl', 41 'pydocstyle', 42 'coverage>=4.0', # coveralls 43 'papermill~=2.0', 44 'nteract-scrapbook~=0.2', 45 'jupyter', 46 'graphviz', 47 'jsonpatch', 48 ] 49 ) 50 ) 51 extras_require['docs'] = sorted( 52 { 53 'sphinx>=3.1.2', 54 'sphinxcontrib-bibtex~=1.0', 55 'sphinx-click', 56 'sphinx_rtd_theme', 57 'nbsphinx', 58 'ipywidgets', 59 'sphinx-issues', 60 'sphinx-copybutton>0.2.9', 61 } 62 ) 63 extras_require['develop'] = sorted( 64 set( 65 extras_require['docs'] 66 + extras_require['lint'] 67 + extras_require['test'] 68 + [ 69 'nbdime', 70 'bump2version', 71 'ipython', 72 'pre-commit', 73 'check-manifest', 74 'codemetapy>=0.3.4', 75 'twine', 76 ] 77 ) 78 ) 79 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 80 81 82 setup( 83 extras_require=extras_require, 84 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 85 ) 86 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -49,16 +49,19 @@ ) ) extras_require['docs'] = sorted( - { - 'sphinx>=3.1.2', - 'sphinxcontrib-bibtex~=1.0', - 'sphinx-click', - 'sphinx_rtd_theme', - 'nbsphinx', - 'ipywidgets', - 'sphinx-issues', - 'sphinx-copybutton>0.2.9', - } + set( + extras_require['xmlio'] + + [ + 'sphinx>=3.1.2', + 'sphinxcontrib-bibtex~=1.0', + 'sphinx-click', + 'sphinx_rtd_theme', + 'nbsphinx', + 'ipywidgets', + 'sphinx-issues', + 'sphinx-copybutton>0.2.9', + ] + ) ) extras_require['develop'] = sorted( set(
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -49,16 +49,19 @@\n )\n )\n extras_require['docs'] = sorted(\n- {\n- 'sphinx>=3.1.2',\n- 'sphinxcontrib-bibtex~=1.0',\n- 'sphinx-click',\n- 'sphinx_rtd_theme',\n- 'nbsphinx',\n- 'ipywidgets',\n- 'sphinx-issues',\n- 'sphinx-copybutton>0.2.9',\n- }\n+ set(\n+ extras_require['xmlio']\n+ + [\n+ 'sphinx>=3.1.2',\n+ 'sphinxcontrib-bibtex~=1.0',\n+ 'sphinx-click',\n+ 'sphinx_rtd_theme',\n+ 'nbsphinx',\n+ 'ipywidgets',\n+ 'sphinx-issues',\n+ 'sphinx-copybutton>0.2.9',\n+ ]\n+ )\n )\n extras_require['develop'] = sorted(\n set(\n", "issue": "Add pyhf.writexml and pyhf.readxml to public API docs\n# Description\r\n\r\nAs first brought up in PR #1125, `pyhf.writexml` and `pyhf.readxml` are not currently documented in the public Python API docs. This should get fixed.\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': [\n 'uproot3~=3.14',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n {\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=1.0',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n }\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,342
240