problem_id
stringlengths
18
22
source
stringclasses
1 value
task_type
stringclasses
1 value
in_source_id
stringlengths
13
58
prompt
stringlengths
1.71k
9.01k
golden_diff
stringlengths
151
4.94k
verification_info
stringlengths
465
11.3k
num_tokens_prompt
int64
557
2.05k
num_tokens_diff
int64
48
1.02k
gh_patches_debug_31114
rasdani/github-patches
git_diff
bridgecrewio__checkov-2154
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AWS_174 incorrect reporting CKV_AWS_174 is being triggered in our terraform code even though we have the viewer certificate set to use TLSv.1.2. Snippet of our code here: viewer_certificate { acm_certificate_arn = aws_acm_certificate.cert.arn ssl_support_method = "sni-only" minimum_protocol_version = "TLSv1.2_2019" } Steps to reproduce the behavior: Running checkov on our terraform code **Expected behavior** This check should be passed **Additional context** It looks to me like the issue is in the code between lines 17 and 19. I dont think based on the terraform documentation and the if statements that it would ever pass if using an acm certificate https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py </issue> <code> [start of checkov/terraform/checks/resource/aws/CloudfrontTLS12.py] 1 from checkov.common.models.enums import CheckCategories, CheckResult 2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 3 4 5 class CloudFrontTLS12(BaseResourceValueCheck): 6 def __init__(self): 7 name = "Verify CloudFront Distribution Viewer Certificate is using TLS v1.2" 8 id = "CKV_AWS_174" 9 supported_resources = ["aws_cloudfront_distribution"] 10 categories = [CheckCategories.ENCRYPTION] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def scan_resource_conf(self, conf): 14 if "viewer_certificate" in conf.keys(): 15 # check if cloudfront_default_certificate is true then this could use less than tls 1.2 16 viewer_certificate = conf["viewer_certificate"][0] 17 if 'cloudfront_default_certificate' in viewer_certificate: 18 #is not using the default certificate 19 if viewer_certificate["cloudfront_default_certificate"] is not True: 20 #these protocol versions 21 if "minimum_protocol_version" in viewer_certificate: 22 protocol=viewer_certificate["minimum_protocol_version"][0] 23 if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']: 24 return CheckResult.PASSED 25 26 #No cert specified so using default which can be less that tls 1.2 27 return CheckResult.FAILED 28 29 def get_inspected_key(self): 30 31 return "viewer_certificate/[0]/minimum_protocol_version" 32 33 def get_expected_values(self): 34 return ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021'] 35 36 37 check = CloudFrontTLS12() [end of checkov/terraform/checks/resource/aws/CloudfrontTLS12.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py b/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py --- a/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py +++ b/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py @@ -12,18 +12,18 @@ def scan_resource_conf(self, conf): if "viewer_certificate" in conf.keys(): - # check if cloudfront_default_certificate is true then this could use less than tls 1.2 viewer_certificate = conf["viewer_certificate"][0] - if 'cloudfront_default_certificate' in viewer_certificate: - #is not using the default certificate - if viewer_certificate["cloudfront_default_certificate"] is not True: - #these protocol versions - if "minimum_protocol_version" in viewer_certificate: - protocol=viewer_certificate["minimum_protocol_version"][0] - if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']: - return CheckResult.PASSED - - #No cert specified so using default which can be less that tls 1.2 + # check if cloudfront_default_certificate is true then this could use less than tls 1.2 + if ("cloudfront_default_certificate" in viewer_certificate and viewer_certificate + ["cloudfront_default_certificate"][0] is not True) or ( + 'minimum_protocol_version' in viewer_certificate): + # is not using the default certificate + if 'minimum_protocol_version' in viewer_certificate: + protocol = viewer_certificate["minimum_protocol_version"][0] + # these protocol versions + if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']: + return CheckResult.PASSED + # No cert specified so using default which can be less that tls 1.2 return CheckResult.FAILED def get_inspected_key(self): @@ -34,4 +34,4 @@ return ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021'] -check = CloudFrontTLS12() \ No newline at end of file +check = CloudFrontTLS12()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py b/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py\n--- a/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py\n+++ b/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py\n@@ -12,18 +12,18 @@\n \n def scan_resource_conf(self, conf):\n if \"viewer_certificate\" in conf.keys():\n- # check if cloudfront_default_certificate is true then this could use less than tls 1.2\n viewer_certificate = conf[\"viewer_certificate\"][0]\n- if 'cloudfront_default_certificate' in viewer_certificate:\n- #is not using the default certificate\n- if viewer_certificate[\"cloudfront_default_certificate\"] is not True:\n- #these protocol versions\n- if \"minimum_protocol_version\" in viewer_certificate:\n- protocol=viewer_certificate[\"minimum_protocol_version\"][0]\n- if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']:\n- return CheckResult.PASSED\n-\n- #No cert specified so using default which can be less that tls 1.2\n+ # check if cloudfront_default_certificate is true then this could use less than tls 1.2\n+ if (\"cloudfront_default_certificate\" in viewer_certificate and viewer_certificate\n+ [\"cloudfront_default_certificate\"][0] is not True) or (\n+ 'minimum_protocol_version' in viewer_certificate):\n+ # is not using the default certificate\n+ if 'minimum_protocol_version' in viewer_certificate:\n+ protocol = viewer_certificate[\"minimum_protocol_version\"][0]\n+ # these protocol versions\n+ if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']:\n+ return CheckResult.PASSED\n+ # No cert specified so using default which can be less that tls 1.2\n return CheckResult.FAILED\n \n def get_inspected_key(self):\n@@ -34,4 +34,4 @@\n return ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']\n \n \n-check = CloudFrontTLS12()\n\\ No newline at end of file\n+check = CloudFrontTLS12()\n", "issue": "CKV_AWS_174 incorrect reporting\nCKV_AWS_174 is being triggered in our terraform code even though we have the viewer certificate set to use TLSv.1.2. Snippet of our code here:\r\n\r\nviewer_certificate {\r\n acm_certificate_arn = aws_acm_certificate.cert.arn\r\n ssl_support_method = \"sni-only\"\r\n minimum_protocol_version = \"TLSv1.2_2019\" \r\n}\r\n\r\n\r\nSteps to reproduce the behavior:\r\nRunning checkov on our terraform code\r\n\r\n**Expected behavior**\r\nThis check should be passed\r\n\r\n\r\n\r\n**Additional context**\r\nIt looks to me like the issue is in the code between lines 17 and 19. I dont think based on the terraform documentation and the if statements that it would ever pass if using an acm certificate\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/aws/CloudfrontTLS12.py\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass CloudFrontTLS12(BaseResourceValueCheck):\n def __init__(self):\n name = \"Verify CloudFront Distribution Viewer Certificate is using TLS v1.2\"\n id = \"CKV_AWS_174\"\n supported_resources = [\"aws_cloudfront_distribution\"]\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if \"viewer_certificate\" in conf.keys():\n # check if cloudfront_default_certificate is true then this could use less than tls 1.2\n viewer_certificate = conf[\"viewer_certificate\"][0]\n if 'cloudfront_default_certificate' in viewer_certificate:\n #is not using the default certificate\n if viewer_certificate[\"cloudfront_default_certificate\"] is not True:\n #these protocol versions\n if \"minimum_protocol_version\" in viewer_certificate:\n protocol=viewer_certificate[\"minimum_protocol_version\"][0]\n if protocol in ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']:\n return CheckResult.PASSED\n\n #No cert specified so using default which can be less that tls 1.2\n return CheckResult.FAILED\n\n def get_inspected_key(self):\n\n return \"viewer_certificate/[0]/minimum_protocol_version\"\n\n def get_expected_values(self):\n return ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021']\n\n\ncheck = CloudFrontTLS12()", "path": "checkov/terraform/checks/resource/aws/CloudfrontTLS12.py"}]}
1,217
552
gh_patches_debug_5802
rasdani/github-patches
git_diff
akvo__akvo-rsr-4094
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Investigate creation of duplicate user accounts with differently cased emails - [ ] Verify that lookups using email are using `__iexact` or something like that. - [ ] Figure out a plan for existing duplicates </issue> <code> [start of akvo/rest/views/utils.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 5 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 6 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 7 8 from django.conf import settings 9 from django.contrib.auth import get_user_model 10 from django.core.cache import cache 11 from django.utils.cache import get_cache_key, _generate_cache_header_key 12 from django.db import IntegrityError 13 14 15 def get_cached_data(request, key_prefix, data, serializer): 16 """Function to get serialized data from the cache based on the request.""" 17 cache_header_key = _generate_cache_header_key(key_prefix, request) 18 if cache.get(cache_header_key) is None: 19 cache.set(cache_header_key, [], None) 20 21 cache_key = get_cache_key(request, key_prefix) 22 cached_data = cache.get(cache_key, None) 23 cache_used = True 24 if not cached_data and data is not None: 25 cache_used = False 26 cached_data = serializer(data, many=True).data 27 cache.set(cache_key, cached_data) 28 29 return cached_data, cache_used 30 31 32 def set_cached_data(request, key_prefix, data): 33 """Function to save data to the cache based on the request.""" 34 35 cache_header_key = _generate_cache_header_key(key_prefix, request) 36 if cache.get(cache_header_key) is None: 37 cache.set(cache_header_key, [], None) 38 39 cache_key = get_cache_key(request, key_prefix) 40 cache.set(cache_key, data) 41 42 43 def get_qs_elements_for_page(qs, request, count): 44 """Return queryset elements to be shown on the current page""" 45 limit = int_or_none(request.GET.get('limit')) or settings.PROJECT_DIRECTORY_PAGE_SIZES[0] 46 limit = min(limit, settings.PROJECT_DIRECTORY_PAGE_SIZES[-1]) 47 max_page_number = 1 + int(count / limit) 48 page_number = min(max_page_number, int_or_none(request.GET.get('page')) or 1) 49 start = (page_number - 1) * limit 50 end = page_number * limit 51 return qs[start:end] 52 53 54 def int_or_none(value): 55 """Return int or None given a value.""" 56 try: 57 return int(value) 58 except Exception: 59 return None 60 61 62 def create_invited_user(email): 63 User = get_user_model() 64 # Check if the user already exists, based on the email address 65 try: 66 invited_user = User.objects.get(email=email) 67 except User.DoesNotExist: 68 try: 69 invited_user = User.objects.create_user(username=email, email=email) 70 except IntegrityError: 71 return None 72 return invited_user 73 [end of akvo/rest/views/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/views/utils.py b/akvo/rest/views/utils.py --- a/akvo/rest/views/utils.py +++ b/akvo/rest/views/utils.py @@ -63,7 +63,7 @@ User = get_user_model() # Check if the user already exists, based on the email address try: - invited_user = User.objects.get(email=email) + invited_user = User.objects.get(email__iexact=email) except User.DoesNotExist: try: invited_user = User.objects.create_user(username=email, email=email)
{"golden_diff": "diff --git a/akvo/rest/views/utils.py b/akvo/rest/views/utils.py\n--- a/akvo/rest/views/utils.py\n+++ b/akvo/rest/views/utils.py\n@@ -63,7 +63,7 @@\n User = get_user_model()\n # Check if the user already exists, based on the email address\n try:\n- invited_user = User.objects.get(email=email)\n+ invited_user = User.objects.get(email__iexact=email)\n except User.DoesNotExist:\n try:\n invited_user = User.objects.create_user(username=email, email=email)\n", "issue": "Investigate creation of duplicate user accounts with differently cased emails\n- [ ] Verify that lookups using email are using `__iexact` or something like that. \n- [ ] Figure out a plan for existing duplicates\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.utils.cache import get_cache_key, _generate_cache_header_key\nfrom django.db import IntegrityError\n\n\ndef get_cached_data(request, key_prefix, data, serializer):\n \"\"\"Function to get serialized data from the cache based on the request.\"\"\"\n cache_header_key = _generate_cache_header_key(key_prefix, request)\n if cache.get(cache_header_key) is None:\n cache.set(cache_header_key, [], None)\n\n cache_key = get_cache_key(request, key_prefix)\n cached_data = cache.get(cache_key, None)\n cache_used = True\n if not cached_data and data is not None:\n cache_used = False\n cached_data = serializer(data, many=True).data\n cache.set(cache_key, cached_data)\n\n return cached_data, cache_used\n\n\ndef set_cached_data(request, key_prefix, data):\n \"\"\"Function to save data to the cache based on the request.\"\"\"\n\n cache_header_key = _generate_cache_header_key(key_prefix, request)\n if cache.get(cache_header_key) is None:\n cache.set(cache_header_key, [], None)\n\n cache_key = get_cache_key(request, key_prefix)\n cache.set(cache_key, data)\n\n\ndef get_qs_elements_for_page(qs, request, count):\n \"\"\"Return queryset elements to be shown on the current page\"\"\"\n limit = int_or_none(request.GET.get('limit')) or settings.PROJECT_DIRECTORY_PAGE_SIZES[0]\n limit = min(limit, settings.PROJECT_DIRECTORY_PAGE_SIZES[-1])\n max_page_number = 1 + int(count / limit)\n page_number = min(max_page_number, int_or_none(request.GET.get('page')) or 1)\n start = (page_number - 1) * limit\n end = page_number * limit\n return qs[start:end]\n\n\ndef int_or_none(value):\n \"\"\"Return int or None given a value.\"\"\"\n try:\n return int(value)\n except Exception:\n return None\n\n\ndef create_invited_user(email):\n User = get_user_model()\n # Check if the user already exists, based on the email address\n try:\n invited_user = User.objects.get(email=email)\n except User.DoesNotExist:\n try:\n invited_user = User.objects.create_user(username=email, email=email)\n except IntegrityError:\n return None\n return invited_user\n", "path": "akvo/rest/views/utils.py"}]}
1,306
125
gh_patches_debug_7429
rasdani/github-patches
git_diff
cloudtools__troposphere-457
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Elasticsearch Domain DomainName shouldn't be required According to the CF documentation, `DomainName` isn't required: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html </issue> <code> [start of troposphere/elasticsearch.py] 1 # Copyright (c) 2012-2015, Mark Peek <[email protected]> 2 # All rights reserved. 3 # 4 # See LICENSE file for full license. 5 6 from . import AWSProperty, AWSObject 7 from .validators import boolean, integer, integer_range, positive_integer 8 9 VALID_VOLUME_TYPES = ('standard', 'gp2', 'io1') 10 11 try: 12 from awacs.aws import Policy 13 policytypes = (dict, Policy) 14 except ImportError: 15 policytypes = dict, 16 17 18 def validate_volume_type(volume_type): 19 """Validate VolumeType for ElasticsearchDomain""" 20 if volume_type not in VALID_VOLUME_TYPES: 21 raise ValueError("Elasticsearch Domain VolumeType must be one of: %s" % 22 ", ".join(VALID_VOLUME_TYPES)) 23 return volume_type 24 25 26 class EBSOptions(AWSProperty): 27 props = { 28 'EBSEnabled': (boolean, False), 29 'Iops': (positive_integer, False), 30 'VolumeSize': (integer, False), 31 'VolumeType': (validate_volume_type, False) 32 } 33 34 def validate(self): 35 volume_type = self.properties.get('VolumeType') 36 iops = self.properties.get('Iops') 37 if volume_type == 'io1' and not iops: 38 raise ValueError("Must specify Iops if VolumeType is 'io1'.") 39 40 41 class ElasticsearchClusterConfig(AWSProperty): 42 props = { 43 'DedicatedMasterCount': (integer, False), 44 'DedicatedMasterEnabled': (boolean, False), 45 'DedicatedMasterType': (basestring, False), 46 'InstanceCount': (integer, False), 47 'InstanceType': (basestring, False), 48 'ZoneAwarenessEnabled': (boolean, False) 49 } 50 51 52 class SnapshotOptions(AWSProperty): 53 props = { 54 'AutomatedSnapshotStartHour': (integer_range(0, 23), False) 55 } 56 57 58 class ElasticsearchDomain(AWSObject): 59 resource_type = "AWS::Elasticsearch::Domain" 60 61 props = { 62 'AccessPolicies': (policytypes, False), 63 'AdvancedOptions': (dict, False), 64 'DomainName': (basestring, True), 65 'EBSOptions': (EBSOptions, False), 66 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False), 67 'SnapshotOptions': (SnapshotOptions, False), 68 'Tags': (list, False) 69 } 70 [end of troposphere/elasticsearch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py --- a/troposphere/elasticsearch.py +++ b/troposphere/elasticsearch.py @@ -61,7 +61,7 @@ props = { 'AccessPolicies': (policytypes, False), 'AdvancedOptions': (dict, False), - 'DomainName': (basestring, True), + 'DomainName': (basestring, False), 'EBSOptions': (EBSOptions, False), 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False), 'SnapshotOptions': (SnapshotOptions, False),
{"golden_diff": "diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py\n--- a/troposphere/elasticsearch.py\n+++ b/troposphere/elasticsearch.py\n@@ -61,7 +61,7 @@\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n- 'DomainName': (basestring, True),\n+ 'DomainName': (basestring, False),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'SnapshotOptions': (SnapshotOptions, False),\n", "issue": "Elasticsearch Domain DomainName shouldn't be required\nAccording to the CF documentation, `DomainName` isn't required: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html\n\n", "before_files": [{"content": "# Copyright (c) 2012-2015, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSProperty, AWSObject\nfrom .validators import boolean, integer, integer_range, positive_integer\n\nVALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\ndef validate_volume_type(volume_type):\n \"\"\"Validate VolumeType for ElasticsearchDomain\"\"\"\n if volume_type not in VALID_VOLUME_TYPES:\n raise ValueError(\"Elasticsearch Domain VolumeType must be one of: %s\" %\n \", \".join(VALID_VOLUME_TYPES))\n return volume_type\n\n\nclass EBSOptions(AWSProperty):\n props = {\n 'EBSEnabled': (boolean, False),\n 'Iops': (positive_integer, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (validate_volume_type, False)\n }\n\n def validate(self):\n volume_type = self.properties.get('VolumeType')\n iops = self.properties.get('Iops')\n if volume_type == 'io1' and not iops:\n raise ValueError(\"Must specify Iops if VolumeType is 'io1'.\")\n\n\nclass ElasticsearchClusterConfig(AWSProperty):\n props = {\n 'DedicatedMasterCount': (integer, False),\n 'DedicatedMasterEnabled': (boolean, False),\n 'DedicatedMasterType': (basestring, False),\n 'InstanceCount': (integer, False),\n 'InstanceType': (basestring, False),\n 'ZoneAwarenessEnabled': (boolean, False)\n }\n\n\nclass SnapshotOptions(AWSProperty):\n props = {\n 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)\n }\n\n\nclass ElasticsearchDomain(AWSObject):\n resource_type = \"AWS::Elasticsearch::Domain\"\n\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n 'DomainName': (basestring, True),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'SnapshotOptions': (SnapshotOptions, False),\n 'Tags': (list, False)\n }\n", "path": "troposphere/elasticsearch.py"}]}
1,236
140
gh_patches_debug_6167
rasdani/github-patches
git_diff
mesonbuild__meson-2462
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> MSI installed meson fails to rerun in visual studio Initially, I ran `meson build` from the source code directory `xxx` to create the build directory. Later, if any `meson.build` files are modified, Visual studio fails to rerun Meson with the backtrace below. Meson is installed with MSI. It works with ninja as backend. It also works if meson isn't installed with MSI. It seems like `mesonscript` in `regen_checker` is invalid when meson is installed with MSI. ``` >meson.exe : error : unrecognized arguments: --internal regenerate C:\Users\niklas\Documents\git\xxx C:\Users\niklas\Documents\git\xxx 1> Traceback (most recent call last): 1> File "C:\Users\niklas\AppData\Local\Programs\Python\Python36-32\lib\site-packages\cx_Freeze\initscripts\__startup__.py", line 14, in run 1> module.run() 1> File "C:\Users\niklas\AppData\Local\Programs\Python\Python36-32\lib\site-packages\cx_Freeze\initscripts\Console.py", line 26, in run 1> exec(code, m.__dict__) 1> File "meson.py", line 37, in <module> 1> File "meson.py", line 34, in main 1> File "mesonbuild\mesonmain.py", line 311, in run 1> File "mesonbuild\mesonmain.py", line 278, in run_script_command 1> File "mesonbuild\scripts\regen_checker.py", line 56, in run 1> File "mesonbuild\scripts\regen_checker.py", line 42, in regen 1> File "C:\Users\niklas\AppData\Local\Programs\Python\Python36-32\lib\subprocess.py", line 291, in check_call 1> raise CalledProcessError(retcode, cmd) 1> subprocess.CalledProcessError: Command '['C:\\Program Files\\Meson\\meson.exe', 'C:\\Users\\niklas\\Documents\\git\\xxx\\meson', '--internal', 'regenerate', 'C:\\Users\\niklas\\Documents\\git\\xxx\\build', 'C:\\Users\\niklas\\Documents\\git\\xxx', '--backend=vs2015']' returned non-zero exit status 2. ``` </issue> <code> [start of mesonbuild/scripts/regen_checker.py] 1 # Copyright 2015-2016 The Meson development team 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import sys, os 16 import pickle, subprocess 17 18 # This could also be used for XCode. 19 20 def need_regen(regeninfo, regen_timestamp): 21 for i in regeninfo.depfiles: 22 curfile = os.path.join(regeninfo.build_dir, i) 23 curtime = os.stat(curfile).st_mtime 24 if curtime > regen_timestamp: 25 return True 26 # The timestamp file gets automatically deleted by MSBuild during a 'Clean' build. 27 # We must make sure to recreate it, even if we do not regenerate the solution. 28 # Otherwise, Visual Studio will always consider the REGEN project out of date. 29 print("Everything is up-to-date, regeneration of build files is not needed.") 30 from ..backend.vs2010backend import Vs2010Backend 31 Vs2010Backend.touch_regen_timestamp(regeninfo.build_dir) 32 return False 33 34 def regen(regeninfo, mesonscript, backend): 35 cmd = [sys.executable, 36 mesonscript, 37 '--internal', 38 'regenerate', 39 regeninfo.build_dir, 40 regeninfo.source_dir, 41 '--backend=' + backend] 42 subprocess.check_call(cmd) 43 44 def run(args): 45 private_dir = args[0] 46 dumpfile = os.path.join(private_dir, 'regeninfo.dump') 47 coredata = os.path.join(private_dir, 'coredata.dat') 48 with open(dumpfile, 'rb') as f: 49 regeninfo = pickle.load(f) 50 with open(coredata, 'rb') as f: 51 coredata = pickle.load(f) 52 mesonscript = coredata.meson_script_launcher 53 backend = coredata.get_builtin_option('backend') 54 regen_timestamp = os.stat(dumpfile).st_mtime 55 if need_regen(regeninfo, regen_timestamp): 56 regen(regeninfo, mesonscript, backend) 57 sys.exit(0) 58 59 if __name__ == '__main__': 60 run(sys.argv[1:]) 61 [end of mesonbuild/scripts/regen_checker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mesonbuild/scripts/regen_checker.py b/mesonbuild/scripts/regen_checker.py --- a/mesonbuild/scripts/regen_checker.py +++ b/mesonbuild/scripts/regen_checker.py @@ -32,9 +32,11 @@ return False def regen(regeninfo, mesonscript, backend): - cmd = [sys.executable, - mesonscript, - '--internal', + if sys.executable.lower().endswith('meson.exe'): + cmd_exe = [sys.executable] + else: + cmd_exe = [sys.executable, mesonscript] + cmd = cmd_exe + ['--internal', 'regenerate', regeninfo.build_dir, regeninfo.source_dir,
{"golden_diff": "diff --git a/mesonbuild/scripts/regen_checker.py b/mesonbuild/scripts/regen_checker.py\n--- a/mesonbuild/scripts/regen_checker.py\n+++ b/mesonbuild/scripts/regen_checker.py\n@@ -32,9 +32,11 @@\n return False\n \n def regen(regeninfo, mesonscript, backend):\n- cmd = [sys.executable,\n- mesonscript,\n- '--internal',\n+ if sys.executable.lower().endswith('meson.exe'):\n+ cmd_exe = [sys.executable]\n+ else:\n+ cmd_exe = [sys.executable, mesonscript]\n+ cmd = cmd_exe + ['--internal',\n 'regenerate',\n regeninfo.build_dir,\n regeninfo.source_dir,\n", "issue": "MSI installed meson fails to rerun in visual studio\nInitially, I ran `meson build` from the source code directory `xxx` to create the build directory.\r\nLater, if any `meson.build` files are modified, Visual studio fails to rerun Meson with the backtrace below. Meson is installed with MSI. It works with ninja as backend. It also works if meson isn't installed with MSI.\r\n\r\nIt seems like `mesonscript` in `regen_checker` is invalid when meson is installed with MSI.\r\n\r\n```\r\n>meson.exe : error : unrecognized arguments: --internal regenerate C:\\Users\\niklas\\Documents\\git\\xxx C:\\Users\\niklas\\Documents\\git\\xxx\r\n1> Traceback (most recent call last):\r\n1> File \"C:\\Users\\niklas\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\cx_Freeze\\initscripts\\__startup__.py\", line 14, in run\r\n1> module.run()\r\n1> File \"C:\\Users\\niklas\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\cx_Freeze\\initscripts\\Console.py\", line 26, in run\r\n1> exec(code, m.__dict__)\r\n1> File \"meson.py\", line 37, in <module>\r\n1> File \"meson.py\", line 34, in main\r\n1> File \"mesonbuild\\mesonmain.py\", line 311, in run\r\n1> File \"mesonbuild\\mesonmain.py\", line 278, in run_script_command\r\n1> File \"mesonbuild\\scripts\\regen_checker.py\", line 56, in run\r\n1> File \"mesonbuild\\scripts\\regen_checker.py\", line 42, in regen\r\n1> File \"C:\\Users\\niklas\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\subprocess.py\", line 291, in check_call\r\n1> raise CalledProcessError(retcode, cmd)\r\n1> subprocess.CalledProcessError: Command '['C:\\\\Program Files\\\\Meson\\\\meson.exe', 'C:\\\\Users\\\\niklas\\\\Documents\\\\git\\\\xxx\\\\meson', '--internal', 'regenerate', 'C:\\\\Users\\\\niklas\\\\Documents\\\\git\\\\xxx\\\\build', 'C:\\\\Users\\\\niklas\\\\Documents\\\\git\\\\xxx', '--backend=vs2015']' returned non-zero exit status 2.\r\n```\n", "before_files": [{"content": "# Copyright 2015-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys, os\nimport pickle, subprocess\n\n# This could also be used for XCode.\n\ndef need_regen(regeninfo, regen_timestamp):\n for i in regeninfo.depfiles:\n curfile = os.path.join(regeninfo.build_dir, i)\n curtime = os.stat(curfile).st_mtime\n if curtime > regen_timestamp:\n return True\n # The timestamp file gets automatically deleted by MSBuild during a 'Clean' build.\n # We must make sure to recreate it, even if we do not regenerate the solution.\n # Otherwise, Visual Studio will always consider the REGEN project out of date.\n print(\"Everything is up-to-date, regeneration of build files is not needed.\")\n from ..backend.vs2010backend import Vs2010Backend\n Vs2010Backend.touch_regen_timestamp(regeninfo.build_dir)\n return False\n\ndef regen(regeninfo, mesonscript, backend):\n cmd = [sys.executable,\n mesonscript,\n '--internal',\n 'regenerate',\n regeninfo.build_dir,\n regeninfo.source_dir,\n '--backend=' + backend]\n subprocess.check_call(cmd)\n\ndef run(args):\n private_dir = args[0]\n dumpfile = os.path.join(private_dir, 'regeninfo.dump')\n coredata = os.path.join(private_dir, 'coredata.dat')\n with open(dumpfile, 'rb') as f:\n regeninfo = pickle.load(f)\n with open(coredata, 'rb') as f:\n coredata = pickle.load(f)\n mesonscript = coredata.meson_script_launcher\n backend = coredata.get_builtin_option('backend')\n regen_timestamp = os.stat(dumpfile).st_mtime\n if need_regen(regeninfo, regen_timestamp):\n regen(regeninfo, mesonscript, backend)\n sys.exit(0)\n\nif __name__ == '__main__':\n run(sys.argv[1:])\n", "path": "mesonbuild/scripts/regen_checker.py"}]}
1,790
168
gh_patches_debug_5400
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2874
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider tgifridays is broken During the global build at 2021-05-26-14-42-23, spider **tgifridays** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tgifridays.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tgifridays.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tgifridays.geojson)) </issue> <code> [start of locations/spiders/tgifridays.py] 1 # -*- coding: utf-8 -*- 2 import datetime 3 import re 4 import json 5 6 import scrapy 7 from locations.items import GeojsonPointItem 8 from locations.hours import OpeningHours 9 10 11 DAY_MAPPING = { 12 'Monday': 'Mo', 13 'Tuesday': 'Tu', 14 'Wednesday': 'We', 15 'Thursday': 'Th', 16 'Friday': 'Fr', 17 'Saturday': 'Sa', 18 'Sunday': 'Su' 19 } 20 21 22 class TGIFridaySpider(scrapy.Spider): 23 download_delay = 0.2 24 name = "tgifridays" 25 item_attributes = { 'brand': "TGI Friday's" } 26 allowed_domains = ["tgifridays.com"] 27 start_urls = ( 28 'https://locations.tgifridays.com/sitemap.xml', 29 ) 30 31 def parse_hours(self, hours): 32 opening_hours = OpeningHours() 33 34 for hour in hours: 35 if hour["opens"] == "Closed": 36 continue 37 elif hour["closes"] == "Closed": 38 continue 39 else: 40 opening_hours.add_range( 41 day=hour["dayOfWeek"].replace('http://schema.org/', '')[:2], 42 open_time=hour["opens"], 43 close_time=hour["closes"], 44 time_format='%I:%M%p', 45 ) 46 47 return opening_hours.as_opening_hours() 48 49 def parse_store(self, response): 50 # The JSON blob has an extra "}\r\n" at the end 51 data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first()[:-3]) 52 53 properties = { 54 'addr_full': data['address']['streetAddress'], 55 'phone': data['telephone'], 56 'city': data['address']['addressLocality'], 57 'state': data['address']['addressRegion'], 58 'postcode': data['address']['postalCode'], 59 'country': data['address']['addressCountry'], 60 'ref': data['@id'], 61 'website': data['url'], 62 'lat': data['geo']['latitude'], 63 'lon': data['geo']['longitude'], 64 'name': data['name'], 65 } 66 67 hours = self.parse_hours(data.get("openingHoursSpecification", [])) 68 if hours: 69 properties["opening_hours"] = hours 70 71 yield GeojsonPointItem(**properties) 72 73 def parse(self, response): 74 response.selector.remove_namespaces() 75 city_urls = response.xpath('//url/loc/text()').extract() 76 for path in city_urls: 77 if path.count('/') == 5: 78 yield scrapy.Request( 79 path.strip(), 80 callback=self.parse_store, 81 ) 82 [end of locations/spiders/tgifridays.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/tgifridays.py b/locations/spiders/tgifridays.py --- a/locations/spiders/tgifridays.py +++ b/locations/spiders/tgifridays.py @@ -32,9 +32,9 @@ opening_hours = OpeningHours() for hour in hours: - if hour["opens"] == "Closed": + if hour["opens"] in ("Closed", ""): continue - elif hour["closes"] == "Closed": + elif hour["closes"] in ("Closed", ""): continue else: opening_hours.add_range(
{"golden_diff": "diff --git a/locations/spiders/tgifridays.py b/locations/spiders/tgifridays.py\n--- a/locations/spiders/tgifridays.py\n+++ b/locations/spiders/tgifridays.py\n@@ -32,9 +32,9 @@\n opening_hours = OpeningHours()\n \n for hour in hours:\n- if hour[\"opens\"] == \"Closed\":\n+ if hour[\"opens\"] in (\"Closed\", \"\"):\n continue\n- elif hour[\"closes\"] == \"Closed\":\n+ elif hour[\"closes\"] in (\"Closed\", \"\"):\n continue\n else:\n opening_hours.add_range(\n", "issue": "Spider tgifridays is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tgifridays** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tgifridays.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tgifridays.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tgifridays.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport datetime\nimport re\nimport json\n\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\n\nclass TGIFridaySpider(scrapy.Spider):\n download_delay = 0.2\n name = \"tgifridays\"\n item_attributes = { 'brand': \"TGI Friday's\" }\n allowed_domains = [\"tgifridays.com\"]\n start_urls = (\n 'https://locations.tgifridays.com/sitemap.xml',\n )\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n if hour[\"opens\"] == \"Closed\":\n continue\n elif hour[\"closes\"] == \"Closed\":\n continue\n else:\n opening_hours.add_range(\n day=hour[\"dayOfWeek\"].replace('http://schema.org/', '')[:2],\n open_time=hour[\"opens\"],\n close_time=hour[\"closes\"],\n time_format='%I:%M%p',\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n # The JSON blob has an extra \"}\\r\\n\" at the end\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()[:-3])\n\n properties = {\n 'addr_full': data['address']['streetAddress'],\n 'phone': data['telephone'],\n 'city': data['address']['addressLocality'],\n 'state': data['address']['addressRegion'],\n 'postcode': data['address']['postalCode'],\n 'country': data['address']['addressCountry'],\n 'ref': data['@id'],\n 'website': data['url'],\n 'lat': data['geo']['latitude'],\n 'lon': data['geo']['longitude'],\n 'name': data['name'],\n }\n\n hours = self.parse_hours(data.get(\"openingHoursSpecification\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path.count('/') == 5:\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n", "path": "locations/spiders/tgifridays.py"}]}
1,444
140
gh_patches_debug_20993
rasdani/github-patches
git_diff
dask__distributed-779
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> distributed-1.15.0rc1 seems wrongly requiring "futures" from a Python-3.6 installation Collecting futures (from distributed>=1.14; extra == "complete"->dask[complete]->-r C:\Winpython\basedir36 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 import os 4 from setuptools import setup 5 import sys 6 import versioneer 7 8 requires = open('requirements.txt').read().strip().split('\n') 9 10 setup(name='distributed', 11 version=versioneer.get_version(), 12 cmdclass=versioneer.get_cmdclass(), 13 description='Distributed computing', 14 url='https://distributed.readthedocs.io/en/latest/', 15 maintainer='Matthew Rocklin', 16 maintainer_email='[email protected]', 17 license='BSD', 18 package_data={ '': ['templates/index.html'], }, 19 include_package_data=True, 20 install_requires=requires, 21 packages=['distributed', 22 'distributed.bokeh', 23 'distributed.bokeh.background', 24 'distributed.bokeh.status', 25 'distributed.bokeh.tasks', 26 'distributed.bokeh.workers', 27 'distributed.cli', 28 'distributed.deploy', 29 'distributed.diagnostics', 30 'distributed.protocol', 31 'distributed.http'], 32 long_description=(open('README.md').read() if os.path.exists('README.md') 33 else ''), 34 entry_points=''' 35 [console_scripts] 36 dask-ssh=distributed.cli.dask_ssh:go 37 dask-submit=distributed.cli.dask_submit:go 38 dask-remote=distributed.cli.dask_remote:go 39 dask-scheduler=distributed.cli.dask_scheduler:go 40 dask-worker=distributed.cli.dask_worker:go 41 ''', 42 zip_safe=False) 43 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -6,6 +6,18 @@ import versioneer requires = open('requirements.txt').read().strip().split('\n') +install_requires = [] +extras_require = {} +for r in requires: + if ';' in r: + # requirements.txt conditional dependencies need to be reformatted for wheels + # to the form: `'[extra_name]:condition' : ['requirements']` + req, cond = r.split(';', 1) + cond = ':' + cond + cond_reqs = extras_require.setdefault(cond, []) + cond_reqs.append(req) + else: + install_requires.append(r) setup(name='distributed', version=versioneer.get_version(), @@ -17,7 +29,8 @@ license='BSD', package_data={ '': ['templates/index.html'], }, include_package_data=True, - install_requires=requires, + install_requires=install_requires, + extras_require=extras_require, packages=['distributed', 'distributed.bokeh', 'distributed.bokeh.background',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,6 +6,18 @@\n import versioneer\n \n requires = open('requirements.txt').read().strip().split('\\n')\n+install_requires = []\n+extras_require = {}\n+for r in requires:\n+ if ';' in r:\n+ # requirements.txt conditional dependencies need to be reformatted for wheels\n+ # to the form: `'[extra_name]:condition' : ['requirements']`\n+ req, cond = r.split(';', 1)\n+ cond = ':' + cond\n+ cond_reqs = extras_require.setdefault(cond, [])\n+ cond_reqs.append(req)\n+ else:\n+ install_requires.append(r)\n \n setup(name='distributed',\n version=versioneer.get_version(),\n@@ -17,7 +29,8 @@\n license='BSD',\n package_data={ '': ['templates/index.html'], },\n include_package_data=True,\n- install_requires=requires,\n+ install_requires=install_requires,\n+ extras_require=extras_require,\n packages=['distributed',\n 'distributed.bokeh',\n 'distributed.bokeh.background',\n", "issue": "distributed-1.15.0rc1 seems wrongly requiring \"futures\" from a Python-3.6 installation\nCollecting futures (from distributed>=1.14; extra == \"complete\"->dask[complete]->-r C:\\Winpython\\basedir36\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup\nimport sys\nimport versioneer\n\nrequires = open('requirements.txt').read().strip().split('\\n')\n\nsetup(name='distributed',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description='Distributed computing',\n url='https://distributed.readthedocs.io/en/latest/',\n maintainer='Matthew Rocklin',\n maintainer_email='[email protected]',\n license='BSD',\n package_data={ '': ['templates/index.html'], },\n include_package_data=True,\n install_requires=requires,\n packages=['distributed',\n 'distributed.bokeh',\n 'distributed.bokeh.background',\n 'distributed.bokeh.status',\n 'distributed.bokeh.tasks',\n 'distributed.bokeh.workers',\n 'distributed.cli',\n 'distributed.deploy',\n 'distributed.diagnostics',\n 'distributed.protocol',\n 'distributed.http'],\n long_description=(open('README.md').read() if os.path.exists('README.md')\n else ''),\n entry_points='''\n [console_scripts]\n dask-ssh=distributed.cli.dask_ssh:go\n dask-submit=distributed.cli.dask_submit:go\n dask-remote=distributed.cli.dask_remote:go\n dask-scheduler=distributed.cli.dask_scheduler:go\n dask-worker=distributed.cli.dask_worker:go\n ''',\n zip_safe=False)\n", "path": "setup.py"}]}
973
250
gh_patches_debug_13258
rasdani/github-patches
git_diff
lutris__lutris-2955
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PCSX2 Runner: Add config path/file options (Feature request) It would be nice to have a way to specify a config file as for example some games run better with a multi-threaded microVU than others. It would also enable to have different window sizes set for those seeking square pixels as some NTSC games run at 640x448 and others use 512x448. Same goes for PAL region games. :slightly_smiling_face: The command line is: `PCSX2 --cfg=<str>` I'm absolutely fine if you put it on low priority as probably only few people use PCSX2 anyways. :wink: </issue> <code> [start of lutris/runners/pcsx2.py] 1 # Standard Library 2 from gettext import gettext as _ 3 4 # Lutris Modules 5 from lutris.runners.runner import Runner 6 from lutris.util import system 7 8 9 class pcsx2(Runner): 10 human_name = _("PCSX2") 11 description = _("PlayStation 2 emulator") 12 platforms = [_("Sony PlayStation 2")] 13 runnable_alone = True 14 runner_executable = "pcsx2/PCSX2" 15 game_options = [{ 16 "option": "main_file", 17 "type": "file", 18 "label": _("ISO file"), 19 "default_path": "game_path", 20 }] 21 22 runner_options = [ 23 { 24 "option": "fullscreen", 25 "type": "bool", 26 "label": _("Fullscreen"), 27 "default": False, 28 }, 29 { 30 "option": "full_boot", 31 "type": "bool", 32 "label": _("Fullboot"), 33 "default": False 34 }, 35 { 36 "option": "nogui", 37 "type": "bool", 38 "label": _("No GUI"), 39 "default": False 40 }, 41 { 42 "option": "config_file", 43 "type": "file", 44 "label": _("Custom config file"), 45 "advanced": True, 46 }, 47 { 48 "option": "config_path", 49 "type": "directory_chooser", 50 "label": _("Custom config path"), 51 "advanced": True, 52 }, 53 ] 54 55 def play(self): 56 arguments = [self.get_executable()] 57 58 if self.runner_config.get("fullscreen"): 59 arguments.append("--fullscreen") 60 if self.runner_config.get("full_boot"): 61 arguments.append("--fullboot") 62 if self.runner_config.get("nogui"): 63 arguments.append("--nogui") 64 if self.runner_config.get("config_file"): 65 arguments.append("--cfg=%s", self.runner_config["config_file"]) 66 if self.runner_config.get("config_path"): 67 arguments.append("--cfgpath=%s", self.runner_config["config_path"]) 68 69 iso = self.game_config.get("main_file") or "" 70 if not system.path_exists(iso): 71 return {"error": "FILE_NOT_FOUND", "file": iso} 72 arguments.append(iso) 73 return {"command": arguments} 74 [end of lutris/runners/pcsx2.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lutris/runners/pcsx2.py b/lutris/runners/pcsx2.py --- a/lutris/runners/pcsx2.py +++ b/lutris/runners/pcsx2.py @@ -62,9 +62,9 @@ if self.runner_config.get("nogui"): arguments.append("--nogui") if self.runner_config.get("config_file"): - arguments.append("--cfg=%s", self.runner_config["config_file"]) + arguments.append("--cfg={}".format(self.runner_config["config_file"])) if self.runner_config.get("config_path"): - arguments.append("--cfgpath=%s", self.runner_config["config_path"]) + arguments.append("--cfgpath={}".format(self.runner_config["config_path"])) iso = self.game_config.get("main_file") or "" if not system.path_exists(iso):
{"golden_diff": "diff --git a/lutris/runners/pcsx2.py b/lutris/runners/pcsx2.py\n--- a/lutris/runners/pcsx2.py\n+++ b/lutris/runners/pcsx2.py\n@@ -62,9 +62,9 @@\n if self.runner_config.get(\"nogui\"):\n arguments.append(\"--nogui\")\n if self.runner_config.get(\"config_file\"):\n- arguments.append(\"--cfg=%s\", self.runner_config[\"config_file\"])\n+ arguments.append(\"--cfg={}\".format(self.runner_config[\"config_file\"]))\n if self.runner_config.get(\"config_path\"):\n- arguments.append(\"--cfgpath=%s\", self.runner_config[\"config_path\"])\n+ arguments.append(\"--cfgpath={}\".format(self.runner_config[\"config_path\"]))\n \n iso = self.game_config.get(\"main_file\") or \"\"\n if not system.path_exists(iso):\n", "issue": "PCSX2 Runner: Add config path/file options (Feature request)\nIt would be nice to have a way to specify a config file as for example some games run better with a multi-threaded microVU than others. It would also enable to have different window sizes set for those seeking square pixels as some NTSC games run at 640x448 and others use 512x448. Same goes for PAL region games. :slightly_smiling_face: \r\n\r\nThe command line is: `PCSX2 --cfg=<str>`\r\n\r\nI'm absolutely fine if you put it on low priority as probably only few people use PCSX2 anyways. :wink: \n", "before_files": [{"content": "# Standard Library\nfrom gettext import gettext as _\n\n# Lutris Modules\nfrom lutris.runners.runner import Runner\nfrom lutris.util import system\n\n\nclass pcsx2(Runner):\n human_name = _(\"PCSX2\")\n description = _(\"PlayStation 2 emulator\")\n platforms = [_(\"Sony PlayStation 2\")]\n runnable_alone = True\n runner_executable = \"pcsx2/PCSX2\"\n game_options = [{\n \"option\": \"main_file\",\n \"type\": \"file\",\n \"label\": _(\"ISO file\"),\n \"default_path\": \"game_path\",\n }]\n\n runner_options = [\n {\n \"option\": \"fullscreen\",\n \"type\": \"bool\",\n \"label\": _(\"Fullscreen\"),\n \"default\": False,\n },\n {\n \"option\": \"full_boot\",\n \"type\": \"bool\",\n \"label\": _(\"Fullboot\"),\n \"default\": False\n },\n {\n \"option\": \"nogui\",\n \"type\": \"bool\",\n \"label\": _(\"No GUI\"),\n \"default\": False\n },\n {\n \"option\": \"config_file\",\n \"type\": \"file\",\n \"label\": _(\"Custom config file\"),\n \"advanced\": True,\n },\n {\n \"option\": \"config_path\",\n \"type\": \"directory_chooser\",\n \"label\": _(\"Custom config path\"),\n \"advanced\": True,\n },\n ]\n\n def play(self):\n arguments = [self.get_executable()]\n\n if self.runner_config.get(\"fullscreen\"):\n arguments.append(\"--fullscreen\")\n if self.runner_config.get(\"full_boot\"):\n arguments.append(\"--fullboot\")\n if self.runner_config.get(\"nogui\"):\n arguments.append(\"--nogui\")\n if self.runner_config.get(\"config_file\"):\n arguments.append(\"--cfg=%s\", self.runner_config[\"config_file\"])\n if self.runner_config.get(\"config_path\"):\n arguments.append(\"--cfgpath=%s\", self.runner_config[\"config_path\"])\n\n iso = self.game_config.get(\"main_file\") or \"\"\n if not system.path_exists(iso):\n return {\"error\": \"FILE_NOT_FOUND\", \"file\": iso}\n arguments.append(iso)\n return {\"command\": arguments}\n", "path": "lutris/runners/pcsx2.py"}]}
1,299
191
gh_patches_debug_2612
rasdani/github-patches
git_diff
scikit-hep__pyhf-307
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add --version flag to pyhf CLI # Description As [suggested by Lukas](https://github.com/diana-hep/pyhf/pull/304#issuecomment-428856809), adding a `--version` flag to the pyhf CLI could be useful. </issue> <code> [start of pyhf/commandline.py] 1 import logging 2 logging.basicConfig() 3 log = logging.getLogger(__name__) 4 5 import click 6 import json 7 import os 8 import jsonpatch 9 import sys 10 11 from . import readxml 12 from . import writexml 13 from .utils import runOnePoint 14 from .pdf import Model 15 16 17 @click.group(context_settings=dict(help_option_names=['-h', '--help'])) 18 def pyhf(): 19 pass 20 21 @pyhf.command() 22 @click.argument('entrypoint-xml', type=click.Path(exists=True)) 23 @click.option('--basedir', help='The base directory for the XML files to point relative to.', type=click.Path(exists=True), default=os.getcwd()) 24 @click.option('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None) 25 @click.option('--track-progress/--hide-progress', default=True) 26 def xml2json(entrypoint_xml, basedir, output_file, track_progress): 27 """ Entrypoint XML: The top-level XML file for the PDF definition. """ 28 spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress) 29 if output_file is None: 30 print(json.dumps(spec, indent=4, sort_keys=True)) 31 else: 32 with open(output_file, 'w+') as out_file: 33 json.dump(spec, out_file, indent=4, sort_keys=True) 34 log.debug("Written to {0:s}".format(output_file)) 35 sys.exit(0) 36 37 @pyhf.command() 38 @click.argument('workspace', default='-') 39 @click.argument('xmlfile', default='-') 40 @click.option('--specroot', default=click.Path(exists=True)) 41 @click.option('--dataroot', default=click.Path(exists=True)) 42 def json2xml(workspace, xmlfile, specroot, dataroot): 43 with click.open_file(workspace, 'r') as specstream: 44 d = json.load(specstream) 45 with click.open_file(xmlfile, 'w') as outstream: 46 outstream.write(writexml.writexml(d, specroot, dataroot,'').decode('utf-8')) 47 sys.exit(0) 48 49 @pyhf.command() 50 @click.argument('workspace', default='-') 51 @click.option('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None) 52 @click.option('--measurement', default=None) 53 @click.option('-p', '--patch', multiple=True) 54 @click.option('--qualify-names/--no-qualify-names', default=False) 55 def cls(workspace, output_file, measurement, qualify_names, patch): 56 with click.open_file(workspace, 'r') as specstream: 57 d = json.load(specstream) 58 measurements = d['toplvl']['measurements'] 59 measurement_names = [m['name'] for m in measurements] 60 measurement_index = 0 61 log.debug('measurements defined:\n\t{0:s}'.format('\n\t'.join(measurement_names))) 62 if measurement and measurement not in measurement_names: 63 log.error('no measurement by name \'{0:s}\' exists, pick from one of the valid ones above'.format(measurement)) 64 sys.exit(1) 65 else: 66 if not measurement and len(measurements) > 1: 67 log.warning('multiple measurements defined. Taking the first measurement.') 68 measurement_index = 0 69 elif measurement: 70 measurement_index = measurement_names.index(measurement) 71 72 log.debug('calculating CLs for measurement {0:s}'.format(measurements[measurement_index]['name'])) 73 spec = {'channels':d['channels']} 74 for p in patch: 75 with click.open_file(p, 'r') as read_file: 76 p = jsonpatch.JsonPatch(json.loads(read_file.read())) 77 spec = p.apply(spec) 78 p = Model(spec, poiname=measurements[measurement_index]['config']['poi'], qualify_names=qualify_names) 79 result = runOnePoint(1.0, sum((d['data'][c['name']] for c in d['channels']),[]) + p.config.auxdata, p) 80 result = {'CLs_obs': result[-2].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()} 81 if output_file is None: 82 print(json.dumps(result, indent=4, sort_keys=True)) 83 else: 84 with open(output_file, 'w+') as out_file: 85 json.dump(result, out_file, indent=4, sort_keys=True) 86 log.debug("Written to {0:s}".format(output_file)) 87 sys.exit(0) 88 [end of pyhf/commandline.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyhf/commandline.py b/pyhf/commandline.py --- a/pyhf/commandline.py +++ b/pyhf/commandline.py @@ -12,9 +12,11 @@ from . import writexml from .utils import runOnePoint from .pdf import Model +from .version import __version__ @click.group(context_settings=dict(help_option_names=['-h', '--help'])) [email protected]_option(version=__version__) def pyhf(): pass
{"golden_diff": "diff --git a/pyhf/commandline.py b/pyhf/commandline.py\n--- a/pyhf/commandline.py\n+++ b/pyhf/commandline.py\n@@ -12,9 +12,11 @@\n from . import writexml\n from .utils import runOnePoint\n from .pdf import Model\n+from .version import __version__\n \n \n @click.group(context_settings=dict(help_option_names=['-h', '--help']))\[email protected]_option(version=__version__)\n def pyhf():\n pass\n", "issue": "Add --version flag to pyhf CLI\n# Description\r\n\r\nAs [suggested by Lukas](https://github.com/diana-hep/pyhf/pull/304#issuecomment-428856809), adding a `--version` flag to the pyhf CLI could be useful.\n", "before_files": [{"content": "import logging\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\nimport click\nimport json\nimport os\nimport jsonpatch\nimport sys\n\nfrom . import readxml\nfrom . import writexml\nfrom .utils import runOnePoint\nfrom .pdf import Model\n\n\[email protected](context_settings=dict(help_option_names=['-h', '--help']))\ndef pyhf():\n pass\n\[email protected]()\[email protected]('entrypoint-xml', type=click.Path(exists=True))\[email protected]('--basedir', help='The base directory for the XML files to point relative to.', type=click.Path(exists=True), default=os.getcwd())\[email protected]('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)\[email protected]('--track-progress/--hide-progress', default=True)\ndef xml2json(entrypoint_xml, basedir, output_file, track_progress):\n \"\"\" Entrypoint XML: The top-level XML file for the PDF definition. \"\"\"\n spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)\n if output_file is None:\n print(json.dumps(spec, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(spec, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n sys.exit(0)\n\[email protected]()\[email protected]('workspace', default='-')\[email protected]('xmlfile', default='-')\[email protected]('--specroot', default=click.Path(exists=True))\[email protected]('--dataroot', default=click.Path(exists=True))\ndef json2xml(workspace, xmlfile, specroot, dataroot):\n with click.open_file(workspace, 'r') as specstream:\n d = json.load(specstream)\n with click.open_file(xmlfile, 'w') as outstream:\n outstream.write(writexml.writexml(d, specroot, dataroot,'').decode('utf-8'))\n sys.exit(0)\n\[email protected]()\[email protected]('workspace', default='-')\[email protected]('--output-file', help='The location of the output json file. If not specified, prints to screen.', default=None)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--qualify-names/--no-qualify-names', default=False)\ndef cls(workspace, output_file, measurement, qualify_names, patch):\n with click.open_file(workspace, 'r') as specstream:\n d = json.load(specstream)\n measurements = d['toplvl']['measurements']\n measurement_names = [m['name'] for m in measurements]\n measurement_index = 0\n log.debug('measurements defined:\\n\\t{0:s}'.format('\\n\\t'.join(measurement_names)))\n if measurement and measurement not in measurement_names:\n log.error('no measurement by name \\'{0:s}\\' exists, pick from one of the valid ones above'.format(measurement))\n sys.exit(1)\n else:\n if not measurement and len(measurements) > 1:\n log.warning('multiple measurements defined. Taking the first measurement.')\n measurement_index = 0\n elif measurement:\n measurement_index = measurement_names.index(measurement)\n\n log.debug('calculating CLs for measurement {0:s}'.format(measurements[measurement_index]['name']))\n spec = {'channels':d['channels']}\n for p in patch:\n with click.open_file(p, 'r') as read_file:\n p = jsonpatch.JsonPatch(json.loads(read_file.read()))\n spec = p.apply(spec)\n p = Model(spec, poiname=measurements[measurement_index]['config']['poi'], qualify_names=qualify_names)\n result = runOnePoint(1.0, sum((d['data'][c['name']] for c in d['channels']),[]) + p.config.auxdata, p)\n result = {'CLs_obs': result[-2].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}\n if output_file is None:\n print(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n sys.exit(0)\n", "path": "pyhf/commandline.py"}]}
1,730
108
gh_patches_debug_33199
rasdani/github-patches
git_diff
python-poetry__poetry-1395
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> poetry shell does not activate virtualenv <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate. <!-- Once those are done, if you're able to fill in the following list with your information, it'd be very helpful to whoever handles the issue. --> - **OS version and name**: Mac OS X, High Sierra - **Poetry version**: 0.12.5 ## Issue Similar to ```pipenv shell```, I would have expected that when running ```poetry shell``` the virtualenv gets activated, but apparently this is not the case... ```console ➜ which python /Users/timon/.pyenv/shims/python ➜ poetry shell Spawning shell within /Users/timon/Library/Caches/pypoetry/virtualenvs/YOLO-SAR-py3.7 ➜ which python /Users/timon/.pyenv/shims/python ➜ source /Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/activate ➜ which python /Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/python ``` for comparison ```console ➜ poetry run which python /Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/python ``` Am I misunderstanding something and this is expected behaviour or is it a bug? Thanks a lot already for your time :) </issue> <code> [start of poetry/utils/shell.py] 1 import os 2 3 from shellingham import detect_shell 4 from shellingham import ShellDetectionFailure 5 6 7 class Shell: 8 """ 9 Represents the current shell. 10 """ 11 12 _shell = None 13 14 def __init__(self, name, path): # type: (str, str) -> None 15 self._name = name 16 self._path = path 17 18 @property 19 def name(self): # type: () -> str 20 return self._name 21 22 @property 23 def path(self): # type: () -> str 24 return self._path 25 26 @classmethod 27 def get(cls): # type: () -> Shell 28 """ 29 Retrieve the current shell. 30 """ 31 if cls._shell is not None: 32 return cls._shell 33 34 try: 35 name, path = detect_shell(os.getpid()) 36 except (RuntimeError, ShellDetectionFailure): 37 raise RuntimeError("Unable to detect the current shell.") 38 39 cls._shell = cls(name, path) 40 41 return cls._shell 42 43 def __repr__(self): # type: () -> str 44 return '{}("{}", "{}")'.format(self.__class__.__name__, self._name, self._path) 45 [end of poetry/utils/shell.py] [start of poetry/console/commands/shell.py] 1 import sys 2 3 from os import environ 4 from distutils.util import strtobool 5 6 from .env_command import EnvCommand 7 8 9 class ShellCommand(EnvCommand): 10 11 name = "shell" 12 description = "Spawns a shell within the virtual environment." 13 14 help = """The <info>shell</> command spawns a shell, according to the 15 <comment>$SHELL</> environment variable, within the virtual environment. 16 If one doesn't exist yet, it will be created. 17 """ 18 19 def handle(self): 20 from poetry.utils.shell import Shell 21 22 # Check if it's already activated or doesn't exist and won't be created 23 venv_activated = strtobool(environ.get("POETRY_ACTIVE", "0")) or getattr( 24 sys, "real_prefix", sys.prefix 25 ) == str(self.env.path) 26 if venv_activated: 27 self.line( 28 "Virtual environment already activated: " 29 "<info>{}</>".format(self.env.path) 30 ) 31 32 return 33 34 self.line("Spawning shell within <info>{}</>".format(self.env.path)) 35 36 # Setting this to avoid spawning unnecessary nested shells 37 environ["POETRY_ACTIVE"] = "1" 38 shell = Shell.get() 39 self.env.execute(shell.path) 40 environ.pop("POETRY_ACTIVE") 41 [end of poetry/console/commands/shell.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/poetry/console/commands/shell.py b/poetry/console/commands/shell.py --- a/poetry/console/commands/shell.py +++ b/poetry/console/commands/shell.py @@ -36,5 +36,5 @@ # Setting this to avoid spawning unnecessary nested shells environ["POETRY_ACTIVE"] = "1" shell = Shell.get() - self.env.execute(shell.path) + shell.activate(self.env) environ.pop("POETRY_ACTIVE") diff --git a/poetry/utils/shell.py b/poetry/utils/shell.py --- a/poetry/utils/shell.py +++ b/poetry/utils/shell.py @@ -1,8 +1,16 @@ import os +import signal +import sys +import pexpect + +from clikit.utils.terminal import Terminal from shellingham import detect_shell from shellingham import ShellDetectionFailure +from ._compat import WINDOWS +from .env import VirtualEnv + class Shell: """ @@ -40,5 +48,51 @@ return cls._shell + def activate(self, env): # type: (VirtualEnv) -> None + if WINDOWS: + return env.execute(self.path) + + terminal = Terminal() + with env.temp_environ(): + c = pexpect.spawn( + self._path, ["-i"], dimensions=(terminal.height, terminal.width) + ) + + c.setecho(False) + activate_script = self._get_activate_script() + bin_dir = "Scripts" if WINDOWS else "bin" + activate_path = env.path / bin_dir / activate_script + c.sendline("{} {}".format(self._get_source_command(), activate_path)) + + def resize(sig, data): + terminal = Terminal() + c.setwinsize(terminal.height, terminal.width) + + signal.signal(signal.SIGWINCH, resize) + + # Interact with the new shell. + c.interact(escape_character=None) + c.close() + + sys.exit(c.exitstatus) + + def _get_activate_script(self): + if "fish" == self._name: + suffix = ".fish" + elif "csh" == self._name: + suffix = ".csh" + else: + suffix = "" + + return "activate" + suffix + + def _get_source_command(self): + if "fish" == self._name: + return "source" + elif "csh" == self._name: + return "source" + + return "." + def __repr__(self): # type: () -> str return '{}("{}", "{}")'.format(self.__class__.__name__, self._name, self._path)
{"golden_diff": "diff --git a/poetry/console/commands/shell.py b/poetry/console/commands/shell.py\n--- a/poetry/console/commands/shell.py\n+++ b/poetry/console/commands/shell.py\n@@ -36,5 +36,5 @@\n # Setting this to avoid spawning unnecessary nested shells\n environ[\"POETRY_ACTIVE\"] = \"1\"\n shell = Shell.get()\n- self.env.execute(shell.path)\n+ shell.activate(self.env)\n environ.pop(\"POETRY_ACTIVE\")\ndiff --git a/poetry/utils/shell.py b/poetry/utils/shell.py\n--- a/poetry/utils/shell.py\n+++ b/poetry/utils/shell.py\n@@ -1,8 +1,16 @@\n import os\n+import signal\n+import sys\n \n+import pexpect\n+\n+from clikit.utils.terminal import Terminal\n from shellingham import detect_shell\n from shellingham import ShellDetectionFailure\n \n+from ._compat import WINDOWS\n+from .env import VirtualEnv\n+\n \n class Shell:\n \"\"\"\n@@ -40,5 +48,51 @@\n \n return cls._shell\n \n+ def activate(self, env): # type: (VirtualEnv) -> None\n+ if WINDOWS:\n+ return env.execute(self.path)\n+\n+ terminal = Terminal()\n+ with env.temp_environ():\n+ c = pexpect.spawn(\n+ self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n+ )\n+\n+ c.setecho(False)\n+ activate_script = self._get_activate_script()\n+ bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n+ activate_path = env.path / bin_dir / activate_script\n+ c.sendline(\"{} {}\".format(self._get_source_command(), activate_path))\n+\n+ def resize(sig, data):\n+ terminal = Terminal()\n+ c.setwinsize(terminal.height, terminal.width)\n+\n+ signal.signal(signal.SIGWINCH, resize)\n+\n+ # Interact with the new shell.\n+ c.interact(escape_character=None)\n+ c.close()\n+\n+ sys.exit(c.exitstatus)\n+\n+ def _get_activate_script(self):\n+ if \"fish\" == self._name:\n+ suffix = \".fish\"\n+ elif \"csh\" == self._name:\n+ suffix = \".csh\"\n+ else:\n+ suffix = \"\"\n+\n+ return \"activate\" + suffix\n+\n+ def _get_source_command(self):\n+ if \"fish\" == self._name:\n+ return \"source\"\n+ elif \"csh\" == self._name:\n+ return \"source\"\n+\n+ return \".\"\n+\n def __repr__(self): # type: () -> str\n return '{}(\"{}\", \"{}\")'.format(self.__class__.__name__, self._name, self._path)\n", "issue": "poetry shell does not activate virtualenv \n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: Mac OS X, High Sierra\r\n- **Poetry version**: 0.12.5\r\n\r\n## Issue\r\nSimilar to ```pipenv shell```, I would have expected that when running ```poetry shell``` the virtualenv gets activated, but apparently this is not the case...\r\n\r\n\r\n```console\r\n\u279c which python\r\n/Users/timon/.pyenv/shims/python\r\n\u279c poetry shell\r\nSpawning shell within /Users/timon/Library/Caches/pypoetry/virtualenvs/YOLO-SAR-py3.7\r\n\u279c which python\r\n/Users/timon/.pyenv/shims/python\r\n\u279c source /Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/activate\r\n\u279c which python\r\n/Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/python\r\n```\r\n\r\nfor comparison\r\n```console\r\n\u279c poetry run which python\r\n/Users/timon/Library/Caches/pypoetry/virtualenvs/yolo-sar-py3.7/bin/python\r\n```\r\n\r\n\r\nAm I misunderstanding something and this is expected behaviour or is it a bug? \r\n\r\nThanks a lot already for your time :)\n", "before_files": [{"content": "import os\n\nfrom shellingham import detect_shell\nfrom shellingham import ShellDetectionFailure\n\n\nclass Shell:\n \"\"\"\n Represents the current shell.\n \"\"\"\n\n _shell = None\n\n def __init__(self, name, path): # type: (str, str) -> None\n self._name = name\n self._path = path\n\n @property\n def name(self): # type: () -> str\n return self._name\n\n @property\n def path(self): # type: () -> str\n return self._path\n\n @classmethod\n def get(cls): # type: () -> Shell\n \"\"\"\n Retrieve the current shell.\n \"\"\"\n if cls._shell is not None:\n return cls._shell\n\n try:\n name, path = detect_shell(os.getpid())\n except (RuntimeError, ShellDetectionFailure):\n raise RuntimeError(\"Unable to detect the current shell.\")\n\n cls._shell = cls(name, path)\n\n return cls._shell\n\n def __repr__(self): # type: () -> str\n return '{}(\"{}\", \"{}\")'.format(self.__class__.__name__, self._name, self._path)\n", "path": "poetry/utils/shell.py"}, {"content": "import sys\n\nfrom os import environ\nfrom distutils.util import strtobool\n\nfrom .env_command import EnvCommand\n\n\nclass ShellCommand(EnvCommand):\n\n name = \"shell\"\n description = \"Spawns a shell within the virtual environment.\"\n\n help = \"\"\"The <info>shell</> command spawns a shell, according to the\n<comment>$SHELL</> environment variable, within the virtual environment.\nIf one doesn't exist yet, it will be created.\n\"\"\"\n\n def handle(self):\n from poetry.utils.shell import Shell\n\n # Check if it's already activated or doesn't exist and won't be created\n venv_activated = strtobool(environ.get(\"POETRY_ACTIVE\", \"0\")) or getattr(\n sys, \"real_prefix\", sys.prefix\n ) == str(self.env.path)\n if venv_activated:\n self.line(\n \"Virtual environment already activated: \"\n \"<info>{}</>\".format(self.env.path)\n )\n\n return\n\n self.line(\"Spawning shell within <info>{}</>\".format(self.env.path))\n\n # Setting this to avoid spawning unnecessary nested shells\n environ[\"POETRY_ACTIVE\"] = \"1\"\n shell = Shell.get()\n self.env.execute(shell.path)\n environ.pop(\"POETRY_ACTIVE\")\n", "path": "poetry/console/commands/shell.py"}]}
1,629
624
gh_patches_debug_15549
rasdani/github-patches
git_diff
freedomofpress__securedrop-5674
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dual distro support broke "securedrop-admin verify" ## Description When adding support for Focal to the configuration tests in #5529, a check of the `MOLECULE_SCENARIO_NAME` environment variable broke `securedrop-admin verify`, where it's not set. ## Steps to Reproduce On an admin workstation: - Run `securedrop-admin setup -t` - Run `securedrop-admin verify` ## Expected Behavior That the configuration tests would run. ## Actual Behavior You get an error [here](https://github.com/freedomofpress/securedrop/blob/76d133a7e5962f8d904e507d93e6a61575358eeb/molecule/testinfra/conftest.py#L31) saying `'NoneType' object has no attribute 'endswith'`. ## Comments Should probably check if it's `None` or just add `""` as the default in the `os.environ.get` call. </issue> <code> [start of molecule/testinfra/conftest.py] 1 """ 2 Configuration for TestInfra test suite for SecureDrop. 3 Handles importing host-specific test vars, so test functions 4 can be reused across multiple hosts, with varied targets. 5 6 Vars should be placed in `testinfra/vars/<hostname>.yml`. 7 """ 8 9 import io 10 import os 11 import yaml 12 import testutils 13 14 # The config tests target staging by default. It's possible to override 15 # for e.g. prod, but the associated vars files are not yet ported. 16 target_host = os.environ.get('SECUREDROP_TESTINFRA_TARGET_HOST', 'staging') 17 18 19 def securedrop_import_testinfra_vars(hostname, with_header=False): 20 """ 21 Import vars from a YAML file to populate tests with host-specific 22 values used in checks. For instance, the SecureDrop docroot will 23 be under /vagrant in development, but /var/www/securedrop in staging. 24 25 Vars must be stored in `testinfra/vars/<hostname>.yml`. 26 """ 27 filepath = os.path.join(os.path.dirname(__file__), "vars", hostname+".yml") 28 with io.open(filepath, 'r') as f: 29 hostvars = yaml.safe_load(f) 30 31 if os.environ.get("MOLECULE_SCENARIO_NAME").endswith("focal"): 32 hostvars['securedrop_venv_site_packages'] = hostvars["securedrop_venv_site_packages"].format("3.8") # noqa: E501 33 hostvars['python_version'] = "3.8" 34 else: 35 hostvars['securedrop_venv_site_packages'] = hostvars["securedrop_venv_site_packages"].format("3.5") # noqa: E501 36 hostvars['python_version'] = "3.5" 37 38 if with_header: 39 hostvars = dict(securedrop_test_vars=hostvars) 40 41 return hostvars 42 43 44 def lookup_molecule_info(): 45 """ 46 Molecule automatically writes YAML files documenting dynamic host info 47 such as remote IPs. Read that file and pass back the config dict. 48 """ 49 molecule_instance_config_path = os.path.abspath( 50 os.environ['MOLECULE_INSTANCE_CONFIG']) 51 with open(molecule_instance_config_path, 'r') as f: 52 molecule_instance_config = yaml.safe_load(f) 53 return molecule_instance_config 54 55 56 class Myvalues: 57 def __init__(self): 58 pass 59 60 61 value = securedrop_import_testinfra_vars(target_host) 62 res = Myvalues() 63 for key, value in value.items(): 64 setattr(res, key, value) 65 testutils.securedrop_test_vars = res 66 [end of molecule/testinfra/conftest.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/molecule/testinfra/conftest.py b/molecule/testinfra/conftest.py --- a/molecule/testinfra/conftest.py +++ b/molecule/testinfra/conftest.py @@ -28,7 +28,16 @@ with io.open(filepath, 'r') as f: hostvars = yaml.safe_load(f) - if os.environ.get("MOLECULE_SCENARIO_NAME").endswith("focal"): + # Testing against both Focal and Xenial must be supported for now in both + # staging scenarios, and in prod via `USE_FOCAL=1 ./securedrop-admin verify` + testing_focal = False + scenario_env = "MOLECULE_SCENARIO_NAME" + if scenario_env in os.environ and os.environ.get(scenario_env).endswith("focal"): + testing_focal = True + if "USE_FOCAL" in os.environ: + testing_focal = True + + if testing_focal: hostvars['securedrop_venv_site_packages'] = hostvars["securedrop_venv_site_packages"].format("3.8") # noqa: E501 hostvars['python_version'] = "3.8" else:
{"golden_diff": "diff --git a/molecule/testinfra/conftest.py b/molecule/testinfra/conftest.py\n--- a/molecule/testinfra/conftest.py\n+++ b/molecule/testinfra/conftest.py\n@@ -28,7 +28,16 @@\n with io.open(filepath, 'r') as f:\n hostvars = yaml.safe_load(f)\n \n- if os.environ.get(\"MOLECULE_SCENARIO_NAME\").endswith(\"focal\"):\n+ # Testing against both Focal and Xenial must be supported for now in both\n+ # staging scenarios, and in prod via `USE_FOCAL=1 ./securedrop-admin verify`\n+ testing_focal = False\n+ scenario_env = \"MOLECULE_SCENARIO_NAME\"\n+ if scenario_env in os.environ and os.environ.get(scenario_env).endswith(\"focal\"):\n+ testing_focal = True\n+ if \"USE_FOCAL\" in os.environ:\n+ testing_focal = True\n+\n+ if testing_focal:\n hostvars['securedrop_venv_site_packages'] = hostvars[\"securedrop_venv_site_packages\"].format(\"3.8\") # noqa: E501\n hostvars['python_version'] = \"3.8\"\n else:\n", "issue": "Dual distro support broke \"securedrop-admin verify\"\n## Description\r\n\r\nWhen adding support for Focal to the configuration tests in #5529, a check of the `MOLECULE_SCENARIO_NAME` environment variable broke `securedrop-admin verify`, where it's not set.\r\n\r\n## Steps to Reproduce\r\n\r\nOn an admin workstation:\r\n- Run `securedrop-admin setup -t`\r\n- Run `securedrop-admin verify`\r\n\r\n## Expected Behavior\r\n\r\nThat the configuration tests would run.\r\n\r\n## Actual Behavior\r\n\r\nYou get an error [here](https://github.com/freedomofpress/securedrop/blob/76d133a7e5962f8d904e507d93e6a61575358eeb/molecule/testinfra/conftest.py#L31) saying `'NoneType' object has no attribute 'endswith'`. \r\n\r\n## Comments\r\n\r\nShould probably check if it's `None` or just add `\"\"` as the default in the `os.environ.get` call.\n", "before_files": [{"content": "\"\"\"\nConfiguration for TestInfra test suite for SecureDrop.\nHandles importing host-specific test vars, so test functions\ncan be reused across multiple hosts, with varied targets.\n\nVars should be placed in `testinfra/vars/<hostname>.yml`.\n\"\"\"\n\nimport io\nimport os\nimport yaml\nimport testutils\n\n# The config tests target staging by default. It's possible to override\n# for e.g. prod, but the associated vars files are not yet ported.\ntarget_host = os.environ.get('SECUREDROP_TESTINFRA_TARGET_HOST', 'staging')\n\n\ndef securedrop_import_testinfra_vars(hostname, with_header=False):\n \"\"\"\n Import vars from a YAML file to populate tests with host-specific\n values used in checks. For instance, the SecureDrop docroot will\n be under /vagrant in development, but /var/www/securedrop in staging.\n\n Vars must be stored in `testinfra/vars/<hostname>.yml`.\n \"\"\"\n filepath = os.path.join(os.path.dirname(__file__), \"vars\", hostname+\".yml\")\n with io.open(filepath, 'r') as f:\n hostvars = yaml.safe_load(f)\n\n if os.environ.get(\"MOLECULE_SCENARIO_NAME\").endswith(\"focal\"):\n hostvars['securedrop_venv_site_packages'] = hostvars[\"securedrop_venv_site_packages\"].format(\"3.8\") # noqa: E501\n hostvars['python_version'] = \"3.8\"\n else:\n hostvars['securedrop_venv_site_packages'] = hostvars[\"securedrop_venv_site_packages\"].format(\"3.5\") # noqa: E501\n hostvars['python_version'] = \"3.5\"\n\n if with_header:\n hostvars = dict(securedrop_test_vars=hostvars)\n\n return hostvars\n\n\ndef lookup_molecule_info():\n \"\"\"\n Molecule automatically writes YAML files documenting dynamic host info\n such as remote IPs. Read that file and pass back the config dict.\n \"\"\"\n molecule_instance_config_path = os.path.abspath(\n os.environ['MOLECULE_INSTANCE_CONFIG'])\n with open(molecule_instance_config_path, 'r') as f:\n molecule_instance_config = yaml.safe_load(f)\n return molecule_instance_config\n\n\nclass Myvalues:\n def __init__(self):\n pass\n\n\nvalue = securedrop_import_testinfra_vars(target_host)\nres = Myvalues()\nfor key, value in value.items():\n setattr(res, key, value)\ntestutils.securedrop_test_vars = res\n", "path": "molecule/testinfra/conftest.py"}]}
1,437
277
gh_patches_debug_1314
rasdani/github-patches
git_diff
apache__airflow-9699
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> TimeSensor triggers immediately when used over midnight (UTC) <!-- Welcome to Apache Airflow! For a smooth issue process, try to answer the following questions. Don't worry if they're not all applicable; just try to include what you can :-) If you need to include code snippets or logs, please put them in fenced code blocks. If they're super-long, please use the details tag like <details><summary>super-long log</summary> lots of stuff </details> Please delete these comment blocks before submitting the issue. --> <!-- IMPORTANT!!! PLEASE CHECK "SIMILAR TO X EXISTING ISSUES" OPTION IF VISIBLE NEXT TO "SUBMIT NEW ISSUE" BUTTON!!! PLEASE CHECK IF THIS ISSUE HAS BEEN REPORTED PREVIOUSLY USING SEARCH!!! Please complete the next sections or the issue will be closed. This questions are the first thing we need to know to understand the context. --> **Apache Airflow version**: 1.10.10 (issue exists in current master as well) **Environment**: does not seem relevant **What happened**: The TimeSensor does trigger if the current time is later than the defined trigger time. Looking at the [source code](https://github.com/apache/airflow/blob/master/airflow/sensors/time_sensor.py), the trigger rule is defined as ``` return timezone.utcnow().time() > self.target_time ``` This leads to problems when the DAG runs over midnight UTC. For example, suppose the following DAG: ``` with DAG('foo', default_args={'start_date': datetime(2020, 7, 1, tzinfo=pendulum.timezone("Europe/Berlin"))}, schedule_interval="0 0 * * *") as dag: # in summer, Europe/Berlin is two hours after UTC, hence: time_04h00_local = TimeSensor(task_id="time_01h30", target_time=time(hour=2, minute=00)) ``` This DAG will be triggered at 22:00 UTC. Then, according to the trigger rule: ``` 22:00 UTC > 2:00 UTC ``` Hence, the TimeSensor will be triggered immediately. **What you expected to happen**: The TimeSensor should trigger at the following day if `target_time < next_execution_date.time()` **Possible workarounds**: One can always use the TimeDeltaSensor to archive similar effects. This does result in code that is not as readable, though. </issue> <code> [start of airflow/sensors/time_sensor.py] 1 # 2 # Licensed to the Apache Software Foundation (ASF) under one 3 # or more contributor license agreements. See the NOTICE file 4 # distributed with this work for additional information 5 # regarding copyright ownership. The ASF licenses this file 6 # to you under the Apache License, Version 2.0 (the 7 # "License"); you may not use this file except in compliance 8 # with the License. You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, 13 # software distributed under the License is distributed on an 14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 # KIND, either express or implied. See the License for the 16 # specific language governing permissions and limitations 17 # under the License. 18 19 from airflow.sensors.base_sensor_operator import BaseSensorOperator 20 from airflow.utils import timezone 21 from airflow.utils.decorators import apply_defaults 22 23 24 class TimeSensor(BaseSensorOperator): 25 """ 26 Waits until the specified time of the day. 27 28 :param target_time: time after which the job succeeds 29 :type target_time: datetime.time 30 """ 31 32 @apply_defaults 33 def __init__(self, target_time, *args, **kwargs): 34 super().__init__(*args, **kwargs) 35 self.target_time = target_time 36 37 def poke(self, context): 38 self.log.info('Checking if the time (%s) has come', self.target_time) 39 return timezone.utcnow().time() > self.target_time 40 [end of airflow/sensors/time_sensor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/airflow/sensors/time_sensor.py b/airflow/sensors/time_sensor.py --- a/airflow/sensors/time_sensor.py +++ b/airflow/sensors/time_sensor.py @@ -36,4 +36,4 @@ def poke(self, context): self.log.info('Checking if the time (%s) has come', self.target_time) - return timezone.utcnow().time() > self.target_time + return timezone.make_naive(timezone.utcnow()).time() > self.target_time
{"golden_diff": "diff --git a/airflow/sensors/time_sensor.py b/airflow/sensors/time_sensor.py\n--- a/airflow/sensors/time_sensor.py\n+++ b/airflow/sensors/time_sensor.py\n@@ -36,4 +36,4 @@\n \n def poke(self, context):\n self.log.info('Checking if the time (%s) has come', self.target_time)\n- return timezone.utcnow().time() > self.target_time\n+ return timezone.make_naive(timezone.utcnow()).time() > self.target_time\n", "issue": "TimeSensor triggers immediately when used over midnight (UTC)\n<!--\r\n\r\nWelcome to Apache Airflow! For a smooth issue process, try to answer the following questions.\r\nDon't worry if they're not all applicable; just try to include what you can :-)\r\n\r\nIf you need to include code snippets or logs, please put them in fenced code\r\nblocks. If they're super-long, please use the details tag like\r\n<details><summary>super-long log</summary> lots of stuff </details>\r\n\r\nPlease delete these comment blocks before submitting the issue.\r\n\r\n-->\r\n\r\n<!--\r\n\r\nIMPORTANT!!!\r\n\r\nPLEASE CHECK \"SIMILAR TO X EXISTING ISSUES\" OPTION IF VISIBLE\r\nNEXT TO \"SUBMIT NEW ISSUE\" BUTTON!!!\r\n\r\nPLEASE CHECK IF THIS ISSUE HAS BEEN REPORTED PREVIOUSLY USING SEARCH!!!\r\n\r\nPlease complete the next sections or the issue will be closed.\r\nThis questions are the first thing we need to know to understand the context.\r\n\r\n-->\r\n\r\n**Apache Airflow version**: 1.10.10 (issue exists in current master as well)\r\n\r\n**Environment**: does not seem relevant\r\n\r\n**What happened**:\r\n\r\nThe TimeSensor does trigger if the current time is later than the defined trigger time. Looking at the [source code](https://github.com/apache/airflow/blob/master/airflow/sensors/time_sensor.py), the trigger rule is defined as\r\n```\r\nreturn timezone.utcnow().time() > self.target_time\r\n```\r\nThis leads to problems when the DAG runs over midnight UTC. For example, suppose the following DAG:\r\n\r\n```\r\nwith DAG('foo', \r\n default_args={'start_date': datetime(2020, 7, 1, tzinfo=pendulum.timezone(\"Europe/Berlin\"))}, \r\n schedule_interval=\"0 0 * * *\") as dag:\r\n\r\n # in summer, Europe/Berlin is two hours after UTC, hence: \r\n time_04h00_local = TimeSensor(task_id=\"time_01h30\", target_time=time(hour=2, minute=00))\r\n```\r\n\r\nThis DAG will be triggered at 22:00 UTC. Then, according to the trigger rule:\r\n```\r\n22:00 UTC > 2:00 UTC\r\n```\r\nHence, the TimeSensor will be triggered immediately. \r\n\r\n**What you expected to happen**:\r\n\r\nThe TimeSensor should trigger at the following day if `target_time < next_execution_date.time()`\r\n\r\n**Possible workarounds**:\r\n\r\nOne can always use the TimeDeltaSensor to archive similar effects. This does result in code that is not as readable, though. \n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom airflow.sensors.base_sensor_operator import BaseSensorOperator\nfrom airflow.utils import timezone\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass TimeSensor(BaseSensorOperator):\n \"\"\"\n Waits until the specified time of the day.\n\n :param target_time: time after which the job succeeds\n :type target_time: datetime.time\n \"\"\"\n\n @apply_defaults\n def __init__(self, target_time, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.target_time = target_time\n\n def poke(self, context):\n self.log.info('Checking if the time (%s) has come', self.target_time)\n return timezone.utcnow().time() > self.target_time\n", "path": "airflow/sensors/time_sensor.py"}]}
1,477
115
gh_patches_debug_6124
rasdani/github-patches
git_diff
conan-io__conan-3087
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> package_id() regression bug in conan 1.4.x? Consider the following `conanfile.py`: ```python from conans import ConanFile class TestConan(ConanFile): name = "Test" version = "0.0.1" settings = "os", "arch" def package_id(self): self.info.include_build_settings() self.info.settings.os_build = self.info.settings.os self.info.settings.arch_build = self.info.settings.arch del self.info.settings.os del self.info.settings.arch print(">>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: ",str(self.info.settings.os_build)) print(">>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: ",str(self.info.settings.arch_build)) ``` Now test it with conan 1.3.3: ``` C:\Users\dbely\conan\conan-test>pip install conan==1.3.3 ... C:\Users\dbely\conan\conan-test>conan create . dbely/testing ... Test/0.0.1@dbely/testing: The stored package has not changed >>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: Windows >>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: x86_64 Test/0.0.1@dbely/testing: Installing package ... C:\Users\dbely\conan\conan-test>conan search Test/0.0.1@dbely/testing Existing packages for recipe Test/0.0.1@dbely/testing: Package_ID: 456f15897172eef340fcbac8a70811f2beb26a93 [settings] arch_build: x86_64 os_build: Windows Outdated from recipe: False ``` Everything is good. Upgrade to conan 1.4.4 (all 1.4.x versions behave the same) and try again: ``` C:\Users\dbely\conan\conan-test>pip install conan==1.4.4 ... C:\Users\dbely\conan\conan-test>conan create . dbely/testing ... Test/0.0.1@dbely/testing: A new conanfile.py version was exported Test/0.0.1@dbely/testing: Folder: C:\Users\dbely\.conan\data\Test\0.0.1\dbely\testing\export >>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: Windows >>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: x86_64 Test/0.0.1@dbely/testing: Installing package >>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: None >>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: None ... C:\Users\dbely\conan\conan-test>conan search Test/0.0.1@dbely/testing Existing packages for recipe Test/0.0.1@dbely/testing: Package_ID: 456f15897172eef340fcbac8a70811f2beb26a93 [settings] arch_build: None os_build: None Outdated from recipe: False ``` Oops! `package_id()` is now called twice and after the second call `os_build` and `arch_build` are set to `None`. Looks like a bug to me. </issue> <code> [start of conans/client/graph/printer.py] 1 from conans.client.output import Color 2 from conans.model.ref import PackageReference 3 from conans.model.workspace import WORKSPACE_FILE 4 5 6 def print_graph(deps_graph, out): 7 all_nodes = [] 8 ids = set() 9 for node in sorted(n for n in deps_graph.nodes if n.conan_ref): 10 package_id = PackageReference(node.conan_ref, node.conanfile.package_id()) 11 if package_id not in ids: 12 all_nodes.append(node) 13 ids.add(package_id) 14 requires = [n for n in all_nodes] 15 out.writeln("Requirements", Color.BRIGHT_YELLOW) 16 17 def _recipes(nodes): 18 for node in nodes: 19 if node.remote == WORKSPACE_FILE: 20 from_text = "from '%s'" % WORKSPACE_FILE 21 else: 22 from_text = "from local cache" if not node.remote else "from '%s'" % node.remote.name 23 out.writeln(" %s %s" % (repr(node.conan_ref), from_text), Color.BRIGHT_CYAN) 24 _recipes(requires) 25 out.writeln("Packages", Color.BRIGHT_YELLOW) 26 27 def _packages(nodes): 28 for node in nodes: 29 ref, conanfile = node.conan_ref, node.conanfile 30 ref = PackageReference(ref, conanfile.info.package_id()) 31 out.writeln(" %s" % (repr(ref)), Color.BRIGHT_CYAN) 32 _packages(requires) 33 34 out.writeln("") 35 [end of conans/client/graph/printer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conans/client/graph/printer.py b/conans/client/graph/printer.py --- a/conans/client/graph/printer.py +++ b/conans/client/graph/printer.py @@ -7,7 +7,7 @@ all_nodes = [] ids = set() for node in sorted(n for n in deps_graph.nodes if n.conan_ref): - package_id = PackageReference(node.conan_ref, node.conanfile.package_id()) + package_id = PackageReference(node.conan_ref, node.conanfile.info.package_id()) if package_id not in ids: all_nodes.append(node) ids.add(package_id)
{"golden_diff": "diff --git a/conans/client/graph/printer.py b/conans/client/graph/printer.py\n--- a/conans/client/graph/printer.py\n+++ b/conans/client/graph/printer.py\n@@ -7,7 +7,7 @@\n all_nodes = []\n ids = set()\n for node in sorted(n for n in deps_graph.nodes if n.conan_ref):\n- package_id = PackageReference(node.conan_ref, node.conanfile.package_id())\n+ package_id = PackageReference(node.conan_ref, node.conanfile.info.package_id())\n if package_id not in ids:\n all_nodes.append(node)\n ids.add(package_id)\n", "issue": "package_id() regression bug in conan 1.4.x?\nConsider the following `conanfile.py`:\r\n```python\r\nfrom conans import ConanFile\r\n\r\nclass TestConan(ConanFile):\r\n name = \"Test\"\r\n version = \"0.0.1\"\r\n settings = \"os\", \"arch\"\r\n\r\n def package_id(self):\r\n self.info.include_build_settings()\r\n self.info.settings.os_build = self.info.settings.os\r\n self.info.settings.arch_build = self.info.settings.arch\r\n del self.info.settings.os\r\n del self.info.settings.arch\r\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: \",str(self.info.settings.os_build))\r\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: \",str(self.info.settings.arch_build))\r\n```\r\n\r\nNow test it with conan 1.3.3:\r\n```\r\nC:\\Users\\dbely\\conan\\conan-test>pip install conan==1.3.3\r\n...\r\nC:\\Users\\dbely\\conan\\conan-test>conan create . dbely/testing\r\n...\r\nTest/0.0.1@dbely/testing: The stored package has not changed\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: Windows\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: x86_64\r\nTest/0.0.1@dbely/testing: Installing package\r\n...\r\nC:\\Users\\dbely\\conan\\conan-test>conan search Test/0.0.1@dbely/testing\r\nExisting packages for recipe Test/0.0.1@dbely/testing:\r\n\r\n Package_ID: 456f15897172eef340fcbac8a70811f2beb26a93\r\n [settings]\r\n arch_build: x86_64\r\n os_build: Windows\r\n Outdated from recipe: False\r\n```\r\nEverything is good. Upgrade to conan 1.4.4 (all 1.4.x versions behave the same) and try again:\r\n```\r\nC:\\Users\\dbely\\conan\\conan-test>pip install conan==1.4.4\r\n...\r\nC:\\Users\\dbely\\conan\\conan-test>conan create . dbely/testing\r\n...\r\nTest/0.0.1@dbely/testing: A new conanfile.py version was exported\r\nTest/0.0.1@dbely/testing: Folder: C:\\Users\\dbely\\.conan\\data\\Test\\0.0.1\\dbely\\testing\\export\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: Windows\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: x86_64\r\nTest/0.0.1@dbely/testing: Installing package\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> os_build: None\r\n>>>>>>>>>>>>>>>>>>>>>>>>>>>> arch_build: None\r\n...\r\nC:\\Users\\dbely\\conan\\conan-test>conan search Test/0.0.1@dbely/testing\r\nExisting packages for recipe Test/0.0.1@dbely/testing:\r\n\r\n Package_ID: 456f15897172eef340fcbac8a70811f2beb26a93\r\n [settings]\r\n arch_build: None\r\n os_build: None\r\n Outdated from recipe: False\r\n```\r\nOops! `package_id()` is now called twice and after the second call `os_build` and `arch_build` are set to `None`. Looks like a bug to me. \n", "before_files": [{"content": "from conans.client.output import Color\nfrom conans.model.ref import PackageReference\nfrom conans.model.workspace import WORKSPACE_FILE\n\n\ndef print_graph(deps_graph, out):\n all_nodes = []\n ids = set()\n for node in sorted(n for n in deps_graph.nodes if n.conan_ref):\n package_id = PackageReference(node.conan_ref, node.conanfile.package_id())\n if package_id not in ids:\n all_nodes.append(node)\n ids.add(package_id)\n requires = [n for n in all_nodes]\n out.writeln(\"Requirements\", Color.BRIGHT_YELLOW)\n\n def _recipes(nodes):\n for node in nodes:\n if node.remote == WORKSPACE_FILE:\n from_text = \"from '%s'\" % WORKSPACE_FILE\n else:\n from_text = \"from local cache\" if not node.remote else \"from '%s'\" % node.remote.name\n out.writeln(\" %s %s\" % (repr(node.conan_ref), from_text), Color.BRIGHT_CYAN)\n _recipes(requires)\n out.writeln(\"Packages\", Color.BRIGHT_YELLOW)\n\n def _packages(nodes):\n for node in nodes:\n ref, conanfile = node.conan_ref, node.conanfile\n ref = PackageReference(ref, conanfile.info.package_id())\n out.writeln(\" %s\" % (repr(ref)), Color.BRIGHT_CYAN)\n _packages(requires)\n\n out.writeln(\"\")\n", "path": "conans/client/graph/printer.py"}]}
1,651
138
gh_patches_debug_25780
rasdani/github-patches
git_diff
pre-commit__pre-commit-1382
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> No colors when hooks are run by a git commit Hi, I use pre-commit at home on linux which works perfect. But at work I have a windows pc. Here I have problems with the colorfull output. When the hooks are run by `tox` calling `pre-commit run` there are colors as usual. When the hooks are run by a `git commit` the colors are missing. Concrete I mean the green for 'Passed', red for 'Failed ' and yellow/brown for 'Skipped' in the overview. There is no difference if I run it via git-bash, cmd or powershell. Also there is no difference if I use the pycharm buildin terminal or others. </issue> <code> [start of pre_commit/color.py] 1 import os 2 import sys 3 4 if sys.platform == 'win32': # pragma: no cover (windows) 5 def _enable() -> None: 6 from ctypes import POINTER 7 from ctypes import windll 8 from ctypes import WinError 9 from ctypes import WINFUNCTYPE 10 from ctypes.wintypes import BOOL 11 from ctypes.wintypes import DWORD 12 from ctypes.wintypes import HANDLE 13 14 STD_OUTPUT_HANDLE = -11 15 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 16 17 def bool_errcheck(result, func, args): 18 if not result: 19 raise WinError() 20 return args 21 22 GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)( 23 ('GetStdHandle', windll.kernel32), ((1, 'nStdHandle'),), 24 ) 25 26 GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))( 27 ('GetConsoleMode', windll.kernel32), 28 ((1, 'hConsoleHandle'), (2, 'lpMode')), 29 ) 30 GetConsoleMode.errcheck = bool_errcheck 31 32 SetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, DWORD)( 33 ('SetConsoleMode', windll.kernel32), 34 ((1, 'hConsoleHandle'), (1, 'dwMode')), 35 ) 36 SetConsoleMode.errcheck = bool_errcheck 37 38 # As of Windows 10, the Windows console supports (some) ANSI escape 39 # sequences, but it needs to be enabled using `SetConsoleMode` first. 40 # 41 # More info on the escape sequences supported: 42 # https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx 43 stdout = GetStdHandle(STD_OUTPUT_HANDLE) 44 flags = GetConsoleMode(stdout) 45 SetConsoleMode(stdout, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING) 46 47 try: 48 _enable() 49 except OSError: 50 terminal_supports_color = False 51 else: 52 terminal_supports_color = True 53 else: # pragma: win32 no cover 54 terminal_supports_color = True 55 56 RED = '\033[41m' 57 GREEN = '\033[42m' 58 YELLOW = '\033[43;30m' 59 TURQUOISE = '\033[46;30m' 60 SUBTLE = '\033[2m' 61 NORMAL = '\033[m' 62 63 64 def format_color(text: str, color: str, use_color_setting: bool) -> str: 65 """Format text with color. 66 67 Args: 68 text - Text to be formatted with color if `use_color` 69 color - The color start string 70 use_color_setting - Whether or not to color 71 """ 72 if use_color_setting: 73 return f'{color}{text}{NORMAL}' 74 else: 75 return text 76 77 78 COLOR_CHOICES = ('auto', 'always', 'never') 79 80 81 def use_color(setting: str) -> bool: 82 """Choose whether to use color based on the command argument. 83 84 Args: 85 setting - Either `auto`, `always`, or `never` 86 """ 87 if setting not in COLOR_CHOICES: 88 raise ValueError(setting) 89 90 return ( 91 setting == 'always' or ( 92 setting == 'auto' and 93 sys.stdout.isatty() and 94 terminal_supports_color and 95 os.getenv('TERM') != 'dumb' 96 ) 97 ) 98 [end of pre_commit/color.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/color.py b/pre_commit/color.py --- a/pre_commit/color.py +++ b/pre_commit/color.py @@ -11,7 +11,7 @@ from ctypes.wintypes import DWORD from ctypes.wintypes import HANDLE - STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 def bool_errcheck(result, func, args): @@ -40,9 +40,9 @@ # # More info on the escape sequences supported: # https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx - stdout = GetStdHandle(STD_OUTPUT_HANDLE) - flags = GetConsoleMode(stdout) - SetConsoleMode(stdout, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING) + stderr = GetStdHandle(STD_ERROR_HANDLE) + flags = GetConsoleMode(stderr) + SetConsoleMode(stderr, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING) try: _enable() @@ -90,7 +90,7 @@ return ( setting == 'always' or ( setting == 'auto' and - sys.stdout.isatty() and + sys.stderr.isatty() and terminal_supports_color and os.getenv('TERM') != 'dumb' )
{"golden_diff": "diff --git a/pre_commit/color.py b/pre_commit/color.py\n--- a/pre_commit/color.py\n+++ b/pre_commit/color.py\n@@ -11,7 +11,7 @@\n from ctypes.wintypes import DWORD\n from ctypes.wintypes import HANDLE\n \n- STD_OUTPUT_HANDLE = -11\n+ STD_ERROR_HANDLE = -12\n ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4\n \n def bool_errcheck(result, func, args):\n@@ -40,9 +40,9 @@\n #\n # More info on the escape sequences supported:\n # https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx\n- stdout = GetStdHandle(STD_OUTPUT_HANDLE)\n- flags = GetConsoleMode(stdout)\n- SetConsoleMode(stdout, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)\n+ stderr = GetStdHandle(STD_ERROR_HANDLE)\n+ flags = GetConsoleMode(stderr)\n+ SetConsoleMode(stderr, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)\n \n try:\n _enable()\n@@ -90,7 +90,7 @@\n return (\n setting == 'always' or (\n setting == 'auto' and\n- sys.stdout.isatty() and\n+ sys.stderr.isatty() and\n terminal_supports_color and\n os.getenv('TERM') != 'dumb'\n )\n", "issue": "No colors when hooks are run by a git commit\nHi,\r\nI use pre-commit at home on linux which works perfect. But at work I have a windows pc. Here I have problems with the colorfull output. \r\n\r\nWhen the hooks are run by `tox` calling `pre-commit run` there are colors as usual. When the hooks are run by a `git commit` the colors are missing.\r\n\r\nConcrete I mean the green for 'Passed', red for 'Failed ' and yellow/brown for 'Skipped' in the overview.\r\n\r\nThere is no difference if I run it via git-bash, cmd or powershell. Also there is no difference if I use the pycharm buildin terminal or others.\n", "before_files": [{"content": "import os\nimport sys\n\nif sys.platform == 'win32': # pragma: no cover (windows)\n def _enable() -> None:\n from ctypes import POINTER\n from ctypes import windll\n from ctypes import WinError\n from ctypes import WINFUNCTYPE\n from ctypes.wintypes import BOOL\n from ctypes.wintypes import DWORD\n from ctypes.wintypes import HANDLE\n\n STD_OUTPUT_HANDLE = -11\n ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4\n\n def bool_errcheck(result, func, args):\n if not result:\n raise WinError()\n return args\n\n GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(\n ('GetStdHandle', windll.kernel32), ((1, 'nStdHandle'),),\n )\n\n GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(\n ('GetConsoleMode', windll.kernel32),\n ((1, 'hConsoleHandle'), (2, 'lpMode')),\n )\n GetConsoleMode.errcheck = bool_errcheck\n\n SetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, DWORD)(\n ('SetConsoleMode', windll.kernel32),\n ((1, 'hConsoleHandle'), (1, 'dwMode')),\n )\n SetConsoleMode.errcheck = bool_errcheck\n\n # As of Windows 10, the Windows console supports (some) ANSI escape\n # sequences, but it needs to be enabled using `SetConsoleMode` first.\n #\n # More info on the escape sequences supported:\n # https://msdn.microsoft.com/en-us/library/windows/desktop/mt638032(v=vs.85).aspx\n stdout = GetStdHandle(STD_OUTPUT_HANDLE)\n flags = GetConsoleMode(stdout)\n SetConsoleMode(stdout, flags | ENABLE_VIRTUAL_TERMINAL_PROCESSING)\n\n try:\n _enable()\n except OSError:\n terminal_supports_color = False\n else:\n terminal_supports_color = True\nelse: # pragma: win32 no cover\n terminal_supports_color = True\n\nRED = '\\033[41m'\nGREEN = '\\033[42m'\nYELLOW = '\\033[43;30m'\nTURQUOISE = '\\033[46;30m'\nSUBTLE = '\\033[2m'\nNORMAL = '\\033[m'\n\n\ndef format_color(text: str, color: str, use_color_setting: bool) -> str:\n \"\"\"Format text with color.\n\n Args:\n text - Text to be formatted with color if `use_color`\n color - The color start string\n use_color_setting - Whether or not to color\n \"\"\"\n if use_color_setting:\n return f'{color}{text}{NORMAL}'\n else:\n return text\n\n\nCOLOR_CHOICES = ('auto', 'always', 'never')\n\n\ndef use_color(setting: str) -> bool:\n \"\"\"Choose whether to use color based on the command argument.\n\n Args:\n setting - Either `auto`, `always`, or `never`\n \"\"\"\n if setting not in COLOR_CHOICES:\n raise ValueError(setting)\n\n return (\n setting == 'always' or (\n setting == 'auto' and\n sys.stdout.isatty() and\n terminal_supports_color and\n os.getenv('TERM') != 'dumb'\n )\n )\n", "path": "pre_commit/color.py"}]}
1,607
305
gh_patches_debug_27472
rasdani/github-patches
git_diff
liqd__a4-meinberlin-2899
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> voting in brainstorming the votings are shown on pop up for ideas within brainstorming although there is no voting. <img width="332" alt="bildschirmfoto 2019-02-05 um 15 01 57" src="https://user-images.githubusercontent.com/35491681/52278354-20299380-2957-11e9-8368-dfb42c142a3a.png"> </issue> <code> [start of meinberlin/apps/newsletters/emails.py] 1 from email.mime.image import MIMEImage 2 3 from django.apps import apps 4 from django.conf import settings 5 from django.contrib import auth 6 7 from adhocracy4.emails.mixins import ReportToAdminEmailMixin 8 from meinberlin.apps.contrib.emails import Email 9 10 Organisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL) 11 User = auth.get_user_model() 12 13 14 class NewsletterEmail(ReportToAdminEmailMixin, Email): 15 template_name = 'meinberlin_newsletters/emails/newsletter_email' 16 17 def dispatch(self, object, *args, **kwargs): 18 organisation_pk = kwargs.pop('organisation_pk', None) 19 organisation = None 20 if organisation_pk: 21 organisation = Organisation.objects.get(pk=organisation_pk) 22 kwargs['organisation'] = organisation 23 24 return super().dispatch(object, *args, **kwargs) 25 26 def get_reply_to(self): 27 return ['{} <{}>'.format(self.object.sender_name, self.object.sender)] 28 29 def get_receivers(self): 30 return User.objects\ 31 .filter(id__in=self.kwargs['participant_ids'])\ 32 .filter(get_newsletters=True)\ 33 .filter(is_active=True)\ 34 .distinct() 35 36 def get_attachments(self): 37 attachments = super().get_attachments() 38 39 organisation = self.kwargs['organisation'] 40 if organisation and organisation.logo: 41 f = open(organisation.logo.path, 'rb') 42 logo = MIMEImage(f.read()) 43 logo.add_header('Content-ID', '<{}>'.format('organisation_logo')) 44 attachments += [logo] 45 46 return attachments 47 48 49 class NewsletterEmailAll(NewsletterEmail): 50 51 def get_receivers(self): 52 return User.objects\ 53 .filter(is_active=True)\ 54 .distinct() 55 [end of meinberlin/apps/newsletters/emails.py] [start of meinberlin/apps/users/admin.py] 1 from django.contrib import admin 2 from django.contrib import auth 3 from django.contrib.auth.models import Group 4 from django.utils.translation import ugettext_lazy as _ 5 6 from . import models 7 from .forms import UserAdminForm 8 9 10 class UserAdmin(auth.admin.UserAdmin): 11 form = UserAdminForm 12 fieldsets = ( 13 (None, {'fields': ('username', 'email', 'password', 'groups')}), 14 (_('Permissions'), {'fields': ('is_staff', 'is_superuser')}), 15 (_('Important dates'), {'fields': ('last_login', 'date_joined')}), 16 ) 17 add_fieldsets = ( 18 (None, { 19 'classes': ('wide',), 20 'fields': ('username', 'email', 'password1', 'password2'), 21 }), 22 ) 23 readonly_fields = ('date_joined', 'last_login') 24 list_display = ( 25 'id', 'username', 'email', 'date_joined', 'last_login', 'is_staff', 26 'is_superuser' 27 ) 28 list_filter = ('is_staff', 'is_superuser', 'last_login') 29 search_fields = ('username', 'email', 'id') 30 31 32 class GroupAdmin(admin.ModelAdmin): 33 fieldsets = ( 34 (None, {'fields': ('name', )}), 35 ) 36 37 38 admin.site.register(models.User, UserAdmin) 39 admin.site.unregister(Group) 40 admin.site.register(Group, GroupAdmin) 41 [end of meinberlin/apps/users/admin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/newsletters/emails.py b/meinberlin/apps/newsletters/emails.py --- a/meinberlin/apps/newsletters/emails.py +++ b/meinberlin/apps/newsletters/emails.py @@ -50,5 +50,6 @@ def get_receivers(self): return User.objects\ + .filter(get_newsletters=True)\ .filter(is_active=True)\ .distinct() diff --git a/meinberlin/apps/users/admin.py b/meinberlin/apps/users/admin.py --- a/meinberlin/apps/users/admin.py +++ b/meinberlin/apps/users/admin.py @@ -12,7 +12,8 @@ fieldsets = ( (None, {'fields': ('username', 'email', 'password', 'groups')}), (_('Permissions'), {'fields': ('is_staff', 'is_superuser')}), - (_('Important dates'), {'fields': ('last_login', 'date_joined')}), + (_('Important dates'), + {'fields': ('last_login', 'date_joined', 'get_newsletters')}), ) add_fieldsets = ( (None, { @@ -20,10 +21,10 @@ 'fields': ('username', 'email', 'password1', 'password2'), }), ) - readonly_fields = ('date_joined', 'last_login') + readonly_fields = ('date_joined', 'last_login', 'get_newsletters') list_display = ( 'id', 'username', 'email', 'date_joined', 'last_login', 'is_staff', - 'is_superuser' + 'is_superuser', 'get_newsletters' ) list_filter = ('is_staff', 'is_superuser', 'last_login') search_fields = ('username', 'email', 'id')
{"golden_diff": "diff --git a/meinberlin/apps/newsletters/emails.py b/meinberlin/apps/newsletters/emails.py\n--- a/meinberlin/apps/newsletters/emails.py\n+++ b/meinberlin/apps/newsletters/emails.py\n@@ -50,5 +50,6 @@\n \n def get_receivers(self):\n return User.objects\\\n+ .filter(get_newsletters=True)\\\n .filter(is_active=True)\\\n .distinct()\ndiff --git a/meinberlin/apps/users/admin.py b/meinberlin/apps/users/admin.py\n--- a/meinberlin/apps/users/admin.py\n+++ b/meinberlin/apps/users/admin.py\n@@ -12,7 +12,8 @@\n fieldsets = (\n (None, {'fields': ('username', 'email', 'password', 'groups')}),\n (_('Permissions'), {'fields': ('is_staff', 'is_superuser')}),\n- (_('Important dates'), {'fields': ('last_login', 'date_joined')}),\n+ (_('Important dates'),\n+ {'fields': ('last_login', 'date_joined', 'get_newsletters')}),\n )\n add_fieldsets = (\n (None, {\n@@ -20,10 +21,10 @@\n 'fields': ('username', 'email', 'password1', 'password2'),\n }),\n )\n- readonly_fields = ('date_joined', 'last_login')\n+ readonly_fields = ('date_joined', 'last_login', 'get_newsletters')\n list_display = (\n 'id', 'username', 'email', 'date_joined', 'last_login', 'is_staff',\n- 'is_superuser'\n+ 'is_superuser', 'get_newsletters'\n )\n list_filter = ('is_staff', 'is_superuser', 'last_login')\n search_fields = ('username', 'email', 'id')\n", "issue": "voting in brainstorming\nthe votings are shown on pop up for ideas within brainstorming although there is no voting.\r\n\r\n<img width=\"332\" alt=\"bildschirmfoto 2019-02-05 um 15 01 57\" src=\"https://user-images.githubusercontent.com/35491681/52278354-20299380-2957-11e9-8368-dfb42c142a3a.png\">\r\n\n", "before_files": [{"content": "from email.mime.image import MIMEImage\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.contrib import auth\n\nfrom adhocracy4.emails.mixins import ReportToAdminEmailMixin\nfrom meinberlin.apps.contrib.emails import Email\n\nOrganisation = apps.get_model(settings.A4_ORGANISATIONS_MODEL)\nUser = auth.get_user_model()\n\n\nclass NewsletterEmail(ReportToAdminEmailMixin, Email):\n template_name = 'meinberlin_newsletters/emails/newsletter_email'\n\n def dispatch(self, object, *args, **kwargs):\n organisation_pk = kwargs.pop('organisation_pk', None)\n organisation = None\n if organisation_pk:\n organisation = Organisation.objects.get(pk=organisation_pk)\n kwargs['organisation'] = organisation\n\n return super().dispatch(object, *args, **kwargs)\n\n def get_reply_to(self):\n return ['{} <{}>'.format(self.object.sender_name, self.object.sender)]\n\n def get_receivers(self):\n return User.objects\\\n .filter(id__in=self.kwargs['participant_ids'])\\\n .filter(get_newsletters=True)\\\n .filter(is_active=True)\\\n .distinct()\n\n def get_attachments(self):\n attachments = super().get_attachments()\n\n organisation = self.kwargs['organisation']\n if organisation and organisation.logo:\n f = open(organisation.logo.path, 'rb')\n logo = MIMEImage(f.read())\n logo.add_header('Content-ID', '<{}>'.format('organisation_logo'))\n attachments += [logo]\n\n return attachments\n\n\nclass NewsletterEmailAll(NewsletterEmail):\n\n def get_receivers(self):\n return User.objects\\\n .filter(is_active=True)\\\n .distinct()\n", "path": "meinberlin/apps/newsletters/emails.py"}, {"content": "from django.contrib import admin\nfrom django.contrib import auth\nfrom django.contrib.auth.models import Group\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom . import models\nfrom .forms import UserAdminForm\n\n\nclass UserAdmin(auth.admin.UserAdmin):\n form = UserAdminForm\n fieldsets = (\n (None, {'fields': ('username', 'email', 'password', 'groups')}),\n (_('Permissions'), {'fields': ('is_staff', 'is_superuser')}),\n (_('Important dates'), {'fields': ('last_login', 'date_joined')}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('username', 'email', 'password1', 'password2'),\n }),\n )\n readonly_fields = ('date_joined', 'last_login')\n list_display = (\n 'id', 'username', 'email', 'date_joined', 'last_login', 'is_staff',\n 'is_superuser'\n )\n list_filter = ('is_staff', 'is_superuser', 'last_login')\n search_fields = ('username', 'email', 'id')\n\n\nclass GroupAdmin(admin.ModelAdmin):\n fieldsets = (\n (None, {'fields': ('name', )}),\n )\n\n\nadmin.site.register(models.User, UserAdmin)\nadmin.site.unregister(Group)\nadmin.site.register(Group, GroupAdmin)\n", "path": "meinberlin/apps/users/admin.py"}]}
1,518
402
gh_patches_debug_20696
rasdani/github-patches
git_diff
DataDog__dd-trace-py-887
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tests.internal.runtime.test_runtime_metrics.TestRuntimeWorker.test_worker_metrics fails randomly ``` def test_worker_metrics(self): self.tracer.configure(collect_metrics=True) with self.override_global_tracer(self.tracer): self.tracer._dogstatsd_client = DogStatsd() self.tracer._dogstatsd_client.socket = FakeSocket() root = self.start_span('parent', service='parent') context = root.context self.start_span('child', service='child', child_of=context) self.worker = RuntimeWorker(self.tracer._dogstatsd_client) self.worker.start() self.worker.stop() # get all received metrics received = [] while True: new = self.tracer._dogstatsd_client.socket.recv() if not new: break received.append(new) # DEV: sleep since metrics will still be getting collected and written time.sleep(.5) # expect received all default metrics > self.assertEqual(len(received), len(DEFAULT_RUNTIME_METRICS)) E AssertionError: 0 != 10 tests/internal/runtime/test_runtime_metrics.py:75: AssertionError ``` https://circleci.com/gh/DataDog/dd-trace-py/114364 </issue> <code> [start of ddtrace/internal/runtime/runtime_metrics.py] 1 import threading 2 import time 3 import itertools 4 5 from ..logger import get_logger 6 from .constants import ( 7 DEFAULT_RUNTIME_METRICS, 8 DEFAULT_RUNTIME_TAGS, 9 ) 10 from .metric_collectors import ( 11 GCRuntimeMetricCollector, 12 PSUtilRuntimeMetricCollector, 13 ) 14 from .tag_collectors import ( 15 TracerTagCollector, 16 ) 17 18 log = get_logger(__name__) 19 20 21 class RuntimeCollectorsIterable(object): 22 def __init__(self, enabled=None): 23 self._enabled = enabled or self.ENABLED 24 # Initialize the collectors. 25 self._collectors = [c() for c in self.COLLECTORS] 26 27 def __iter__(self): 28 collected = ( 29 collector.collect(self._enabled) 30 for collector in self._collectors 31 ) 32 return itertools.chain.from_iterable(collected) 33 34 def __repr__(self): 35 return '{}(enabled={})'.format( 36 self.__class__.__name__, 37 self._enabled, 38 ) 39 40 41 class RuntimeTags(RuntimeCollectorsIterable): 42 ENABLED = DEFAULT_RUNTIME_TAGS 43 COLLECTORS = [ 44 TracerTagCollector, 45 ] 46 47 48 class RuntimeMetrics(RuntimeCollectorsIterable): 49 ENABLED = DEFAULT_RUNTIME_METRICS 50 COLLECTORS = [ 51 GCRuntimeMetricCollector, 52 PSUtilRuntimeMetricCollector, 53 ] 54 55 56 class RuntimeWorker(object): 57 """ Worker thread for collecting and writing runtime metrics to a DogStatsd 58 client. 59 """ 60 61 FLUSH_INTERVAL = 10 62 63 def __init__(self, statsd_client, flush_interval=None): 64 self._stay_alive = None 65 self._thread = None 66 self._flush_interval = flush_interval or self.FLUSH_INTERVAL 67 self._statsd_client = statsd_client 68 self._runtime_metrics = RuntimeMetrics() 69 70 def _target(self): 71 while self._stay_alive: 72 self.flush() 73 time.sleep(self._flush_interval) 74 75 def start(self): 76 if not self._thread: 77 log.debug('Starting {}'.format(self)) 78 self._stay_alive = True 79 self._thread = threading.Thread(target=self._target) 80 self._thread.setDaemon(True) 81 self._thread.start() 82 83 def stop(self): 84 if self._thread and self._stay_alive: 85 log.debug('Stopping {}'.format(self)) 86 self._stay_alive = False 87 88 def _write_metric(self, key, value): 89 log.debug('Writing metric {}:{}'.format(key, value)) 90 self._statsd_client.gauge(key, value) 91 92 def flush(self): 93 if not self._statsd_client: 94 log.warn('Attempted flush with uninitialized or failed statsd client') 95 return 96 97 for key, value in self._runtime_metrics: 98 self._write_metric(key, value) 99 100 def reset(self): 101 self._runtime_metrics = RuntimeMetrics() 102 103 def __repr__(self): 104 return '{}(runtime_metrics={})'.format( 105 self.__class__.__name__, 106 self._runtime_metrics, 107 ) 108 [end of ddtrace/internal/runtime/runtime_metrics.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/internal/runtime/runtime_metrics.py b/ddtrace/internal/runtime/runtime_metrics.py --- a/ddtrace/internal/runtime/runtime_metrics.py +++ b/ddtrace/internal/runtime/runtime_metrics.py @@ -60,10 +60,10 @@ FLUSH_INTERVAL = 10 - def __init__(self, statsd_client, flush_interval=None): + def __init__(self, statsd_client, flush_interval=FLUSH_INTERVAL): self._stay_alive = None self._thread = None - self._flush_interval = flush_interval or self.FLUSH_INTERVAL + self._flush_interval = flush_interval self._statsd_client = statsd_client self._runtime_metrics = RuntimeMetrics() @@ -85,6 +85,10 @@ log.debug('Stopping {}'.format(self)) self._stay_alive = False + def join(self, timeout=None): + if self._thread: + return self._thread.join(timeout) + def _write_metric(self, key, value): log.debug('Writing metric {}:{}'.format(key, value)) self._statsd_client.gauge(key, value)
{"golden_diff": "diff --git a/ddtrace/internal/runtime/runtime_metrics.py b/ddtrace/internal/runtime/runtime_metrics.py\n--- a/ddtrace/internal/runtime/runtime_metrics.py\n+++ b/ddtrace/internal/runtime/runtime_metrics.py\n@@ -60,10 +60,10 @@\n \n FLUSH_INTERVAL = 10\n \n- def __init__(self, statsd_client, flush_interval=None):\n+ def __init__(self, statsd_client, flush_interval=FLUSH_INTERVAL):\n self._stay_alive = None\n self._thread = None\n- self._flush_interval = flush_interval or self.FLUSH_INTERVAL\n+ self._flush_interval = flush_interval\n self._statsd_client = statsd_client\n self._runtime_metrics = RuntimeMetrics()\n \n@@ -85,6 +85,10 @@\n log.debug('Stopping {}'.format(self))\n self._stay_alive = False\n \n+ def join(self, timeout=None):\n+ if self._thread:\n+ return self._thread.join(timeout)\n+\n def _write_metric(self, key, value):\n log.debug('Writing metric {}:{}'.format(key, value))\n self._statsd_client.gauge(key, value)\n", "issue": "tests.internal.runtime.test_runtime_metrics.TestRuntimeWorker.test_worker_metrics fails randomly\n```\r\n def test_worker_metrics(self):\r\n self.tracer.configure(collect_metrics=True)\r\n \r\n with self.override_global_tracer(self.tracer):\r\n self.tracer._dogstatsd_client = DogStatsd()\r\n self.tracer._dogstatsd_client.socket = FakeSocket()\r\n \r\n root = self.start_span('parent', service='parent')\r\n context = root.context\r\n self.start_span('child', service='child', child_of=context)\r\n \r\n self.worker = RuntimeWorker(self.tracer._dogstatsd_client)\r\n self.worker.start()\r\n self.worker.stop()\r\n \r\n # get all received metrics\r\n received = []\r\n while True:\r\n new = self.tracer._dogstatsd_client.socket.recv()\r\n if not new:\r\n break\r\n \r\n received.append(new)\r\n # DEV: sleep since metrics will still be getting collected and written\r\n time.sleep(.5)\r\n \r\n # expect received all default metrics\r\n> self.assertEqual(len(received), len(DEFAULT_RUNTIME_METRICS))\r\nE AssertionError: 0 != 10\r\n\r\ntests/internal/runtime/test_runtime_metrics.py:75: AssertionError\r\n```\r\n\r\nhttps://circleci.com/gh/DataDog/dd-trace-py/114364\n", "before_files": [{"content": "import threading\nimport time\nimport itertools\n\nfrom ..logger import get_logger\nfrom .constants import (\n DEFAULT_RUNTIME_METRICS,\n DEFAULT_RUNTIME_TAGS,\n)\nfrom .metric_collectors import (\n GCRuntimeMetricCollector,\n PSUtilRuntimeMetricCollector,\n)\nfrom .tag_collectors import (\n TracerTagCollector,\n)\n\nlog = get_logger(__name__)\n\n\nclass RuntimeCollectorsIterable(object):\n def __init__(self, enabled=None):\n self._enabled = enabled or self.ENABLED\n # Initialize the collectors.\n self._collectors = [c() for c in self.COLLECTORS]\n\n def __iter__(self):\n collected = (\n collector.collect(self._enabled)\n for collector in self._collectors\n )\n return itertools.chain.from_iterable(collected)\n\n def __repr__(self):\n return '{}(enabled={})'.format(\n self.__class__.__name__,\n self._enabled,\n )\n\n\nclass RuntimeTags(RuntimeCollectorsIterable):\n ENABLED = DEFAULT_RUNTIME_TAGS\n COLLECTORS = [\n TracerTagCollector,\n ]\n\n\nclass RuntimeMetrics(RuntimeCollectorsIterable):\n ENABLED = DEFAULT_RUNTIME_METRICS\n COLLECTORS = [\n GCRuntimeMetricCollector,\n PSUtilRuntimeMetricCollector,\n ]\n\n\nclass RuntimeWorker(object):\n \"\"\" Worker thread for collecting and writing runtime metrics to a DogStatsd\n client.\n \"\"\"\n\n FLUSH_INTERVAL = 10\n\n def __init__(self, statsd_client, flush_interval=None):\n self._stay_alive = None\n self._thread = None\n self._flush_interval = flush_interval or self.FLUSH_INTERVAL\n self._statsd_client = statsd_client\n self._runtime_metrics = RuntimeMetrics()\n\n def _target(self):\n while self._stay_alive:\n self.flush()\n time.sleep(self._flush_interval)\n\n def start(self):\n if not self._thread:\n log.debug('Starting {}'.format(self))\n self._stay_alive = True\n self._thread = threading.Thread(target=self._target)\n self._thread.setDaemon(True)\n self._thread.start()\n\n def stop(self):\n if self._thread and self._stay_alive:\n log.debug('Stopping {}'.format(self))\n self._stay_alive = False\n\n def _write_metric(self, key, value):\n log.debug('Writing metric {}:{}'.format(key, value))\n self._statsd_client.gauge(key, value)\n\n def flush(self):\n if not self._statsd_client:\n log.warn('Attempted flush with uninitialized or failed statsd client')\n return\n\n for key, value in self._runtime_metrics:\n self._write_metric(key, value)\n\n def reset(self):\n self._runtime_metrics = RuntimeMetrics()\n\n def __repr__(self):\n return '{}(runtime_metrics={})'.format(\n self.__class__.__name__,\n self._runtime_metrics,\n )\n", "path": "ddtrace/internal/runtime/runtime_metrics.py"}]}
1,663
254
gh_patches_debug_14602
rasdani/github-patches
git_diff
akvo__akvo-rsr-3173
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error in disaggregation view The PGView for disaggregation is incorrect. It includes data from all updates rather than just approved updates. </issue> <code> [start of akvo/rsr/models/result/indicator_period_aggregation.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo Reporting is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 from django.db import models 8 9 from django_pgviews import view as pg 10 11 12 ACTUAL_VALUE_SQL = """ 13 SELECT 14 -- row_number() OVER... creates an artificial "pk" column, without which Django will protest 15 row_number() OVER (ORDER BY period.id) AS id, 16 period.id AS period_id, 17 indicator.measure as measure, 18 sum((update.value) :: DECIMAL(20,2)) AS value, 19 sum((update.numerator) :: DECIMAL(20,2)) AS numerator, 20 sum((update.denominator) :: DECIMAL(20,2)) AS denominator 21 FROM 22 rsr_indicatorperiod period, 23 rsr_indicator indicator, 24 rsr_indicatorperioddata update 25 WHERE 26 ( 27 (((indicator.id = period.indicator_id) AND 28 (period.id = update.period_id)) AND 29 ((update.status) :: TEXT = 'A' :: TEXT)) AND 30 ((update.value) :: TEXT ~ '^\d+\.?\d{0,2}$' :: TEXT OR update.value IS NULL) 31 ) 32 GROUP BY period.id, indicator.measure; 33 """ 34 35 36 class PeriodActualValue(pg.View): 37 # on_delete=models.DO_NOTHING is needed to prevent problems with PG trying to delete views' data 38 period = models.ForeignKey('IndicatorPeriod', on_delete=models.DO_NOTHING) 39 measure = models.CharField(max_length=1) 40 value = models.IntegerField() 41 numerator = models.IntegerField() 42 denominator = models.IntegerField() 43 44 sql = ACTUAL_VALUE_SQL 45 46 class Meta: 47 app_label = 'rsr' 48 db_table = 'rsr_indicator_period_actual_value' 49 managed = False 50 51 52 DISAGG_SQL = """ 53 WITH aggregated_disaggs AS ( 54 SELECT 55 dimension_id, 56 sum(("value") :: DECIMAL(20,2)) AS value, 57 sum((numerator) :: DECIMAL(20,2)) AS numerator, 58 sum((denominator) :: DECIMAL(20,2)) AS denominator 59 FROM 60 rsr_disaggregation 61 GROUP BY 62 dimension_id 63 ), 64 period_disaggs AS ( 65 SELECT DISTINCT 66 indicator.id AS indicator_id, 67 period.id AS period_id, 68 dimension.name AS dimension_name, 69 dimension.value AS dimension_value, 70 agg.value, 71 agg.numerator, 72 agg.denominator 73 FROM 74 rsr_indicator indicator, 75 rsr_indicatorperiod period, 76 rsr_indicatorperioddata update, 77 aggregated_disaggs agg, 78 rsr_indicatordimension dimension 79 WHERE 80 indicator.id = period.indicator_id AND 81 period.id = update.period_id AND 82 indicator.id = dimension.indicator_id AND 83 dimension.id = agg.dimension_id 84 ) 85 SELECT 86 row_number() OVER (ORDER BY indicator_id) AS id, 87 * 88 FROM period_disaggs 89 """ 90 91 92 class PeriodDisaggregation(pg.View): 93 indicator = models.ForeignKey('Indicator', on_delete=models.DO_NOTHING) 94 period = models.ForeignKey('IndicatorPeriod', on_delete=models.DO_NOTHING) 95 dimension_name = models.CharField(max_length=100) 96 dimension_value = models.CharField(max_length=100) 97 value = models.IntegerField() 98 numerator = models.IntegerField() 99 denominator = models.IntegerField() 100 101 sql = DISAGG_SQL 102 103 class Meta: 104 app_label = 'rsr' 105 db_table = 'rsr_indicator_period_disaggregation' 106 managed = False 107 [end of akvo/rsr/models/result/indicator_period_aggregation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rsr/models/result/indicator_period_aggregation.py b/akvo/rsr/models/result/indicator_period_aggregation.py --- a/akvo/rsr/models/result/indicator_period_aggregation.py +++ b/akvo/rsr/models/result/indicator_period_aggregation.py @@ -52,12 +52,16 @@ DISAGG_SQL = """ WITH aggregated_disaggs AS ( SELECT - dimension_id, - sum(("value") :: DECIMAL(20,2)) AS value, - sum((numerator) :: DECIMAL(20,2)) AS numerator, - sum((denominator) :: DECIMAL(20,2)) AS denominator + disagg.dimension_id AS dimension_id, + sum((disagg.value) :: DECIMAL(20,2)) AS value, + sum((disagg.numerator) :: DECIMAL(20,2)) AS numerator, + sum((disagg.denominator) :: DECIMAL(20,2)) AS denominator FROM - rsr_disaggregation + rsr_disaggregation disagg, + rsr_indicatorperioddata "update" + WHERE + update.status = 'A' AND + disagg.update_id = update.id GROUP BY dimension_id ),
{"golden_diff": "diff --git a/akvo/rsr/models/result/indicator_period_aggregation.py b/akvo/rsr/models/result/indicator_period_aggregation.py\n--- a/akvo/rsr/models/result/indicator_period_aggregation.py\n+++ b/akvo/rsr/models/result/indicator_period_aggregation.py\n@@ -52,12 +52,16 @@\n DISAGG_SQL = \"\"\"\n WITH aggregated_disaggs AS (\n SELECT\n- dimension_id,\n- sum((\"value\") :: DECIMAL(20,2)) AS value,\n- sum((numerator) :: DECIMAL(20,2)) AS numerator,\n- sum((denominator) :: DECIMAL(20,2)) AS denominator\n+ disagg.dimension_id AS dimension_id,\n+ sum((disagg.value) :: DECIMAL(20,2)) AS value,\n+ sum((disagg.numerator) :: DECIMAL(20,2)) AS numerator,\n+ sum((disagg.denominator) :: DECIMAL(20,2)) AS denominator\n FROM\n- rsr_disaggregation\n+ rsr_disaggregation disagg,\n+ rsr_indicatorperioddata \"update\"\n+ WHERE\n+ update.status = 'A' AND\n+ disagg.update_id = update.id\n GROUP BY\n dimension_id\n ),\n", "issue": "Error in disaggregation view\nThe PGView for disaggregation is incorrect. It includes data from all updates rather than just approved updates.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db import models\n\nfrom django_pgviews import view as pg\n\n\nACTUAL_VALUE_SQL = \"\"\"\n SELECT\n -- row_number() OVER... creates an artificial \"pk\" column, without which Django will protest\n row_number() OVER (ORDER BY period.id) AS id,\n period.id AS period_id,\n indicator.measure as measure,\n sum((update.value) :: DECIMAL(20,2)) AS value,\n sum((update.numerator) :: DECIMAL(20,2)) AS numerator,\n sum((update.denominator) :: DECIMAL(20,2)) AS denominator\n FROM\n rsr_indicatorperiod period,\n rsr_indicator indicator,\n rsr_indicatorperioddata update\n WHERE\n (\n (((indicator.id = period.indicator_id) AND\n (period.id = update.period_id)) AND\n ((update.status) :: TEXT = 'A' :: TEXT)) AND\n ((update.value) :: TEXT ~ '^\\d+\\.?\\d{0,2}$' :: TEXT OR update.value IS NULL)\n )\n GROUP BY period.id, indicator.measure;\n\"\"\"\n\n\nclass PeriodActualValue(pg.View):\n # on_delete=models.DO_NOTHING is needed to prevent problems with PG trying to delete views' data\n period = models.ForeignKey('IndicatorPeriod', on_delete=models.DO_NOTHING)\n measure = models.CharField(max_length=1)\n value = models.IntegerField()\n numerator = models.IntegerField()\n denominator = models.IntegerField()\n\n sql = ACTUAL_VALUE_SQL\n\n class Meta:\n app_label = 'rsr'\n db_table = 'rsr_indicator_period_actual_value'\n managed = False\n\n\nDISAGG_SQL = \"\"\"\n WITH aggregated_disaggs AS (\n SELECT\n dimension_id,\n sum((\"value\") :: DECIMAL(20,2)) AS value,\n sum((numerator) :: DECIMAL(20,2)) AS numerator,\n sum((denominator) :: DECIMAL(20,2)) AS denominator\n FROM\n rsr_disaggregation\n GROUP BY\n dimension_id\n ),\n period_disaggs AS (\n SELECT DISTINCT\n indicator.id AS indicator_id,\n period.id AS period_id,\n dimension.name AS dimension_name,\n dimension.value AS dimension_value,\n agg.value,\n agg.numerator,\n agg.denominator\n FROM\n rsr_indicator indicator,\n rsr_indicatorperiod period,\n rsr_indicatorperioddata update,\n aggregated_disaggs agg,\n rsr_indicatordimension dimension\n WHERE\n indicator.id = period.indicator_id AND\n period.id = update.period_id AND\n indicator.id = dimension.indicator_id AND\n dimension.id = agg.dimension_id\n )\n SELECT\n row_number() OVER (ORDER BY indicator_id) AS id,\n *\n FROM period_disaggs\n\"\"\"\n\n\nclass PeriodDisaggregation(pg.View):\n indicator = models.ForeignKey('Indicator', on_delete=models.DO_NOTHING)\n period = models.ForeignKey('IndicatorPeriod', on_delete=models.DO_NOTHING)\n dimension_name = models.CharField(max_length=100)\n dimension_value = models.CharField(max_length=100)\n value = models.IntegerField()\n numerator = models.IntegerField()\n denominator = models.IntegerField()\n\n sql = DISAGG_SQL\n\n class Meta:\n app_label = 'rsr'\n db_table = 'rsr_indicator_period_disaggregation'\n managed = False\n", "path": "akvo/rsr/models/result/indicator_period_aggregation.py"}]}
1,596
294
gh_patches_debug_15246
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1194
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update baggage header name As per the spec, baggage propagation must use the header as specified in the w3c baggage specification https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/baggage/api.md#baggage-propagation </issue> <code> [start of opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 import typing 16 import urllib.parse 17 18 from opentelemetry import baggage 19 from opentelemetry.context import get_current 20 from opentelemetry.context.context import Context 21 from opentelemetry.trace.propagation import textmap 22 23 24 class BaggagePropagator(textmap.TextMapPropagator): 25 MAX_HEADER_LENGTH = 8192 26 MAX_PAIR_LENGTH = 4096 27 MAX_PAIRS = 180 28 _BAGGAGE_HEADER_NAME = "otcorrelations" 29 30 def extract( 31 self, 32 get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT], 33 carrier: textmap.TextMapPropagatorT, 34 context: typing.Optional[Context] = None, 35 ) -> Context: 36 """Extract Baggage from the carrier. 37 38 See 39 `opentelemetry.trace.propagation.textmap.TextMapPropagator.extract` 40 """ 41 42 if context is None: 43 context = get_current() 44 45 header = _extract_first_element( 46 get_from_carrier(carrier, self._BAGGAGE_HEADER_NAME) 47 ) 48 49 if not header or len(header) > self.MAX_HEADER_LENGTH: 50 return context 51 52 baggage_entries = header.split(",") 53 total_baggage_entries = self.MAX_PAIRS 54 for entry in baggage_entries: 55 if total_baggage_entries <= 0: 56 return context 57 total_baggage_entries -= 1 58 if len(entry) > self.MAX_PAIR_LENGTH: 59 continue 60 try: 61 name, value = entry.split("=", 1) 62 except Exception: # pylint: disable=broad-except 63 continue 64 context = baggage.set_baggage( 65 urllib.parse.unquote(name).strip(), 66 urllib.parse.unquote(value).strip(), 67 context=context, 68 ) 69 70 return context 71 72 def inject( 73 self, 74 set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT], 75 carrier: textmap.TextMapPropagatorT, 76 context: typing.Optional[Context] = None, 77 ) -> None: 78 """Injects Baggage into the carrier. 79 80 See 81 `opentelemetry.trace.propagation.textmap.TextMapPropagator.inject` 82 """ 83 baggage_entries = baggage.get_all(context=context) 84 if not baggage_entries: 85 return 86 87 baggage_string = _format_baggage(baggage_entries) 88 set_in_carrier( 89 carrier, self._BAGGAGE_HEADER_NAME, baggage_string, 90 ) 91 92 93 def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str: 94 return ",".join( 95 key + "=" + urllib.parse.quote_plus(str(value)) 96 for key, value in baggage_entries.items() 97 ) 98 99 100 def _extract_first_element( 101 items: typing.Iterable[textmap.TextMapPropagatorT], 102 ) -> typing.Optional[textmap.TextMapPropagatorT]: 103 if items is None: 104 return None 105 return next(iter(items), None) 106 [end of opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py --- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py +++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py @@ -25,7 +25,7 @@ MAX_HEADER_LENGTH = 8192 MAX_PAIR_LENGTH = 4096 MAX_PAIRS = 180 - _BAGGAGE_HEADER_NAME = "otcorrelations" + _BAGGAGE_HEADER_NAME = "baggage" def extract( self, @@ -85,9 +85,7 @@ return baggage_string = _format_baggage(baggage_entries) - set_in_carrier( - carrier, self._BAGGAGE_HEADER_NAME, baggage_string, - ) + set_in_carrier(carrier, self._BAGGAGE_HEADER_NAME, baggage_string) def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n@@ -25,7 +25,7 @@\n MAX_HEADER_LENGTH = 8192\n MAX_PAIR_LENGTH = 4096\n MAX_PAIRS = 180\n- _BAGGAGE_HEADER_NAME = \"otcorrelations\"\n+ _BAGGAGE_HEADER_NAME = \"baggage\"\n \n def extract(\n self,\n@@ -85,9 +85,7 @@\n return\n \n baggage_string = _format_baggage(baggage_entries)\n- set_in_carrier(\n- carrier, self._BAGGAGE_HEADER_NAME, baggage_string,\n- )\n+ set_in_carrier(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)\n \n \n def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:\n", "issue": "Update baggage header name\nAs per the spec, baggage propagation must use the header as specified in the w3c baggage specification https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/baggage/api.md#baggage-propagation\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport typing\nimport urllib.parse\n\nfrom opentelemetry import baggage\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.trace.propagation import textmap\n\n\nclass BaggagePropagator(textmap.TextMapPropagator):\n MAX_HEADER_LENGTH = 8192\n MAX_PAIR_LENGTH = 4096\n MAX_PAIRS = 180\n _BAGGAGE_HEADER_NAME = \"otcorrelations\"\n\n def extract(\n self,\n get_from_carrier: textmap.Getter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n ) -> Context:\n \"\"\"Extract Baggage from the carrier.\n\n See\n `opentelemetry.trace.propagation.textmap.TextMapPropagator.extract`\n \"\"\"\n\n if context is None:\n context = get_current()\n\n header = _extract_first_element(\n get_from_carrier(carrier, self._BAGGAGE_HEADER_NAME)\n )\n\n if not header or len(header) > self.MAX_HEADER_LENGTH:\n return context\n\n baggage_entries = header.split(\",\")\n total_baggage_entries = self.MAX_PAIRS\n for entry in baggage_entries:\n if total_baggage_entries <= 0:\n return context\n total_baggage_entries -= 1\n if len(entry) > self.MAX_PAIR_LENGTH:\n continue\n try:\n name, value = entry.split(\"=\", 1)\n except Exception: # pylint: disable=broad-except\n continue\n context = baggage.set_baggage(\n urllib.parse.unquote(name).strip(),\n urllib.parse.unquote(value).strip(),\n context=context,\n )\n\n return context\n\n def inject(\n self,\n set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n ) -> None:\n \"\"\"Injects Baggage into the carrier.\n\n See\n `opentelemetry.trace.propagation.textmap.TextMapPropagator.inject`\n \"\"\"\n baggage_entries = baggage.get_all(context=context)\n if not baggage_entries:\n return\n\n baggage_string = _format_baggage(baggage_entries)\n set_in_carrier(\n carrier, self._BAGGAGE_HEADER_NAME, baggage_string,\n )\n\n\ndef _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:\n return \",\".join(\n key + \"=\" + urllib.parse.quote_plus(str(value))\n for key, value in baggage_entries.items()\n )\n\n\ndef _extract_first_element(\n items: typing.Iterable[textmap.TextMapPropagatorT],\n) -> typing.Optional[textmap.TextMapPropagatorT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py"}]}
1,585
261
gh_patches_debug_456
rasdani/github-patches
git_diff
dbt-labs__dbt-core-2537
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Python 3.6.2 doesn't work with dbt 0.17.0 ### Describe the bug Running dbt on python <= 3.6.2 results in an error that `name 'TimestampSnapshotConfig' is not defined`. 3.6.3 is unaffected. ### Steps To Reproduce Install python 3.6.2 Install dbt Try to use dbt ### Expected behavior dbt should run, not crash, etc ### System information **Which database are you using dbt with?** Any **The output of `dbt --version`:** ``` 0.17.0 ``` **The operating system you're using:** macos, linux **The output of `python --version`:** `Python 3.6.2` </issue> <code> [start of core/setup.py] 1 #!/usr/bin/env python 2 import os 3 import sys 4 5 from setuptools import setup 6 try: 7 from setuptools import find_namespace_packages 8 except ImportError: 9 # the user has a downlevel version of setuptools. 10 print('Error: dbt requires setuptools v40.1.0 or higher.') 11 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' 12 'and try again') 13 sys.exit(1) 14 15 16 def read(fname): 17 return open(os.path.join(os.path.dirname(__file__), fname)).read() 18 19 20 package_name = "dbt-core" 21 package_version = "0.17.1a1" 22 description = """dbt (data build tool) is a command line tool that helps \ 23 analysts and engineers transform data in their warehouse more effectively""" 24 25 26 setup( 27 name=package_name, 28 version=package_version, 29 description=description, 30 long_description=description, 31 author="Fishtown Analytics", 32 author_email="[email protected]", 33 url="https://github.com/fishtown-analytics/dbt", 34 packages=find_namespace_packages(include=['dbt', 'dbt.*']), 35 package_data={ 36 'dbt': [ 37 'include/index.html', 38 'include/global_project/dbt_project.yml', 39 'include/global_project/docs/*.md', 40 'include/global_project/macros/*.sql', 41 'include/global_project/macros/**/*.sql', 42 'include/global_project/macros/**/**/*.sql', 43 'py.typed', 44 ] 45 }, 46 test_suite='test', 47 entry_points={ 48 'console_scripts': [ 49 'dbt = dbt.main:main', 50 ], 51 }, 52 scripts=[ 53 'scripts/dbt', 54 ], 55 install_requires=[ 56 'Jinja2==2.11.2', 57 'PyYAML>=3.11', 58 'sqlparse>=0.2.3,<0.4', 59 'networkx>=2.3,<3', 60 'minimal-snowplow-tracker==0.0.2', 61 'colorama>=0.3.9,<0.5', 62 'agate>=1.6,<2', 63 'isodate>=0.6,<0.7', 64 'json-rpc>=1.12,<2', 65 'werkzeug>=0.15,<0.17', 66 'dataclasses==0.6;python_version<"3.7"', 67 'hologram==0.0.7', 68 'logbook>=1.5,<1.6', 69 'typing-extensions>=3.7.4,<3.8', 70 # the following are all to match snowflake-connector-python 71 'requests>=2.18.0,<2.23.0', 72 'idna<2.9', 73 'cffi>=1.9,<1.14', 74 ], 75 zip_safe=False, 76 classifiers=[ 77 'Development Status :: 5 - Production/Stable', 78 79 'License :: OSI Approved :: Apache Software License', 80 81 'Operating System :: Microsoft :: Windows', 82 'Operating System :: MacOS :: MacOS X', 83 'Operating System :: POSIX :: Linux', 84 85 'Programming Language :: Python :: 3.6', 86 'Programming Language :: Python :: 3.7', 87 'Programming Language :: Python :: 3.8', 88 ], 89 python_requires=">=3.6.2", 90 ) 91 [end of core/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/setup.py b/core/setup.py --- a/core/setup.py +++ b/core/setup.py @@ -86,5 +86,5 @@ 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], - python_requires=">=3.6.2", + python_requires=">=3.6.3", )
{"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -86,5 +86,5 @@\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n- python_requires=\">=3.6.2\",\n+ python_requires=\">=3.6.3\",\n )\n", "issue": "Python 3.6.2 doesn't work with dbt 0.17.0\n### Describe the bug\r\nRunning dbt on python <= 3.6.2 results in an error that `name 'TimestampSnapshotConfig' is not defined`. 3.6.3 is unaffected.\r\n\r\n### Steps To Reproduce\r\nInstall python 3.6.2\r\nInstall dbt\r\nTry to use dbt\r\n\r\n### Expected behavior\r\ndbt should run, not crash, etc\r\n\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\nAny\r\n\r\n**The output of `dbt --version`:**\r\n```\r\n0.17.0\r\n```\r\n\r\n**The operating system you're using:**\r\nmacos, linux\r\n\r\n**The output of `python --version`:**\r\n`Python 3.6.2`\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.17.1a1\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.5',\n 'agate>=1.6,<2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<0.17',\n 'dataclasses==0.6;python_version<\"3.7\"',\n 'hologram==0.0.7',\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests>=2.18.0,<2.23.0',\n 'idna<2.9',\n 'cffi>=1.9,<1.14',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n python_requires=\">=3.6.2\",\n)\n", "path": "core/setup.py"}]}
1,604
88
gh_patches_debug_31382
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2814
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider pricerite is broken During the global build at 2021-05-26-14-42-23, spider **pricerite** failed with **0 features** and **2 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/pricerite.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/pricerite.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/pricerite.geojson)) </issue> <code> [start of locations/spiders/pricerite.py] 1 # -*- coding: utf-8 -*- 2 import json 3 import re 4 5 import scrapy 6 7 from locations.items import GeojsonPointItem 8 from locations.hours import OpeningHours 9 10 11 class PriceRiteSpider(scrapy.Spider): 12 name = "pricerite" 13 item_attributes = { 'brand': "PriceRite" } 14 allowed_domains = ["priceritesupermarkets.com"] 15 16 start_urls = ( 17 "https://www.priceritesupermarkets.com/locations/", 18 ) 19 20 def parse(self, response): 21 script = response.xpath('//script[contains(text(), "var stores")]').extract_first() 22 stores = json.loads(re.search(r'var stores = (.*?);', script).groups()[0]) 23 24 for store in stores: 25 properties = { 26 "ref": store["storeNumber"], 27 "name": store["name"], 28 "lat": store["latitude"], 29 "lon": store["longitude"], 30 "addr_full": store["address1"], 31 "city": store["city"], 32 "state": store["state"], 33 "postcode": store["zipCode"], 34 } 35 36 yield GeojsonPointItem(**properties) 37 38 [end of locations/spiders/pricerite.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/pricerite.py b/locations/spiders/pricerite.py --- a/locations/spiders/pricerite.py +++ b/locations/spiders/pricerite.py @@ -5,32 +5,36 @@ import scrapy from locations.items import GeojsonPointItem -from locations.hours import OpeningHours class PriceRiteSpider(scrapy.Spider): name = "pricerite" item_attributes = { 'brand': "PriceRite" } - allowed_domains = ["priceritesupermarkets.com"] + allowed_domains = ["priceritemarketplace.com"] start_urls = ( - "https://www.priceritesupermarkets.com/locations/", + "https://www.priceritemarketplace.com/", ) def parse(self, response): - script = response.xpath('//script[contains(text(), "var stores")]').extract_first() - stores = json.loads(re.search(r'var stores = (.*?);', script).groups()[0]) + script = response.xpath('//script[contains(text(), "__PRELOADED_STATE__")]/text()').extract_first() + script = script[script.index('{'):] + stores = json.loads(script)['stores']['availablePlanningStores']['items'] for store in stores: + ref = store["retailerStoreId"] properties = { - "ref": store["storeNumber"], + "ref": ref, + "website": f"https://www.priceritemarketplace.com/sm/planning/rsid/{ref}", "name": store["name"], - "lat": store["latitude"], - "lon": store["longitude"], - "addr_full": store["address1"], + "lat": store["location"]["latitude"], + "lon": store["location"]["longitude"], + "addr_full": store["addressLine1"], "city": store["city"], - "state": store["state"], - "postcode": store["zipCode"], + "state": store["countyProvinceState"], + "postcode": store["postCode"], + "phone": store["phone"], + "opening_hours": store["openingHours"], } yield GeojsonPointItem(**properties)
{"golden_diff": "diff --git a/locations/spiders/pricerite.py b/locations/spiders/pricerite.py\n--- a/locations/spiders/pricerite.py\n+++ b/locations/spiders/pricerite.py\n@@ -5,32 +5,36 @@\n import scrapy\n \n from locations.items import GeojsonPointItem\n-from locations.hours import OpeningHours\n \n \n class PriceRiteSpider(scrapy.Spider):\n name = \"pricerite\"\n item_attributes = { 'brand': \"PriceRite\" }\n- allowed_domains = [\"priceritesupermarkets.com\"]\n+ allowed_domains = [\"priceritemarketplace.com\"]\n \n start_urls = (\n- \"https://www.priceritesupermarkets.com/locations/\",\n+ \"https://www.priceritemarketplace.com/\",\n )\n \n def parse(self, response):\n- script = response.xpath('//script[contains(text(), \"var stores\")]').extract_first()\n- stores = json.loads(re.search(r'var stores = (.*?);', script).groups()[0])\n+ script = response.xpath('//script[contains(text(), \"__PRELOADED_STATE__\")]/text()').extract_first()\n+ script = script[script.index('{'):]\n+ stores = json.loads(script)['stores']['availablePlanningStores']['items']\n \n for store in stores:\n+ ref = store[\"retailerStoreId\"]\n properties = {\n- \"ref\": store[\"storeNumber\"],\n+ \"ref\": ref,\n+ \"website\": f\"https://www.priceritemarketplace.com/sm/planning/rsid/{ref}\",\n \"name\": store[\"name\"],\n- \"lat\": store[\"latitude\"],\n- \"lon\": store[\"longitude\"],\n- \"addr_full\": store[\"address1\"],\n+ \"lat\": store[\"location\"][\"latitude\"],\n+ \"lon\": store[\"location\"][\"longitude\"],\n+ \"addr_full\": store[\"addressLine1\"],\n \"city\": store[\"city\"],\n- \"state\": store[\"state\"],\n- \"postcode\": store[\"zipCode\"],\n+ \"state\": store[\"countyProvinceState\"],\n+ \"postcode\": store[\"postCode\"],\n+ \"phone\": store[\"phone\"],\n+ \"opening_hours\": store[\"openingHours\"],\n }\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider pricerite is broken\nDuring the global build at 2021-05-26-14-42-23, spider **pricerite** failed with **0 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/pricerite.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/pricerite.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/pricerite.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass PriceRiteSpider(scrapy.Spider):\n name = \"pricerite\"\n item_attributes = { 'brand': \"PriceRite\" }\n allowed_domains = [\"priceritesupermarkets.com\"]\n\n start_urls = (\n \"https://www.priceritesupermarkets.com/locations/\",\n )\n\n def parse(self, response):\n script = response.xpath('//script[contains(text(), \"var stores\")]').extract_first()\n stores = json.loads(re.search(r'var stores = (.*?);', script).groups()[0])\n\n for store in stores:\n properties = {\n \"ref\": store[\"storeNumber\"],\n \"name\": store[\"name\"],\n \"lat\": store[\"latitude\"],\n \"lon\": store[\"longitude\"],\n \"addr_full\": store[\"address1\"],\n \"city\": store[\"city\"],\n \"state\": store[\"state\"],\n \"postcode\": store[\"zipCode\"],\n }\n\n yield GeojsonPointItem(**properties)\n\n", "path": "locations/spiders/pricerite.py"}]}
1,037
490
gh_patches_debug_64324
rasdani/github-patches
git_diff
pex-tool__pex-630
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 1.6.0 On the docket: + (longterm fix) unhandled AttributeError during pex bootstrapping with PEX_PATH #598 + Vendor setuptools / wheel. #607 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '1.5.3' 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '1.5.3' +__version__ = '1.6.0'
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.5.3'\n+__version__ = '1.6.0'\n", "issue": "Release 1.6.0\nOn the docket:\r\n+ (longterm fix) unhandled AttributeError during pex bootstrapping with PEX_PATH #598\r\n+ Vendor setuptools / wheel. #607\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.5.3'\n", "path": "pex/version.py"}]}
630
95
gh_patches_debug_28222
rasdani/github-patches
git_diff
scikit-hep__awkward-1650
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ak.fields (v2) passes a RecordArray's internal fields by reference Okay, so I hadn't noticed that Awkward v2's fields are passed by reference, which exposes them to the danger that someone might modify them downstream: v1: ```python >>> array = awkward.Array([{"x": 1, "y": 1.1}]) >>> fields = awkward.fields(array) >>> array <Array [{x: 1, y: 1.1}] type='1 * {"x": int64, "y": float64}'> >>> fields ['x', 'y'] >>> fields[0] = "XXX" >>> fields ['XXX', 'y'] >>> array <Array [{x: 1, y: 1.1}] type='1 * {"x": int64, "y": float64}'> ``` v2: ```python >>> array = awkward._v2.Array([{"x": 1, "y": 1.1}]) >>> fields = awkward._v2.fields(array) >>> array <Array [{x: 1, y: 1.1}] type='1 * {x: int64, y: float64}'> >>> fields ['x', 'y'] >>> fields[0] = "XXX" >>> fields ['XXX', 'y'] >>> array <Array [{XXX: 1, y: 1.1}] type='1 * {XXX: int64, y: float64}'> ``` It could be fixed [here, in Awkward](https://github.com/scikit-hep/awkward/blob/352b0dead74846ad2a56d385be4694ec87072a08/src/awkward/_v2/contents/recordarray.py#L162), or maybe [here](https://github.com/scikit-hep/awkward/blob/352b0dead74846ad2a56d385be4694ec87072a08/src/awkward/_v2/operations/ak_fields.py#L30) (to only suffer the list-copy when handing it off to a user, so that internal uses can still be by reference). I'll use this comment to open an issue in Awkward. Once `awkward.fields` is guarded, your `.copy()` can be removed, but it can also not be removed with no consequences but a little performance. _Originally posted by @jpivarski in https://github.com/scikit-hep/vector/pull/226#discussion_r958660705_ </issue> <code> [start of src/awkward/_v2/operations/ak_fields.py] 1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE 2 3 import awkward as ak 4 5 np = ak.nplike.NumpyMetadata.instance() 6 7 8 def fields(array): 9 """ 10 Extracts record fields or tuple slot numbers from `array` (many types 11 supported, including all Awkward Arrays and Records). 12 13 If the array contains nested records, only the outermost record is 14 queried. If it contains tuples instead of records, this function outputs 15 string representations of integers, such as `"0"`, `"1"`, `"2"`, etc. 16 The records or tuples may be within multiple layers of nested lists. 17 18 If the array contains neither tuples nor records, this returns an empty 19 list. 20 """ 21 with ak._v2._util.OperationErrorContext( 22 "ak._v2.fields", 23 dict(array=array), 24 ): 25 return _impl(array) 26 27 28 def _impl(array): 29 layout = ak._v2.operations.to_layout(array, allow_record=True, allow_other=False) 30 return layout.fields 31 [end of src/awkward/_v2/operations/ak_fields.py] [start of src/awkward/_v2/operations/ak_parameters.py] 1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE 2 3 import awkward as ak 4 5 np = ak.nplike.NumpyMetadata.instance() 6 7 8 def parameters(array): 9 """ 10 Extracts parameters from the outermost array node of `array` (many types 11 supported, including all Awkward Arrays and Records). 12 13 Parameters are a dict from str to JSON-like objects, usually strings. 14 Every #ak.layout.Content node has a different set of parameters. Some 15 key names are special, such as `"__record__"` and `"__array__"` that name 16 particular records and arrays as capable of supporting special behaviors. 17 18 See #ak.Array and #ak.behavior for a more complete description of 19 behaviors. 20 """ 21 with ak._v2._util.OperationErrorContext( 22 "ak._v2.parameters", 23 dict(array=array), 24 ): 25 return _impl(array) 26 27 28 def _impl(array): 29 if isinstance(array, (ak._v2.highlevel.Array, ak._v2.highlevel.Record)): 30 return array.layout.parameters 31 32 elif isinstance( 33 array, 34 (ak._v2.contents.Content, ak._v2.record.Record), 35 ): 36 return array.parameters 37 38 elif isinstance(array, ak._v2.highlevel.ArrayBuilder): 39 return array.snapshot().layout.parameters 40 41 elif isinstance(array, ak.layout.ArrayBuilder): 42 return array.snapshot().parameters 43 44 else: 45 return {} 46 [end of src/awkward/_v2/operations/ak_parameters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/awkward/_v2/operations/ak_fields.py b/src/awkward/_v2/operations/ak_fields.py --- a/src/awkward/_v2/operations/ak_fields.py +++ b/src/awkward/_v2/operations/ak_fields.py @@ -27,4 +27,4 @@ def _impl(array): layout = ak._v2.operations.to_layout(array, allow_record=True, allow_other=False) - return layout.fields + return layout.fields.copy() diff --git a/src/awkward/_v2/operations/ak_parameters.py b/src/awkward/_v2/operations/ak_parameters.py --- a/src/awkward/_v2/operations/ak_parameters.py +++ b/src/awkward/_v2/operations/ak_parameters.py @@ -1,5 +1,8 @@ # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE +import copy +import numbers + import awkward as ak np = ak.nplike.NumpyMetadata.instance() @@ -27,13 +30,13 @@ def _impl(array): if isinstance(array, (ak._v2.highlevel.Array, ak._v2.highlevel.Record)): - return array.layout.parameters + return _copy(array.layout.parameters) elif isinstance( array, (ak._v2.contents.Content, ak._v2.record.Record), ): - return array.parameters + return _copy(array.parameters) elif isinstance(array, ak._v2.highlevel.ArrayBuilder): return array.snapshot().layout.parameters @@ -43,3 +46,10 @@ else: return {} + + +def _copy(what): + if all(isinstance(x, (str, numbers.Real)) for x in what.values()): + return what.copy() + else: + return copy.deepcopy(what)
{"golden_diff": "diff --git a/src/awkward/_v2/operations/ak_fields.py b/src/awkward/_v2/operations/ak_fields.py\n--- a/src/awkward/_v2/operations/ak_fields.py\n+++ b/src/awkward/_v2/operations/ak_fields.py\n@@ -27,4 +27,4 @@\n \n def _impl(array):\n layout = ak._v2.operations.to_layout(array, allow_record=True, allow_other=False)\n- return layout.fields\n+ return layout.fields.copy()\ndiff --git a/src/awkward/_v2/operations/ak_parameters.py b/src/awkward/_v2/operations/ak_parameters.py\n--- a/src/awkward/_v2/operations/ak_parameters.py\n+++ b/src/awkward/_v2/operations/ak_parameters.py\n@@ -1,5 +1,8 @@\n # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n \n+import copy\n+import numbers\n+\n import awkward as ak\n \n np = ak.nplike.NumpyMetadata.instance()\n@@ -27,13 +30,13 @@\n \n def _impl(array):\n if isinstance(array, (ak._v2.highlevel.Array, ak._v2.highlevel.Record)):\n- return array.layout.parameters\n+ return _copy(array.layout.parameters)\n \n elif isinstance(\n array,\n (ak._v2.contents.Content, ak._v2.record.Record),\n ):\n- return array.parameters\n+ return _copy(array.parameters)\n \n elif isinstance(array, ak._v2.highlevel.ArrayBuilder):\n return array.snapshot().layout.parameters\n@@ -43,3 +46,10 @@\n \n else:\n return {}\n+\n+\n+def _copy(what):\n+ if all(isinstance(x, (str, numbers.Real)) for x in what.values()):\n+ return what.copy()\n+ else:\n+ return copy.deepcopy(what)\n", "issue": "ak.fields (v2) passes a RecordArray's internal fields by reference\nOkay, so I hadn't noticed that Awkward v2's fields are passed by reference, which exposes them to the danger that someone might modify them downstream:\r\n\r\nv1:\r\n\r\n```python\r\n>>> array = awkward.Array([{\"x\": 1, \"y\": 1.1}])\r\n>>> fields = awkward.fields(array)\r\n>>> array\r\n<Array [{x: 1, y: 1.1}] type='1 * {\"x\": int64, \"y\": float64}'>\r\n>>> fields\r\n['x', 'y']\r\n>>> fields[0] = \"XXX\"\r\n>>> fields\r\n['XXX', 'y']\r\n>>> array\r\n<Array [{x: 1, y: 1.1}] type='1 * {\"x\": int64, \"y\": float64}'>\r\n```\r\n\r\nv2:\r\n\r\n```python\r\n>>> array = awkward._v2.Array([{\"x\": 1, \"y\": 1.1}])\r\n>>> fields = awkward._v2.fields(array)\r\n>>> array\r\n<Array [{x: 1, y: 1.1}] type='1 * {x: int64, y: float64}'>\r\n>>> fields\r\n['x', 'y']\r\n>>> fields[0] = \"XXX\"\r\n>>> fields\r\n['XXX', 'y']\r\n>>> array\r\n<Array [{XXX: 1, y: 1.1}] type='1 * {XXX: int64, y: float64}'>\r\n```\r\n\r\nIt could be fixed [here, in Awkward](https://github.com/scikit-hep/awkward/blob/352b0dead74846ad2a56d385be4694ec87072a08/src/awkward/_v2/contents/recordarray.py#L162), or maybe [here](https://github.com/scikit-hep/awkward/blob/352b0dead74846ad2a56d385be4694ec87072a08/src/awkward/_v2/operations/ak_fields.py#L30) (to only suffer the list-copy when handing it off to a user, so that internal uses can still be by reference).\r\n\r\nI'll use this comment to open an issue in Awkward. Once `awkward.fields` is guarded, your `.copy()` can be removed, but it can also not be removed with no consequences but a little performance.\r\n\r\n_Originally posted by @jpivarski in https://github.com/scikit-hep/vector/pull/226#discussion_r958660705_\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport awkward as ak\n\nnp = ak.nplike.NumpyMetadata.instance()\n\n\ndef fields(array):\n \"\"\"\n Extracts record fields or tuple slot numbers from `array` (many types\n supported, including all Awkward Arrays and Records).\n\n If the array contains nested records, only the outermost record is\n queried. If it contains tuples instead of records, this function outputs\n string representations of integers, such as `\"0\"`, `\"1\"`, `\"2\"`, etc.\n The records or tuples may be within multiple layers of nested lists.\n\n If the array contains neither tuples nor records, this returns an empty\n list.\n \"\"\"\n with ak._v2._util.OperationErrorContext(\n \"ak._v2.fields\",\n dict(array=array),\n ):\n return _impl(array)\n\n\ndef _impl(array):\n layout = ak._v2.operations.to_layout(array, allow_record=True, allow_other=False)\n return layout.fields\n", "path": "src/awkward/_v2/operations/ak_fields.py"}, {"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport awkward as ak\n\nnp = ak.nplike.NumpyMetadata.instance()\n\n\ndef parameters(array):\n \"\"\"\n Extracts parameters from the outermost array node of `array` (many types\n supported, including all Awkward Arrays and Records).\n\n Parameters are a dict from str to JSON-like objects, usually strings.\n Every #ak.layout.Content node has a different set of parameters. Some\n key names are special, such as `\"__record__\"` and `\"__array__\"` that name\n particular records and arrays as capable of supporting special behaviors.\n\n See #ak.Array and #ak.behavior for a more complete description of\n behaviors.\n \"\"\"\n with ak._v2._util.OperationErrorContext(\n \"ak._v2.parameters\",\n dict(array=array),\n ):\n return _impl(array)\n\n\ndef _impl(array):\n if isinstance(array, (ak._v2.highlevel.Array, ak._v2.highlevel.Record)):\n return array.layout.parameters\n\n elif isinstance(\n array,\n (ak._v2.contents.Content, ak._v2.record.Record),\n ):\n return array.parameters\n\n elif isinstance(array, ak._v2.highlevel.ArrayBuilder):\n return array.snapshot().layout.parameters\n\n elif isinstance(array, ak.layout.ArrayBuilder):\n return array.snapshot().parameters\n\n else:\n return {}\n", "path": "src/awkward/_v2/operations/ak_parameters.py"}]}
1,856
429
gh_patches_debug_7366
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-2665
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cfn-lint throws error when !ToJsonString contains int value ### CloudFormation Lint Version 0.76.2 ### What operating system are you using? Ubuntu ### Describe the bug Unexpected internal error during linting of rule E1031, involving `ToJsonString` of numerical value ``` 2023-04-06 20:20:31,922 - cfnlint - DEBUG - Completed linting of file: templates/lambda.yml E0002 Unknown exception while processing rule E1031: Traceback (most recent call last): File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py", line 320, in run_check return check(*args) File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py", line 44, in wrapper results = match_function(self, filename, cfn, *args, **kwargs) File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py", line 202, in matchall return self.match(cfn) # pylint: disable=E1102 File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/functions/ToJsonString.py", line 39, in match LanguageExtensions.validate_pseudo_parameters( File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/languageExtensions.py", line 32, in validate_pseudo_parameters ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val] File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/languageExtensions.py", line 32, in <listcomp> ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val] TypeError: argument of type 'int' is not iterable cfn-secrets-stack.yml:1:1 E0002 Unknown exception while processing rule E1031: Traceback (most recent call last): File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py", line 320, in run_check return check(*args) File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py", line 44, in wrapper results = match_function(self, filename, cfn, *args, **kwargs) File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py", line 202, in matchall return self.match(cfn) # pylint: disable=E1102 File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/functions/ToJsonString.py", line 39, in match LanguageExtensions.validate_pseudo_parameters( File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/languageExtensions.py", line 32, in validate_pseudo_parameters ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val] File "/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/languageExtensions.py", line 32, in <listcomp> ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val] TypeError: argument of type 'int' is not iterable cfn-secrets-stack.yml:1:1 ``` ### Expected behavior String quoted int should work as well as int, both are valid json ### Reproduction template This works ```yaml Resources: DeploymentProperties: Properties: Description: "testing" Name: 'Test' SecretString: !ToJsonString SomeNumber: '3' Type: AWS::SecretsManager::Secret Transform: AWS::LanguageExtensions ``` This does not, with the above error ```yaml Resources: DeploymentProperties: Properties: Description: "testing" Name: 'Test' SecretString: !ToJsonString SomeNumber: 3 Type: AWS::SecretsManager::Secret Transform: AWS::LanguageExtensions ``` </issue> <code> [start of src/cfnlint/languageExtensions.py] 1 from cfnlint.rules import RuleMatch 2 3 4 class LanguageExtensions: 5 """Class for a CloudFormation languageExtensions""" 6 7 def validate_transform_is_declared( 8 self, has_language_extensions_transform, matches, tree, intrinsic_function 9 ): 10 if not has_language_extensions_transform: 11 message = ( 12 "Missing Transform: Declare the AWS::LanguageExtensions Transform globally to enable use" 13 " of the intrinsic function " + intrinsic_function + " at {0}" 14 ) 15 matches.append(RuleMatch(tree[:], message.format("/".join(map(str, tree))))) 16 return matches 17 18 def validate_type(self, fn_object_val, matches, tree, intrinsic_function): 19 if not isinstance(fn_object_val, dict) and not isinstance(fn_object_val, list): 20 message = intrinsic_function + " needs a map or a list at {0}" 21 matches.append(RuleMatch(tree[:], message.format("/".join(map(str, tree))))) 22 elif len(fn_object_val) == 0: 23 message = "Invalid value for " + intrinsic_function + " for {0}" 24 matches.append(RuleMatch(tree[:], message.format("/".join(map(str, tree))))) 25 return matches 26 27 def validate_pseudo_parameters( 28 self, fn_object_val, matches, tree, pseudo_params, intrinsic_function 29 ): 30 if isinstance(fn_object_val, dict): 31 ref = "Ref" 32 ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val] 33 for ref in ref_list: 34 if ref in pseudo_params: 35 message = ( 36 intrinsic_function 37 + " does not support the pseudo parameter " 38 + ref 39 + " for {0}" 40 ) 41 matches.append( 42 RuleMatch(tree[:], message.format("/".join(map(str, tree)))) 43 ) 44 return matches 45 [end of src/cfnlint/languageExtensions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/languageExtensions.py b/src/cfnlint/languageExtensions.py --- a/src/cfnlint/languageExtensions.py +++ b/src/cfnlint/languageExtensions.py @@ -29,7 +29,11 @@ ): if isinstance(fn_object_val, dict): ref = "Ref" - ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val] + ref_list = [ + val[ref] + for _, val in fn_object_val.items() + if hasattr(val, "__iter__") and ref in val + ] for ref in ref_list: if ref in pseudo_params: message = (
{"golden_diff": "diff --git a/src/cfnlint/languageExtensions.py b/src/cfnlint/languageExtensions.py\n--- a/src/cfnlint/languageExtensions.py\n+++ b/src/cfnlint/languageExtensions.py\n@@ -29,7 +29,11 @@\n ):\n if isinstance(fn_object_val, dict):\n ref = \"Ref\"\n- ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val]\n+ ref_list = [\n+ val[ref]\n+ for _, val in fn_object_val.items()\n+ if hasattr(val, \"__iter__\") and ref in val\n+ ]\n for ref in ref_list:\n if ref in pseudo_params:\n message = (\n", "issue": "cfn-lint throws error when !ToJsonString contains int value\n### CloudFormation Lint Version\n\n0.76.2\n\n### What operating system are you using?\n\nUbuntu\n\n### Describe the bug\n\nUnexpected internal error during linting of rule E1031, involving `ToJsonString` of numerical value\r\n\r\n```\r\n2023-04-06 20:20:31,922 - cfnlint - DEBUG - Completed linting of file: templates/lambda.yml\r\nE0002 Unknown exception while processing rule E1031: Traceback (most recent call last):\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py\", line 320, in run_check\r\n return check(*args)\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py\", line 44, in wrapper\r\n results = match_function(self, filename, cfn, *args, **kwargs)\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py\", line 202, in matchall\r\n return self.match(cfn) # pylint: disable=E1102\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/functions/ToJsonString.py\", line 39, in match\r\n LanguageExtensions.validate_pseudo_parameters(\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/languageExtensions.py\", line 32, in validate_pseudo_parameters\r\n ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val]\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/languageExtensions.py\", line 32, in <listcomp>\r\n ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val]\r\nTypeError: argument of type 'int' is not iterable\r\n\r\ncfn-secrets-stack.yml:1:1\r\n\r\nE0002 Unknown exception while processing rule E1031: Traceback (most recent call last):\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py\", line 320, in run_check\r\n return check(*args)\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py\", line 44, in wrapper\r\n results = match_function(self, filename, cfn, *args, **kwargs)\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/__init__.py\", line 202, in matchall\r\n return self.match(cfn) # pylint: disable=E1102\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/rules/functions/ToJsonString.py\", line 39, in match\r\n LanguageExtensions.validate_pseudo_parameters(\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/languageExtensions.py\", line 32, in validate_pseudo_parameters\r\n ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val]\r\n File \"/home/kftse/anaconda3/envs/aws/lib/python3.10/site-packages/cfnlint/languageExtensions.py\", line 32, in <listcomp>\r\n ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val]\r\nTypeError: argument of type 'int' is not iterable\r\n\r\ncfn-secrets-stack.yml:1:1\r\n```\n\n### Expected behavior\n\nString quoted int should work as well as int, both are valid json\n\n### Reproduction template\n\nThis works\r\n```yaml\r\nResources:\r\n DeploymentProperties:\r\n Properties:\r\n Description: \"testing\"\r\n Name: 'Test'\r\n SecretString: !ToJsonString\r\n SomeNumber: '3'\r\n Type: AWS::SecretsManager::Secret\r\nTransform: AWS::LanguageExtensions\r\n```\r\n\r\nThis does not, with the above error\r\n```yaml\r\nResources:\r\n DeploymentProperties:\r\n Properties:\r\n Description: \"testing\"\r\n Name: 'Test'\r\n SecretString: !ToJsonString\r\n SomeNumber: 3\r\n Type: AWS::SecretsManager::Secret\r\nTransform: AWS::LanguageExtensions\r\n```\r\n\n", "before_files": [{"content": "from cfnlint.rules import RuleMatch\n\n\nclass LanguageExtensions:\n \"\"\"Class for a CloudFormation languageExtensions\"\"\"\n\n def validate_transform_is_declared(\n self, has_language_extensions_transform, matches, tree, intrinsic_function\n ):\n if not has_language_extensions_transform:\n message = (\n \"Missing Transform: Declare the AWS::LanguageExtensions Transform globally to enable use\"\n \" of the intrinsic function \" + intrinsic_function + \" at {0}\"\n )\n matches.append(RuleMatch(tree[:], message.format(\"/\".join(map(str, tree)))))\n return matches\n\n def validate_type(self, fn_object_val, matches, tree, intrinsic_function):\n if not isinstance(fn_object_val, dict) and not isinstance(fn_object_val, list):\n message = intrinsic_function + \" needs a map or a list at {0}\"\n matches.append(RuleMatch(tree[:], message.format(\"/\".join(map(str, tree)))))\n elif len(fn_object_val) == 0:\n message = \"Invalid value for \" + intrinsic_function + \" for {0}\"\n matches.append(RuleMatch(tree[:], message.format(\"/\".join(map(str, tree)))))\n return matches\n\n def validate_pseudo_parameters(\n self, fn_object_val, matches, tree, pseudo_params, intrinsic_function\n ):\n if isinstance(fn_object_val, dict):\n ref = \"Ref\"\n ref_list = [val[ref] for key, val in fn_object_val.items() if ref in val]\n for ref in ref_list:\n if ref in pseudo_params:\n message = (\n intrinsic_function\n + \" does not support the pseudo parameter \"\n + ref\n + \" for {0}\"\n )\n matches.append(\n RuleMatch(tree[:], message.format(\"/\".join(map(str, tree))))\n )\n return matches\n", "path": "src/cfnlint/languageExtensions.py"}]}
2,039
155
gh_patches_debug_3322
rasdani/github-patches
git_diff
holoviz__panel-3100
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Django autoload_handle broken #### ALL software version info Panel = 0.13.0a25 Bokeh = 2.4.2 Django = 2.2.14 When loading a Panel app embedded in Django, the `AutoloadJsConsumer` call just hangs. After stepping through the code it appears there is an error, which causes it to enter an eternal loop: ```python > /Users/rditlsc9/miniconda/envs/tethys-vtime/lib/python3.7/site-packages/panel/io/django.py(37)autoload_handle() -> js = autoload_js_script(resources, session.token, element_id, app_path, absolute_url) TypeError: autoload_js_script() missing 1 required positional argument: 'absolute_url' ``` It appears that #2919 changed the signature of `autoload_js_script`, but the call to it in `panel/io/django.py:autoload_handle` wasn't updated accordingly. As a side note - is there a better way to get this type of error to log? I wasn't able to see any indication of an error until I stepped through the code in a debugger. </issue> <code> [start of panel/io/django.py] 1 from urllib.parse import urlparse 2 3 from bokeh.server.django.consumers import DocConsumer, AutoloadJsConsumer 4 5 from .resources import Resources 6 from .server import ( 7 autoload_js_script, server_html_page_for_session 8 ) 9 10 async def doc_handle(self, body): 11 session = await self._get_session() 12 resources = Resources.from_bokeh(self.application.resources()) 13 page = server_html_page_for_session( 14 session, resources=resources, title=session.document.title, 15 template=session.document.template, 16 template_variables=session.document.template_variables 17 ) 18 await self.send_response(200, page.encode(), headers=[(b"Content-Type", b"text/html")]) 19 20 21 async def autoload_handle(self, body): 22 session = await self._get_session() 23 24 element_id = self.get_argument("bokeh-autoload-element", default=None) 25 if not element_id: 26 raise RuntimeError("No bokeh-autoload-element query parameter") 27 28 app_path = self.get_argument("bokeh-app-path", default="/") 29 absolute_url = self.get_argument("bokeh-absolute-url", default=None) 30 31 if absolute_url: 32 server_url = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(absolute_url)) 33 else: 34 server_url = None 35 36 resources = self.resources(server_url) 37 js = autoload_js_script(resources, session.token, element_id, app_path, absolute_url) 38 39 headers = [ 40 (b"Access-Control-Allow-Headers", b"*"), 41 (b"Access-Control-Allow-Methods", b"PUT, GET, OPTIONS"), 42 (b"Access-Control-Allow-Origin", b"*"), 43 (b"Content-Type", b"application/javascript") 44 ] 45 await self.send_response(200, js.encode(), headers=headers) 46 47 48 DocConsumer.handle = doc_handle 49 AutoloadJsConsumer.handle = autoload_handle 50 [end of panel/io/django.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/panel/io/django.py b/panel/io/django.py --- a/panel/io/django.py +++ b/panel/io/django.py @@ -34,7 +34,7 @@ server_url = None resources = self.resources(server_url) - js = autoload_js_script(resources, session.token, element_id, app_path, absolute_url) + js = autoload_js_script(session.document, resources, session.token, element_id, app_path, absolute_url) headers = [ (b"Access-Control-Allow-Headers", b"*"),
{"golden_diff": "diff --git a/panel/io/django.py b/panel/io/django.py\n--- a/panel/io/django.py\n+++ b/panel/io/django.py\n@@ -34,7 +34,7 @@\n server_url = None\n \n resources = self.resources(server_url)\n- js = autoload_js_script(resources, session.token, element_id, app_path, absolute_url)\n+ js = autoload_js_script(session.document, resources, session.token, element_id, app_path, absolute_url)\n \n headers = [\n (b\"Access-Control-Allow-Headers\", b\"*\"),\n", "issue": "Django autoload_handle broken\n#### ALL software version info\r\nPanel = 0.13.0a25\r\nBokeh = 2.4.2\r\nDjango = 2.2.14\r\n\r\nWhen loading a Panel app embedded in Django, the `AutoloadJsConsumer` call just hangs. After stepping through the code it \r\nappears there is an error, which causes it to enter an eternal loop:\r\n\r\n```python\r\n> /Users/rditlsc9/miniconda/envs/tethys-vtime/lib/python3.7/site-packages/panel/io/django.py(37)autoload_handle()\r\n-> js = autoload_js_script(resources, session.token, element_id, app_path, absolute_url)\r\n\r\nTypeError: autoload_js_script() missing 1 required positional argument: 'absolute_url'\r\n```\r\n\r\nIt appears that #2919 changed the signature of `autoload_js_script`, but the call to it in `panel/io/django.py:autoload_handle` wasn't updated accordingly.\r\n\r\n\r\nAs a side note - is there a better way to get this type of error to log? I wasn't able to see any indication of an error until I stepped through the code in a debugger.\r\n\n", "before_files": [{"content": "from urllib.parse import urlparse\n\nfrom bokeh.server.django.consumers import DocConsumer, AutoloadJsConsumer\n\nfrom .resources import Resources\nfrom .server import (\n autoload_js_script, server_html_page_for_session\n)\n\nasync def doc_handle(self, body):\n session = await self._get_session()\n resources = Resources.from_bokeh(self.application.resources())\n page = server_html_page_for_session(\n session, resources=resources, title=session.document.title,\n template=session.document.template,\n template_variables=session.document.template_variables\n )\n await self.send_response(200, page.encode(), headers=[(b\"Content-Type\", b\"text/html\")])\n\n\nasync def autoload_handle(self, body):\n session = await self._get_session()\n\n element_id = self.get_argument(\"bokeh-autoload-element\", default=None)\n if not element_id:\n raise RuntimeError(\"No bokeh-autoload-element query parameter\")\n\n app_path = self.get_argument(\"bokeh-app-path\", default=\"/\")\n absolute_url = self.get_argument(\"bokeh-absolute-url\", default=None)\n\n if absolute_url:\n server_url = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(absolute_url))\n else:\n server_url = None\n\n resources = self.resources(server_url)\n js = autoload_js_script(resources, session.token, element_id, app_path, absolute_url)\n\n headers = [\n (b\"Access-Control-Allow-Headers\", b\"*\"),\n (b\"Access-Control-Allow-Methods\", b\"PUT, GET, OPTIONS\"),\n (b\"Access-Control-Allow-Origin\", b\"*\"),\n (b\"Content-Type\", b\"application/javascript\")\n ]\n await self.send_response(200, js.encode(), headers=headers)\n\n\nDocConsumer.handle = doc_handle\nAutoloadJsConsumer.handle = autoload_handle\n", "path": "panel/io/django.py"}]}
1,271
125
gh_patches_debug_875
rasdani/github-patches
git_diff
dbt-labs__dbt-core-5507
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [CT-876] Could we also now remove our upper bound on `MarkupSafe`, which we put in place earlier this year due to incompatibility with Jinja2? Remove our upper bound on `MarkupSafe`, which we put in place earlier this year due to incompatibility with Jinja2(#4745). Also bump minimum requirement to match [Jinja2's requirements](https://github.com/pallets/jinja/blob/1c4066a4fad5aaeb2ac55809d1d38477cd23a0f6/setup.py#L6). </issue> <code> [start of core/setup.py] 1 #!/usr/bin/env python 2 import os 3 import sys 4 5 if sys.version_info < (3, 7, 2): 6 print("Error: dbt does not support this version of Python.") 7 print("Please upgrade to Python 3.7.2 or higher.") 8 sys.exit(1) 9 10 11 from setuptools import setup 12 13 try: 14 from setuptools import find_namespace_packages 15 except ImportError: 16 # the user has a downlevel version of setuptools. 17 print("Error: dbt requires setuptools v40.1.0 or higher.") 18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again") 19 sys.exit(1) 20 21 22 this_directory = os.path.abspath(os.path.dirname(__file__)) 23 with open(os.path.join(this_directory, "README.md")) as f: 24 long_description = f.read() 25 26 27 package_name = "dbt-core" 28 package_version = "1.3.0a1" 29 description = """With dbt, data analysts and engineers can build analytics \ 30 the way engineers build applications.""" 31 32 33 setup( 34 name=package_name, 35 version=package_version, 36 description=description, 37 long_description=long_description, 38 long_description_content_type="text/markdown", 39 author="dbt Labs", 40 author_email="[email protected]", 41 url="https://github.com/dbt-labs/dbt-core", 42 packages=find_namespace_packages(include=["dbt", "dbt.*"]), 43 include_package_data=True, 44 test_suite="test", 45 entry_points={ 46 "console_scripts": [ 47 "dbt = dbt.main:main", 48 ], 49 }, 50 install_requires=[ 51 "Jinja2==3.1.2", 52 "MarkupSafe>=0.23,<2.1", 53 "agate>=1.6,<1.6.4", 54 "click>=7.0,<9", 55 "colorama>=0.3.9,<0.4.6", 56 "hologram>=0.0.14,<=0.0.15", 57 "isodate>=0.6,<0.7", 58 "logbook>=1.5,<1.6", 59 "mashumaro[msgpack]==3.0.3", 60 "minimal-snowplow-tracker==0.0.2", 61 "networkx>=2.3,<2.8.1;python_version<'3.8'", 62 "networkx>=2.3,<3;python_version>='3.8'", 63 "packaging>=20.9,<22.0", 64 "sqlparse>=0.2.3,<0.5", 65 "dbt-extractor~=0.4.1", 66 "typing-extensions>=3.7.4", 67 "werkzeug>=1,<3", 68 # the following are all to match snowflake-connector-python 69 "requests<3.0.0", 70 "idna>=2.5,<4", 71 "cffi>=1.9,<2.0.0", 72 "pyyaml>=6.0", 73 ], 74 zip_safe=False, 75 classifiers=[ 76 "Development Status :: 5 - Production/Stable", 77 "License :: OSI Approved :: Apache Software License", 78 "Operating System :: Microsoft :: Windows", 79 "Operating System :: MacOS :: MacOS X", 80 "Operating System :: POSIX :: Linux", 81 "Programming Language :: Python :: 3.7", 82 "Programming Language :: Python :: 3.8", 83 "Programming Language :: Python :: 3.9", 84 "Programming Language :: Python :: 3.10", 85 ], 86 python_requires=">=3.7.2", 87 ) 88 [end of core/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/setup.py b/core/setup.py --- a/core/setup.py +++ b/core/setup.py @@ -49,7 +49,6 @@ }, install_requires=[ "Jinja2==3.1.2", - "MarkupSafe>=0.23,<2.1", "agate>=1.6,<1.6.4", "click>=7.0,<9", "colorama>=0.3.9,<0.4.6",
{"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -49,7 +49,6 @@\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n- \"MarkupSafe>=0.23,<2.1\",\n \"agate>=1.6,<1.6.4\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.6\",\n", "issue": "[CT-876] Could we also now remove our upper bound on `MarkupSafe`, which we put in place earlier this year due to incompatibility with Jinja2?\nRemove our upper bound on `MarkupSafe`, which we put in place earlier this year due to incompatibility with Jinja2(#4745). Also bump minimum requirement to match [Jinja2's requirements](https://github.com/pallets/jinja/blob/1c4066a4fad5aaeb2ac55809d1d38477cd23a0f6/setup.py#L6).\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.3.0a1\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\n \"dbt = dbt.main:main\",\n ],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"MarkupSafe>=0.23,<2.1\",\n \"agate>=1.6,<1.6.4\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.6\",\n \"hologram>=0.0.14,<=0.0.15\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.0.3\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>=20.9,<22.0\",\n \"sqlparse>=0.2.3,<0.5\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}]}
1,618
111
gh_patches_debug_29931
rasdani/github-patches
git_diff
deepset-ai__haystack-5811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Change `SentenceTransformersTextEmbedder` to non-batch mode </issue> <code> [start of haystack/preview/components/embedders/sentence_transformers_text_embedder.py] 1 from typing import List, Optional, Union, Dict, Any 2 3 from haystack.preview import component, default_to_dict, default_from_dict 4 from haystack.preview.embedding_backends.sentence_transformers_backend import ( 5 _SentenceTransformersEmbeddingBackendFactory, 6 ) 7 8 9 @component 10 class SentenceTransformersTextEmbedder: 11 """ 12 A component for embedding strings using Sentence Transformers models. 13 """ 14 15 def __init__( 16 self, 17 model_name_or_path: str = "sentence-transformers/all-mpnet-base-v2", 18 device: Optional[str] = None, 19 use_auth_token: Union[bool, str, None] = None, 20 prefix: str = "", 21 suffix: str = "", 22 batch_size: int = 32, 23 progress_bar: bool = True, 24 normalize_embeddings: bool = False, 25 ): 26 """ 27 Create a SentenceTransformersTextEmbedder component. 28 29 :param model_name_or_path: Local path or name of the model in Hugging Face's model hub, such as ``'sentence-transformers/all-mpnet-base-v2'``. 30 :param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used. 31 :param use_auth_token: The API token used to download private models from Hugging Face. 32 If this parameter is set to `True`, then the token generated when running 33 `transformers-cli login` (stored in ~/.huggingface) will be used. 34 :param prefix: A string to add to the beginning of each text. 35 :param suffix: A string to add to the end of each text. 36 :param batch_size: Number of strings to encode at once. 37 :param progress_bar: If true, displays progress bar during embedding. 38 :param normalize_embeddings: If set to true, returned vectors will have length 1. 39 """ 40 41 self.model_name_or_path = model_name_or_path 42 # TODO: remove device parameter and use Haystack's device management once migrated 43 self.device = device or "cpu" 44 self.use_auth_token = use_auth_token 45 self.prefix = prefix 46 self.suffix = suffix 47 self.batch_size = batch_size 48 self.progress_bar = progress_bar 49 self.normalize_embeddings = normalize_embeddings 50 51 def to_dict(self) -> Dict[str, Any]: 52 """ 53 Serialize this component to a dictionary. 54 """ 55 return default_to_dict( 56 self, 57 model_name_or_path=self.model_name_or_path, 58 device=self.device, 59 use_auth_token=self.use_auth_token, 60 prefix=self.prefix, 61 suffix=self.suffix, 62 batch_size=self.batch_size, 63 progress_bar=self.progress_bar, 64 normalize_embeddings=self.normalize_embeddings, 65 ) 66 67 @classmethod 68 def from_dict(cls, data: Dict[str, Any]) -> "SentenceTransformersTextEmbedder": 69 """ 70 Deserialize this component from a dictionary. 71 """ 72 return default_from_dict(cls, data) 73 74 def warm_up(self): 75 """ 76 Load the embedding backend. 77 """ 78 if not hasattr(self, "embedding_backend"): 79 self.embedding_backend = _SentenceTransformersEmbeddingBackendFactory.get_embedding_backend( 80 model_name_or_path=self.model_name_or_path, device=self.device, use_auth_token=self.use_auth_token 81 ) 82 83 @component.output_types(embeddings=List[List[float]]) 84 def run(self, texts: List[str]): 85 """Embed a list of strings.""" 86 if not isinstance(texts, list) or not isinstance(texts[0], str): 87 raise TypeError( 88 "SentenceTransformersTextEmbedder expects a list of strings as input." 89 "In case you want to embed a list of Documents, please use the SentenceTransformersDocumentEmbedder." 90 ) 91 if not hasattr(self, "embedding_backend"): 92 raise RuntimeError("The embedding model has not been loaded. Please call warm_up() before running.") 93 94 texts_to_embed = [self.prefix + text + self.suffix for text in texts] 95 embeddings = self.embedding_backend.embed( 96 texts_to_embed, 97 batch_size=self.batch_size, 98 show_progress_bar=self.progress_bar, 99 normalize_embeddings=self.normalize_embeddings, 100 ) 101 return {"embeddings": embeddings} 102 [end of haystack/preview/components/embedders/sentence_transformers_text_embedder.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/haystack/preview/components/embedders/sentence_transformers_text_embedder.py b/haystack/preview/components/embedders/sentence_transformers_text_embedder.py --- a/haystack/preview/components/embedders/sentence_transformers_text_embedder.py +++ b/haystack/preview/components/embedders/sentence_transformers_text_embedder.py @@ -80,22 +80,22 @@ model_name_or_path=self.model_name_or_path, device=self.device, use_auth_token=self.use_auth_token ) - @component.output_types(embeddings=List[List[float]]) - def run(self, texts: List[str]): - """Embed a list of strings.""" - if not isinstance(texts, list) or not isinstance(texts[0], str): + @component.output_types(embedding=List[float]) + def run(self, text: str): + """Embed a string.""" + if not isinstance(text, str): raise TypeError( - "SentenceTransformersTextEmbedder expects a list of strings as input." + "SentenceTransformersTextEmbedder expects a string as input." "In case you want to embed a list of Documents, please use the SentenceTransformersDocumentEmbedder." ) if not hasattr(self, "embedding_backend"): raise RuntimeError("The embedding model has not been loaded. Please call warm_up() before running.") - texts_to_embed = [self.prefix + text + self.suffix for text in texts] - embeddings = self.embedding_backend.embed( - texts_to_embed, + text_to_embed = self.prefix + text + self.suffix + embedding = self.embedding_backend.embed( + [text_to_embed], batch_size=self.batch_size, show_progress_bar=self.progress_bar, normalize_embeddings=self.normalize_embeddings, - ) - return {"embeddings": embeddings} + )[0] + return {"embedding": embedding}
{"golden_diff": "diff --git a/haystack/preview/components/embedders/sentence_transformers_text_embedder.py b/haystack/preview/components/embedders/sentence_transformers_text_embedder.py\n--- a/haystack/preview/components/embedders/sentence_transformers_text_embedder.py\n+++ b/haystack/preview/components/embedders/sentence_transformers_text_embedder.py\n@@ -80,22 +80,22 @@\n model_name_or_path=self.model_name_or_path, device=self.device, use_auth_token=self.use_auth_token\n )\n \n- @component.output_types(embeddings=List[List[float]])\n- def run(self, texts: List[str]):\n- \"\"\"Embed a list of strings.\"\"\"\n- if not isinstance(texts, list) or not isinstance(texts[0], str):\n+ @component.output_types(embedding=List[float])\n+ def run(self, text: str):\n+ \"\"\"Embed a string.\"\"\"\n+ if not isinstance(text, str):\n raise TypeError(\n- \"SentenceTransformersTextEmbedder expects a list of strings as input.\"\n+ \"SentenceTransformersTextEmbedder expects a string as input.\"\n \"In case you want to embed a list of Documents, please use the SentenceTransformersDocumentEmbedder.\"\n )\n if not hasattr(self, \"embedding_backend\"):\n raise RuntimeError(\"The embedding model has not been loaded. Please call warm_up() before running.\")\n \n- texts_to_embed = [self.prefix + text + self.suffix for text in texts]\n- embeddings = self.embedding_backend.embed(\n- texts_to_embed,\n+ text_to_embed = self.prefix + text + self.suffix\n+ embedding = self.embedding_backend.embed(\n+ [text_to_embed],\n batch_size=self.batch_size,\n show_progress_bar=self.progress_bar,\n normalize_embeddings=self.normalize_embeddings,\n- )\n- return {\"embeddings\": embeddings}\n+ )[0]\n+ return {\"embedding\": embedding}\n", "issue": "Change `SentenceTransformersTextEmbedder` to non-batch mode\n\n", "before_files": [{"content": "from typing import List, Optional, Union, Dict, Any\n\nfrom haystack.preview import component, default_to_dict, default_from_dict\nfrom haystack.preview.embedding_backends.sentence_transformers_backend import (\n _SentenceTransformersEmbeddingBackendFactory,\n)\n\n\n@component\nclass SentenceTransformersTextEmbedder:\n \"\"\"\n A component for embedding strings using Sentence Transformers models.\n \"\"\"\n\n def __init__(\n self,\n model_name_or_path: str = \"sentence-transformers/all-mpnet-base-v2\",\n device: Optional[str] = None,\n use_auth_token: Union[bool, str, None] = None,\n prefix: str = \"\",\n suffix: str = \"\",\n batch_size: int = 32,\n progress_bar: bool = True,\n normalize_embeddings: bool = False,\n ):\n \"\"\"\n Create a SentenceTransformersTextEmbedder component.\n\n :param model_name_or_path: Local path or name of the model in Hugging Face's model hub, such as ``'sentence-transformers/all-mpnet-base-v2'``.\n :param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.\n :param use_auth_token: The API token used to download private models from Hugging Face.\n If this parameter is set to `True`, then the token generated when running\n `transformers-cli login` (stored in ~/.huggingface) will be used.\n :param prefix: A string to add to the beginning of each text.\n :param suffix: A string to add to the end of each text.\n :param batch_size: Number of strings to encode at once.\n :param progress_bar: If true, displays progress bar during embedding.\n :param normalize_embeddings: If set to true, returned vectors will have length 1.\n \"\"\"\n\n self.model_name_or_path = model_name_or_path\n # TODO: remove device parameter and use Haystack's device management once migrated\n self.device = device or \"cpu\"\n self.use_auth_token = use_auth_token\n self.prefix = prefix\n self.suffix = suffix\n self.batch_size = batch_size\n self.progress_bar = progress_bar\n self.normalize_embeddings = normalize_embeddings\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n return default_to_dict(\n self,\n model_name_or_path=self.model_name_or_path,\n device=self.device,\n use_auth_token=self.use_auth_token,\n prefix=self.prefix,\n suffix=self.suffix,\n batch_size=self.batch_size,\n progress_bar=self.progress_bar,\n normalize_embeddings=self.normalize_embeddings,\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"SentenceTransformersTextEmbedder\":\n \"\"\"\n Deserialize this component from a dictionary.\n \"\"\"\n return default_from_dict(cls, data)\n\n def warm_up(self):\n \"\"\"\n Load the embedding backend.\n \"\"\"\n if not hasattr(self, \"embedding_backend\"):\n self.embedding_backend = _SentenceTransformersEmbeddingBackendFactory.get_embedding_backend(\n model_name_or_path=self.model_name_or_path, device=self.device, use_auth_token=self.use_auth_token\n )\n\n @component.output_types(embeddings=List[List[float]])\n def run(self, texts: List[str]):\n \"\"\"Embed a list of strings.\"\"\"\n if not isinstance(texts, list) or not isinstance(texts[0], str):\n raise TypeError(\n \"SentenceTransformersTextEmbedder expects a list of strings as input.\"\n \"In case you want to embed a list of Documents, please use the SentenceTransformersDocumentEmbedder.\"\n )\n if not hasattr(self, \"embedding_backend\"):\n raise RuntimeError(\"The embedding model has not been loaded. Please call warm_up() before running.\")\n\n texts_to_embed = [self.prefix + text + self.suffix for text in texts]\n embeddings = self.embedding_backend.embed(\n texts_to_embed,\n batch_size=self.batch_size,\n show_progress_bar=self.progress_bar,\n normalize_embeddings=self.normalize_embeddings,\n )\n return {\"embeddings\": embeddings}\n", "path": "haystack/preview/components/embedders/sentence_transformers_text_embedder.py"}]}
1,652
413
gh_patches_debug_6558
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-615
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> strawberry.utils.typing.get_optional_annotation fails when provided an `Optional[Union]` `strawberry.utils.typing.get_optional_annotation` fails when provided an `Optional[Union]` ```pycon >>> from typing import Optional, Union >>> from strawberry.utils.typing import get_optional_annotation >>> get_optional_annotation(Optional[Union[int, str]]) <class 'int'> ``` This should return `Union[int, str]` instead </issue> <code> [start of strawberry/utils/typing.py] 1 import typing 2 from collections.abc import AsyncGenerator, Callable 3 from typing import Type, TypeVar 4 5 6 try: 7 from typing import ForwardRef # type: ignore 8 except ImportError: # pragma: no cover 9 # ForwardRef is private in python 3.6 and 3.7 10 from typing import _ForwardRef as ForwardRef # type: ignore 11 12 13 def is_list(annotation: Type) -> bool: 14 """Returns True if annotation is a List""" 15 16 annotation_origin = getattr(annotation, "__origin__", None) 17 18 return annotation_origin == list 19 20 21 def is_union(annotation: Type) -> bool: 22 """Returns True if annotation is a Union""" 23 24 annotation_origin = getattr(annotation, "__origin__", None) 25 26 return annotation_origin == typing.Union 27 28 29 def is_optional(annotation: Type) -> bool: 30 """Returns True if the annotation is Optional[SomeType]""" 31 32 # Optionals are represented as unions 33 34 if not is_union(annotation): 35 return False 36 37 types = annotation.__args__ 38 39 # A Union to be optional needs to have at least one None type 40 return any([x == None.__class__ for x in types]) # noqa:E711 41 42 43 def get_optional_annotation(annotation: Type) -> Type: 44 types = annotation.__args__ 45 non_none_types = [x for x in types if x != None.__class__] # noqa:E711 46 47 return non_none_types[0] 48 49 50 def get_list_annotation(annotation: Type) -> Type: 51 return annotation.__args__[0] 52 53 54 def is_async_generator(annotation: Type) -> bool: 55 return getattr(annotation, "__origin__", None) == AsyncGenerator 56 57 58 def get_async_generator_annotation(annotation: Type) -> Type: 59 return annotation.__args__[0] 60 61 62 def is_generic(annotation: Type) -> bool: 63 """Returns True if the annotation is or extends a generic.""" 64 return ( 65 isinstance(annotation, type) 66 and issubclass(annotation, typing.Generic) # type:ignore 67 or isinstance(annotation, typing._GenericAlias) # type:ignore 68 and annotation.__origin__ 69 not in ( 70 list, 71 typing.Union, 72 tuple, 73 typing.ClassVar, 74 AsyncGenerator, 75 ) 76 ) 77 78 79 def is_type_var(annotation: Type) -> bool: 80 """Returns True if the annotation is a TypeVar.""" 81 82 return isinstance(annotation, TypeVar) # type:ignore 83 84 85 def has_type_var(annotation: Type) -> bool: 86 """ 87 Returns True if the annotation or any of 88 its argument have a TypeVar as argument. 89 """ 90 return any( 91 is_type_var(arg) or has_type_var(arg) 92 for arg in getattr(annotation, "__args__", []) 93 ) 94 95 96 def get_parameters(annotation: Type): 97 if ( 98 isinstance(annotation, typing._GenericAlias) # type:ignore 99 or isinstance(annotation, type) 100 and issubclass(annotation, typing.Generic) # type:ignore 101 and annotation is not typing.Generic 102 ): 103 return annotation.__parameters__ 104 else: 105 return () # pragma: no cover 106 107 108 def get_origin(annotation: Type): 109 if isinstance(annotation, typing._GenericAlias): # type:ignore 110 return ( 111 annotation.__origin__ 112 if annotation.__origin__ is not typing.ClassVar 113 else None 114 ) 115 116 if annotation is typing.Generic: # pragma: no cover 117 return typing.Generic 118 119 return None # pragma: no cover 120 121 122 def get_args(annotation: Type): 123 if isinstance(annotation, typing._GenericAlias): # type:ignore 124 res = annotation.__args__ 125 126 if ( 127 get_origin(annotation) is Callable and res[0] is not Ellipsis 128 ): # pragma: no cover 129 res = (list(res[:-1]), res[-1]) 130 131 return res 132 133 return () 134 135 136 def is_forward_ref(annotation: Type) -> bool: 137 return isinstance(annotation, ForwardRef) 138 [end of strawberry/utils/typing.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strawberry/utils/typing.py b/strawberry/utils/typing.py --- a/strawberry/utils/typing.py +++ b/strawberry/utils/typing.py @@ -42,7 +42,14 @@ def get_optional_annotation(annotation: Type) -> Type: types = annotation.__args__ - non_none_types = [x for x in types if x != None.__class__] # noqa:E711 + + non_none_types = tuple(x for x in types if x != None.__class__) # noqa:E711 + + # if we have multiple non none types we want to return a copy of this + # type (normally a Union type). + + if len(non_none_types) > 1: + return annotation.copy_with(non_none_types) return non_none_types[0]
{"golden_diff": "diff --git a/strawberry/utils/typing.py b/strawberry/utils/typing.py\n--- a/strawberry/utils/typing.py\n+++ b/strawberry/utils/typing.py\n@@ -42,7 +42,14 @@\n \n def get_optional_annotation(annotation: Type) -> Type:\n types = annotation.__args__\n- non_none_types = [x for x in types if x != None.__class__] # noqa:E711\n+\n+ non_none_types = tuple(x for x in types if x != None.__class__) # noqa:E711\n+\n+ # if we have multiple non none types we want to return a copy of this\n+ # type (normally a Union type).\n+\n+ if len(non_none_types) > 1:\n+ return annotation.copy_with(non_none_types)\n \n return non_none_types[0]\n", "issue": "strawberry.utils.typing.get_optional_annotation fails when provided an `Optional[Union]`\n`strawberry.utils.typing.get_optional_annotation` fails when provided an `Optional[Union]`\r\n\r\n```pycon\r\n>>> from typing import Optional, Union\r\n>>> from strawberry.utils.typing import get_optional_annotation\r\n\r\n>>> get_optional_annotation(Optional[Union[int, str]])\r\n<class 'int'>\r\n```\r\nThis should return `Union[int, str]` instead \n", "before_files": [{"content": "import typing\nfrom collections.abc import AsyncGenerator, Callable\nfrom typing import Type, TypeVar\n\n\ntry:\n from typing import ForwardRef # type: ignore\nexcept ImportError: # pragma: no cover\n # ForwardRef is private in python 3.6 and 3.7\n from typing import _ForwardRef as ForwardRef # type: ignore\n\n\ndef is_list(annotation: Type) -> bool:\n \"\"\"Returns True if annotation is a List\"\"\"\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list\n\n\ndef is_union(annotation: Type) -> bool:\n \"\"\"Returns True if annotation is a Union\"\"\"\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == typing.Union\n\n\ndef is_optional(annotation: Type) -> bool:\n \"\"\"Returns True if the annotation is Optional[SomeType]\"\"\"\n\n # Optionals are represented as unions\n\n if not is_union(annotation):\n return False\n\n types = annotation.__args__\n\n # A Union to be optional needs to have at least one None type\n return any([x == None.__class__ for x in types]) # noqa:E711\n\n\ndef get_optional_annotation(annotation: Type) -> Type:\n types = annotation.__args__\n non_none_types = [x for x in types if x != None.__class__] # noqa:E711\n\n return non_none_types[0]\n\n\ndef get_list_annotation(annotation: Type) -> Type:\n return annotation.__args__[0]\n\n\ndef is_async_generator(annotation: Type) -> bool:\n return getattr(annotation, \"__origin__\", None) == AsyncGenerator\n\n\ndef get_async_generator_annotation(annotation: Type) -> Type:\n return annotation.__args__[0]\n\n\ndef is_generic(annotation: Type) -> bool:\n \"\"\"Returns True if the annotation is or extends a generic.\"\"\"\n return (\n isinstance(annotation, type)\n and issubclass(annotation, typing.Generic) # type:ignore\n or isinstance(annotation, typing._GenericAlias) # type:ignore\n and annotation.__origin__\n not in (\n list,\n typing.Union,\n tuple,\n typing.ClassVar,\n AsyncGenerator,\n )\n )\n\n\ndef is_type_var(annotation: Type) -> bool:\n \"\"\"Returns True if the annotation is a TypeVar.\"\"\"\n\n return isinstance(annotation, TypeVar) # type:ignore\n\n\ndef has_type_var(annotation: Type) -> bool:\n \"\"\"\n Returns True if the annotation or any of\n its argument have a TypeVar as argument.\n \"\"\"\n return any(\n is_type_var(arg) or has_type_var(arg)\n for arg in getattr(annotation, \"__args__\", [])\n )\n\n\ndef get_parameters(annotation: Type):\n if (\n isinstance(annotation, typing._GenericAlias) # type:ignore\n or isinstance(annotation, type)\n and issubclass(annotation, typing.Generic) # type:ignore\n and annotation is not typing.Generic\n ):\n return annotation.__parameters__\n else:\n return () # pragma: no cover\n\n\ndef get_origin(annotation: Type):\n if isinstance(annotation, typing._GenericAlias): # type:ignore\n return (\n annotation.__origin__\n if annotation.__origin__ is not typing.ClassVar\n else None\n )\n\n if annotation is typing.Generic: # pragma: no cover\n return typing.Generic\n\n return None # pragma: no cover\n\n\ndef get_args(annotation: Type):\n if isinstance(annotation, typing._GenericAlias): # type:ignore\n res = annotation.__args__\n\n if (\n get_origin(annotation) is Callable and res[0] is not Ellipsis\n ): # pragma: no cover\n res = (list(res[:-1]), res[-1])\n\n return res\n\n return ()\n\n\ndef is_forward_ref(annotation: Type) -> bool:\n return isinstance(annotation, ForwardRef)\n", "path": "strawberry/utils/typing.py"}]}
1,801
195
gh_patches_debug_32082
rasdani/github-patches
git_diff
aws__aws-cli-900
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Provide better error message for invalid endpoint urls The error message could provide more context about what exactly went wrong with the request. For example: ``` $ aws s3api list-buckets --endpoint-url example.com Invalid URL u'/': No schema supplied ``` A better error message would be something like: ``` $ aws s3api list-buckets --endpoint-url example.com Bad value for --endpoint-url "example.com": scheme is missing. Must be of the form http://<hostname>/ or https://<hostname>/ ``` </issue> <code> [start of awscli/compat.py] 1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"). You 4 # may not use this file except in compliance with the License. A copy of 5 # the License is located at 6 7 # http://aws.amazon.com/apache2.0/ 8 9 # or in the "license" file accompanying this file. This file is 10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific 12 # language governing permissions and limitations under the License. 13 import sys 14 import six 15 16 if six.PY3: 17 import locale 18 19 def get_stdout_text_writer(): 20 return sys.stdout 21 22 def compat_open(filename, mode='r', encoding=None): 23 """Back-port open() that accepts an encoding argument. 24 25 In python3 this uses the built in open() and in python2 this 26 uses the io.open() function. 27 28 If the file is not being opened in binary mode, then we'll 29 use locale.getpreferredencoding() to find the preferred 30 encoding. 31 32 """ 33 if 'b' not in mode: 34 encoding = locale.getpreferredencoding() 35 return open(filename, mode, encoding=encoding) 36 37 else: 38 import codecs 39 import locale 40 import io 41 42 def get_stdout_text_writer(): 43 # In python3, all the sys.stdout/sys.stderr streams are in text 44 # mode. This means they expect unicode, and will encode the 45 # unicode automatically before actually writing to stdout/stderr. 46 # In python2, that's not the case. In order to provide a consistent 47 # interface, we can create a wrapper around sys.stdout that will take 48 # unicode, and automatically encode it to the preferred encoding. 49 # That way consumers can just call get_stdout_text_writer() and write 50 # unicode to the returned stream. Note that get_stdout_text_writer 51 # just returns sys.stdout in the PY3 section above because python3 52 # handles this. 53 return codecs.getwriter(locale.getpreferredencoding())(sys.stdout) 54 55 def compat_open(filename, mode='r', encoding=None): 56 # See docstring for compat_open in the PY3 section above. 57 if 'b' not in mode: 58 encoding = locale.getpreferredencoding() 59 return io.open(filename, mode, encoding=encoding) 60 [end of awscli/compat.py] [start of awscli/customizations/globalargs.py] 1 # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"). You 4 # may not use this file except in compliance with the License. A copy of 5 # the License is located at 6 # 7 # http://aws.amazon.com/apache2.0/ 8 # 9 # or in the "license" file accompanying this file. This file is 10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific 12 # language governing permissions and limitations under the License. 13 import sys 14 import os 15 16 import jmespath 17 18 19 def register_parse_global_args(cli): 20 cli.register('top-level-args-parsed', resolve_types) 21 cli.register('top-level-args-parsed', no_sign_request) 22 23 24 def resolve_types(parsed_args, **kwargs): 25 # This emulates the "type" arg from argparse, but does so in a way 26 # that plugins can also hook into this process. 27 _resolve_arg(parsed_args, 'query') 28 _resolve_arg(parsed_args, 'verify_ssl') 29 30 31 def _resolve_arg(parsed_args, name): 32 value = getattr(parsed_args, name, None) 33 if value is not None: 34 new_value = getattr(sys.modules[__name__], '_resolve_%s' % name)(value) 35 setattr(parsed_args, name, new_value) 36 37 38 def _resolve_query(value): 39 try: 40 return jmespath.compile(value) 41 except Exception as e: 42 raise ValueError("Bad value for --query %s: %s" % (value, str(e))) 43 44 45 def _resolve_verify_ssl(value): 46 verify = None 47 if not value: 48 verify = False 49 else: 50 verify = os.environ.get('AWS_CA_BUNDLE') 51 return verify 52 53 54 def no_sign_request(parsed_args, session, **kwargs): 55 if not parsed_args.sign_request: 56 # In order to make signing disabled for all requests 57 # we need to set the signature_version to None for 58 # any service created. This ensures that get_endpoint() 59 # will not look for auth. 60 session.register('service-created', disable_signing) 61 62 63 def disable_signing(service, **kwargs): 64 service.signature_version = None 65 [end of awscli/customizations/globalargs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/awscli/compat.py b/awscli/compat.py --- a/awscli/compat.py +++ b/awscli/compat.py @@ -15,6 +15,7 @@ if six.PY3: import locale + import urllib.parse as urlparse def get_stdout_text_writer(): return sys.stdout @@ -38,6 +39,7 @@ import codecs import locale import io + import urlparse def get_stdout_text_writer(): # In python3, all the sys.stdout/sys.stderr streams are in text diff --git a/awscli/customizations/globalargs.py b/awscli/customizations/globalargs.py --- a/awscli/customizations/globalargs.py +++ b/awscli/customizations/globalargs.py @@ -15,6 +15,8 @@ import jmespath +from awscli.compat import urlparse + def register_parse_global_args(cli): cli.register('top-level-args-parsed', resolve_types) @@ -26,6 +28,7 @@ # that plugins can also hook into this process. _resolve_arg(parsed_args, 'query') _resolve_arg(parsed_args, 'verify_ssl') + _resolve_arg(parsed_args, 'endpoint_url') def _resolve_arg(parsed_args, name): @@ -51,6 +54,17 @@ return verify +def _resolve_endpoint_url(value): + parsed = urlparse.urlparse(value) + # Our http library requires you specify an endpoint url + # that contains a scheme, so we'll verify that up front. + if not parsed.scheme: + raise ValueError('Bad value for --endpoint-url "%s": scheme is ' + 'missing. Must be of the form ' + 'http://<hostname>/ or https://<hostname>/' % value) + return value + + def no_sign_request(parsed_args, session, **kwargs): if not parsed_args.sign_request: # In order to make signing disabled for all requests
{"golden_diff": "diff --git a/awscli/compat.py b/awscli/compat.py\n--- a/awscli/compat.py\n+++ b/awscli/compat.py\n@@ -15,6 +15,7 @@\n \n if six.PY3:\n import locale\n+ import urllib.parse as urlparse\n \n def get_stdout_text_writer():\n return sys.stdout\n@@ -38,6 +39,7 @@\n import codecs\n import locale\n import io\n+ import urlparse\n \n def get_stdout_text_writer():\n # In python3, all the sys.stdout/sys.stderr streams are in text\ndiff --git a/awscli/customizations/globalargs.py b/awscli/customizations/globalargs.py\n--- a/awscli/customizations/globalargs.py\n+++ b/awscli/customizations/globalargs.py\n@@ -15,6 +15,8 @@\n \n import jmespath\n \n+from awscli.compat import urlparse\n+\n \n def register_parse_global_args(cli):\n cli.register('top-level-args-parsed', resolve_types)\n@@ -26,6 +28,7 @@\n # that plugins can also hook into this process.\n _resolve_arg(parsed_args, 'query')\n _resolve_arg(parsed_args, 'verify_ssl')\n+ _resolve_arg(parsed_args, 'endpoint_url')\n \n \n def _resolve_arg(parsed_args, name):\n@@ -51,6 +54,17 @@\n return verify\n \n \n+def _resolve_endpoint_url(value):\n+ parsed = urlparse.urlparse(value)\n+ # Our http library requires you specify an endpoint url\n+ # that contains a scheme, so we'll verify that up front.\n+ if not parsed.scheme:\n+ raise ValueError('Bad value for --endpoint-url \"%s\": scheme is '\n+ 'missing. Must be of the form '\n+ 'http://<hostname>/ or https://<hostname>/' % value)\n+ return value\n+\n+\n def no_sign_request(parsed_args, session, **kwargs):\n if not parsed_args.sign_request:\n # In order to make signing disabled for all requests\n", "issue": "Provide better error message for invalid endpoint urls\nThe error message could provide more context about what exactly went wrong with the request. For example:\n\n```\n$ aws s3api list-buckets --endpoint-url example.com\n\nInvalid URL u'/': No schema supplied\n```\n\nA better error message would be something like:\n\n```\n$ aws s3api list-buckets --endpoint-url example.com\n\nBad value for --endpoint-url \"example.com\": scheme is missing. Must be of the form http://<hostname>/ or https://<hostname>/\n```\n\n", "before_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n\n# http://aws.amazon.com/apache2.0/\n\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\nimport six\n\nif six.PY3:\n import locale\n\n def get_stdout_text_writer():\n return sys.stdout\n\n def compat_open(filename, mode='r', encoding=None):\n \"\"\"Back-port open() that accepts an encoding argument.\n\n In python3 this uses the built in open() and in python2 this\n uses the io.open() function.\n\n If the file is not being opened in binary mode, then we'll\n use locale.getpreferredencoding() to find the preferred\n encoding.\n\n \"\"\"\n if 'b' not in mode:\n encoding = locale.getpreferredencoding()\n return open(filename, mode, encoding=encoding)\n\nelse:\n import codecs\n import locale\n import io\n\n def get_stdout_text_writer():\n # In python3, all the sys.stdout/sys.stderr streams are in text\n # mode. This means they expect unicode, and will encode the\n # unicode automatically before actually writing to stdout/stderr.\n # In python2, that's not the case. In order to provide a consistent\n # interface, we can create a wrapper around sys.stdout that will take\n # unicode, and automatically encode it to the preferred encoding.\n # That way consumers can just call get_stdout_text_writer() and write\n # unicode to the returned stream. Note that get_stdout_text_writer\n # just returns sys.stdout in the PY3 section above because python3\n # handles this.\n return codecs.getwriter(locale.getpreferredencoding())(sys.stdout)\n\n def compat_open(filename, mode='r', encoding=None):\n # See docstring for compat_open in the PY3 section above.\n if 'b' not in mode:\n encoding = locale.getpreferredencoding()\n return io.open(filename, mode, encoding=encoding)\n", "path": "awscli/compat.py"}, {"content": "# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\nimport os\n\nimport jmespath\n\n\ndef register_parse_global_args(cli):\n cli.register('top-level-args-parsed', resolve_types)\n cli.register('top-level-args-parsed', no_sign_request)\n\n\ndef resolve_types(parsed_args, **kwargs):\n # This emulates the \"type\" arg from argparse, but does so in a way\n # that plugins can also hook into this process.\n _resolve_arg(parsed_args, 'query')\n _resolve_arg(parsed_args, 'verify_ssl')\n\n\ndef _resolve_arg(parsed_args, name):\n value = getattr(parsed_args, name, None)\n if value is not None:\n new_value = getattr(sys.modules[__name__], '_resolve_%s' % name)(value)\n setattr(parsed_args, name, new_value)\n\n\ndef _resolve_query(value):\n try:\n return jmespath.compile(value)\n except Exception as e:\n raise ValueError(\"Bad value for --query %s: %s\" % (value, str(e)))\n\n\ndef _resolve_verify_ssl(value):\n verify = None\n if not value:\n verify = False\n else:\n verify = os.environ.get('AWS_CA_BUNDLE')\n return verify\n\n\ndef no_sign_request(parsed_args, session, **kwargs):\n if not parsed_args.sign_request:\n # In order to make signing disabled for all requests\n # we need to set the signature_version to None for\n # any service created. This ensures that get_endpoint()\n # will not look for auth.\n session.register('service-created', disable_signing)\n\n\ndef disable_signing(service, **kwargs):\n service.signature_version = None\n", "path": "awscli/customizations/globalargs.py"}]}
1,928
441
gh_patches_debug_7632
rasdani/github-patches
git_diff
aws__aws-cli-4308
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> aws emr create-cluster help command returns error how to reproduce 1. upgrade to awscli 1.16.190 or 1.16.194 or 1.16.196 at the moment it's enough to install via pip either on macOS(1.16.194) or on linux(1.16.196), or using Homebrew(1.16.190) on macOS ``` # on Ubuntu 16.04 linux $ pip install --upgrade awscli <... output skipped - but it was successful, no errors ...> $ aws --version aws-cli/1.16.196 Python/2.7.12 Linux/4.4.0-97-generic botocore/1.12.186 $ aws emr create-cluster help [Errno 2] No such file or directory: '/usr/local/lib/python2.7/dist-packages/awscli/examples/emr/create-cluster-synopsis.txt' #or on macOS just for example using the one installed via Homebrew $ brew install awscli <... output skipped - but it was successful, no errors ...> $ aws --version aws-cli/1.16.190 Python/3.7.4 Darwin/18.6.0 botocore/1.12.180 $ aws emr create-cluster help [Errno 2] No such file or directory: '/usr/local/Cellar/awscli/1.16.190/libexec/lib/python3.7/site-packages/awscli/examples/emr/create-cluster-synopsis.txt' #or on macOS using aws installed via pip3 $ aws emr create-cluster help [Errno 2] No such file or directory: '/usr/local/lib/python3.7/site-packages/awscli/examples/emr/create-cluster-synopsis.txt' ``` </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import codecs 3 import os.path 4 import re 5 import sys 6 7 from setuptools import setup, find_packages 8 9 10 here = os.path.abspath(os.path.dirname(__file__)) 11 12 13 def read(*parts): 14 return codecs.open(os.path.join(here, *parts), 'r').read() 15 16 17 def find_version(*file_paths): 18 version_file = read(*file_paths) 19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", 20 version_file, re.M) 21 if version_match: 22 return version_match.group(1) 23 raise RuntimeError("Unable to find version string.") 24 25 26 requires = ['botocore==1.12.187', 27 'colorama>=0.2.5,<=0.3.9', 28 'docutils>=0.10', 29 'rsa>=3.1.2,<=3.5.0', 30 's3transfer>=0.2.0,<0.3.0'] 31 32 33 if sys.version_info[:2] == (2, 6): 34 # For python2.6 we have to require argparse since it 35 # was not in stdlib until 2.7. 36 requires.append('argparse>=1.1') 37 38 # For Python 2.6, we have to require a different verion of PyYAML since the latest 39 # versions dropped support for Python 2.6. 40 requires.append('PyYAML>=3.10,<=3.13') 41 else: 42 requires.append('PyYAML>=3.10,<=5.1') 43 44 45 setup_options = dict( 46 name='awscli', 47 version=find_version("awscli", "__init__.py"), 48 description='Universal Command Line Environment for AWS.', 49 long_description=read('README.rst'), 50 author='Amazon Web Services', 51 url='http://aws.amazon.com/cli/', 52 scripts=['bin/aws', 'bin/aws.cmd', 53 'bin/aws_completer', 'bin/aws_zsh_completer.sh', 54 'bin/aws_bash_completer'], 55 packages=find_packages(exclude=['tests*']), 56 package_data={'awscli': ['data/*.json', 'examples/*/*.rst', 57 'examples/*/*/*.rst', 'topics/*.rst', 58 'topics/*.json']}, 59 install_requires=requires, 60 extras_require={ 61 ':python_version=="2.6"': [ 62 'argparse>=1.1', 63 ] 64 }, 65 license="Apache License 2.0", 66 classifiers=[ 67 'Development Status :: 5 - Production/Stable', 68 'Intended Audience :: Developers', 69 'Intended Audience :: System Administrators', 70 'Natural Language :: English', 71 'License :: OSI Approved :: Apache Software License', 72 'Programming Language :: Python', 73 'Programming Language :: Python :: 2', 74 'Programming Language :: Python :: 2.6', 75 'Programming Language :: Python :: 2.7', 76 'Programming Language :: Python :: 3', 77 'Programming Language :: Python :: 3.3', 78 'Programming Language :: Python :: 3.4', 79 'Programming Language :: Python :: 3.5', 80 'Programming Language :: Python :: 3.6', 81 'Programming Language :: Python :: 3.7', 82 ], 83 ) 84 85 if 'py2exe' in sys.argv: 86 # This will actually give us a py2exe command. 87 import py2exe 88 # And we have some py2exe specific options. 89 setup_options['options'] = { 90 'py2exe': { 91 'optimize': 0, 92 'skip_archive': True, 93 'dll_excludes': ['crypt32.dll'], 94 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser', 95 'awscli', 'ConfigParser', 'xml.etree', 'pipes'], 96 } 97 } 98 setup_options['console'] = ['bin/aws'] 99 100 101 setup(**setup_options) 102 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -54,6 +54,7 @@ 'bin/aws_bash_completer'], packages=find_packages(exclude=['tests*']), package_data={'awscli': ['data/*.json', 'examples/*/*.rst', + 'examples/*/*.txt', 'examples/*/*/*.txt', 'examples/*/*/*.rst', 'topics/*.rst', 'topics/*.json']}, install_requires=requires,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -54,6 +54,7 @@\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n+ 'examples/*/*.txt', 'examples/*/*/*.txt',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n", "issue": "aws emr create-cluster help command returns error\nhow to reproduce \r\n\r\n1. upgrade to awscli 1.16.190 or 1.16.194 or 1.16.196\r\nat the moment it's enough to install via pip either on macOS(1.16.194) or on linux(1.16.196), or using Homebrew(1.16.190) on macOS\r\n```\r\n# on Ubuntu 16.04 linux \r\n$ pip install --upgrade awscli\r\n<... output skipped - but it was successful, no errors ...>\r\n\r\n$ aws --version\r\naws-cli/1.16.196 Python/2.7.12 Linux/4.4.0-97-generic botocore/1.12.186\r\n\r\n$ aws emr create-cluster help\r\n\r\n[Errno 2] No such file or directory: '/usr/local/lib/python2.7/dist-packages/awscli/examples/emr/create-cluster-synopsis.txt'\r\n\r\n\r\n\r\n#or on macOS just for example using the one installed via Homebrew\r\n$ brew install awscli\r\n<... output skipped - but it was successful, no errors ...>\r\n\r\n$ aws --version\r\naws-cli/1.16.190 Python/3.7.4 Darwin/18.6.0 botocore/1.12.180\r\n\r\n$ aws emr create-cluster help\r\n[Errno 2] No such file or directory: '/usr/local/Cellar/awscli/1.16.190/libexec/lib/python3.7/site-packages/awscli/examples/emr/create-cluster-synopsis.txt'\r\n\r\n#or on macOS using aws installed via pip3\r\n$ aws emr create-cluster help\r\n\r\n[Errno 2] No such file or directory: '/usr/local/lib/python3.7/site-packages/awscli/examples/emr/create-cluster-synopsis.txt'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.12.187',\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.2.0,<0.3.0']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n # For Python 2.6, we have to require a different verion of PyYAML since the latest\n # versions dropped support for Python 2.6.\n requires.append('PyYAML>=3.10,<=3.13')\nelse:\n requires.append('PyYAML>=3.10,<=5.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=read('README.rst'),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]}
1,997
111
gh_patches_debug_6935
rasdani/github-patches
git_diff
googleapis__google-auth-library-python-51
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create system tests for service account-based credentials </issue> <code> [start of setup.py] 1 # Copyright 2014 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from setuptools import find_packages 16 from setuptools import setup 17 18 19 DEPENDENCIES = ( 20 'pyasn1>=0.1.7', 21 'pyasn1-modules>=0.0.5', 22 'rsa>=3.1.4', 23 'six>=1.9.0', 24 ) 25 26 27 with open('README.rst', 'r') as fh: 28 long_description = fh.read() 29 30 setup( 31 name='google-auth', 32 version='0.0.1', 33 author='Google Cloud Platform', 34 author_email='[email protected]', 35 description='Google Authentication Library', 36 long_description=long_description, 37 url='https://github.com/GoogleCloudPlatform/google-auth-library-python', 38 packages=find_packages(exclude='tests'), 39 namespace_packages=('google',), 40 install_requires=DEPENDENCIES, 41 license='Apache 2.0', 42 keywords='google auth oauth client', 43 classifiers=( 44 'Programming Language :: Python :: 2', 45 'Programming Language :: Python :: 2.7', 46 'Programming Language :: Python :: 3', 47 'Programming Language :: Python :: 3.4', 48 'Programming Language :: Python :: 3.5', 49 'Development Status :: 3 - Alpha', 50 'Intended Audience :: Developers', 51 'License :: OSI Approved :: Apache Software License', 52 'Operating System :: POSIX', 53 'Operating System :: Microsoft :: Windows', 54 'Operating System :: MacOS :: MacOS X', 55 'Operating System :: OS Independent', 56 'Topic :: Internet :: WWW/HTTP', 57 ), 58 ) 59 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ description='Google Authentication Library', long_description=long_description, url='https://github.com/GoogleCloudPlatform/google-auth-library-python', - packages=find_packages(exclude='tests'), + packages=find_packages(exclude=('tests', 'system_tests')), namespace_packages=('google',), install_requires=DEPENDENCIES, license='Apache 2.0',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,7 +35,7 @@\n description='Google Authentication Library',\n long_description=long_description,\n url='https://github.com/GoogleCloudPlatform/google-auth-library-python',\n- packages=find_packages(exclude='tests'),\n+ packages=find_packages(exclude=('tests', 'system_tests')),\n namespace_packages=('google',),\n install_requires=DEPENDENCIES,\n license='Apache 2.0',\n", "issue": "Create system tests for service account-based credentials\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n 'pyasn1>=0.1.7',\n 'pyasn1-modules>=0.0.5',\n 'rsa>=3.1.4',\n 'six>=1.9.0',\n)\n\n\nwith open('README.rst', 'r') as fh:\n long_description = fh.read()\n\nsetup(\n name='google-auth',\n version='0.0.1',\n author='Google Cloud Platform',\n author_email='[email protected]',\n description='Google Authentication Library',\n long_description=long_description,\n url='https://github.com/GoogleCloudPlatform/google-auth-library-python',\n packages=find_packages(exclude='tests'),\n namespace_packages=('google',),\n install_requires=DEPENDENCIES,\n license='Apache 2.0',\n keywords='google auth oauth client',\n classifiers=(\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ),\n)\n", "path": "setup.py"}]}
1,108
110
gh_patches_debug_40166
rasdani/github-patches
git_diff
learningequality__kolibri-2092
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Setup wizard is broken ## Summary * Submitting the setup wizard returns `{language_code: ["This field is required."]}` ## System information - Version: 0.6 ## How to reproduce 1. Go through setup wizard ## Real-life consequences Sadness </issue> <code> [start of kolibri/core/device/serializers.py] 1 from django.db import transaction 2 from django.utils.translation import check_for_language, ugettext_lazy as _ 3 from kolibri.auth.constants.facility_presets import choices, mappings 4 from kolibri.auth.constants.role_kinds import ADMIN 5 from kolibri.auth.models import Facility, FacilityUser 6 from kolibri.auth.serializers import FacilitySerializer, FacilityUserSerializer 7 from rest_framework import serializers 8 9 from .models import DevicePermissions, DeviceSettings 10 11 12 class DevicePermissionsSerializer(serializers.ModelSerializer): 13 14 class Meta: 15 model = DevicePermissions 16 fields = ( 17 'user', 'is_superuser', 'can_manage_content', 18 ) 19 20 class NoFacilityFacilityUserSerializer(FacilityUserSerializer): 21 22 class Meta: 23 model = FacilityUser 24 fields = ('id', 'username', 'full_name', 'password', ) 25 26 27 class DeviceProvisionSerializer(serializers.Serializer): 28 facility = FacilitySerializer() 29 preset = serializers.ChoiceField(choices=choices) 30 superuser = NoFacilityFacilityUserSerializer() 31 language_code = serializers.CharField(max_length=15) 32 33 class Meta: 34 fields = ('facility', 'dataset', 'superuser', 'language_code') 35 36 def validate_language_code(self, language_code): 37 """ 38 Check that the language_code is supported by Kolibri 39 """ 40 if not check_for_language(language_code): 41 raise serializers.ValidationError(_("Language is not supported by Kolibri")) 42 return language_code 43 44 def create(self, validated_data): 45 """ 46 Endpoint for initial setup of a device. 47 Expects a value for: 48 default language - the default language of this Kolibri device 49 facility - the required fields for setting up a facility 50 facilitydataset - facility configuration options 51 superuser - the required fields for a facilityuser who will be set as the super user for this device 52 """ 53 with transaction.atomic(): 54 facility = Facility.objects.create(**validated_data.pop('facility')) 55 preset = validated_data.pop('preset') 56 dataset_data = mappings[preset] 57 for key, value in dataset_data.items(): 58 setattr(facility.dataset, key, value) 59 facility.dataset.save() 60 superuser_data = validated_data.pop('superuser') 61 superuser_data['facility'] = facility 62 superuser = FacilityUserSerializer(data=superuser_data).create(superuser_data) 63 facility.add_role(superuser, ADMIN) 64 DevicePermissions.objects.create(user=superuser, is_superuser=True) 65 language_code = validated_data.pop('language_code') 66 device_settings, created = DeviceSettings.objects.get_or_create() 67 device_settings.is_provisioned = True 68 device_settings.language_code = language_code 69 device_settings.save() 70 return { 71 "facility": facility, 72 "preset": preset, 73 "superuser": superuser, 74 "language_code": language_code 75 } 76 [end of kolibri/core/device/serializers.py] [start of kolibri/core/device/models.py] 1 from django.conf import settings 2 from django.db import models 3 from kolibri.auth.models import FacilityUser 4 5 from .permissions import UserCanManageDevicePermissions 6 7 8 class DevicePermissions(models.Model): 9 """ 10 This class stores metadata about device permissions for FacilityUsers. 11 """ 12 13 permissions = UserCanManageDevicePermissions() 14 15 user = models.OneToOneField(FacilityUser, on_delete=models.CASCADE, related_name='devicepermissions', blank=False, null=False, primary_key=True) 16 is_superuser = models.BooleanField(default=False) 17 can_manage_content = models.BooleanField(default=False) 18 19 20 class DeviceSettings(models.Model): 21 """ 22 This class stores data about settings particular to this device 23 """ 24 25 is_provisioned = models.BooleanField(default=False) 26 language_code = models.CharField(max_length=15, default=settings.LANGUAGE_CODE) 27 28 def save(self, *args, **kwargs): 29 self.pk = 1 30 super(DeviceSettings, self).save(*args, **kwargs) 31 [end of kolibri/core/device/models.py] [start of kolibri/core/device/migrations/0001_initial.py] 1 # -*- coding: utf-8 -*- 2 # Generated by Django 1.9.7 on 2017-08-16 23:05 3 from __future__ import unicode_literals 4 5 import django.db.models.deletion 6 from django.conf import settings 7 from django.db import migrations, models 8 9 10 class Migration(migrations.Migration): 11 12 initial = True 13 14 dependencies = [ 15 ('kolibriauth', '0003_auto_20170621_0958'), 16 ] 17 18 operations = [ 19 migrations.CreateModel( 20 name='DevicePermissions', 21 fields=[ 22 ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='devicepermissions', serialize=False, to=settings.AUTH_USER_MODEL)), 23 ('is_superuser', models.BooleanField(default=False)), 24 ('can_manage_content', models.BooleanField(default=False)), 25 ], 26 ), 27 migrations.CreateModel( 28 name='DeviceSettings', 29 fields=[ 30 ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 31 ('is_provisioned', models.BooleanField(default=False)), 32 ('language_code', models.CharField(default='en', max_length=15)), 33 ], 34 ), 35 ] 36 [end of kolibri/core/device/migrations/0001_initial.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/core/device/migrations/0001_initial.py b/kolibri/core/device/migrations/0001_initial.py --- a/kolibri/core/device/migrations/0001_initial.py +++ b/kolibri/core/device/migrations/0001_initial.py @@ -29,7 +29,7 @@ fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('is_provisioned', models.BooleanField(default=False)), - ('language_code', models.CharField(default='en', max_length=15)), + ('language_id', models.CharField(default='en', max_length=15)), ], ), ] diff --git a/kolibri/core/device/models.py b/kolibri/core/device/models.py --- a/kolibri/core/device/models.py +++ b/kolibri/core/device/models.py @@ -23,7 +23,7 @@ """ is_provisioned = models.BooleanField(default=False) - language_code = models.CharField(max_length=15, default=settings.LANGUAGE_CODE) + language_id = models.CharField(max_length=15, default=settings.LANGUAGE_CODE) def save(self, *args, **kwargs): self.pk = 1 diff --git a/kolibri/core/device/serializers.py b/kolibri/core/device/serializers.py --- a/kolibri/core/device/serializers.py +++ b/kolibri/core/device/serializers.py @@ -28,18 +28,18 @@ facility = FacilitySerializer() preset = serializers.ChoiceField(choices=choices) superuser = NoFacilityFacilityUserSerializer() - language_code = serializers.CharField(max_length=15) + language_id = serializers.CharField(max_length=15) class Meta: - fields = ('facility', 'dataset', 'superuser', 'language_code') + fields = ('facility', 'dataset', 'superuser', 'language_id') - def validate_language_code(self, language_code): + def validate_language_id(self, language_id): """ - Check that the language_code is supported by Kolibri + Check that the language_id is supported by Kolibri """ - if not check_for_language(language_code): + if not check_for_language(language_id): raise serializers.ValidationError(_("Language is not supported by Kolibri")) - return language_code + return language_id def create(self, validated_data): """ @@ -62,14 +62,14 @@ superuser = FacilityUserSerializer(data=superuser_data).create(superuser_data) facility.add_role(superuser, ADMIN) DevicePermissions.objects.create(user=superuser, is_superuser=True) - language_code = validated_data.pop('language_code') + language_id = validated_data.pop('language_id') device_settings, created = DeviceSettings.objects.get_or_create() device_settings.is_provisioned = True - device_settings.language_code = language_code + device_settings.language_id = language_id device_settings.save() return { "facility": facility, "preset": preset, "superuser": superuser, - "language_code": language_code + "language_id": language_id }
{"golden_diff": "diff --git a/kolibri/core/device/migrations/0001_initial.py b/kolibri/core/device/migrations/0001_initial.py\n--- a/kolibri/core/device/migrations/0001_initial.py\n+++ b/kolibri/core/device/migrations/0001_initial.py\n@@ -29,7 +29,7 @@\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('is_provisioned', models.BooleanField(default=False)),\n- ('language_code', models.CharField(default='en', max_length=15)),\n+ ('language_id', models.CharField(default='en', max_length=15)),\n ],\n ),\n ]\ndiff --git a/kolibri/core/device/models.py b/kolibri/core/device/models.py\n--- a/kolibri/core/device/models.py\n+++ b/kolibri/core/device/models.py\n@@ -23,7 +23,7 @@\n \"\"\"\n \n is_provisioned = models.BooleanField(default=False)\n- language_code = models.CharField(max_length=15, default=settings.LANGUAGE_CODE)\n+ language_id = models.CharField(max_length=15, default=settings.LANGUAGE_CODE)\n \n def save(self, *args, **kwargs):\n self.pk = 1\ndiff --git a/kolibri/core/device/serializers.py b/kolibri/core/device/serializers.py\n--- a/kolibri/core/device/serializers.py\n+++ b/kolibri/core/device/serializers.py\n@@ -28,18 +28,18 @@\n facility = FacilitySerializer()\n preset = serializers.ChoiceField(choices=choices)\n superuser = NoFacilityFacilityUserSerializer()\n- language_code = serializers.CharField(max_length=15)\n+ language_id = serializers.CharField(max_length=15)\n \n class Meta:\n- fields = ('facility', 'dataset', 'superuser', 'language_code')\n+ fields = ('facility', 'dataset', 'superuser', 'language_id')\n \n- def validate_language_code(self, language_code):\n+ def validate_language_id(self, language_id):\n \"\"\"\n- Check that the language_code is supported by Kolibri\n+ Check that the language_id is supported by Kolibri\n \"\"\"\n- if not check_for_language(language_code):\n+ if not check_for_language(language_id):\n raise serializers.ValidationError(_(\"Language is not supported by Kolibri\"))\n- return language_code\n+ return language_id\n \n def create(self, validated_data):\n \"\"\"\n@@ -62,14 +62,14 @@\n superuser = FacilityUserSerializer(data=superuser_data).create(superuser_data)\n facility.add_role(superuser, ADMIN)\n DevicePermissions.objects.create(user=superuser, is_superuser=True)\n- language_code = validated_data.pop('language_code')\n+ language_id = validated_data.pop('language_id')\n device_settings, created = DeviceSettings.objects.get_or_create()\n device_settings.is_provisioned = True\n- device_settings.language_code = language_code\n+ device_settings.language_id = language_id\n device_settings.save()\n return {\n \"facility\": facility,\n \"preset\": preset,\n \"superuser\": superuser,\n- \"language_code\": language_code\n+ \"language_id\": language_id\n }\n", "issue": "Setup wizard is broken\n## Summary\r\n\r\n* Submitting the setup wizard returns `{language_code: [\"This field is required.\"]}`\r\n\r\n## System information\r\n\r\n - Version: 0.6\r\n\r\n## How to reproduce\r\n\r\n1. Go through setup wizard\r\n\r\n## Real-life consequences\r\n\r\nSadness\n", "before_files": [{"content": "from django.db import transaction\nfrom django.utils.translation import check_for_language, ugettext_lazy as _\nfrom kolibri.auth.constants.facility_presets import choices, mappings\nfrom kolibri.auth.constants.role_kinds import ADMIN\nfrom kolibri.auth.models import Facility, FacilityUser\nfrom kolibri.auth.serializers import FacilitySerializer, FacilityUserSerializer\nfrom rest_framework import serializers\n\nfrom .models import DevicePermissions, DeviceSettings\n\n\nclass DevicePermissionsSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = DevicePermissions\n fields = (\n 'user', 'is_superuser', 'can_manage_content',\n )\n\nclass NoFacilityFacilityUserSerializer(FacilityUserSerializer):\n\n class Meta:\n model = FacilityUser\n fields = ('id', 'username', 'full_name', 'password', )\n\n\nclass DeviceProvisionSerializer(serializers.Serializer):\n facility = FacilitySerializer()\n preset = serializers.ChoiceField(choices=choices)\n superuser = NoFacilityFacilityUserSerializer()\n language_code = serializers.CharField(max_length=15)\n\n class Meta:\n fields = ('facility', 'dataset', 'superuser', 'language_code')\n\n def validate_language_code(self, language_code):\n \"\"\"\n Check that the language_code is supported by Kolibri\n \"\"\"\n if not check_for_language(language_code):\n raise serializers.ValidationError(_(\"Language is not supported by Kolibri\"))\n return language_code\n\n def create(self, validated_data):\n \"\"\"\n Endpoint for initial setup of a device.\n Expects a value for:\n default language - the default language of this Kolibri device\n facility - the required fields for setting up a facility\n facilitydataset - facility configuration options\n superuser - the required fields for a facilityuser who will be set as the super user for this device\n \"\"\"\n with transaction.atomic():\n facility = Facility.objects.create(**validated_data.pop('facility'))\n preset = validated_data.pop('preset')\n dataset_data = mappings[preset]\n for key, value in dataset_data.items():\n setattr(facility.dataset, key, value)\n facility.dataset.save()\n superuser_data = validated_data.pop('superuser')\n superuser_data['facility'] = facility\n superuser = FacilityUserSerializer(data=superuser_data).create(superuser_data)\n facility.add_role(superuser, ADMIN)\n DevicePermissions.objects.create(user=superuser, is_superuser=True)\n language_code = validated_data.pop('language_code')\n device_settings, created = DeviceSettings.objects.get_or_create()\n device_settings.is_provisioned = True\n device_settings.language_code = language_code\n device_settings.save()\n return {\n \"facility\": facility,\n \"preset\": preset,\n \"superuser\": superuser,\n \"language_code\": language_code\n }\n", "path": "kolibri/core/device/serializers.py"}, {"content": "from django.conf import settings\nfrom django.db import models\nfrom kolibri.auth.models import FacilityUser\n\nfrom .permissions import UserCanManageDevicePermissions\n\n\nclass DevicePermissions(models.Model):\n \"\"\"\n This class stores metadata about device permissions for FacilityUsers.\n \"\"\"\n\n permissions = UserCanManageDevicePermissions()\n\n user = models.OneToOneField(FacilityUser, on_delete=models.CASCADE, related_name='devicepermissions', blank=False, null=False, primary_key=True)\n is_superuser = models.BooleanField(default=False)\n can_manage_content = models.BooleanField(default=False)\n\n\nclass DeviceSettings(models.Model):\n \"\"\"\n This class stores data about settings particular to this device\n \"\"\"\n\n is_provisioned = models.BooleanField(default=False)\n language_code = models.CharField(max_length=15, default=settings.LANGUAGE_CODE)\n\n def save(self, *args, **kwargs):\n self.pk = 1\n super(DeviceSettings, self).save(*args, **kwargs)\n", "path": "kolibri/core/device/models.py"}, {"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.7 on 2017-08-16 23:05\nfrom __future__ import unicode_literals\n\nimport django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('kolibriauth', '0003_auto_20170621_0958'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='DevicePermissions',\n fields=[\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='devicepermissions', serialize=False, to=settings.AUTH_USER_MODEL)),\n ('is_superuser', models.BooleanField(default=False)),\n ('can_manage_content', models.BooleanField(default=False)),\n ],\n ),\n migrations.CreateModel(\n name='DeviceSettings',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('is_provisioned', models.BooleanField(default=False)),\n ('language_code', models.CharField(default='en', max_length=15)),\n ],\n ),\n ]\n", "path": "kolibri/core/device/migrations/0001_initial.py"}]}
1,970
719
gh_patches_debug_21936
rasdani/github-patches
git_diff
beeware__toga-1373
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use Alpha Version of Pythonnet **Description** Pythonnet has released a few days ago an [alpha version](https://pypi.org/project/pythonnet/3.0.0a1/) of Pythonnet 3.0. ATM we use a hashed version (8d93c39d) of Pythonnet instead of an official release. In the case that we don't want to wait until an official version of Pythonnet is released (which we don't have any approximation when this would happen), I think we should at least use the alpha version. **Describe alternatives you've considered** An alternative is to keep the hashed version as it is :) </issue> <code> [start of src/winforms/setup.py] 1 #!/usr/bin/env python 2 import re 3 4 from setuptools import setup 5 6 # Version handline needs to be programatic because 7 # we can't import toga_winforms to compute the version; 8 # and to support versioned subpackage dependencies 9 with open('toga_winforms/__init__.py', encoding='utf8') as version_file: 10 version_match = re.search( 11 r"^__version__ = ['\"]([^'\"]*)['\"]", 12 version_file.read(), 13 re.M 14 ) 15 if version_match: 16 version = version_match.group(1) 17 else: 18 raise RuntimeError("Unable to find version string.") 19 20 setup( 21 version=version, 22 install_requires=[ 23 # The Python.net team hasn't published 2.X wheels for Python 3.9 or 3.10, 24 # and their development effort seems to be focussed on the 3.X branch; 25 # they've indicated they're not planning to make the 2.X branch compatible 26 # with Python 3.10. If we want to be able to support "current" Python, 27 # we need to work off a source release until they formally release 3.0. 28 # 29 # The 8d93c39d hash is, as best as I can work out, what was in the 30 # 3.0.0-preview2021-10-05 release published to nuget - but they didn't 31 # tag anything for that release. That release contained a bug 32 # (https://github.com/pythonnet/pythonnet/issues/1613) that didn't play well 33 # with pip 21.3, so we use 94b1a71c which was released about a month later. 34 'pythonnet @ git+https://github.com/pythonnet/pythonnet@94b1a71c#egg=pythonnet', 35 'toga-core==%s' % version, 36 ], 37 test_suite='tests', 38 test_require=[ 39 'toga-dummy==%s' % version, 40 ] 41 ) 42 [end of src/winforms/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/winforms/setup.py b/src/winforms/setup.py --- a/src/winforms/setup.py +++ b/src/winforms/setup.py @@ -24,14 +24,11 @@ # and their development effort seems to be focussed on the 3.X branch; # they've indicated they're not planning to make the 2.X branch compatible # with Python 3.10. If we want to be able to support "current" Python, - # we need to work off a source release until they formally release 3.0. + # we need to use the 3.0 branch. # - # The 8d93c39d hash is, as best as I can work out, what was in the - # 3.0.0-preview2021-10-05 release published to nuget - but they didn't - # tag anything for that release. That release contained a bug - # (https://github.com/pythonnet/pythonnet/issues/1613) that didn't play well - # with pip 21.3, so we use 94b1a71c which was released about a month later. - 'pythonnet @ git+https://github.com/pythonnet/pythonnet@94b1a71c#egg=pythonnet', + # At time of writing, the most recent (and only) version of Python.net 3.0 + # that has been released is the alpha version 3.0.0a1. + 'pythonnet>=3.0.0a1', 'toga-core==%s' % version, ], test_suite='tests',
{"golden_diff": "diff --git a/src/winforms/setup.py b/src/winforms/setup.py\n--- a/src/winforms/setup.py\n+++ b/src/winforms/setup.py\n@@ -24,14 +24,11 @@\n # and their development effort seems to be focussed on the 3.X branch;\n # they've indicated they're not planning to make the 2.X branch compatible\n # with Python 3.10. If we want to be able to support \"current\" Python,\n- # we need to work off a source release until they formally release 3.0.\n+ # we need to use the 3.0 branch.\n #\n- # The 8d93c39d hash is, as best as I can work out, what was in the\n- # 3.0.0-preview2021-10-05 release published to nuget - but they didn't\n- # tag anything for that release. That release contained a bug\n- # (https://github.com/pythonnet/pythonnet/issues/1613) that didn't play well\n- # with pip 21.3, so we use 94b1a71c which was released about a month later.\n- 'pythonnet @ git+https://github.com/pythonnet/pythonnet@94b1a71c#egg=pythonnet',\n+ # At time of writing, the most recent (and only) version of Python.net 3.0\n+ # that has been released is the alpha version 3.0.0a1.\n+ 'pythonnet>=3.0.0a1',\n 'toga-core==%s' % version,\n ],\n test_suite='tests',\n", "issue": "Use Alpha Version of Pythonnet\n**Description**\r\nPythonnet has released a few days ago an [alpha version](https://pypi.org/project/pythonnet/3.0.0a1/) of Pythonnet 3.0.\r\nATM we use a hashed version (8d93c39d) of Pythonnet instead of an official release.\r\n\r\nIn the case that we don't want to wait until an official version of Pythonnet is released (which we don't have any approximation when this would happen), I think we should at least use the alpha version.\r\n\r\n**Describe alternatives you've considered**\r\nAn alternative is to keep the hashed version as it is :)\n", "before_files": [{"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_winforms to compute the version;\n# and to support versioned subpackage dependencies\nwith open('toga_winforms/__init__.py', encoding='utf8') as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file.read(),\n re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n # The Python.net team hasn't published 2.X wheels for Python 3.9 or 3.10,\n # and their development effort seems to be focussed on the 3.X branch;\n # they've indicated they're not planning to make the 2.X branch compatible\n # with Python 3.10. If we want to be able to support \"current\" Python,\n # we need to work off a source release until they formally release 3.0.\n #\n # The 8d93c39d hash is, as best as I can work out, what was in the\n # 3.0.0-preview2021-10-05 release published to nuget - but they didn't\n # tag anything for that release. That release contained a bug\n # (https://github.com/pythonnet/pythonnet/issues/1613) that didn't play well\n # with pip 21.3, so we use 94b1a71c which was released about a month later.\n 'pythonnet @ git+https://github.com/pythonnet/pythonnet@94b1a71c#egg=pythonnet',\n 'toga-core==%s' % version,\n ],\n test_suite='tests',\n test_require=[\n 'toga-dummy==%s' % version,\n ]\n)\n", "path": "src/winforms/setup.py"}]}
1,195
382
gh_patches_debug_23370
rasdani/github-patches
git_diff
python-gitlab__python-gitlab-1373
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot list package files ## Description of the problem, including code/CLI snippet [Listing package files](https://docs.gitlab.com/ee/api/packages.html#list-package-files) appears to be unsupported. The API endpoint was introduced in GitLab 11.8. ## Expected Behavior Listing package files should be possible. ## Actual Behavior Listing package files is not possible. ## Specifications - python-gitlab version: 2.6.0 - API version you are using (v3/v4): v4 - Gitlab server version (or gitlab.com): gitlab.com PR incoming. </issue> <code> [start of gitlab/v4/objects/packages.py] 1 from gitlab.base import RESTManager, RESTObject 2 from gitlab.mixins import DeleteMixin, GetMixin, ListMixin, ObjectDeleteMixin 3 4 5 __all__ = [ 6 "GroupPackage", 7 "GroupPackageManager", 8 "ProjectPackage", 9 "ProjectPackageManager", 10 ] 11 12 13 class GroupPackage(RESTObject): 14 pass 15 16 17 class GroupPackageManager(ListMixin, RESTManager): 18 _path = "/groups/%(group_id)s/packages" 19 _obj_cls = GroupPackage 20 _from_parent_attrs = {"group_id": "id"} 21 _list_filters = ( 22 "exclude_subgroups", 23 "order_by", 24 "sort", 25 "package_type", 26 "package_name", 27 ) 28 29 30 class ProjectPackage(ObjectDeleteMixin, RESTObject): 31 pass 32 33 34 class ProjectPackageManager(ListMixin, GetMixin, DeleteMixin, RESTManager): 35 _path = "/projects/%(project_id)s/packages" 36 _obj_cls = ProjectPackage 37 _from_parent_attrs = {"project_id": "id"} 38 _list_filters = ( 39 "order_by", 40 "sort", 41 "package_type", 42 "package_name", 43 ) 44 [end of gitlab/v4/objects/packages.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gitlab/v4/objects/packages.py b/gitlab/v4/objects/packages.py --- a/gitlab/v4/objects/packages.py +++ b/gitlab/v4/objects/packages.py @@ -1,12 +1,13 @@ from gitlab.base import RESTManager, RESTObject from gitlab.mixins import DeleteMixin, GetMixin, ListMixin, ObjectDeleteMixin - __all__ = [ "GroupPackage", "GroupPackageManager", "ProjectPackage", "ProjectPackageManager", + "ProjectPackageFile", + "ProjectPackageFileManager", ] @@ -28,7 +29,7 @@ class ProjectPackage(ObjectDeleteMixin, RESTObject): - pass + _managers = (("package_files", "ProjectPackageFileManager"),) class ProjectPackageManager(ListMixin, GetMixin, DeleteMixin, RESTManager): @@ -41,3 +42,13 @@ "package_type", "package_name", ) + + +class ProjectPackageFile(RESTObject): + pass + + +class ProjectPackageFileManager(ListMixin, RESTManager): + _path = "/projects/%(project_id)s/packages/%(package_id)s/package_files" + _obj_cls = ProjectPackageFile + _from_parent_attrs = {"project_id": "project_id", "package_id": "id"}
{"golden_diff": "diff --git a/gitlab/v4/objects/packages.py b/gitlab/v4/objects/packages.py\n--- a/gitlab/v4/objects/packages.py\n+++ b/gitlab/v4/objects/packages.py\n@@ -1,12 +1,13 @@\n from gitlab.base import RESTManager, RESTObject\n from gitlab.mixins import DeleteMixin, GetMixin, ListMixin, ObjectDeleteMixin\n \n-\n __all__ = [\n \"GroupPackage\",\n \"GroupPackageManager\",\n \"ProjectPackage\",\n \"ProjectPackageManager\",\n+ \"ProjectPackageFile\",\n+ \"ProjectPackageFileManager\",\n ]\n \n \n@@ -28,7 +29,7 @@\n \n \n class ProjectPackage(ObjectDeleteMixin, RESTObject):\n- pass\n+ _managers = ((\"package_files\", \"ProjectPackageFileManager\"),)\n \n \n class ProjectPackageManager(ListMixin, GetMixin, DeleteMixin, RESTManager):\n@@ -41,3 +42,13 @@\n \"package_type\",\n \"package_name\",\n )\n+\n+\n+class ProjectPackageFile(RESTObject):\n+ pass\n+\n+\n+class ProjectPackageFileManager(ListMixin, RESTManager):\n+ _path = \"/projects/%(project_id)s/packages/%(package_id)s/package_files\"\n+ _obj_cls = ProjectPackageFile\n+ _from_parent_attrs = {\"project_id\": \"project_id\", \"package_id\": \"id\"}\n", "issue": "Cannot list package files\n## Description of the problem, including code/CLI snippet\r\n\r\n[Listing package files](https://docs.gitlab.com/ee/api/packages.html#list-package-files) appears to be unsupported. The API endpoint was introduced in GitLab 11.8.\r\n\r\n## Expected Behavior\r\n\r\nListing package files should be possible.\r\n\r\n## Actual Behavior\r\n\r\nListing package files is not possible.\r\n\r\n## Specifications\r\n\r\n - python-gitlab version: 2.6.0\r\n - API version you are using (v3/v4): v4\r\n - Gitlab server version (or gitlab.com): gitlab.com\r\n\r\n\r\nPR incoming.\n", "before_files": [{"content": "from gitlab.base import RESTManager, RESTObject\nfrom gitlab.mixins import DeleteMixin, GetMixin, ListMixin, ObjectDeleteMixin\n\n\n__all__ = [\n \"GroupPackage\",\n \"GroupPackageManager\",\n \"ProjectPackage\",\n \"ProjectPackageManager\",\n]\n\n\nclass GroupPackage(RESTObject):\n pass\n\n\nclass GroupPackageManager(ListMixin, RESTManager):\n _path = \"/groups/%(group_id)s/packages\"\n _obj_cls = GroupPackage\n _from_parent_attrs = {\"group_id\": \"id\"}\n _list_filters = (\n \"exclude_subgroups\",\n \"order_by\",\n \"sort\",\n \"package_type\",\n \"package_name\",\n )\n\n\nclass ProjectPackage(ObjectDeleteMixin, RESTObject):\n pass\n\n\nclass ProjectPackageManager(ListMixin, GetMixin, DeleteMixin, RESTManager):\n _path = \"/projects/%(project_id)s/packages\"\n _obj_cls = ProjectPackage\n _from_parent_attrs = {\"project_id\": \"id\"}\n _list_filters = (\n \"order_by\",\n \"sort\",\n \"package_type\",\n \"package_name\",\n )\n", "path": "gitlab/v4/objects/packages.py"}]}
993
294
gh_patches_debug_29144
rasdani/github-patches
git_diff
qtile__qtile-2235
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> No filename provided Image widget causes QTile to crash. # The issue When no filename argument, OR an invalid filename is provided for the Image widget, Qtile seems to crash, and needs to be killed to restart. You are obviously not supposed to provide a non-existant image, but I have doubts that it crashing is intended behavior. What I am describing here as a "crash" is no keyboard input being accepted, and windows from *all* other workspaces being displayed on the workspace you are currently on. If this is not actually a crash, I apologize, but regardless, Qtile becomes unusable until the process is killed and I am kicked back to my Display Manager. # Steps to reproduce In your bar, create a new ``Image`` widget somewhere inside. Either provide a path to an image that does not exist, or do not provide one period. # Qtile version This is the commit hash of the version I am running. 6c4d0557124989d46ffb2bb24f4468db687fcdb2 # Stack traces No stack traces from xsession-errors, or the Qtile log are produced, however I have traced the error (through using the logger provided in the module's file) to the ``_configure`` method of the Image widget, and it seems to be coming the line: ``base._Widget._configure(self, qtile, bar)`` # Configuration https://pastebin.com/qxBq6yPn If there is any information I got wrong here, or some other bit of information I can provide that will help this issue get solved, I will try my best. </issue> <code> [start of libqtile/widget/image.py] 1 # Copyright (c) 2013 dequis 2 # Copyright (c) 2014 Sean Vig 3 # Copyright (c) 2014 Adi Sieker 4 # 5 # Permission is hereby granted, free of charge, to any person obtaining a copy 6 # of this software and associated documentation files (the "Software"), to deal 7 # in the Software without restriction, including without limitation the rights 8 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 # copies of the Software, and to permit persons to whom the Software is 10 # furnished to do so, subject to the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be included in 13 # all copies or substantial portions of the Software. 14 # 15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 # SOFTWARE. 22 import os 23 24 from libqtile import bar 25 from libqtile.images import Img 26 from libqtile.log_utils import logger 27 from libqtile.widget import base 28 29 30 class Image(base._Widget, base.MarginMixin): 31 """Display a PNG image on the bar""" 32 orientations = base.ORIENTATION_BOTH 33 defaults = [ 34 ("scale", True, "Enable/Disable image scaling"), 35 ("rotate", 0.0, "rotate the image in degrees counter-clockwise"), 36 ("filename", None, "Image filename. Can contain '~'"), 37 ] 38 39 def __init__(self, length=bar.CALCULATED, width=None, **config): 40 # 'width' was replaced by 'length' since the widget can be installed in 41 # vertical bars 42 if width is not None: 43 logger.warning('width kwarg or positional argument is ' 44 'deprecated. Please use length.') 45 length = width 46 47 base._Widget.__init__(self, length, **config) 48 self.add_defaults(Image.defaults) 49 self.add_defaults(base.MarginMixin.defaults) 50 51 # make the default 0 instead 52 self._variable_defaults["margin"] = 0 53 54 def _configure(self, qtile, bar): 55 base._Widget._configure(self, qtile, bar) 56 57 if not self.filename: 58 raise ValueError("Filename not set!") 59 60 self.filename = os.path.expanduser(self.filename) 61 62 if not os.path.exists(self.filename): 63 raise ValueError("File does not exist: {}".format(self.filename)) 64 65 img = Img.from_path(self.filename) 66 self.img = img 67 img.theta = self.rotate 68 if not self.scale: 69 return 70 if self.bar.horizontal: 71 new_height = self.bar.height - (self.margin_y * 2) 72 img.resize(height=new_height) 73 else: 74 new_width = self.bar.width - (self.margin_x * 2) 75 img.resize(width=new_width) 76 77 def draw(self): 78 self.drawer.clear(self.background or self.bar.background) 79 self.drawer.ctx.save() 80 self.drawer.ctx.translate(self.margin_x, self.margin_y) 81 self.drawer.ctx.set_source(self.img.pattern) 82 self.drawer.ctx.paint() 83 self.drawer.ctx.restore() 84 85 if self.bar.horizontal: 86 self.drawer.draw(offsetx=self.offset, width=self.width) 87 else: 88 self.drawer.draw(offsety=self.offset, height=self.width) 89 90 def calculate_length(self): 91 if self.bar.horizontal: 92 return self.img.width + (self.margin_x * 2) 93 else: 94 return self.img.height + (self.margin_y * 2) 95 [end of libqtile/widget/image.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/widget/image.py b/libqtile/widget/image.py --- a/libqtile/widget/image.py +++ b/libqtile/widget/image.py @@ -53,14 +53,17 @@ def _configure(self, qtile, bar): base._Widget._configure(self, qtile, bar) + self.img = None if not self.filename: - raise ValueError("Filename not set!") + logger.warning("Image filename not set!") + return self.filename = os.path.expanduser(self.filename) if not os.path.exists(self.filename): - raise ValueError("File does not exist: {}".format(self.filename)) + logger.warning("Image does not exist: {}".format(self.filename)) + return img = Img.from_path(self.filename) self.img = img @@ -75,6 +78,9 @@ img.resize(width=new_width) def draw(self): + if self.img is None: + return + self.drawer.clear(self.background or self.bar.background) self.drawer.ctx.save() self.drawer.ctx.translate(self.margin_x, self.margin_y) @@ -88,6 +94,9 @@ self.drawer.draw(offsety=self.offset, height=self.width) def calculate_length(self): + if self.img is None: + return 0 + if self.bar.horizontal: return self.img.width + (self.margin_x * 2) else:
{"golden_diff": "diff --git a/libqtile/widget/image.py b/libqtile/widget/image.py\n--- a/libqtile/widget/image.py\n+++ b/libqtile/widget/image.py\n@@ -53,14 +53,17 @@\n \n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n+ self.img = None\n \n if not self.filename:\n- raise ValueError(\"Filename not set!\")\n+ logger.warning(\"Image filename not set!\")\n+ return\n \n self.filename = os.path.expanduser(self.filename)\n \n if not os.path.exists(self.filename):\n- raise ValueError(\"File does not exist: {}\".format(self.filename))\n+ logger.warning(\"Image does not exist: {}\".format(self.filename))\n+ return\n \n img = Img.from_path(self.filename)\n self.img = img\n@@ -75,6 +78,9 @@\n img.resize(width=new_width)\n \n def draw(self):\n+ if self.img is None:\n+ return\n+\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.ctx.save()\n self.drawer.ctx.translate(self.margin_x, self.margin_y)\n@@ -88,6 +94,9 @@\n self.drawer.draw(offsety=self.offset, height=self.width)\n \n def calculate_length(self):\n+ if self.img is None:\n+ return 0\n+\n if self.bar.horizontal:\n return self.img.width + (self.margin_x * 2)\n else:\n", "issue": "No filename provided Image widget causes QTile to crash.\n# The issue\r\nWhen no filename argument, OR an invalid filename is provided for the Image widget, Qtile seems to crash, and needs to be killed to restart. You are obviously not supposed to provide a non-existant image, but I have doubts that it crashing is intended behavior. \r\n\r\nWhat I am describing here as a \"crash\" is no keyboard input being accepted, and windows from *all* other workspaces being displayed on the workspace you are currently on. If this is not actually a crash, I apologize, but regardless, Qtile becomes unusable until the process is killed and I am kicked back to my Display Manager.\r\n\r\n# Steps to reproduce\r\nIn your bar, create a new ``Image`` widget somewhere inside. Either provide a path to an image that does not exist, or do not provide one period.\r\n\r\n# Qtile version\r\nThis is the commit hash of the version I am running.\r\n6c4d0557124989d46ffb2bb24f4468db687fcdb2\r\n\r\n# Stack traces\r\nNo stack traces from xsession-errors, or the Qtile log are produced, however I have traced the error (through using the logger provided in the module's file) to the ``_configure`` method of the Image widget, and it seems to be coming the line: ``base._Widget._configure(self, qtile, bar)``\r\n\r\n# Configuration\r\nhttps://pastebin.com/qxBq6yPn\r\n\r\nIf there is any information I got wrong here, or some other bit of information I can provide that will help this issue get solved, I will try my best.\n", "before_files": [{"content": "# Copyright (c) 2013 dequis\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\n\nfrom libqtile import bar\nfrom libqtile.images import Img\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass Image(base._Widget, base.MarginMixin):\n \"\"\"Display a PNG image on the bar\"\"\"\n orientations = base.ORIENTATION_BOTH\n defaults = [\n (\"scale\", True, \"Enable/Disable image scaling\"),\n (\"rotate\", 0.0, \"rotate the image in degrees counter-clockwise\"),\n (\"filename\", None, \"Image filename. Can contain '~'\"),\n ]\n\n def __init__(self, length=bar.CALCULATED, width=None, **config):\n # 'width' was replaced by 'length' since the widget can be installed in\n # vertical bars\n if width is not None:\n logger.warning('width kwarg or positional argument is '\n 'deprecated. Please use length.')\n length = width\n\n base._Widget.__init__(self, length, **config)\n self.add_defaults(Image.defaults)\n self.add_defaults(base.MarginMixin.defaults)\n\n # make the default 0 instead\n self._variable_defaults[\"margin\"] = 0\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n\n if not self.filename:\n raise ValueError(\"Filename not set!\")\n\n self.filename = os.path.expanduser(self.filename)\n\n if not os.path.exists(self.filename):\n raise ValueError(\"File does not exist: {}\".format(self.filename))\n\n img = Img.from_path(self.filename)\n self.img = img\n img.theta = self.rotate\n if not self.scale:\n return\n if self.bar.horizontal:\n new_height = self.bar.height - (self.margin_y * 2)\n img.resize(height=new_height)\n else:\n new_width = self.bar.width - (self.margin_x * 2)\n img.resize(width=new_width)\n\n def draw(self):\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.ctx.save()\n self.drawer.ctx.translate(self.margin_x, self.margin_y)\n self.drawer.ctx.set_source(self.img.pattern)\n self.drawer.ctx.paint()\n self.drawer.ctx.restore()\n\n if self.bar.horizontal:\n self.drawer.draw(offsetx=self.offset, width=self.width)\n else:\n self.drawer.draw(offsety=self.offset, height=self.width)\n\n def calculate_length(self):\n if self.bar.horizontal:\n return self.img.width + (self.margin_x * 2)\n else:\n return self.img.height + (self.margin_y * 2)\n", "path": "libqtile/widget/image.py"}]}
1,886
320
gh_patches_debug_37602
rasdani/github-patches
git_diff
arviz-devs__arviz-625
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove load_data and save_data functions before 0.4 `load_data` and `save_data` are currently deprecated (after 0.3.1 release). They need to be removed after 0.4 (assuming next release is going to be 0.3.2). </issue> <code> [start of arviz/data/__init__.py] 1 """Code for loading and manipulating data structures.""" 2 from .inference_data import InferenceData, concat 3 from .io_netcdf import from_netcdf, to_netcdf, load_data, save_data 4 from .datasets import load_arviz_data, list_datasets, clear_data_home 5 from .base import numpy_to_data_array, dict_to_dataset 6 from .converters import convert_to_dataset, convert_to_inference_data 7 from .io_cmdstan import from_cmdstan 8 from .io_dict import from_dict 9 from .io_pymc3 import from_pymc3 10 from .io_pystan import from_pystan 11 from .io_emcee import from_emcee 12 from .io_pyro import from_pyro 13 from .io_tfp import from_tfp 14 15 __all__ = [ 16 "InferenceData", 17 "concat", 18 "load_arviz_data", 19 "list_datasets", 20 "clear_data_home", 21 "numpy_to_data_array", 22 "dict_to_dataset", 23 "convert_to_dataset", 24 "convert_to_inference_data", 25 "from_pymc3", 26 "from_pystan", 27 "from_emcee", 28 "from_cmdstan", 29 "from_dict", 30 "from_pyro", 31 "from_tfp", 32 "from_netcdf", 33 "to_netcdf", 34 "load_data", 35 "save_data", 36 ] 37 [end of arviz/data/__init__.py] [start of arviz/data/io_netcdf.py] 1 """Input and output support for data.""" 2 import warnings 3 from .inference_data import InferenceData 4 from .converters import convert_to_inference_data 5 6 7 def from_netcdf(filename): 8 """Load netcdf file back into an arviz.InferenceData. 9 10 Parameters 11 ---------- 12 filename : str 13 name or path of the file to load trace 14 """ 15 return InferenceData.from_netcdf(filename) 16 17 18 def to_netcdf(data, filename, *, group="posterior", coords=None, dims=None): 19 """Save dataset as a netcdf file. 20 21 WARNING: Only idempotent in case `data` is InferenceData 22 23 Parameters 24 ---------- 25 data : InferenceData, or any object accepted by `convert_to_inference_data` 26 Object to be saved 27 filename : str 28 name or path of the file to load trace 29 group : str (optional) 30 In case `data` is not InferenceData, this is the group it will be saved to 31 coords : dict (optional) 32 See `convert_to_inference_data` 33 dims : dict (optional) 34 See `convert_to_inference_data` 35 36 Returns 37 ------- 38 str 39 filename saved to 40 """ 41 inference_data = convert_to_inference_data(data, group=group, coords=coords, dims=dims) 42 file_name = inference_data.to_netcdf(filename) 43 return file_name 44 45 46 def load_data(filename): 47 """Load netcdf file back into an arviz.InferenceData. 48 49 Parameters 50 ---------- 51 filename : str 52 name or path of the file to load trace 53 54 Note 55 ---- 56 This function is deprecated and will be removed in 0.4. 57 Use `from_netcdf` instead. 58 """ 59 warnings.warn( 60 "The 'load_data' function is deprecated as of 0.3.2, use 'from_netcdf' instead", 61 DeprecationWarning, 62 ) 63 return from_netcdf(filename=filename) 64 65 66 def save_data(data, filename, *, group="posterior", coords=None, dims=None): 67 """Save dataset as a netcdf file. 68 69 WARNING: Only idempotent in case `data` is InferenceData 70 71 Parameters 72 ---------- 73 data : InferenceData, or any object accepted by `convert_to_inference_data` 74 Object to be saved 75 filename : str 76 name or path of the file to load trace 77 group : str (optional) 78 In case `data` is not InferenceData, this is the group it will be saved to 79 coords : dict (optional) 80 See `convert_to_inference_data` 81 dims : dict (optional) 82 See `convert_to_inference_data` 83 84 Returns 85 ------- 86 str 87 filename saved to 88 89 Note 90 ---- 91 This function is deprecated and will be removed in 0.4. 92 Use `to_netcdf` instead. 93 """ 94 warnings.warn( 95 "The 'save_data' function is deprecated as of 0.3.2, use 'to_netcdf' instead", 96 DeprecationWarning, 97 ) 98 return to_netcdf(data=data, filename=filename, group=group, coords=coords, dims=dims) 99 [end of arviz/data/io_netcdf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/arviz/data/__init__.py b/arviz/data/__init__.py --- a/arviz/data/__init__.py +++ b/arviz/data/__init__.py @@ -1,6 +1,6 @@ """Code for loading and manipulating data structures.""" from .inference_data import InferenceData, concat -from .io_netcdf import from_netcdf, to_netcdf, load_data, save_data +from .io_netcdf import from_netcdf, to_netcdf from .datasets import load_arviz_data, list_datasets, clear_data_home from .base import numpy_to_data_array, dict_to_dataset from .converters import convert_to_dataset, convert_to_inference_data @@ -31,6 +31,4 @@ "from_tfp", "from_netcdf", "to_netcdf", - "load_data", - "save_data", ] diff --git a/arviz/data/io_netcdf.py b/arviz/data/io_netcdf.py --- a/arviz/data/io_netcdf.py +++ b/arviz/data/io_netcdf.py @@ -1,5 +1,5 @@ """Input and output support for data.""" -import warnings + from .inference_data import InferenceData from .converters import convert_to_inference_data @@ -41,58 +41,3 @@ inference_data = convert_to_inference_data(data, group=group, coords=coords, dims=dims) file_name = inference_data.to_netcdf(filename) return file_name - - -def load_data(filename): - """Load netcdf file back into an arviz.InferenceData. - - Parameters - ---------- - filename : str - name or path of the file to load trace - - Note - ---- - This function is deprecated and will be removed in 0.4. - Use `from_netcdf` instead. - """ - warnings.warn( - "The 'load_data' function is deprecated as of 0.3.2, use 'from_netcdf' instead", - DeprecationWarning, - ) - return from_netcdf(filename=filename) - - -def save_data(data, filename, *, group="posterior", coords=None, dims=None): - """Save dataset as a netcdf file. - - WARNING: Only idempotent in case `data` is InferenceData - - Parameters - ---------- - data : InferenceData, or any object accepted by `convert_to_inference_data` - Object to be saved - filename : str - name or path of the file to load trace - group : str (optional) - In case `data` is not InferenceData, this is the group it will be saved to - coords : dict (optional) - See `convert_to_inference_data` - dims : dict (optional) - See `convert_to_inference_data` - - Returns - ------- - str - filename saved to - - Note - ---- - This function is deprecated and will be removed in 0.4. - Use `to_netcdf` instead. - """ - warnings.warn( - "The 'save_data' function is deprecated as of 0.3.2, use 'to_netcdf' instead", - DeprecationWarning, - ) - return to_netcdf(data=data, filename=filename, group=group, coords=coords, dims=dims)
{"golden_diff": "diff --git a/arviz/data/__init__.py b/arviz/data/__init__.py\n--- a/arviz/data/__init__.py\n+++ b/arviz/data/__init__.py\n@@ -1,6 +1,6 @@\n \"\"\"Code for loading and manipulating data structures.\"\"\"\n from .inference_data import InferenceData, concat\n-from .io_netcdf import from_netcdf, to_netcdf, load_data, save_data\n+from .io_netcdf import from_netcdf, to_netcdf\n from .datasets import load_arviz_data, list_datasets, clear_data_home\n from .base import numpy_to_data_array, dict_to_dataset\n from .converters import convert_to_dataset, convert_to_inference_data\n@@ -31,6 +31,4 @@\n \"from_tfp\",\n \"from_netcdf\",\n \"to_netcdf\",\n- \"load_data\",\n- \"save_data\",\n ]\ndiff --git a/arviz/data/io_netcdf.py b/arviz/data/io_netcdf.py\n--- a/arviz/data/io_netcdf.py\n+++ b/arviz/data/io_netcdf.py\n@@ -1,5 +1,5 @@\n \"\"\"Input and output support for data.\"\"\"\n-import warnings\n+\n from .inference_data import InferenceData\n from .converters import convert_to_inference_data\n \n@@ -41,58 +41,3 @@\n inference_data = convert_to_inference_data(data, group=group, coords=coords, dims=dims)\n file_name = inference_data.to_netcdf(filename)\n return file_name\n-\n-\n-def load_data(filename):\n- \"\"\"Load netcdf file back into an arviz.InferenceData.\n-\n- Parameters\n- ----------\n- filename : str\n- name or path of the file to load trace\n-\n- Note\n- ----\n- This function is deprecated and will be removed in 0.4.\n- Use `from_netcdf` instead.\n- \"\"\"\n- warnings.warn(\n- \"The 'load_data' function is deprecated as of 0.3.2, use 'from_netcdf' instead\",\n- DeprecationWarning,\n- )\n- return from_netcdf(filename=filename)\n-\n-\n-def save_data(data, filename, *, group=\"posterior\", coords=None, dims=None):\n- \"\"\"Save dataset as a netcdf file.\n-\n- WARNING: Only idempotent in case `data` is InferenceData\n-\n- Parameters\n- ----------\n- data : InferenceData, or any object accepted by `convert_to_inference_data`\n- Object to be saved\n- filename : str\n- name or path of the file to load trace\n- group : str (optional)\n- In case `data` is not InferenceData, this is the group it will be saved to\n- coords : dict (optional)\n- See `convert_to_inference_data`\n- dims : dict (optional)\n- See `convert_to_inference_data`\n-\n- Returns\n- -------\n- str\n- filename saved to\n-\n- Note\n- ----\n- This function is deprecated and will be removed in 0.4.\n- Use `to_netcdf` instead.\n- \"\"\"\n- warnings.warn(\n- \"The 'save_data' function is deprecated as of 0.3.2, use 'to_netcdf' instead\",\n- DeprecationWarning,\n- )\n- return to_netcdf(data=data, filename=filename, group=group, coords=coords, dims=dims)\n", "issue": "Remove load_data and save_data functions before 0.4\n`load_data` and `save_data` are currently deprecated (after 0.3.1 release). They need to be removed after 0.4 (assuming next release is going to be 0.3.2).\n", "before_files": [{"content": "\"\"\"Code for loading and manipulating data structures.\"\"\"\nfrom .inference_data import InferenceData, concat\nfrom .io_netcdf import from_netcdf, to_netcdf, load_data, save_data\nfrom .datasets import load_arviz_data, list_datasets, clear_data_home\nfrom .base import numpy_to_data_array, dict_to_dataset\nfrom .converters import convert_to_dataset, convert_to_inference_data\nfrom .io_cmdstan import from_cmdstan\nfrom .io_dict import from_dict\nfrom .io_pymc3 import from_pymc3\nfrom .io_pystan import from_pystan\nfrom .io_emcee import from_emcee\nfrom .io_pyro import from_pyro\nfrom .io_tfp import from_tfp\n\n__all__ = [\n \"InferenceData\",\n \"concat\",\n \"load_arviz_data\",\n \"list_datasets\",\n \"clear_data_home\",\n \"numpy_to_data_array\",\n \"dict_to_dataset\",\n \"convert_to_dataset\",\n \"convert_to_inference_data\",\n \"from_pymc3\",\n \"from_pystan\",\n \"from_emcee\",\n \"from_cmdstan\",\n \"from_dict\",\n \"from_pyro\",\n \"from_tfp\",\n \"from_netcdf\",\n \"to_netcdf\",\n \"load_data\",\n \"save_data\",\n]\n", "path": "arviz/data/__init__.py"}, {"content": "\"\"\"Input and output support for data.\"\"\"\nimport warnings\nfrom .inference_data import InferenceData\nfrom .converters import convert_to_inference_data\n\n\ndef from_netcdf(filename):\n \"\"\"Load netcdf file back into an arviz.InferenceData.\n\n Parameters\n ----------\n filename : str\n name or path of the file to load trace\n \"\"\"\n return InferenceData.from_netcdf(filename)\n\n\ndef to_netcdf(data, filename, *, group=\"posterior\", coords=None, dims=None):\n \"\"\"Save dataset as a netcdf file.\n\n WARNING: Only idempotent in case `data` is InferenceData\n\n Parameters\n ----------\n data : InferenceData, or any object accepted by `convert_to_inference_data`\n Object to be saved\n filename : str\n name or path of the file to load trace\n group : str (optional)\n In case `data` is not InferenceData, this is the group it will be saved to\n coords : dict (optional)\n See `convert_to_inference_data`\n dims : dict (optional)\n See `convert_to_inference_data`\n\n Returns\n -------\n str\n filename saved to\n \"\"\"\n inference_data = convert_to_inference_data(data, group=group, coords=coords, dims=dims)\n file_name = inference_data.to_netcdf(filename)\n return file_name\n\n\ndef load_data(filename):\n \"\"\"Load netcdf file back into an arviz.InferenceData.\n\n Parameters\n ----------\n filename : str\n name or path of the file to load trace\n\n Note\n ----\n This function is deprecated and will be removed in 0.4.\n Use `from_netcdf` instead.\n \"\"\"\n warnings.warn(\n \"The 'load_data' function is deprecated as of 0.3.2, use 'from_netcdf' instead\",\n DeprecationWarning,\n )\n return from_netcdf(filename=filename)\n\n\ndef save_data(data, filename, *, group=\"posterior\", coords=None, dims=None):\n \"\"\"Save dataset as a netcdf file.\n\n WARNING: Only idempotent in case `data` is InferenceData\n\n Parameters\n ----------\n data : InferenceData, or any object accepted by `convert_to_inference_data`\n Object to be saved\n filename : str\n name or path of the file to load trace\n group : str (optional)\n In case `data` is not InferenceData, this is the group it will be saved to\n coords : dict (optional)\n See `convert_to_inference_data`\n dims : dict (optional)\n See `convert_to_inference_data`\n\n Returns\n -------\n str\n filename saved to\n\n Note\n ----\n This function is deprecated and will be removed in 0.4.\n Use `to_netcdf` instead.\n \"\"\"\n warnings.warn(\n \"The 'save_data' function is deprecated as of 0.3.2, use 'to_netcdf' instead\",\n DeprecationWarning,\n )\n return to_netcdf(data=data, filename=filename, group=group, coords=coords, dims=dims)\n", "path": "arviz/data/io_netcdf.py"}]}
1,873
783
gh_patches_debug_15681
rasdani/github-patches
git_diff
TheAlgorithms__Python-1461
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DIRECTORY.md not being updated by TravisCI - [x] .travis.yml isn't updating DIRECTORY.md automatically as it should - [x] scripts/build_directory_md.py needs can have some minor changes too. #1461 </issue> <code> [start of scripts/build_directory_md.py] 1 #!/usr/bin/env python3 2 3 import os 4 from typing import Iterator 5 6 URL_BASE = "https://github.com/TheAlgorithms/Python/blob/master" 7 8 9 def good_filepaths(top_dir: str = ".") -> Iterator[str]: 10 for dirpath, dirnames, filenames in os.walk(top_dir): 11 dirnames[:] = [d for d in dirnames if d != "scripts" and d[0] not in "._"] 12 for filename in filenames: 13 if filename == "__init__.py": 14 continue 15 if os.path.splitext(filename)[1] in (".py", ".ipynb"): 16 yield os.path.join(dirpath, filename).lstrip("./") 17 18 19 def md_prefix(i): 20 return f"{i * ' '}*" if i else "##" 21 22 23 def print_path(old_path: str, new_path: str) -> str: 24 old_parts = old_path.split(os.sep) 25 for i, new_part in enumerate(new_path.split(os.sep)): 26 if i + 1 > len(old_parts) or old_parts[i] != new_part: 27 if new_part: 28 print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}") 29 return new_path 30 31 32 def print_directory_md(top_dir: str = ".") -> None: 33 old_path = "" 34 for filepath in sorted(good_filepaths()): 35 filepath, filename = os.path.split(filepath) 36 if filepath != old_path: 37 old_path = print_path(old_path, filepath) 38 indent = (filepath.count(os.sep) + 1) if filepath else 0 39 url = "/".join((URL_BASE, filepath, filename)).replace(" ", "%20") 40 filename = os.path.splitext(filename.replace("_", " "))[0] 41 print(f"{md_prefix(indent)} [{filename}]({url})") 42 43 44 if __name__ == "__main__": 45 print_directory_md(".") 46 [end of scripts/build_directory_md.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -17,7 +17,7 @@ def md_prefix(i): - return f"{i * ' '}*" if i else "##" + return f"{i * ' '}*" if i else "\n##" def print_path(old_path: str, new_path: str) -> str: @@ -37,7 +37,7 @@ old_path = print_path(old_path, filepath) indent = (filepath.count(os.sep) + 1) if filepath else 0 url = "/".join((URL_BASE, filepath, filename)).replace(" ", "%20") - filename = os.path.splitext(filename.replace("_", " "))[0] + filename = os.path.splitext(filename.replace("_", " ").title())[0] print(f"{md_prefix(indent)} [{filename}]({url})")
{"golden_diff": "diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py\n--- a/scripts/build_directory_md.py\n+++ b/scripts/build_directory_md.py\n@@ -17,7 +17,7 @@\n \n \n def md_prefix(i):\n- return f\"{i * ' '}*\" if i else \"##\"\n+ return f\"{i * ' '}*\" if i else \"\\n##\"\n \n \n def print_path(old_path: str, new_path: str) -> str:\n@@ -37,7 +37,7 @@\n old_path = print_path(old_path, filepath)\n indent = (filepath.count(os.sep) + 1) if filepath else 0\n url = \"/\".join((URL_BASE, filepath, filename)).replace(\" \", \"%20\")\n- filename = os.path.splitext(filename.replace(\"_\", \" \"))[0]\n+ filename = os.path.splitext(filename.replace(\"_\", \" \").title())[0]\n print(f\"{md_prefix(indent)} [{filename}]({url})\")\n", "issue": "DIRECTORY.md not being updated by TravisCI\n- [x] .travis.yml isn't updating DIRECTORY.md automatically as it should\r\n- [x] scripts/build_directory_md.py needs can have some minor changes too. #1461\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nfrom typing import Iterator\n\nURL_BASE = \"https://github.com/TheAlgorithms/Python/blob/master\"\n\n\ndef good_filepaths(top_dir: str = \".\") -> Iterator[str]:\n for dirpath, dirnames, filenames in os.walk(top_dir):\n dirnames[:] = [d for d in dirnames if d != \"scripts\" and d[0] not in \"._\"]\n for filename in filenames:\n if filename == \"__init__.py\":\n continue\n if os.path.splitext(filename)[1] in (\".py\", \".ipynb\"):\n yield os.path.join(dirpath, filename).lstrip(\"./\")\n\n\ndef md_prefix(i):\n return f\"{i * ' '}*\" if i else \"##\"\n\n\ndef print_path(old_path: str, new_path: str) -> str:\n old_parts = old_path.split(os.sep)\n for i, new_part in enumerate(new_path.split(os.sep)):\n if i + 1 > len(old_parts) or old_parts[i] != new_part:\n if new_part:\n print(f\"{md_prefix(i)} {new_part.replace('_', ' ').title()}\")\n return new_path\n\n\ndef print_directory_md(top_dir: str = \".\") -> None:\n old_path = \"\"\n for filepath in sorted(good_filepaths()):\n filepath, filename = os.path.split(filepath)\n if filepath != old_path:\n old_path = print_path(old_path, filepath)\n indent = (filepath.count(os.sep) + 1) if filepath else 0\n url = \"/\".join((URL_BASE, filepath, filename)).replace(\" \", \"%20\")\n filename = os.path.splitext(filename.replace(\"_\", \" \"))[0]\n print(f\"{md_prefix(indent)} [{filename}]({url})\")\n\n\nif __name__ == \"__main__\":\n print_directory_md(\".\")\n", "path": "scripts/build_directory_md.py"}]}
1,073
214
gh_patches_debug_28663
rasdani/github-patches
git_diff
ray-project__ray-8177
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ray async api is not working with uvloop. <!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant--> ### What is the problem? Current Ray async api uses asyncio event loop's internal attribute to identify if the loop is running in the current current thread. ```python3 loop = asyncio.get_event_loop() if loop.is_running(): if loop._thread_id != threading.get_ident(): # If the loop is runing outside current thread, we actually need # to do this to make sure the context is initialized. asyncio.run_coroutine_threadsafe(_async_init(), loop=loop) ``` This causes a problem when we uses Ray APIs inside Fast API because Fast API uses uvloop as its main event loop, and uvloop doesn't have `_thread_id` attribute. @simon-mo Any good idea to fix this? It doesn't seem to be trivial. What about we do async_init() whenever asyncio loop is created in a different thread instead of checking if the event loop's thread id? I assume the only use case where asyncio loop is defined in a different thread is only inside async actor? ### Reproduction (REQUIRED) Please provide a script that can be run to reproduce the issue. The script should have **no external library dependencies** (i.e., use fake or mock data / environments): ```python3 import time import asyncio ​ import ray import psutil from fastapi import FastAPI, APIRouter ​ ​ app = FastAPI( title="API template", description="Template to build upon for API serving and distributed computation", version="0.1.0", openapi_url="/openapi.json", docs_url="/docs", ) ​ @app.on_event("startup") def startup_event(): ray.init(num_cpus=2) ​ ​ ​ @app.on_event("shutdown") def shutdown_event(): ray.shutdown() ​ ​ @app.get('/async') async def non_seq_async_process(): """ async distributed execution """ @ray.remote def slow_function(i): time.sleep(i) return i ​ start_time = time.time() ​ # result_ids = [] # for i in range(10, 60, 10): # result_ids.append(slow_function.remote(i)) # results = ray.get(result_ids) ​ results = await asyncio.wait([slow_function.remote(i) for i in range(10, 60, 10)]) ​ duration = time.time() - start_time out = "Executing the for loop took {:.3f} seconds.\n".format(duration) out += f"The results are: {results}\n" ``` If we cannot run your script, we cannot fix your issue. - [x] I have verified my script runs in a clean environment and reproduces the issue. - [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html). </issue> <code> [start of python/ray/experimental/async_api.py] 1 # Note: asyncio is only compatible with Python 3 2 3 import asyncio 4 import threading 5 6 import ray 7 from ray.experimental.async_plasma import PlasmaEventHandler 8 from ray.services import logger 9 10 handler = None 11 12 13 async def _async_init(): 14 global handler 15 if handler is None: 16 worker = ray.worker.global_worker 17 loop = asyncio.get_event_loop() 18 handler = PlasmaEventHandler(loop, worker) 19 worker.core_worker.set_plasma_added_callback(handler) 20 logger.debug("AsyncPlasma Connection Created!") 21 22 23 def init(): 24 """ 25 Initialize synchronously. 26 """ 27 assert ray.is_initialized(), "Please call ray.init before async_api.init" 28 29 # Noop when handler is set. 30 if handler is not None: 31 return 32 33 loop = asyncio.get_event_loop() 34 if loop.is_running(): 35 if loop._thread_id != threading.get_ident(): 36 # If the loop is runing outside current thread, we actually need 37 # to do this to make sure the context is initialized. 38 asyncio.run_coroutine_threadsafe(_async_init(), loop=loop) 39 else: 40 async_init_done = asyncio.get_event_loop().create_task( 41 _async_init()) 42 # Block until the async init finishes. 43 async_init_done.done() 44 else: 45 asyncio.get_event_loop().run_until_complete(_async_init()) 46 47 48 def as_future(object_id): 49 """Turn an object_id into a Future object. 50 51 Args: 52 object_id: A Ray object_id. 53 54 Returns: 55 PlasmaObjectFuture: A future object that waits the object_id. 56 """ 57 if handler is None: 58 init() 59 return handler.as_future(object_id) 60 61 62 def shutdown(): 63 """Manually shutdown the async API. 64 65 Cancels all related tasks and all the socket transportation. 66 """ 67 global handler 68 if handler is not None: 69 handler.close() 70 handler = None 71 [end of python/ray/experimental/async_api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/ray/experimental/async_api.py b/python/ray/experimental/async_api.py --- a/python/ray/experimental/async_api.py +++ b/python/ray/experimental/async_api.py @@ -1,7 +1,4 @@ -# Note: asyncio is only compatible with Python 3 - import asyncio -import threading import ray from ray.experimental.async_plasma import PlasmaEventHandler @@ -10,7 +7,10 @@ handler = None -async def _async_init(): +def init(): + """Initialize plasma event handlers for asyncio support.""" + assert ray.is_initialized(), "Please call ray.init before async_api.init" + global handler if handler is None: worker = ray.worker.global_worker @@ -20,31 +20,6 @@ logger.debug("AsyncPlasma Connection Created!") -def init(): - """ - Initialize synchronously. - """ - assert ray.is_initialized(), "Please call ray.init before async_api.init" - - # Noop when handler is set. - if handler is not None: - return - - loop = asyncio.get_event_loop() - if loop.is_running(): - if loop._thread_id != threading.get_ident(): - # If the loop is runing outside current thread, we actually need - # to do this to make sure the context is initialized. - asyncio.run_coroutine_threadsafe(_async_init(), loop=loop) - else: - async_init_done = asyncio.get_event_loop().create_task( - _async_init()) - # Block until the async init finishes. - async_init_done.done() - else: - asyncio.get_event_loop().run_until_complete(_async_init()) - - def as_future(object_id): """Turn an object_id into a Future object.
{"golden_diff": "diff --git a/python/ray/experimental/async_api.py b/python/ray/experimental/async_api.py\n--- a/python/ray/experimental/async_api.py\n+++ b/python/ray/experimental/async_api.py\n@@ -1,7 +1,4 @@\n-# Note: asyncio is only compatible with Python 3\n-\n import asyncio\n-import threading\n \n import ray\n from ray.experimental.async_plasma import PlasmaEventHandler\n@@ -10,7 +7,10 @@\n handler = None\n \n \n-async def _async_init():\n+def init():\n+ \"\"\"Initialize plasma event handlers for asyncio support.\"\"\"\n+ assert ray.is_initialized(), \"Please call ray.init before async_api.init\"\n+\n global handler\n if handler is None:\n worker = ray.worker.global_worker\n@@ -20,31 +20,6 @@\n logger.debug(\"AsyncPlasma Connection Created!\")\n \n \n-def init():\n- \"\"\"\n- Initialize synchronously.\n- \"\"\"\n- assert ray.is_initialized(), \"Please call ray.init before async_api.init\"\n-\n- # Noop when handler is set.\n- if handler is not None:\n- return\n-\n- loop = asyncio.get_event_loop()\n- if loop.is_running():\n- if loop._thread_id != threading.get_ident():\n- # If the loop is runing outside current thread, we actually need\n- # to do this to make sure the context is initialized.\n- asyncio.run_coroutine_threadsafe(_async_init(), loop=loop)\n- else:\n- async_init_done = asyncio.get_event_loop().create_task(\n- _async_init())\n- # Block until the async init finishes.\n- async_init_done.done()\n- else:\n- asyncio.get_event_loop().run_until_complete(_async_init())\n-\n-\n def as_future(object_id):\n \"\"\"Turn an object_id into a Future object.\n", "issue": "Ray async api is not working with uvloop.\n<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->\r\n\r\n### What is the problem?\r\n\r\nCurrent Ray async api uses asyncio event loop's internal attribute to identify if the loop is running in the current current thread.\r\n\r\n```python3\r\n loop = asyncio.get_event_loop()\r\n if loop.is_running():\r\n if loop._thread_id != threading.get_ident():\r\n # If the loop is runing outside current thread, we actually need\r\n # to do this to make sure the context is initialized.\r\n asyncio.run_coroutine_threadsafe(_async_init(), loop=loop)\r\n```\r\nThis causes a problem when we uses Ray APIs inside Fast API because Fast API uses uvloop as its main event loop, and uvloop doesn't have `_thread_id` attribute.\r\n\r\n@simon-mo Any good idea to fix this? It doesn't seem to be trivial. What about we do async_init() whenever asyncio loop is created in a different thread instead of checking if the event loop's thread id? I assume the only use case where asyncio loop is defined in a different thread is only inside async actor? \r\n\r\n### Reproduction (REQUIRED)\r\nPlease provide a script that can be run to reproduce the issue. The script should have **no external library dependencies** (i.e., use fake or mock data / environments):\r\n\r\n```python3\r\nimport time\r\nimport asyncio \r\n\u200b\r\nimport ray\r\nimport psutil\r\nfrom fastapi import FastAPI, APIRouter\r\n\u200b\r\n\u200b\r\napp = FastAPI(\r\n title=\"API template\",\r\n description=\"Template to build upon for API serving and distributed computation\",\r\n version=\"0.1.0\",\r\n openapi_url=\"/openapi.json\",\r\n docs_url=\"/docs\",\r\n)\r\n\u200b\r\[email protected]_event(\"startup\")\r\ndef startup_event():\r\n ray.init(num_cpus=2)\r\n\u200b\r\n\u200b\r\n\u200b\r\[email protected]_event(\"shutdown\")\r\ndef shutdown_event():\r\n ray.shutdown()\r\n\u200b\r\n\u200b\r\[email protected]('/async')\r\nasync def non_seq_async_process():\r\n \"\"\"\r\n async distributed execution\r\n \"\"\"\r\n @ray.remote\r\n def slow_function(i):\r\n time.sleep(i)\r\n return i\r\n\u200b\r\n start_time = time.time()\r\n\u200b\r\n # result_ids = []\r\n # for i in range(10, 60, 10):\r\n # result_ids.append(slow_function.remote(i))\r\n \r\n # results = ray.get(result_ids)\r\n\u200b\r\n results = await asyncio.wait([slow_function.remote(i) for i in range(10, 60, 10)])\r\n\u200b\r\n \r\n duration = time.time() - start_time\r\n out = \"Executing the for loop took {:.3f} seconds.\\n\".format(duration)\r\n out += f\"The results are: {results}\\n\"\r\n\r\n```\r\n\r\nIf we cannot run your script, we cannot fix your issue.\r\n\r\n- [x] I have verified my script runs in a clean environment and reproduces the issue.\r\n- [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).\r\n\n", "before_files": [{"content": "# Note: asyncio is only compatible with Python 3\n\nimport asyncio\nimport threading\n\nimport ray\nfrom ray.experimental.async_plasma import PlasmaEventHandler\nfrom ray.services import logger\n\nhandler = None\n\n\nasync def _async_init():\n global handler\n if handler is None:\n worker = ray.worker.global_worker\n loop = asyncio.get_event_loop()\n handler = PlasmaEventHandler(loop, worker)\n worker.core_worker.set_plasma_added_callback(handler)\n logger.debug(\"AsyncPlasma Connection Created!\")\n\n\ndef init():\n \"\"\"\n Initialize synchronously.\n \"\"\"\n assert ray.is_initialized(), \"Please call ray.init before async_api.init\"\n\n # Noop when handler is set.\n if handler is not None:\n return\n\n loop = asyncio.get_event_loop()\n if loop.is_running():\n if loop._thread_id != threading.get_ident():\n # If the loop is runing outside current thread, we actually need\n # to do this to make sure the context is initialized.\n asyncio.run_coroutine_threadsafe(_async_init(), loop=loop)\n else:\n async_init_done = asyncio.get_event_loop().create_task(\n _async_init())\n # Block until the async init finishes.\n async_init_done.done()\n else:\n asyncio.get_event_loop().run_until_complete(_async_init())\n\n\ndef as_future(object_id):\n \"\"\"Turn an object_id into a Future object.\n\n Args:\n object_id: A Ray object_id.\n\n Returns:\n PlasmaObjectFuture: A future object that waits the object_id.\n \"\"\"\n if handler is None:\n init()\n return handler.as_future(object_id)\n\n\ndef shutdown():\n \"\"\"Manually shutdown the async API.\n\n Cancels all related tasks and all the socket transportation.\n \"\"\"\n global handler\n if handler is not None:\n handler.close()\n handler = None\n", "path": "python/ray/experimental/async_api.py"}]}
1,730
400
gh_patches_debug_27254
rasdani/github-patches
git_diff
nextcloud__appstore-272
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create documentation section for explaining certificates, signing and how it all works together App devs need a very quick tutorial/walkthrough in the docs on how to generate a new certificate pair, request the public cert to be signed, registering an app id, revoking certs (like registering certs: post it on our issue tracker) and signing apps. Also some background needs to be provided on how the whole certificate mechanism works. </issue> <code> [start of nextcloudappstore/core/forms.py] 1 from django.forms import Form, CharField, Textarea, ChoiceField, RadioSelect, \ 2 BooleanField 3 from django.utils.translation import ugettext_lazy as _ # type: ignore 4 5 from nextcloudappstore.core.models import App, AppRating 6 7 RATING_CHOICES = ( 8 (0.0, _('Bad')), 9 (0.5, _('Ok')), 10 (1.0, _('Good')) 11 ) 12 13 14 class AppReleaseUploadForm(Form): 15 download = CharField(label=_('Download link (tar.gz)'), max_length=256) 16 signature = CharField(widget=Textarea, label=_('SHA512 signature'), 17 help_text=_( 18 'Hint: can be calculated by executing the ' 19 'following command: openssl dgst -sha512 -sign ' 20 '/path/to/private-cert.key /path/to/app.tar.gz ' 21 '| openssl base64')) 22 nightly = BooleanField(label=_('Nightly')) 23 24 25 class AppRatingForm(Form): 26 def __init__(self, *args, **kwargs): 27 self._id = kwargs.pop('id', None) 28 self._user = kwargs.pop('user', None) 29 self._language_code = kwargs.pop('language_code', None) 30 super().__init__(*args, **kwargs) 31 32 rating = ChoiceField(initial=0.5, choices=RATING_CHOICES, 33 widget=RadioSelect) 34 comment = CharField(widget=Textarea, required=False, 35 label=_('Review')) 36 37 class Meta: 38 fields = ('rating', 'comment') 39 40 def save(self): 41 app = App.objects.get(id=self._id) 42 app_rating, created = AppRating.objects.get_or_create(user=self._user, 43 app=app) 44 app_rating.rating = self.cleaned_data['rating'] 45 app_rating.set_current_language(self._language_code) 46 app_rating.comment = self.cleaned_data['comment'] 47 app_rating.save() 48 [end of nextcloudappstore/core/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nextcloudappstore/core/forms.py b/nextcloudappstore/core/forms.py --- a/nextcloudappstore/core/forms.py +++ b/nextcloudappstore/core/forms.py @@ -13,15 +13,32 @@ class AppReleaseUploadForm(Form): download = CharField(label=_('Download link (tar.gz)'), max_length=256) - signature = CharField(widget=Textarea, label=_('SHA512 signature'), - help_text=_( - 'Hint: can be calculated by executing the ' - 'following command: openssl dgst -sha512 -sign ' - '/path/to/private-cert.key /path/to/app.tar.gz ' - '| openssl base64')) + signature = CharField( + widget=Textarea, + label=_('SHA512 signature'), + help_text=_( + 'Hint: can be calculated by executing the ' + 'following command: openssl dgst -sha512 -sign ' + '~/.nextcloud/certificates/APP_ID.key ' + '/path/to/app.tar.gz | openssl base64')) nightly = BooleanField(label=_('Nightly')) +class AppRegisterForm(Form): + certificate = CharField( + widget=Textarea(attrs={'pattern': '-----BEGIN CERTIFICATE-----.*'}), + label=_('Public certificate'), + help_text=_( + 'Usually stored in ~/.nextcloud/certificates/APP_ID.crt')) + signature = CharField( + widget=Textarea, + label=_('SHA512 signature'), + help_text=_( + 'Hint: can be calculated by executing the ' + 'following command: echo -n "APP_ID" | openssl dgst -sha512 -sign ' + '~/.nextcloud/certificates/APP_ID.key | openssl base64')) + + class AppRatingForm(Form): def __init__(self, *args, **kwargs): self._id = kwargs.pop('id', None)
{"golden_diff": "diff --git a/nextcloudappstore/core/forms.py b/nextcloudappstore/core/forms.py\n--- a/nextcloudappstore/core/forms.py\n+++ b/nextcloudappstore/core/forms.py\n@@ -13,15 +13,32 @@\n \n class AppReleaseUploadForm(Form):\n download = CharField(label=_('Download link (tar.gz)'), max_length=256)\n- signature = CharField(widget=Textarea, label=_('SHA512 signature'),\n- help_text=_(\n- 'Hint: can be calculated by executing the '\n- 'following command: openssl dgst -sha512 -sign '\n- '/path/to/private-cert.key /path/to/app.tar.gz '\n- '| openssl base64'))\n+ signature = CharField(\n+ widget=Textarea,\n+ label=_('SHA512 signature'),\n+ help_text=_(\n+ 'Hint: can be calculated by executing the '\n+ 'following command: openssl dgst -sha512 -sign '\n+ '~/.nextcloud/certificates/APP_ID.key '\n+ '/path/to/app.tar.gz | openssl base64'))\n nightly = BooleanField(label=_('Nightly'))\n \n \n+class AppRegisterForm(Form):\n+ certificate = CharField(\n+ widget=Textarea(attrs={'pattern': '-----BEGIN CERTIFICATE-----.*'}),\n+ label=_('Public certificate'),\n+ help_text=_(\n+ 'Usually stored in ~/.nextcloud/certificates/APP_ID.crt'))\n+ signature = CharField(\n+ widget=Textarea,\n+ label=_('SHA512 signature'),\n+ help_text=_(\n+ 'Hint: can be calculated by executing the '\n+ 'following command: echo -n \"APP_ID\" | openssl dgst -sha512 -sign '\n+ '~/.nextcloud/certificates/APP_ID.key | openssl base64'))\n+\n+\n class AppRatingForm(Form):\n def __init__(self, *args, **kwargs):\n self._id = kwargs.pop('id', None)\n", "issue": "Create documentation section for explaining certificates, signing and how it all works together\nApp devs need a very quick tutorial/walkthrough in the docs on how to generate a new certificate pair, request the public cert to be signed, registering an app id, revoking certs (like registering certs: post it on our issue tracker) and signing apps.\n\nAlso some background needs to be provided on how the whole certificate mechanism works.\n\n", "before_files": [{"content": "from django.forms import Form, CharField, Textarea, ChoiceField, RadioSelect, \\\n BooleanField\nfrom django.utils.translation import ugettext_lazy as _ # type: ignore\n\nfrom nextcloudappstore.core.models import App, AppRating\n\nRATING_CHOICES = (\n (0.0, _('Bad')),\n (0.5, _('Ok')),\n (1.0, _('Good'))\n)\n\n\nclass AppReleaseUploadForm(Form):\n download = CharField(label=_('Download link (tar.gz)'), max_length=256)\n signature = CharField(widget=Textarea, label=_('SHA512 signature'),\n help_text=_(\n 'Hint: can be calculated by executing the '\n 'following command: openssl dgst -sha512 -sign '\n '/path/to/private-cert.key /path/to/app.tar.gz '\n '| openssl base64'))\n nightly = BooleanField(label=_('Nightly'))\n\n\nclass AppRatingForm(Form):\n def __init__(self, *args, **kwargs):\n self._id = kwargs.pop('id', None)\n self._user = kwargs.pop('user', None)\n self._language_code = kwargs.pop('language_code', None)\n super().__init__(*args, **kwargs)\n\n rating = ChoiceField(initial=0.5, choices=RATING_CHOICES,\n widget=RadioSelect)\n comment = CharField(widget=Textarea, required=False,\n label=_('Review'))\n\n class Meta:\n fields = ('rating', 'comment')\n\n def save(self):\n app = App.objects.get(id=self._id)\n app_rating, created = AppRating.objects.get_or_create(user=self._user,\n app=app)\n app_rating.rating = self.cleaned_data['rating']\n app_rating.set_current_language(self._language_code)\n app_rating.comment = self.cleaned_data['comment']\n app_rating.save()\n", "path": "nextcloudappstore/core/forms.py"}]}
1,107
439
gh_patches_debug_26500
rasdani/github-patches
git_diff
pypa__setuptools-555
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> upload command doesn't prompt for password; raises TypeError # Problem statement If the `~/.pypirc` file does not contain a password like so: ``` ini [distutils] index-servers = pypitest [pypitest] repository = https://testpypi.python.org/pypi username = my_username ; Note the lack of a password ``` Then uploading the package ``` python setup.py sdist upload -r pypitest ``` Fails to prompt the user for his password and instead raises a TypeError (output truncated) ``` running upload Traceback (most recent call last): File "setup.py", line 16, in <module> keywords=["test", "hello"] File "/usr/lib/python2.7/distutils/core.py", line 151, in setup dist.run_commands() File "/usr/lib/python2.7/distutils/dist.py", line 953, in run_commands self.run_command(cmd) File "/usr/lib/python2.7/distutils/dist.py", line 972, in run_command cmd_obj.run() File "/usr/lib/python2.7/distutils/command/upload.py", line 60, in run self.upload_file(command, pyversion, filename) File "/usr/lib/python2.7/distutils/command/upload.py", line 135, in upload_file self.password) TypeError: cannot concatenate 'str' and 'NoneType' objects ``` **This is different** than the behavior of the `register` command, which prompts the user for a password before continuing. ``` python setup.py sdist register -r pypitest ``` (output truncated) ``` Creating tar archive removing 'HelloPyPi-0.0.1.dev0' (and everything under it) running register Password: ``` > Note that the `register` and the `upload` command exhibit the proper behavior **if you store your password in `~/.pypirc`**, but not if the password is omitted. # Okay, so...? I am aware that you can run ``` python setup.py sdist register -r pypitest upload -r pypitest ``` As a workaround, but it stands to reason that **if you can register a package without uploading it, then you should also be able to upload a package without registering it**, regardless of if a password has been specified in your `~/.pypirc` file. # Steps to reproduce 1. Remove your pypi password from `~/.pypirc` 2. Find a project that you wish to upload to a pypi server (I used [my example repository](https://github.com/brookskindle/hellopypi) for this) 3. Run `python setup.py sdist upload -r target_pypi_server` # Setuptools version setuptools (20.9.0) -- from `pip list` in my virtualenv </issue> <code> [start of setuptools/command/upload.py] 1 from distutils.command import upload as orig 2 3 4 class upload(orig.upload): 5 """ 6 Override default upload behavior to look up password 7 in the keyring if available. 8 """ 9 10 def finalize_options(self): 11 orig.upload.finalize_options(self) 12 self.password or self._load_password_from_keyring() 13 14 def _load_password_from_keyring(self): 15 """ 16 Attempt to load password from keyring. Suppress Exceptions. 17 """ 18 try: 19 keyring = __import__('keyring') 20 self.password = keyring.get_password(self.repository, 21 self.username) 22 except Exception: 23 pass 24 [end of setuptools/command/upload.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setuptools/command/upload.py b/setuptools/command/upload.py --- a/setuptools/command/upload.py +++ b/setuptools/command/upload.py @@ -3,13 +3,18 @@ class upload(orig.upload): """ - Override default upload behavior to look up password - in the keyring if available. + Override default upload behavior to obtain password + in a variety of different ways. """ def finalize_options(self): orig.upload.finalize_options(self) - self.password or self._load_password_from_keyring() + # Attempt to obtain password. Short circuit evaluation at the first + # sign of success. + self.password = ( + self.password or self._load_password_from_keyring() or + self._prompt_for_password() + ) def _load_password_from_keyring(self): """ @@ -17,7 +22,22 @@ """ try: keyring = __import__('keyring') - self.password = keyring.get_password(self.repository, - self.username) + password = keyring.get_password(self.repository, self.username) except Exception: - pass + password = None + finally: + return password + + def _prompt_for_password(self): + """ + Prompt for a password on the tty. Suppress Exceptions. + """ + password = None + try: + import getpass + while not password: + password = getpass.getpass() + except (Exception, KeyboardInterrupt): + password = None + finally: + return password
{"golden_diff": "diff --git a/setuptools/command/upload.py b/setuptools/command/upload.py\n--- a/setuptools/command/upload.py\n+++ b/setuptools/command/upload.py\n@@ -3,13 +3,18 @@\n \n class upload(orig.upload):\n \"\"\"\n- Override default upload behavior to look up password\n- in the keyring if available.\n+ Override default upload behavior to obtain password\n+ in a variety of different ways.\n \"\"\"\n \n def finalize_options(self):\n orig.upload.finalize_options(self)\n- self.password or self._load_password_from_keyring()\n+ # Attempt to obtain password. Short circuit evaluation at the first\n+ # sign of success.\n+ self.password = (\n+ self.password or self._load_password_from_keyring() or\n+ self._prompt_for_password()\n+ )\n \n def _load_password_from_keyring(self):\n \"\"\"\n@@ -17,7 +22,22 @@\n \"\"\"\n try:\n keyring = __import__('keyring')\n- self.password = keyring.get_password(self.repository,\n- self.username)\n+ password = keyring.get_password(self.repository, self.username)\n except Exception:\n- pass\n+ password = None\n+ finally:\n+ return password\n+\n+ def _prompt_for_password(self):\n+ \"\"\"\n+ Prompt for a password on the tty. Suppress Exceptions.\n+ \"\"\"\n+ password = None\n+ try:\n+ import getpass\n+ while not password:\n+ password = getpass.getpass()\n+ except (Exception, KeyboardInterrupt):\n+ password = None\n+ finally:\n+ return password\n", "issue": "upload command doesn't prompt for password; raises TypeError\n# Problem statement\n\nIf the `~/.pypirc` file does not contain a password like so:\n\n``` ini\n[distutils]\nindex-servers = \n pypitest\n\n[pypitest]\nrepository = https://testpypi.python.org/pypi\nusername = my_username\n; Note the lack of a password\n```\n\nThen uploading the package\n\n```\npython setup.py sdist upload -r pypitest\n```\n\nFails to prompt the user for his password and instead raises a TypeError (output truncated)\n\n```\nrunning upload\nTraceback (most recent call last):\n File \"setup.py\", line 16, in <module>\n keywords=[\"test\", \"hello\"]\n File \"/usr/lib/python2.7/distutils/core.py\", line 151, in setup\n dist.run_commands()\n File \"/usr/lib/python2.7/distutils/dist.py\", line 953, in run_commands\n self.run_command(cmd)\n File \"/usr/lib/python2.7/distutils/dist.py\", line 972, in run_command\n cmd_obj.run()\n File \"/usr/lib/python2.7/distutils/command/upload.py\", line 60, in run\n self.upload_file(command, pyversion, filename)\n File \"/usr/lib/python2.7/distutils/command/upload.py\", line 135, in upload_file\n self.password)\nTypeError: cannot concatenate 'str' and 'NoneType' objects\n```\n\n**This is different** than the behavior of the `register` command, which prompts the user for a password before continuing.\n\n```\npython setup.py sdist register -r pypitest\n```\n\n(output truncated)\n\n```\nCreating tar archive\nremoving 'HelloPyPi-0.0.1.dev0' (and everything under it)\nrunning register\nPassword: \n```\n\n> Note that the `register` and the `upload` command exhibit the proper behavior **if you store your password in `~/.pypirc`**, but not if the password is omitted.\n# Okay, so...?\n\nI am aware that you can run\n\n```\npython setup.py sdist register -r pypitest upload -r pypitest\n```\n\nAs a workaround, but it stands to reason that **if you can register a package without uploading it, then you should also be able to upload a package without registering it**, regardless of if a password has been specified in your `~/.pypirc` file.\n# Steps to reproduce\n1. Remove your pypi password from `~/.pypirc`\n2. Find a project that you wish to upload to a pypi server (I used [my example repository](https://github.com/brookskindle/hellopypi) for this)\n3. Run `python setup.py sdist upload -r target_pypi_server`\n# Setuptools version\n\nsetuptools (20.9.0) -- from `pip list` in my virtualenv\n\n", "before_files": [{"content": "from distutils.command import upload as orig\n\n\nclass upload(orig.upload):\n \"\"\"\n Override default upload behavior to look up password\n in the keyring if available.\n \"\"\"\n\n def finalize_options(self):\n orig.upload.finalize_options(self)\n self.password or self._load_password_from_keyring()\n\n def _load_password_from_keyring(self):\n \"\"\"\n Attempt to load password from keyring. Suppress Exceptions.\n \"\"\"\n try:\n keyring = __import__('keyring')\n self.password = keyring.get_password(self.repository,\n self.username)\n except Exception:\n pass\n", "path": "setuptools/command/upload.py"}]}
1,334
354
gh_patches_debug_15619
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-7002
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Better pattern to use PYTEST_OPTIONS In #4095 we incorporate the usage of `PYTEST_OPTIONS` to define a set of options to be environment-dependent. This way, we can extend/override these options used only in tests from outside (for example, corporate repo). Although I like it, I had to write a hack to know if we are running in `readthedocs` or `readthedocsinc` to know which of these options has to be respected by `pytest`. The ugly code is at https://github.com/rtfd/readthedocs.org/pull/4095#discussion_r198927773 and we need to find a better pattern for this. </issue> <code> [start of readthedocs/conftest.py] 1 import pytest 2 from rest_framework.test import APIClient 3 4 5 try: 6 # TODO: this file is read/executed even when called from ``readthedocsinc``, 7 # so it's overriding the options that we are defining in the ``conftest.py`` 8 # from the corporate site. We need to find a better way to avoid this. 9 import readthedocsinc 10 PYTEST_OPTIONS = () 11 except ImportError: 12 PYTEST_OPTIONS = ( 13 # Options to set test environment 14 ('community', True), 15 ('corporate', False), 16 ('environment', 'readthedocs'), 17 ) 18 19 20 def pytest_configure(config): 21 for option, value in PYTEST_OPTIONS: 22 setattr(config.option, option, value) 23 24 25 @pytest.fixture(autouse=True) 26 def settings_modification(settings): 27 settings.CELERY_ALWAYS_EAGER = True 28 29 30 @pytest.fixture 31 def api_client(): 32 return APIClient() 33 [end of readthedocs/conftest.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/conftest.py b/readthedocs/conftest.py --- a/readthedocs/conftest.py +++ b/readthedocs/conftest.py @@ -1,32 +1,6 @@ import pytest from rest_framework.test import APIClient - -try: - # TODO: this file is read/executed even when called from ``readthedocsinc``, - # so it's overriding the options that we are defining in the ``conftest.py`` - # from the corporate site. We need to find a better way to avoid this. - import readthedocsinc - PYTEST_OPTIONS = () -except ImportError: - PYTEST_OPTIONS = ( - # Options to set test environment - ('community', True), - ('corporate', False), - ('environment', 'readthedocs'), - ) - - -def pytest_configure(config): - for option, value in PYTEST_OPTIONS: - setattr(config.option, option, value) - - [email protected](autouse=True) -def settings_modification(settings): - settings.CELERY_ALWAYS_EAGER = True - - @pytest.fixture def api_client(): return APIClient()
{"golden_diff": "diff --git a/readthedocs/conftest.py b/readthedocs/conftest.py\n--- a/readthedocs/conftest.py\n+++ b/readthedocs/conftest.py\n@@ -1,32 +1,6 @@\n import pytest\n from rest_framework.test import APIClient\n \n-\n-try:\n- # TODO: this file is read/executed even when called from ``readthedocsinc``,\n- # so it's overriding the options that we are defining in the ``conftest.py``\n- # from the corporate site. We need to find a better way to avoid this.\n- import readthedocsinc\n- PYTEST_OPTIONS = ()\n-except ImportError:\n- PYTEST_OPTIONS = (\n- # Options to set test environment\n- ('community', True),\n- ('corporate', False),\n- ('environment', 'readthedocs'),\n- )\n-\n-\n-def pytest_configure(config):\n- for option, value in PYTEST_OPTIONS:\n- setattr(config.option, option, value)\n-\n-\[email protected](autouse=True)\n-def settings_modification(settings):\n- settings.CELERY_ALWAYS_EAGER = True\n-\n-\n @pytest.fixture\n def api_client():\n return APIClient()\n", "issue": "Better pattern to use PYTEST_OPTIONS\nIn #4095 we incorporate the usage of `PYTEST_OPTIONS` to define a set of options to be environment-dependent. This way, we can extend/override these options used only in tests from outside (for example, corporate repo).\r\n\r\nAlthough I like it, I had to write a hack to know if we are running in `readthedocs` or `readthedocsinc` to know which of these options has to be respected by `pytest`.\r\n\r\nThe ugly code is at https://github.com/rtfd/readthedocs.org/pull/4095#discussion_r198927773 and we need to find a better pattern for this.\n", "before_files": [{"content": "import pytest\nfrom rest_framework.test import APIClient\n\n\ntry:\n # TODO: this file is read/executed even when called from ``readthedocsinc``,\n # so it's overriding the options that we are defining in the ``conftest.py``\n # from the corporate site. We need to find a better way to avoid this.\n import readthedocsinc\n PYTEST_OPTIONS = ()\nexcept ImportError:\n PYTEST_OPTIONS = (\n # Options to set test environment\n ('community', True),\n ('corporate', False),\n ('environment', 'readthedocs'),\n )\n\n\ndef pytest_configure(config):\n for option, value in PYTEST_OPTIONS:\n setattr(config.option, option, value)\n\n\[email protected](autouse=True)\ndef settings_modification(settings):\n settings.CELERY_ALWAYS_EAGER = True\n\n\[email protected]\ndef api_client():\n return APIClient()\n", "path": "readthedocs/conftest.py"}]}
943
264
gh_patches_debug_5965
rasdani/github-patches
git_diff
wagtail__wagtail-940
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dropping Python 3.2 support Python 3.2 is quite old and many projects are dropping support for it (`libsass` and `treebeard` both have already). Should we consider dropping support as well? </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 import sys, os 4 5 from wagtail.wagtailcore import __version__ 6 7 8 try: 9 from setuptools import setup, find_packages 10 except ImportError: 11 from distutils.core import setup 12 13 14 # Hack to prevent "TypeError: 'NoneType' object is not callable" error 15 # in multiprocessing/util.py _exit_function when setup.py exits 16 # (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html) 17 try: 18 import multiprocessing 19 except ImportError: 20 pass 21 22 23 # Disable parallel builds, because Pillow 2.5.3 does some crazy monkeypatching of 24 # the build process on multicore systems, which breaks installation of libsass 25 os.environ['MAX_CONCURRENCY'] = '1' 26 27 PY3 = sys.version_info[0] == 3 28 29 30 install_requires = [ 31 "Django>=1.7.0,<1.8", 32 "django-compressor>=1.4", 33 "django-libsass>=0.2", 34 "django-modelcluster>=0.4", 35 "django-taggit==0.12.2", 36 "django-treebeard==2.0", 37 "Pillow>=2.6.1", 38 "beautifulsoup4>=4.3.2", 39 "html5lib==0.999", 40 "Unidecode>=0.04.14", 41 "six>=1.7.0", 42 'requests>=2.0.0', 43 "Willow==0.1", 44 ] 45 46 47 if not PY3: 48 install_requires += [ 49 "unicodecsv>=0.9.4" 50 ] 51 52 53 setup( 54 name='wagtail', 55 version=__version__, 56 description='A Django content management system focused on flexibility and user experience', 57 author='Matthew Westcott', 58 author_email='[email protected]', 59 url='http://wagtail.io/', 60 packages=find_packages(), 61 include_package_data=True, 62 license='BSD', 63 long_description=open('README.rst').read(), 64 classifiers=[ 65 'Development Status :: 5 - Production/Stable', 66 'Environment :: Web Environment', 67 'Intended Audience :: Developers', 68 'License :: OSI Approved :: BSD License', 69 'Operating System :: OS Independent', 70 'Programming Language :: Python', 71 'Programming Language :: Python :: 2', 72 'Programming Language :: Python :: 2.7', 73 'Programming Language :: Python :: 3', 74 'Programming Language :: Python :: 3.2', 75 'Programming Language :: Python :: 3.3', 76 'Programming Language :: Python :: 3.4', 77 'Framework :: Django', 78 'Topic :: Internet :: WWW/HTTP :: Site Management', 79 ], 80 install_requires=install_requires, 81 entry_points=""" 82 [console_scripts] 83 wagtail=wagtail.bin.wagtail:main 84 """, 85 zip_safe=False, 86 ) 87 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -71,7 +71,6 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Framework :: Django',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -71,7 +71,6 @@\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Framework :: Django',\n", "issue": "Dropping Python 3.2 support\nPython 3.2 is quite old and many projects are dropping support for it (`libsass` and `treebeard` both have already). Should we consider dropping support as well?\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport sys, os\n\nfrom wagtail.wagtailcore import __version__\n\n\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n\n\n# Hack to prevent \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when setup.py exits\n# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing\nexcept ImportError:\n pass\n\n\n# Disable parallel builds, because Pillow 2.5.3 does some crazy monkeypatching of\n# the build process on multicore systems, which breaks installation of libsass\nos.environ['MAX_CONCURRENCY'] = '1'\n\nPY3 = sys.version_info[0] == 3\n\n\ninstall_requires = [\n \"Django>=1.7.0,<1.8\",\n \"django-compressor>=1.4\",\n \"django-libsass>=0.2\",\n \"django-modelcluster>=0.4\",\n \"django-taggit==0.12.2\",\n \"django-treebeard==2.0\",\n \"Pillow>=2.6.1\",\n \"beautifulsoup4>=4.3.2\",\n \"html5lib==0.999\",\n \"Unidecode>=0.04.14\",\n \"six>=1.7.0\",\n 'requests>=2.0.0',\n \"Willow==0.1\",\n]\n\n\nif not PY3:\n install_requires += [\n \"unicodecsv>=0.9.4\"\n ]\n\n\nsetup(\n name='wagtail',\n version=__version__,\n description='A Django content management system focused on flexibility and user experience',\n author='Matthew Westcott',\n author_email='[email protected]',\n url='http://wagtail.io/',\n packages=find_packages(),\n include_package_data=True,\n license='BSD',\n long_description=open('README.rst').read(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Framework :: Django',\n 'Topic :: Internet :: WWW/HTTP :: Site Management',\n ],\n install_requires=install_requires,\n entry_points=\"\"\"\n [console_scripts]\n wagtail=wagtail.bin.wagtail:main\n \"\"\",\n zip_safe=False,\n)\n", "path": "setup.py"}]}
1,384
108
gh_patches_debug_8392
rasdani/github-patches
git_diff
PokemonGoF__PokemonGo-Bot-5122
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Incense being used when false ### Expected Behavior Don't use Incense when set to false in config ### Actual Behavior Bot using incense when set to false in config ### Your FULL config.json (remove your username, password, gmapkey and any other private info) http://pastebin.com/YEHMRMiE ### Output when issue occurred [2016-09-02 15:43:55] [UseIncense] [INFO] [use_incense] Using Ordinary incense. 8 incense remaining ### Steps to Reproduce Run bot with Incense false in config ### Other Information OS: Linux Branch: Dev Git Commit: 1cc9da7a79c421f11a4b13359f6a6c1abfcd061a Python Version: 2.7.12 Any other relevant files/configs (eg: path files) config.json </issue> <code> [start of pokemongo_bot/cell_workers/use_incense.py] 1 import time 2 from pokemongo_bot.base_task import BaseTask 3 from pokemongo_bot.worker_result import WorkerResult 4 from pokemongo_bot.item_list import Item 5 from pokemongo_bot import inventory 6 7 class UseIncense(BaseTask): 8 SUPPORTED_TASK_API_VERSION = 1 9 10 def initialize(self): 11 self.start_time = 0 12 self.use_incense = self.config.get('use_incense', False) 13 self.use_order = self.config.get('use_order', {}) 14 self._update_inventory() 15 16 self.types = { 17 401: "Ordinary", 18 402: "Spicy", 19 403: "Cool", 20 404: "Floral" 21 } 22 23 def _get_type(self): 24 for order in self.use_order: 25 if order == "ordinary" and self.incense_ordinary_count > 0: 26 return Item.ITEM_INCENSE_ORDINARY.value 27 if order == "spicy" and self.incense_spicy_count > 0: 28 return Item.ITEM_INCENSE_SPICY.value 29 if order == "cool" and self.incense_cool_count > 0: 30 return Item.ITEM_INCENSE_COOL.value 31 if order == "floral" and self.incense_floral_count > 0: 32 return Item.ITEM_INCENSE_FLORAL.value 33 34 return Item.ITEM_INCENSE_ORDINARY.value 35 36 def _update_inventory(self): 37 self.incense_ordinary_count = inventory.items().get(Item.ITEM_INCENSE_ORDINARY.value).count 38 self.incense_spicy_count = inventory.items().get(Item.ITEM_INCENSE_SPICY.value).count 39 self.incense_cool_count = inventory.items().get(Item.ITEM_INCENSE_COOL.value).count 40 self.incense_floral_count = inventory.items().get(Item.ITEM_INCENSE_FLORAL.value).count 41 42 def _has_count(self): 43 return self.incense_ordinary_count > 0 or self.incense_spicy_count > 0 or self.incense_cool_count > 0 or self.incense_floral_count > 0 44 45 def _should_run(self): 46 if self._has_count() > 0 and self.start_time == 0: 47 return True 48 49 using_incense = time.time() - self.start_time < 1800 50 if not using_incense: 51 self._update_inventory() 52 if self._has_count() and self.use_incense: 53 return True 54 55 def work(self): 56 if self._should_run(): 57 self.start_time = time.time() 58 type = self._get_type() 59 response_dict = self.bot.api.use_incense(incense_type=type) 60 result = response_dict.get('responses', {}).get('USE_INCENSE', {}).get('result', 0) 61 if result is 1: 62 self.emit_event( 63 'use_incense', 64 formatted="Using {type} incense. {incense_count} incense remaining", 65 data={ 66 'type': self.types.get(type, 'Unknown'), 67 'incense_count': inventory.items().get(type).count 68 } 69 ) 70 else: 71 self.emit_event( 72 'use_incense', 73 formatted="Unable to use incense {type}. {incense_count} incense remaining", 74 data={ 75 'type': self.types.get(type, 'Unknown'), 76 'incense_count': inventory.items().get(type).count 77 } 78 ) 79 80 return WorkerResult.SUCCESS 81 [end of pokemongo_bot/cell_workers/use_incense.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pokemongo_bot/cell_workers/use_incense.py b/pokemongo_bot/cell_workers/use_incense.py --- a/pokemongo_bot/cell_workers/use_incense.py +++ b/pokemongo_bot/cell_workers/use_incense.py @@ -42,7 +42,10 @@ def _has_count(self): return self.incense_ordinary_count > 0 or self.incense_spicy_count > 0 or self.incense_cool_count > 0 or self.incense_floral_count > 0 - def _should_run(self): + def _should_run(self): + if not self.use_incense: + return False + if self._has_count() > 0 and self.start_time == 0: return True
{"golden_diff": "diff --git a/pokemongo_bot/cell_workers/use_incense.py b/pokemongo_bot/cell_workers/use_incense.py\n--- a/pokemongo_bot/cell_workers/use_incense.py\n+++ b/pokemongo_bot/cell_workers/use_incense.py\n@@ -42,7 +42,10 @@\n def _has_count(self):\n return self.incense_ordinary_count > 0 or self.incense_spicy_count > 0 or self.incense_cool_count > 0 or self.incense_floral_count > 0\n \n- def _should_run(self): \n+ def _should_run(self):\n+ if not self.use_incense:\n+ return False\n+\n if self._has_count() > 0 and self.start_time == 0:\n return True\n", "issue": "Incense being used when false\n### Expected Behavior\n\nDon't use Incense when set to false in config\n### Actual Behavior\n\nBot using incense when set to false in config\n### Your FULL config.json (remove your username, password, gmapkey and any other private info)\n\nhttp://pastebin.com/YEHMRMiE\n### Output when issue occurred\n\n[2016-09-02 15:43:55] [UseIncense] [INFO] [use_incense] Using Ordinary incense. 8 incense remaining\n### Steps to Reproduce\n\nRun bot with Incense false in config\n### Other Information\n\nOS: Linux\nBranch: Dev\nGit Commit: 1cc9da7a79c421f11a4b13359f6a6c1abfcd061a\nPython Version: 2.7.12\nAny other relevant files/configs (eg: path files) \nconfig.json\n\n", "before_files": [{"content": "import time\nfrom pokemongo_bot.base_task import BaseTask\nfrom pokemongo_bot.worker_result import WorkerResult\nfrom pokemongo_bot.item_list import Item\nfrom pokemongo_bot import inventory\n\nclass UseIncense(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n def initialize(self):\n self.start_time = 0\n self.use_incense = self.config.get('use_incense', False)\n self.use_order = self.config.get('use_order', {})\n self._update_inventory()\n \n self.types = {\n 401: \"Ordinary\",\n 402: \"Spicy\",\n 403: \"Cool\",\n 404: \"Floral\"\n }\n \n def _get_type(self):\n for order in self.use_order:\n if order == \"ordinary\" and self.incense_ordinary_count > 0:\n return Item.ITEM_INCENSE_ORDINARY.value\n if order == \"spicy\" and self.incense_spicy_count > 0:\n return Item.ITEM_INCENSE_SPICY.value\n if order == \"cool\" and self.incense_cool_count > 0:\n return Item.ITEM_INCENSE_COOL.value\n if order == \"floral\" and self.incense_floral_count > 0:\n return Item.ITEM_INCENSE_FLORAL.value\n \n return Item.ITEM_INCENSE_ORDINARY.value \n \n def _update_inventory(self):\n self.incense_ordinary_count = inventory.items().get(Item.ITEM_INCENSE_ORDINARY.value).count \n self.incense_spicy_count = inventory.items().get(Item.ITEM_INCENSE_SPICY.value).count\n self.incense_cool_count = inventory.items().get(Item.ITEM_INCENSE_COOL.value).count \n self.incense_floral_count = inventory.items().get(Item.ITEM_INCENSE_FLORAL.value).count \n \n def _has_count(self):\n return self.incense_ordinary_count > 0 or self.incense_spicy_count > 0 or self.incense_cool_count > 0 or self.incense_floral_count > 0\n \n def _should_run(self): \n if self._has_count() > 0 and self.start_time == 0:\n return True \n \n using_incense = time.time() - self.start_time < 1800\n if not using_incense: \n self._update_inventory()\n if self._has_count() and self.use_incense:\n return True\n\n def work(self):\n if self._should_run():\n self.start_time = time.time()\n type = self._get_type() \n response_dict = self.bot.api.use_incense(incense_type=type)\n result = response_dict.get('responses', {}).get('USE_INCENSE', {}).get('result', 0)\n if result is 1:\n self.emit_event(\n 'use_incense',\n formatted=\"Using {type} incense. {incense_count} incense remaining\",\n data={\n 'type': self.types.get(type, 'Unknown'),\n 'incense_count': inventory.items().get(type).count\n }\n )\n else:\n self.emit_event(\n 'use_incense',\n formatted=\"Unable to use incense {type}. {incense_count} incense remaining\",\n data={\n 'type': self.types.get(type, 'Unknown'),\n 'incense_count': inventory.items().get(type).count\n }\n )\n \n return WorkerResult.SUCCESS\n", "path": "pokemongo_bot/cell_workers/use_incense.py"}]}
1,660
178
gh_patches_debug_32415
rasdani/github-patches
git_diff
vllm-project__vllm-4368
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Feature]: Cannot use FlashAttention backend for Volta and Turing GPUs. (but FlashAttention v1.0.9 supports Turing GPU.) ### 🚀 The feature, motivation and pitch Turing GPU can use FlashAttention v1.0.9 which can reduce use of vram significantly. FlashAttention has no plan to support Turing GPU in FlashAttention v2 actually. so please support FlashAttention v1.0.9. thanks a lot! many friends having 8*2080ti need this help. ### Alternatives _No response_ ### Additional context _No response_ </issue> <code> [start of vllm/attention/selector.py] 1 import enum 2 import os 3 from functools import lru_cache 4 from typing import Type 5 6 import torch 7 8 from vllm.attention.backends.abstract import AttentionBackend 9 from vllm.logger import init_logger 10 from vllm.utils import is_cpu, is_hip 11 12 logger = init_logger(__name__) 13 14 VLLM_ATTENTION_BACKEND = "VLLM_ATTENTION_BACKEND" 15 16 17 class _Backend(enum.Enum): 18 FLASH_ATTN = enum.auto() 19 XFORMERS = enum.auto() 20 ROCM_FLASH = enum.auto() 21 TORCH_SDPA = enum.auto() 22 23 24 @lru_cache(maxsize=None) 25 def get_attn_backend(dtype: torch.dtype) -> Type[AttentionBackend]: 26 backend = _which_attn_to_use(dtype) 27 if backend == _Backend.FLASH_ATTN: 28 logger.info("Using FlashAttention backend.") 29 from vllm.attention.backends.flash_attn import ( # noqa: F401 30 FlashAttentionBackend) 31 return FlashAttentionBackend 32 elif backend == _Backend.XFORMERS: 33 logger.info("Using XFormers backend.") 34 from vllm.attention.backends.xformers import ( # noqa: F401 35 XFormersBackend) 36 return XFormersBackend 37 elif backend == _Backend.ROCM_FLASH: 38 logger.info("Using ROCmFlashAttention backend.") 39 from vllm.attention.backends.rocm_flash_attn import ( # noqa: F401 40 ROCmFlashAttentionBackend) 41 return ROCmFlashAttentionBackend 42 elif backend == _Backend.TORCH_SDPA: 43 logger.info("Using Torch SDPA backend.") 44 from vllm.attention.backends.torch_sdpa import TorchSDPABackend 45 return TorchSDPABackend 46 else: 47 raise ValueError("Invalid attention backend.") 48 49 50 def _which_attn_to_use(dtype: torch.dtype) -> _Backend: 51 """Returns which flash attention backend to use.""" 52 if is_cpu(): 53 return _Backend.TORCH_SDPA 54 55 if is_hip(): 56 # AMD GPUs. 57 if torch.cuda.get_device_capability()[0] != 9: 58 # not Instinct series GPUs. 59 logger.info("flash_atten is not supported on NAVI GPUs.") 60 return _Backend.ROCM_FLASH 61 62 # NVIDIA GPUs. 63 if torch.cuda.get_device_capability()[0] < 8: 64 # Volta and Turing NVIDIA GPUs. 65 logger.info("Cannot use FlashAttention backend for Volta and Turing " 66 "GPUs.") 67 return _Backend.XFORMERS 68 69 if dtype not in (torch.float16, torch.bfloat16): 70 logger.info("Cannot use FlashAttention backend for dtype other than " 71 "torch.float16 or torch.bfloat16.") 72 return _Backend.XFORMERS 73 74 try: 75 import flash_attn # noqa: F401 76 except ImportError: 77 logger.info( 78 "Cannot use FlashAttention backend because the flash_attn package " 79 "is not found. Please install it for better performance.") 80 return _Backend.XFORMERS 81 82 backend_by_env_var = os.getenv(VLLM_ATTENTION_BACKEND) 83 if backend_by_env_var is not None: 84 return _Backend[backend_by_env_var] 85 86 # Default case. 87 return _Backend.FLASH_ATTN 88 [end of vllm/attention/selector.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vllm/attention/selector.py b/vllm/attention/selector.py --- a/vllm/attention/selector.py +++ b/vllm/attention/selector.py @@ -25,7 +25,7 @@ def get_attn_backend(dtype: torch.dtype) -> Type[AttentionBackend]: backend = _which_attn_to_use(dtype) if backend == _Backend.FLASH_ATTN: - logger.info("Using FlashAttention backend.") + logger.info("Using FlashAttention-2 backend.") from vllm.attention.backends.flash_attn import ( # noqa: F401 FlashAttentionBackend) return FlashAttentionBackend @@ -62,12 +62,12 @@ # NVIDIA GPUs. if torch.cuda.get_device_capability()[0] < 8: # Volta and Turing NVIDIA GPUs. - logger.info("Cannot use FlashAttention backend for Volta and Turing " + logger.info("Cannot use FlashAttention-2 backend for Volta and Turing " "GPUs.") return _Backend.XFORMERS if dtype not in (torch.float16, torch.bfloat16): - logger.info("Cannot use FlashAttention backend for dtype other than " + logger.info("Cannot use FlashAttention-2 backend for dtype other than " "torch.float16 or torch.bfloat16.") return _Backend.XFORMERS @@ -75,8 +75,8 @@ import flash_attn # noqa: F401 except ImportError: logger.info( - "Cannot use FlashAttention backend because the flash_attn package " - "is not found. Please install it for better performance.") + "Cannot use FlashAttention-2 backend because the flash_attn " + "package is not found. Please install it for better performance.") return _Backend.XFORMERS backend_by_env_var = os.getenv(VLLM_ATTENTION_BACKEND)
{"golden_diff": "diff --git a/vllm/attention/selector.py b/vllm/attention/selector.py\n--- a/vllm/attention/selector.py\n+++ b/vllm/attention/selector.py\n@@ -25,7 +25,7 @@\n def get_attn_backend(dtype: torch.dtype) -> Type[AttentionBackend]:\n backend = _which_attn_to_use(dtype)\n if backend == _Backend.FLASH_ATTN:\n- logger.info(\"Using FlashAttention backend.\")\n+ logger.info(\"Using FlashAttention-2 backend.\")\n from vllm.attention.backends.flash_attn import ( # noqa: F401\n FlashAttentionBackend)\n return FlashAttentionBackend\n@@ -62,12 +62,12 @@\n # NVIDIA GPUs.\n if torch.cuda.get_device_capability()[0] < 8:\n # Volta and Turing NVIDIA GPUs.\n- logger.info(\"Cannot use FlashAttention backend for Volta and Turing \"\n+ logger.info(\"Cannot use FlashAttention-2 backend for Volta and Turing \"\n \"GPUs.\")\n return _Backend.XFORMERS\n \n if dtype not in (torch.float16, torch.bfloat16):\n- logger.info(\"Cannot use FlashAttention backend for dtype other than \"\n+ logger.info(\"Cannot use FlashAttention-2 backend for dtype other than \"\n \"torch.float16 or torch.bfloat16.\")\n return _Backend.XFORMERS\n \n@@ -75,8 +75,8 @@\n import flash_attn # noqa: F401\n except ImportError:\n logger.info(\n- \"Cannot use FlashAttention backend because the flash_attn package \"\n- \"is not found. Please install it for better performance.\")\n+ \"Cannot use FlashAttention-2 backend because the flash_attn \"\n+ \"package is not found. Please install it for better performance.\")\n return _Backend.XFORMERS\n \n backend_by_env_var = os.getenv(VLLM_ATTENTION_BACKEND)\n", "issue": "[Feature]: Cannot use FlashAttention backend for Volta and Turing GPUs. (but FlashAttention v1.0.9 supports Turing GPU.)\n### \ud83d\ude80 The feature, motivation and pitch\r\n\r\nTuring GPU can use FlashAttention v1.0.9 which can reduce use of vram significantly.\r\n\r\nFlashAttention has no plan to support Turing GPU in FlashAttention v2 actually.\r\nso please support FlashAttention v1.0.9. thanks a lot!\r\n\r\nmany friends having 8*2080ti need this help.\r\n\r\n### Alternatives\r\n\r\n_No response_\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "import enum\nimport os\nfrom functools import lru_cache\nfrom typing import Type\n\nimport torch\n\nfrom vllm.attention.backends.abstract import AttentionBackend\nfrom vllm.logger import init_logger\nfrom vllm.utils import is_cpu, is_hip\n\nlogger = init_logger(__name__)\n\nVLLM_ATTENTION_BACKEND = \"VLLM_ATTENTION_BACKEND\"\n\n\nclass _Backend(enum.Enum):\n FLASH_ATTN = enum.auto()\n XFORMERS = enum.auto()\n ROCM_FLASH = enum.auto()\n TORCH_SDPA = enum.auto()\n\n\n@lru_cache(maxsize=None)\ndef get_attn_backend(dtype: torch.dtype) -> Type[AttentionBackend]:\n backend = _which_attn_to_use(dtype)\n if backend == _Backend.FLASH_ATTN:\n logger.info(\"Using FlashAttention backend.\")\n from vllm.attention.backends.flash_attn import ( # noqa: F401\n FlashAttentionBackend)\n return FlashAttentionBackend\n elif backend == _Backend.XFORMERS:\n logger.info(\"Using XFormers backend.\")\n from vllm.attention.backends.xformers import ( # noqa: F401\n XFormersBackend)\n return XFormersBackend\n elif backend == _Backend.ROCM_FLASH:\n logger.info(\"Using ROCmFlashAttention backend.\")\n from vllm.attention.backends.rocm_flash_attn import ( # noqa: F401\n ROCmFlashAttentionBackend)\n return ROCmFlashAttentionBackend\n elif backend == _Backend.TORCH_SDPA:\n logger.info(\"Using Torch SDPA backend.\")\n from vllm.attention.backends.torch_sdpa import TorchSDPABackend\n return TorchSDPABackend\n else:\n raise ValueError(\"Invalid attention backend.\")\n\n\ndef _which_attn_to_use(dtype: torch.dtype) -> _Backend:\n \"\"\"Returns which flash attention backend to use.\"\"\"\n if is_cpu():\n return _Backend.TORCH_SDPA\n\n if is_hip():\n # AMD GPUs.\n if torch.cuda.get_device_capability()[0] != 9:\n # not Instinct series GPUs.\n logger.info(\"flash_atten is not supported on NAVI GPUs.\")\n return _Backend.ROCM_FLASH\n\n # NVIDIA GPUs.\n if torch.cuda.get_device_capability()[0] < 8:\n # Volta and Turing NVIDIA GPUs.\n logger.info(\"Cannot use FlashAttention backend for Volta and Turing \"\n \"GPUs.\")\n return _Backend.XFORMERS\n\n if dtype not in (torch.float16, torch.bfloat16):\n logger.info(\"Cannot use FlashAttention backend for dtype other than \"\n \"torch.float16 or torch.bfloat16.\")\n return _Backend.XFORMERS\n\n try:\n import flash_attn # noqa: F401\n except ImportError:\n logger.info(\n \"Cannot use FlashAttention backend because the flash_attn package \"\n \"is not found. Please install it for better performance.\")\n return _Backend.XFORMERS\n\n backend_by_env_var = os.getenv(VLLM_ATTENTION_BACKEND)\n if backend_by_env_var is not None:\n return _Backend[backend_by_env_var]\n\n # Default case.\n return _Backend.FLASH_ATTN\n", "path": "vllm/attention/selector.py"}]}
1,548
427
gh_patches_debug_22037
rasdani/github-patches
git_diff
netbox-community__netbox-9826
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add Contacts field to Virtual Machines table view ### NetBox version v3.2.7 ### Feature type Change to existing functionality ### Proposed functionality I would suggest to add contacts field to Virtual Machines table view/export, similarly to what we have in Devices. Currently in Devices in the "Configure Table" it's possible to select "Contacts" as a column, but it's not available in Virtual Machines. ### Use case When browsing through or exporting Virtual Machines it would be nice to be able to see who the owner/contact is. ### Database changes _No response_ ### External dependencies _No response_ </issue> <code> [start of netbox/virtualization/tables/virtualmachines.py] 1 import django_tables2 as tables 2 3 from dcim.tables.devices import BaseInterfaceTable 4 from netbox.tables import NetBoxTable, columns 5 from tenancy.tables import TenancyColumnsMixin 6 from virtualization.models import VirtualMachine, VMInterface 7 8 __all__ = ( 9 'VirtualMachineTable', 10 'VirtualMachineVMInterfaceTable', 11 'VMInterfaceTable', 12 ) 13 14 VMINTERFACE_BUTTONS = """ 15 {% if perms.ipam.add_ipaddress %} 16 <a href="{% url 'ipam:ipaddress_add' %}?vminterface={{ record.pk }}&return_url={% url 'virtualization:virtualmachine_interfaces' pk=object.pk %}" class="btn btn-sm btn-success" title="Add IP Address"> 17 <i class="mdi mdi-plus-thick" aria-hidden="true"></i> 18 </a> 19 {% endif %} 20 """ 21 22 23 # 24 # Virtual machines 25 # 26 27 class VirtualMachineTable(TenancyColumnsMixin, NetBoxTable): 28 name = tables.Column( 29 order_by=('_name',), 30 linkify=True 31 ) 32 status = columns.ChoiceFieldColumn() 33 cluster = tables.Column( 34 linkify=True 35 ) 36 role = columns.ColoredLabelColumn() 37 comments = columns.MarkdownColumn() 38 primary_ip4 = tables.Column( 39 linkify=True, 40 verbose_name='IPv4 Address' 41 ) 42 primary_ip6 = tables.Column( 43 linkify=True, 44 verbose_name='IPv6 Address' 45 ) 46 primary_ip = tables.Column( 47 linkify=True, 48 order_by=('primary_ip4', 'primary_ip6'), 49 verbose_name='IP Address' 50 ) 51 tags = columns.TagColumn( 52 url_name='virtualization:virtualmachine_list' 53 ) 54 55 class Meta(NetBoxTable.Meta): 56 model = VirtualMachine 57 fields = ( 58 'pk', 'id', 'name', 'status', 'cluster', 'role', 'tenant', 'tenant_group', 'platform', 'vcpus', 'memory', 'disk', 59 'primary_ip4', 'primary_ip6', 'primary_ip', 'comments', 'tags', 'created', 'last_updated', 60 ) 61 default_columns = ( 62 'pk', 'name', 'status', 'cluster', 'role', 'tenant', 'vcpus', 'memory', 'disk', 'primary_ip', 63 ) 64 65 66 # 67 # VM components 68 # 69 70 class VMInterfaceTable(BaseInterfaceTable): 71 virtual_machine = tables.Column( 72 linkify=True 73 ) 74 name = tables.Column( 75 linkify=True 76 ) 77 vrf = tables.Column( 78 linkify=True 79 ) 80 contacts = columns.ManyToManyColumn( 81 linkify_item=True 82 ) 83 tags = columns.TagColumn( 84 url_name='virtualization:vminterface_list' 85 ) 86 87 class Meta(NetBoxTable.Meta): 88 model = VMInterface 89 fields = ( 90 'pk', 'id', 'name', 'virtual_machine', 'enabled', 'mac_address', 'mtu', 'mode', 'description', 'tags', 91 'vrf', 'ip_addresses', 'fhrp_groups', 'untagged_vlan', 'tagged_vlans', 'contacts', 'created', 92 'last_updated', 93 ) 94 default_columns = ('pk', 'name', 'virtual_machine', 'enabled', 'description') 95 96 97 class VirtualMachineVMInterfaceTable(VMInterfaceTable): 98 parent = tables.Column( 99 linkify=True 100 ) 101 bridge = tables.Column( 102 linkify=True 103 ) 104 actions = columns.ActionsColumn( 105 actions=('edit', 'delete'), 106 extra_buttons=VMINTERFACE_BUTTONS 107 ) 108 109 class Meta(NetBoxTable.Meta): 110 model = VMInterface 111 fields = ( 112 'pk', 'id', 'name', 'enabled', 'parent', 'bridge', 'mac_address', 'mtu', 'mode', 'description', 'tags', 113 'ip_addresses', 'fhrp_groups', 'untagged_vlan', 'tagged_vlans', 'actions', 114 ) 115 default_columns = ('pk', 'name', 'enabled', 'mac_address', 'mtu', 'mode', 'description', 'ip_addresses') 116 row_attrs = { 117 'data-name': lambda record: record.name, 118 } 119 [end of netbox/virtualization/tables/virtualmachines.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/virtualization/tables/virtualmachines.py b/netbox/virtualization/tables/virtualmachines.py --- a/netbox/virtualization/tables/virtualmachines.py +++ b/netbox/virtualization/tables/virtualmachines.py @@ -48,6 +48,9 @@ order_by=('primary_ip4', 'primary_ip6'), verbose_name='IP Address' ) + contacts = columns.ManyToManyColumn( + linkify_item=True + ) tags = columns.TagColumn( url_name='virtualization:virtualmachine_list' ) @@ -56,7 +59,7 @@ model = VirtualMachine fields = ( 'pk', 'id', 'name', 'status', 'cluster', 'role', 'tenant', 'tenant_group', 'platform', 'vcpus', 'memory', 'disk', - 'primary_ip4', 'primary_ip6', 'primary_ip', 'comments', 'tags', 'created', 'last_updated', + 'primary_ip4', 'primary_ip6', 'primary_ip', 'comments', 'contacts', 'tags', 'created', 'last_updated', ) default_columns = ( 'pk', 'name', 'status', 'cluster', 'role', 'tenant', 'vcpus', 'memory', 'disk', 'primary_ip',
{"golden_diff": "diff --git a/netbox/virtualization/tables/virtualmachines.py b/netbox/virtualization/tables/virtualmachines.py\n--- a/netbox/virtualization/tables/virtualmachines.py\n+++ b/netbox/virtualization/tables/virtualmachines.py\n@@ -48,6 +48,9 @@\n order_by=('primary_ip4', 'primary_ip6'),\n verbose_name='IP Address'\n )\n+ contacts = columns.ManyToManyColumn(\n+ linkify_item=True\n+ )\n tags = columns.TagColumn(\n url_name='virtualization:virtualmachine_list'\n )\n@@ -56,7 +59,7 @@\n model = VirtualMachine\n fields = (\n 'pk', 'id', 'name', 'status', 'cluster', 'role', 'tenant', 'tenant_group', 'platform', 'vcpus', 'memory', 'disk',\n- 'primary_ip4', 'primary_ip6', 'primary_ip', 'comments', 'tags', 'created', 'last_updated',\n+ 'primary_ip4', 'primary_ip6', 'primary_ip', 'comments', 'contacts', 'tags', 'created', 'last_updated',\n )\n default_columns = (\n 'pk', 'name', 'status', 'cluster', 'role', 'tenant', 'vcpus', 'memory', 'disk', 'primary_ip',\n", "issue": "Add Contacts field to Virtual Machines table view\n### NetBox version\n\nv3.2.7\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nI would suggest to add contacts field to Virtual Machines table view/export, similarly to what we have in Devices. \r\nCurrently in Devices in the \"Configure Table\" it's possible to select \"Contacts\" as a column, but it's not available in Virtual Machines. \n\n### Use case\n\nWhen browsing through or exporting Virtual Machines it would be nice to be able to see who the owner/contact is. \n\n### Database changes\n\n_No response_\n\n### External dependencies\n\n_No response_\n", "before_files": [{"content": "import django_tables2 as tables\n\nfrom dcim.tables.devices import BaseInterfaceTable\nfrom netbox.tables import NetBoxTable, columns\nfrom tenancy.tables import TenancyColumnsMixin\nfrom virtualization.models import VirtualMachine, VMInterface\n\n__all__ = (\n 'VirtualMachineTable',\n 'VirtualMachineVMInterfaceTable',\n 'VMInterfaceTable',\n)\n\nVMINTERFACE_BUTTONS = \"\"\"\n{% if perms.ipam.add_ipaddress %}\n <a href=\"{% url 'ipam:ipaddress_add' %}?vminterface={{ record.pk }}&return_url={% url 'virtualization:virtualmachine_interfaces' pk=object.pk %}\" class=\"btn btn-sm btn-success\" title=\"Add IP Address\">\n <i class=\"mdi mdi-plus-thick\" aria-hidden=\"true\"></i>\n </a>\n{% endif %}\n\"\"\"\n\n\n#\n# Virtual machines\n#\n\nclass VirtualMachineTable(TenancyColumnsMixin, NetBoxTable):\n name = tables.Column(\n order_by=('_name',),\n linkify=True\n )\n status = columns.ChoiceFieldColumn()\n cluster = tables.Column(\n linkify=True\n )\n role = columns.ColoredLabelColumn()\n comments = columns.MarkdownColumn()\n primary_ip4 = tables.Column(\n linkify=True,\n verbose_name='IPv4 Address'\n )\n primary_ip6 = tables.Column(\n linkify=True,\n verbose_name='IPv6 Address'\n )\n primary_ip = tables.Column(\n linkify=True,\n order_by=('primary_ip4', 'primary_ip6'),\n verbose_name='IP Address'\n )\n tags = columns.TagColumn(\n url_name='virtualization:virtualmachine_list'\n )\n\n class Meta(NetBoxTable.Meta):\n model = VirtualMachine\n fields = (\n 'pk', 'id', 'name', 'status', 'cluster', 'role', 'tenant', 'tenant_group', 'platform', 'vcpus', 'memory', 'disk',\n 'primary_ip4', 'primary_ip6', 'primary_ip', 'comments', 'tags', 'created', 'last_updated',\n )\n default_columns = (\n 'pk', 'name', 'status', 'cluster', 'role', 'tenant', 'vcpus', 'memory', 'disk', 'primary_ip',\n )\n\n\n#\n# VM components\n#\n\nclass VMInterfaceTable(BaseInterfaceTable):\n virtual_machine = tables.Column(\n linkify=True\n )\n name = tables.Column(\n linkify=True\n )\n vrf = tables.Column(\n linkify=True\n )\n contacts = columns.ManyToManyColumn(\n linkify_item=True\n )\n tags = columns.TagColumn(\n url_name='virtualization:vminterface_list'\n )\n\n class Meta(NetBoxTable.Meta):\n model = VMInterface\n fields = (\n 'pk', 'id', 'name', 'virtual_machine', 'enabled', 'mac_address', 'mtu', 'mode', 'description', 'tags',\n 'vrf', 'ip_addresses', 'fhrp_groups', 'untagged_vlan', 'tagged_vlans', 'contacts', 'created',\n 'last_updated',\n )\n default_columns = ('pk', 'name', 'virtual_machine', 'enabled', 'description')\n\n\nclass VirtualMachineVMInterfaceTable(VMInterfaceTable):\n parent = tables.Column(\n linkify=True\n )\n bridge = tables.Column(\n linkify=True\n )\n actions = columns.ActionsColumn(\n actions=('edit', 'delete'),\n extra_buttons=VMINTERFACE_BUTTONS\n )\n\n class Meta(NetBoxTable.Meta):\n model = VMInterface\n fields = (\n 'pk', 'id', 'name', 'enabled', 'parent', 'bridge', 'mac_address', 'mtu', 'mode', 'description', 'tags',\n 'ip_addresses', 'fhrp_groups', 'untagged_vlan', 'tagged_vlans', 'actions',\n )\n default_columns = ('pk', 'name', 'enabled', 'mac_address', 'mtu', 'mode', 'description', 'ip_addresses')\n row_attrs = {\n 'data-name': lambda record: record.name,\n }\n", "path": "netbox/virtualization/tables/virtualmachines.py"}]}
1,823
297
gh_patches_debug_26762
rasdani/github-patches
git_diff
pytorch__ignite-1312
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve Canberra metric ## 🚀 Feature Actual implementation of Canberra metric does not use absolute value on terms in denominator. Moreover, `sklearn` can be used in test. See https://arxiv.org/pdf/1411.7474.pdf See https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html </issue> <code> [start of ignite/contrib/metrics/regression/canberra_metric.py] 1 import torch 2 3 from ignite.contrib.metrics.regression._base import _BaseRegression 4 5 6 class CanberraMetric(_BaseRegression): 7 r""" 8 Calculates the Canberra Metric. 9 10 :math:`\text{CM} = \sum_{j=1}^n\frac{|A_j - P_j|}{A_j + P_j}` 11 12 where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value. 13 14 More details can be found in `Botchkarev 2018`__. 15 16 - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. 17 - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`. 18 19 __ https://arxiv.org/abs/1809.03006 20 """ 21 22 def reset(self): 23 self._sum_of_errors = 0.0 24 25 def _update(self, output): 26 y_pred, y = output 27 errors = torch.abs(y.view_as(y_pred) - y_pred) / (y_pred + y.view_as(y_pred)) 28 self._sum_of_errors += torch.sum(errors).item() 29 30 def compute(self): 31 return self._sum_of_errors 32 [end of ignite/contrib/metrics/regression/canberra_metric.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py --- a/ignite/contrib/metrics/regression/canberra_metric.py +++ b/ignite/contrib/metrics/regression/canberra_metric.py @@ -7,16 +7,19 @@ r""" Calculates the Canberra Metric. - :math:`\text{CM} = \sum_{j=1}^n\frac{|A_j - P_j|}{A_j + P_j}` + :math:`\text{CM} = \sum_{j=1}^n\frac{|A_j - P_j|}{|A_j| + |P_j|}` where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value. - More details can be found in `Botchkarev 2018`__. + More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`. - __ https://arxiv.org/abs/1809.03006 + .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006 + .. _scikit-learn distance metrics: + https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html + """ def reset(self): @@ -24,7 +27,7 @@ def _update(self, output): y_pred, y = output - errors = torch.abs(y.view_as(y_pred) - y_pred) / (y_pred + y.view_as(y_pred)) + errors = torch.abs(y.view_as(y_pred) - y_pred) / (torch.abs(y_pred) + torch.abs(y.view_as(y_pred))) self._sum_of_errors += torch.sum(errors).item() def compute(self):
{"golden_diff": "diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py\n--- a/ignite/contrib/metrics/regression/canberra_metric.py\n+++ b/ignite/contrib/metrics/regression/canberra_metric.py\n@@ -7,16 +7,19 @@\n r\"\"\"\n Calculates the Canberra Metric.\n \n- :math:`\\text{CM} = \\sum_{j=1}^n\\frac{|A_j - P_j|}{A_j + P_j}`\n+ :math:`\\text{CM} = \\sum_{j=1}^n\\frac{|A_j - P_j|}{|A_j| + |P_j|}`\n \n where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n \n- More details can be found in `Botchkarev 2018`__.\n+ More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_\n \n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n \n- __ https://arxiv.org/abs/1809.03006\n+ .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006\n+ .. _scikit-learn distance metrics:\n+ https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html\n+\n \"\"\"\n \n def reset(self):\n@@ -24,7 +27,7 @@\n \n def _update(self, output):\n y_pred, y = output\n- errors = torch.abs(y.view_as(y_pred) - y_pred) / (y_pred + y.view_as(y_pred))\n+ errors = torch.abs(y.view_as(y_pred) - y_pred) / (torch.abs(y_pred) + torch.abs(y.view_as(y_pred)))\n self._sum_of_errors += torch.sum(errors).item()\n \n def compute(self):\n", "issue": "Improve Canberra metric\n## \ud83d\ude80 Feature\r\n\r\nActual implementation of Canberra metric does not use absolute value on terms in denominator. Moreover, `sklearn` can be used in test.\r\n\r\nSee https://arxiv.org/pdf/1411.7474.pdf \r\n\r\nSee https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html\r\n\n", "before_files": [{"content": "import torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\n\n\nclass CanberraMetric(_BaseRegression):\n r\"\"\"\n Calculates the Canberra Metric.\n\n :math:`\\text{CM} = \\sum_{j=1}^n\\frac{|A_j - P_j|}{A_j + P_j}`\n\n where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n\n More details can be found in `Botchkarev 2018`__.\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n\n __ https://arxiv.org/abs/1809.03006\n \"\"\"\n\n def reset(self):\n self._sum_of_errors = 0.0\n\n def _update(self, output):\n y_pred, y = output\n errors = torch.abs(y.view_as(y_pred) - y_pred) / (y_pred + y.view_as(y_pred))\n self._sum_of_errors += torch.sum(errors).item()\n\n def compute(self):\n return self._sum_of_errors\n", "path": "ignite/contrib/metrics/regression/canberra_metric.py"}]}
975
507
gh_patches_debug_27545
rasdani/github-patches
git_diff
encode__uvicorn-227
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error integrating with Channels if 'lifespan' is not specified in router I'm not entirely sure if I should be posting this here or on `channels`. I'm using v0.3.12 which I believe has already introduced the new `lifespan` protocol defined in asgiref. But this causes an error with `channels`' router ```bash Traceback (most recent call last): File "/usr/local/lib/python3.6/site-packages/uvicorn/lifespan.py", line 29, in run await self.asgi(self.receive, self.send) File "/usr/local/lib/python3.6/site-packages/uvicorn/middleware/message_logger.py", line 51, in __call__ inner = self.app(self.scope) File "/usr/local/lib/python3.6/site-packages/channels/routing.py", line 58, in __call__ raise ValueError("No application configured for scope type %r" % scope["type"]) ValueError: No application configured for scope type 'lifespan' ``` My `routing.py` file looks like this: ```python application = ProtocolTypeRouter({ # Empty for now (http->django views is added by default) 'websocket': JWTWebsocketMiddleware( URLRouter(urlpatterns) ) }) ``` **EDIT**: Sorry my workaround wasn't actually working as you'll need at least one `path` in the `URLRouter`, so I've removed it. To temporarily get around this, I had to downgrade to `v0.3.9`. </issue> <code> [start of uvicorn/middleware/message_logger.py] 1 import logging 2 3 PLACEHOLDER_FORMAT = { 4 'body': '<{length} bytes>', 5 'bytes': '<{length} bytes>', 6 'text': '<{length} chars>', 7 'headers': '<...>', 8 } 9 10 11 def message_with_placeholders(message): 12 """ 13 Return an ASGI message, with any body-type content omitted and replaced 14 with a placeholder. 15 """ 16 new_message = message.copy() 17 for attr in PLACEHOLDER_FORMAT.keys(): 18 if message.get(attr) is not None: 19 content = message[attr] 20 placeholder = PLACEHOLDER_FORMAT[attr].format(length=len(content)) 21 new_message[attr] = placeholder 22 return new_message 23 24 25 class MessageLoggerMiddleware: 26 def __init__(self, app): 27 self.task_counter = 0 28 self.app = app 29 self.logger = logging.getLogger("uvicorn") 30 31 def __call__(self, scope): 32 self.task_counter += 1 33 return MessageLoggerResponder(scope, self.app, self.logger, self.task_counter) 34 35 36 class MessageLoggerResponder: 37 def __init__(self, scope, app, logger, task_counter): 38 self.scope = scope 39 self.app = app 40 self.logger = logger 41 self.task_counter = task_counter 42 self.client_addr = scope.get('client') 43 44 async def __call__(self, receive, send): 45 self._receive = receive 46 self._send = send 47 logged_scope = message_with_placeholders(self.scope) 48 log_text = '%s - ASGI [%d] Started %s' 49 self.logger.debug(log_text, self.client_addr, self.task_counter, logged_scope) 50 try: 51 inner = self.app(self.scope) 52 await inner(self.receive, self.send) 53 except: 54 log_text = '%s - ASGI [%d] Raised exception' 55 self.logger.debug(log_text, self.client_addr, self.task_counter) 56 raise 57 else: 58 log_text = '%s - ASGI [%d] Completed' 59 self.logger.debug(log_text, self.client_addr, self.task_counter) 60 61 async def receive(self): 62 message = await self._receive() 63 logged_message = message_with_placeholders(message) 64 log_text = '%s - ASGI [%d] Sent %s' 65 self.logger.debug(log_text, self.client_addr, self.task_counter, logged_message) 66 return message 67 68 async def send(self, message): 69 logged_message = message_with_placeholders(message) 70 log_text = '%s - ASGI [%d] Received %s' 71 self.logger.debug(log_text, self.client_addr, self.task_counter, logged_message) 72 await self._send(message) 73 [end of uvicorn/middleware/message_logger.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/uvicorn/middleware/message_logger.py b/uvicorn/middleware/message_logger.py --- a/uvicorn/middleware/message_logger.py +++ b/uvicorn/middleware/message_logger.py @@ -36,20 +36,27 @@ class MessageLoggerResponder: def __init__(self, scope, app, logger, task_counter): self.scope = scope - self.app = app self.logger = logger self.task_counter = task_counter self.client_addr = scope.get('client') + logged_scope = message_with_placeholders(scope) + log_text = '%s - ASGI [%d] Initialized %s' + self.logger.debug(log_text, self.client_addr, self.task_counter, logged_scope) + try: + self.inner = app(scope) + except: + log_text = '%s - ASGI [%d] Raised exception' + self.logger.debug(log_text, self.client_addr, self.task_counter) + raise + async def __call__(self, receive, send): self._receive = receive self._send = send - logged_scope = message_with_placeholders(self.scope) - log_text = '%s - ASGI [%d] Started %s' - self.logger.debug(log_text, self.client_addr, self.task_counter, logged_scope) + log_text = '%s - ASGI [%d] Started task' + self.logger.debug(log_text, self.client_addr, self.task_counter) try: - inner = self.app(self.scope) - await inner(self.receive, self.send) + await self.inner(self.receive, self.send) except: log_text = '%s - ASGI [%d] Raised exception' self.logger.debug(log_text, self.client_addr, self.task_counter)
{"golden_diff": "diff --git a/uvicorn/middleware/message_logger.py b/uvicorn/middleware/message_logger.py\n--- a/uvicorn/middleware/message_logger.py\n+++ b/uvicorn/middleware/message_logger.py\n@@ -36,20 +36,27 @@\n class MessageLoggerResponder:\n def __init__(self, scope, app, logger, task_counter):\n self.scope = scope\n- self.app = app\n self.logger = logger\n self.task_counter = task_counter\n self.client_addr = scope.get('client')\n \n+ logged_scope = message_with_placeholders(scope)\n+ log_text = '%s - ASGI [%d] Initialized %s'\n+ self.logger.debug(log_text, self.client_addr, self.task_counter, logged_scope)\n+ try:\n+ self.inner = app(scope)\n+ except:\n+ log_text = '%s - ASGI [%d] Raised exception'\n+ self.logger.debug(log_text, self.client_addr, self.task_counter)\n+ raise\n+\n async def __call__(self, receive, send):\n self._receive = receive\n self._send = send\n- logged_scope = message_with_placeholders(self.scope)\n- log_text = '%s - ASGI [%d] Started %s'\n- self.logger.debug(log_text, self.client_addr, self.task_counter, logged_scope)\n+ log_text = '%s - ASGI [%d] Started task'\n+ self.logger.debug(log_text, self.client_addr, self.task_counter)\n try:\n- inner = self.app(self.scope)\n- await inner(self.receive, self.send)\n+ await self.inner(self.receive, self.send)\n except:\n log_text = '%s - ASGI [%d] Raised exception'\n self.logger.debug(log_text, self.client_addr, self.task_counter)\n", "issue": "Error integrating with Channels if 'lifespan' is not specified in router\nI'm not entirely sure if I should be posting this here or on `channels`.\r\n\r\nI'm using v0.3.12 which I believe has already introduced the new `lifespan` protocol defined in asgiref. But this causes an error with `channels`' router\r\n\r\n```bash\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/uvicorn/lifespan.py\", line 29, in run\r\n await self.asgi(self.receive, self.send)\r\n File \"/usr/local/lib/python3.6/site-packages/uvicorn/middleware/message_logger.py\", line 51, in __call__\r\n inner = self.app(self.scope)\r\n File \"/usr/local/lib/python3.6/site-packages/channels/routing.py\", line 58, in __call__\r\n raise ValueError(\"No application configured for scope type %r\" % scope[\"type\"])\r\nValueError: No application configured for scope type 'lifespan'\r\n```\r\n\r\nMy `routing.py` file looks like this:\r\n\r\n```python\r\napplication = ProtocolTypeRouter({\r\n # Empty for now (http->django views is added by default)\r\n 'websocket': JWTWebsocketMiddleware(\r\n URLRouter(urlpatterns)\r\n )\r\n})\r\n```\r\n\r\n**EDIT**: Sorry my workaround wasn't actually working as you'll need at least one `path` in the `URLRouter`, so I've removed it.\r\n\r\nTo temporarily get around this, I had to downgrade to `v0.3.9`.\n", "before_files": [{"content": "import logging\n\nPLACEHOLDER_FORMAT = {\n 'body': '<{length} bytes>',\n 'bytes': '<{length} bytes>',\n 'text': '<{length} chars>',\n 'headers': '<...>',\n}\n\n\ndef message_with_placeholders(message):\n \"\"\"\n Return an ASGI message, with any body-type content omitted and replaced\n with a placeholder.\n \"\"\"\n new_message = message.copy()\n for attr in PLACEHOLDER_FORMAT.keys():\n if message.get(attr) is not None:\n content = message[attr]\n placeholder = PLACEHOLDER_FORMAT[attr].format(length=len(content))\n new_message[attr] = placeholder\n return new_message\n\n\nclass MessageLoggerMiddleware:\n def __init__(self, app):\n self.task_counter = 0\n self.app = app\n self.logger = logging.getLogger(\"uvicorn\")\n\n def __call__(self, scope):\n self.task_counter += 1\n return MessageLoggerResponder(scope, self.app, self.logger, self.task_counter)\n\n\nclass MessageLoggerResponder:\n def __init__(self, scope, app, logger, task_counter):\n self.scope = scope\n self.app = app\n self.logger = logger\n self.task_counter = task_counter\n self.client_addr = scope.get('client')\n\n async def __call__(self, receive, send):\n self._receive = receive\n self._send = send\n logged_scope = message_with_placeholders(self.scope)\n log_text = '%s - ASGI [%d] Started %s'\n self.logger.debug(log_text, self.client_addr, self.task_counter, logged_scope)\n try:\n inner = self.app(self.scope)\n await inner(self.receive, self.send)\n except:\n log_text = '%s - ASGI [%d] Raised exception'\n self.logger.debug(log_text, self.client_addr, self.task_counter)\n raise\n else:\n log_text = '%s - ASGI [%d] Completed'\n self.logger.debug(log_text, self.client_addr, self.task_counter)\n\n async def receive(self):\n message = await self._receive()\n logged_message = message_with_placeholders(message)\n log_text = '%s - ASGI [%d] Sent %s'\n self.logger.debug(log_text, self.client_addr, self.task_counter, logged_message)\n return message\n\n async def send(self, message):\n logged_message = message_with_placeholders(message)\n log_text = '%s - ASGI [%d] Received %s'\n self.logger.debug(log_text, self.client_addr, self.task_counter, logged_message)\n await self._send(message)\n", "path": "uvicorn/middleware/message_logger.py"}]}
1,564
390
gh_patches_debug_6181
rasdani/github-patches
git_diff
scrapy__scrapy-2816
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DNSCACHE_ENABLED=False not working Originally reported by @softwarevamp on [StackOverflow](https://stackoverflow.com/questions/44877296/scrapy-with-dnscache-enabled-false-not-working): > When i run scrapy shell with `DNSCACHE_ENABLED=False` got ``` KeyError: 'dictionary is empty' twisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: www.mydomain.com. ``` ``` 2017-07-03 03:09:12 [twisted] CRITICAL: while looking up www.mydomain.com with <scrapy.resolver.CachingThreadedResolver object at 0x3fd0050> Traceback (most recent call last): File "/usr/lib64/python2.7/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/usr/lib64/python2.7/site-packages/scrapy/resolver.py", line 29, in _cache_result dnscache[name] = result File "/usr/lib64/python2.7/site-packages/scrapy/utils/datatypes.py", line 305, in __setitem__ self.popitem(last=False) File "/usr/lib64/python2.7/collections.py", line 159, in popitem raise KeyError('dictionary is empty') KeyError: 'dictionary is empty' 2017-07-03 03:09:12 [scrapy.downloadermiddlewares.retry] DEBUG: Gave up retrying <GET //www.mydomain.com/> (failed 3 times): DNS lookup failed: no results for hostname lookup: www.mydomain.com. Traceback (most recent call last): File "/usr/bin/scrapy", line 11, in <module> sys.exit(execute()) File "/usr/lib64/python2.7/site-packages/scrapy/cmdline.py", line 149, in execute _run_print_help(parser, _run_command, cmd, args, opts) File "/usr/lib64/python2.7/site-packages/scrapy/cmdline.py", line 89, in _run_print_help func(*a, **kw) File "/usr/lib64/python2.7/site-packages/scrapy/cmdline.py", line 156, in _run_command cmd.run(args, opts) File "/usr/lib64/python2.7/site-packages/scrapy/commands/shell.py", line 73, in run shell.start(url=url, redirect=not opts.no_redirect) File "/usr/lib64/python2.7/site-packages/scrapy/shell.py", line 48, in start self.fetch(url, spider, redirect=redirect) File "/usr/lib64/python2.7/site-packages/scrapy/shell.py", line 115, in fetch reactor, self._schedule, request, spider) File "/usr/lib64/python2.7/site-packages/twisted/internet/threads.py", line 122, in blockingCallFromThread result.raiseException() File "<string>", line 2, in raiseException twisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: www.mydomain.com. ``` > Any thoughts welcome </issue> <code> [start of scrapy/resolver.py] 1 from twisted.internet import defer 2 from twisted.internet.base import ThreadedResolver 3 4 from scrapy.utils.datatypes import LocalCache 5 6 # TODO: cache misses 7 8 dnscache = LocalCache(10000) 9 10 class CachingThreadedResolver(ThreadedResolver): 11 def __init__(self, reactor, cache_size, timeout): 12 super(CachingThreadedResolver, self).__init__(reactor) 13 dnscache.limit = cache_size 14 self.timeout = timeout 15 16 def getHostByName(self, name, timeout=None): 17 if name in dnscache: 18 return defer.succeed(dnscache[name]) 19 # in Twisted<=16.6, getHostByName() is always called with 20 # a default timeout of 60s (actually passed as (1, 3, 11, 45) tuple), 21 # so the input argument above is simply overridden 22 # to enforce Scrapy's DNS_TIMEOUT setting's value 23 timeout = (self.timeout,) 24 d = super(CachingThreadedResolver, self).getHostByName(name, timeout) 25 d.addCallback(self._cache_result, name) 26 return d 27 28 def _cache_result(self, result, name): 29 dnscache[name] = result 30 return result 31 [end of scrapy/resolver.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/resolver.py b/scrapy/resolver.py --- a/scrapy/resolver.py +++ b/scrapy/resolver.py @@ -22,7 +22,8 @@ # to enforce Scrapy's DNS_TIMEOUT setting's value timeout = (self.timeout,) d = super(CachingThreadedResolver, self).getHostByName(name, timeout) - d.addCallback(self._cache_result, name) + if dnscache.limit: + d.addCallback(self._cache_result, name) return d def _cache_result(self, result, name):
{"golden_diff": "diff --git a/scrapy/resolver.py b/scrapy/resolver.py\n--- a/scrapy/resolver.py\n+++ b/scrapy/resolver.py\n@@ -22,7 +22,8 @@\n # to enforce Scrapy's DNS_TIMEOUT setting's value\n timeout = (self.timeout,)\n d = super(CachingThreadedResolver, self).getHostByName(name, timeout)\n- d.addCallback(self._cache_result, name)\n+ if dnscache.limit:\n+ d.addCallback(self._cache_result, name)\n return d\n \n def _cache_result(self, result, name):\n", "issue": "DNSCACHE_ENABLED=False not working\nOriginally reported by @softwarevamp on [StackOverflow](https://stackoverflow.com/questions/44877296/scrapy-with-dnscache-enabled-false-not-working):\r\n\r\n> When i run scrapy shell with `DNSCACHE_ENABLED=False` got\r\n```\r\nKeyError: 'dictionary is empty'\r\ntwisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: www.mydomain.com.\r\n```\r\n\r\n```\r\n 2017-07-03 03:09:12 [twisted] CRITICAL: while looking up www.mydomain.com with <scrapy.resolver.CachingThreadedResolver object at 0x3fd0050>\r\n Traceback (most recent call last):\r\n File \"/usr/lib64/python2.7/site-packages/twisted/internet/defer.py\", line 653, in _runCallbacks\r\n current.result = callback(current.result, *args, **kw)\r\n File \"/usr/lib64/python2.7/site-packages/scrapy/resolver.py\", line 29, in _cache_result\r\n dnscache[name] = result\r\n File \"/usr/lib64/python2.7/site-packages/scrapy/utils/datatypes.py\", line 305, in __setitem__\r\n self.popitem(last=False)\r\n File \"/usr/lib64/python2.7/collections.py\", line 159, in popitem\r\n raise KeyError('dictionary is empty')\r\n KeyError: 'dictionary is empty'\r\n 2017-07-03 03:09:12 [scrapy.downloadermiddlewares.retry] DEBUG: Gave up retrying <GET //www.mydomain.com/> (failed 3 times): DNS lookup failed: no results for hostname lookup: www.mydomain.com.\r\n Traceback (most recent call last):\r\n File \"/usr/bin/scrapy\", line 11, in <module>\r\n sys.exit(execute())\r\n File \"/usr/lib64/python2.7/site-packages/scrapy/cmdline.py\", line 149, in execute\r\n _run_print_help(parser, _run_command, cmd, args, opts)\r\n File \"/usr/lib64/python2.7/site-packages/scrapy/cmdline.py\", line 89, in _run_print_help\r\n func(*a, **kw)\r\n File \"/usr/lib64/python2.7/site-packages/scrapy/cmdline.py\", line 156, in _run_command\r\n cmd.run(args, opts)\r\n File \"/usr/lib64/python2.7/site-packages/scrapy/commands/shell.py\", line 73, in run\r\n shell.start(url=url, redirect=not opts.no_redirect)\r\n File \"/usr/lib64/python2.7/site-packages/scrapy/shell.py\", line 48, in start\r\n self.fetch(url, spider, redirect=redirect)\r\n File \"/usr/lib64/python2.7/site-packages/scrapy/shell.py\", line 115, in fetch\r\n reactor, self._schedule, request, spider)\r\n File \"/usr/lib64/python2.7/site-packages/twisted/internet/threads.py\", line 122, in blockingCallFromThread\r\n result.raiseException()\r\n File \"<string>\", line 2, in raiseException\r\n twisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: www.mydomain.com.\r\n```\r\n\r\n> Any thoughts welcome\n", "before_files": [{"content": "from twisted.internet import defer\nfrom twisted.internet.base import ThreadedResolver\n\nfrom scrapy.utils.datatypes import LocalCache\n\n# TODO: cache misses\n\ndnscache = LocalCache(10000)\n\nclass CachingThreadedResolver(ThreadedResolver):\n def __init__(self, reactor, cache_size, timeout):\n super(CachingThreadedResolver, self).__init__(reactor)\n dnscache.limit = cache_size\n self.timeout = timeout\n\n def getHostByName(self, name, timeout=None):\n if name in dnscache:\n return defer.succeed(dnscache[name])\n # in Twisted<=16.6, getHostByName() is always called with\n # a default timeout of 60s (actually passed as (1, 3, 11, 45) tuple),\n # so the input argument above is simply overridden\n # to enforce Scrapy's DNS_TIMEOUT setting's value\n timeout = (self.timeout,)\n d = super(CachingThreadedResolver, self).getHostByName(name, timeout)\n d.addCallback(self._cache_result, name)\n return d\n\n def _cache_result(self, result, name):\n dnscache[name] = result\n return result\n", "path": "scrapy/resolver.py"}]}
1,609
131
gh_patches_debug_24499
rasdani/github-patches
git_diff
pre-commit__pre-commit-797
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> python_venv language fails to use python3 interpreter and is using python2.7 instead Apparently pre-commit failed to use python3 interpreter when I tried to add a hook and thus failed because venv module was not installed on default python2.7! ``` $ pre-commit try-repo ../python-license-check [19:55:27] [INFO] Initializing environment for ../python-license-check. =============================================================================== Using config: =============================================================================== repos: - repo: ../python-license-check rev: 4048cf3844dbbf45690c153a7da7f532585ec87c hooks: - id: liccheck =============================================================================== [INFO] Installing environment for ../python-license-check. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... An unexpected error has occurred: CalledProcessError: Command: ('/Users/ssbarnea/.pyenv/versions/2.7.14/bin/python2.7', '-mvenv', '/var/folders/br/99tfdvcs3vvfwdk69z7f0xmc0000gn/T/tmpayl0P5/repoHa7_qe/py_venv-python2.7') Return code: 1 Expected return code: 0 Output: (none) Errors: /Users/ssbarnea/.pyenv/versions/2.7.14/bin/python2.7: No module named venv Check the log at /Users/ssbarnea/.cache/pre-commit/pre-commit.log FAIL: 1 ssbarnea@smac: ~/os/jira master ⚡ $ cat ../python-license-check/.pre-commit-hooks.yaml [19:55:34] - id: liccheck name: Validates dependency licenses for Python packages description: This validator validates a pre-commit hooks manifest file entry: liccheck -s setup.cfg -r requirements.txt language: python_venv ``` Based on the documentation I was expecting to see pre-commit using the `python3` executable for calling venv module. </issue> <code> [start of pre_commit/languages/python_venv.py] 1 from __future__ import unicode_literals 2 3 import os.path 4 5 from pre_commit.languages import python 6 from pre_commit.util import CalledProcessError 7 from pre_commit.util import cmd_output 8 9 10 ENVIRONMENT_DIR = 'py_venv' 11 12 13 def orig_py_exe(exe): # pragma: no cover (platform specific) 14 """A -mvenv virtualenv made from a -mvirtualenv virtualenv installs 15 packages to the incorrect location. Attempt to find the _original_ exe 16 and invoke `-mvenv` from there. 17 18 See: 19 - https://github.com/pre-commit/pre-commit/issues/755 20 - https://github.com/pypa/virtualenv/issues/1095 21 - https://bugs.python.org/issue30811 22 """ 23 try: 24 prefix_script = 'import sys; print(sys.real_prefix)' 25 _, prefix, _ = cmd_output(exe, '-c', prefix_script) 26 prefix = prefix.strip() 27 except CalledProcessError: 28 # not created from -mvirtualenv 29 return exe 30 31 if os.name == 'nt': 32 expected = os.path.join(prefix, 'python.exe') 33 else: 34 expected = os.path.join(prefix, 'bin', os.path.basename(exe)) 35 36 if os.path.exists(expected): 37 return expected 38 else: 39 return exe 40 41 42 def make_venv(envdir, python): 43 cmd_output(orig_py_exe(python), '-mvenv', envdir, cwd='/') 44 45 46 get_default_version = python.get_default_version 47 _interface = python.py_interface(ENVIRONMENT_DIR, make_venv) 48 in_env, healthy, run_hook, install_environment = _interface 49 [end of pre_commit/languages/python_venv.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/python_venv.py b/pre_commit/languages/python_venv.py --- a/pre_commit/languages/python_venv.py +++ b/pre_commit/languages/python_venv.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import os.path +import sys from pre_commit.languages import python from pre_commit.util import CalledProcessError @@ -10,6 +11,13 @@ ENVIRONMENT_DIR = 'py_venv' +def get_default_version(): # pragma: no cover (version specific) + if sys.version_info < (3,): + return 'python3' + else: + return python.get_default_version() + + def orig_py_exe(exe): # pragma: no cover (platform specific) """A -mvenv virtualenv made from a -mvirtualenv virtualenv installs packages to the incorrect location. Attempt to find the _original_ exe @@ -43,6 +51,5 @@ cmd_output(orig_py_exe(python), '-mvenv', envdir, cwd='/') -get_default_version = python.get_default_version _interface = python.py_interface(ENVIRONMENT_DIR, make_venv) in_env, healthy, run_hook, install_environment = _interface
{"golden_diff": "diff --git a/pre_commit/languages/python_venv.py b/pre_commit/languages/python_venv.py\n--- a/pre_commit/languages/python_venv.py\n+++ b/pre_commit/languages/python_venv.py\n@@ -1,6 +1,7 @@\n from __future__ import unicode_literals\n \n import os.path\n+import sys\n \n from pre_commit.languages import python\n from pre_commit.util import CalledProcessError\n@@ -10,6 +11,13 @@\n ENVIRONMENT_DIR = 'py_venv'\n \n \n+def get_default_version(): # pragma: no cover (version specific)\n+ if sys.version_info < (3,):\n+ return 'python3'\n+ else:\n+ return python.get_default_version()\n+\n+\n def orig_py_exe(exe): # pragma: no cover (platform specific)\n \"\"\"A -mvenv virtualenv made from a -mvirtualenv virtualenv installs\n packages to the incorrect location. Attempt to find the _original_ exe\n@@ -43,6 +51,5 @@\n cmd_output(orig_py_exe(python), '-mvenv', envdir, cwd='/')\n \n \n-get_default_version = python.get_default_version\n _interface = python.py_interface(ENVIRONMENT_DIR, make_venv)\n in_env, healthy, run_hook, install_environment = _interface\n", "issue": "python_venv language fails to use python3 interpreter and is using python2.7 instead\nApparently pre-commit failed to use python3 interpreter when I tried to add a hook and thus failed because venv module was not installed on default python2.7!\r\n\r\n```\r\n$ pre-commit try-repo ../python-license-check [19:55:27]\r\n[INFO] Initializing environment for ../python-license-check.\r\n===============================================================================\r\nUsing config:\r\n===============================================================================\r\nrepos:\r\n- repo: ../python-license-check\r\n rev: 4048cf3844dbbf45690c153a7da7f532585ec87c\r\n hooks:\r\n - id: liccheck\r\n===============================================================================\r\n[INFO] Installing environment for ../python-license-check.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: Command: ('/Users/ssbarnea/.pyenv/versions/2.7.14/bin/python2.7', '-mvenv', '/var/folders/br/99tfdvcs3vvfwdk69z7f0xmc0000gn/T/tmpayl0P5/repoHa7_qe/py_venv-python2.7')\r\nReturn code: 1\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors:\r\n /Users/ssbarnea/.pyenv/versions/2.7.14/bin/python2.7: No module named venv\r\n\r\n\r\nCheck the log at /Users/ssbarnea/.cache/pre-commit/pre-commit.log\r\nFAIL: 1\r\nssbarnea@smac: ~/os/jira master \u26a1 $ cat ../python-license-check/.pre-commit-hooks.yaml [19:55:34]\r\n- id: liccheck\r\n name: Validates dependency licenses for Python packages\r\n description: This validator validates a pre-commit hooks manifest file\r\n entry: liccheck -s setup.cfg -r requirements.txt\r\n language: python_venv\r\n```\r\n\r\nBased on the documentation I was expecting to see pre-commit using the `python3` executable for calling venv module. \n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os.path\n\nfrom pre_commit.languages import python\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'py_venv'\n\n\ndef orig_py_exe(exe): # pragma: no cover (platform specific)\n \"\"\"A -mvenv virtualenv made from a -mvirtualenv virtualenv installs\n packages to the incorrect location. Attempt to find the _original_ exe\n and invoke `-mvenv` from there.\n\n See:\n - https://github.com/pre-commit/pre-commit/issues/755\n - https://github.com/pypa/virtualenv/issues/1095\n - https://bugs.python.org/issue30811\n \"\"\"\n try:\n prefix_script = 'import sys; print(sys.real_prefix)'\n _, prefix, _ = cmd_output(exe, '-c', prefix_script)\n prefix = prefix.strip()\n except CalledProcessError:\n # not created from -mvirtualenv\n return exe\n\n if os.name == 'nt':\n expected = os.path.join(prefix, 'python.exe')\n else:\n expected = os.path.join(prefix, 'bin', os.path.basename(exe))\n\n if os.path.exists(expected):\n return expected\n else:\n return exe\n\n\ndef make_venv(envdir, python):\n cmd_output(orig_py_exe(python), '-mvenv', envdir, cwd='/')\n\n\nget_default_version = python.get_default_version\n_interface = python.py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python_venv.py"}]}
1,476
289
gh_patches_debug_18504
rasdani/github-patches
git_diff
open-mmlab__mmdetection-2296
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ImportError: cannot import name 'CARAFENAIVE' from 'mmdet.ops.carafe' The module name 'CARAFENAIVE' in file 'mmdet.ops.carafe.grad_check.py' shoud be 'CARAFENaive'. When I run this command 'python mmdet/ops/carafe/grad_check.py', the following error is reported: ImportError: cannot import name 'CARAFENAIVE' from 'mmdet.ops.carafe'. </issue> <code> [start of mmdet/ops/carafe/grad_check.py] 1 import os.path as osp 2 import sys 3 4 import mmcv 5 import torch 6 from torch.autograd import gradcheck 7 8 sys.path.append(osp.abspath(osp.join(__file__, '../../'))) 9 from mmdet.ops.carafe import CARAFENAIVE # noqa: E402, isort:skip 10 from mmdet.ops.carafe import carafe_naive # noqa: E402, isort:skip 11 from mmdet.ops.carafe import carafe, CARAFE # noqa: E402, isort:skip 12 13 feat = torch.randn(2, 64, 3, 3, requires_grad=True, device='cuda:0').double() 14 mask = torch.randn( 15 2, 100, 6, 6, requires_grad=True, device='cuda:0').sigmoid().double() 16 17 print('Gradcheck for carafe...') 18 test = gradcheck(CARAFE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4) 19 print(test) 20 21 print('Gradcheck for carafe naive...') 22 test = gradcheck(CARAFENAIVE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4) 23 print(test) 24 25 feat = torch.randn( 26 2, 1024, 100, 100, requires_grad=True, device='cuda:0').float() 27 mask = torch.randn( 28 2, 25, 200, 200, requires_grad=True, device='cuda:0').sigmoid().float() 29 loop_num = 500 30 31 time_forward = 0 32 time_backward = 0 33 bar = mmcv.ProgressBar(loop_num) 34 timer = mmcv.Timer() 35 for i in range(loop_num): 36 x = carafe(feat.clone(), mask.clone(), 5, 1, 2) 37 torch.cuda.synchronize() 38 time_forward += timer.since_last_check() 39 x.sum().backward(retain_graph=True) 40 torch.cuda.synchronize() 41 time_backward += timer.since_last_check() 42 bar.update() 43 print('\nCARAFE time forward: {} ms/iter | time backward: {} ms/iter'.format( 44 (time_forward + 1e-3) * 1e3 / loop_num, 45 (time_backward + 1e-3) * 1e3 / loop_num)) 46 47 time_naive_forward = 0 48 time_naive_backward = 0 49 bar = mmcv.ProgressBar(loop_num) 50 timer = mmcv.Timer() 51 for i in range(loop_num): 52 x = carafe_naive(feat.clone(), mask.clone(), 5, 1, 2) 53 torch.cuda.synchronize() 54 time_naive_forward += timer.since_last_check() 55 x.sum().backward(retain_graph=True) 56 torch.cuda.synchronize() 57 time_naive_backward += timer.since_last_check() 58 bar.update() 59 print('\nCARAFE naive time forward: {} ms/iter | time backward: {} ms/iter'. 60 format((time_naive_forward + 1e-3) * 1e3 / loop_num, 61 (time_naive_backward + 1e-3) * 1e3 / loop_num)) 62 [end of mmdet/ops/carafe/grad_check.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mmdet/ops/carafe/grad_check.py b/mmdet/ops/carafe/grad_check.py --- a/mmdet/ops/carafe/grad_check.py +++ b/mmdet/ops/carafe/grad_check.py @@ -6,9 +6,8 @@ from torch.autograd import gradcheck sys.path.append(osp.abspath(osp.join(__file__, '../../'))) -from mmdet.ops.carafe import CARAFENAIVE # noqa: E402, isort:skip -from mmdet.ops.carafe import carafe_naive # noqa: E402, isort:skip -from mmdet.ops.carafe import carafe, CARAFE # noqa: E402, isort:skip +from mmdet.ops.carafe import CARAFE, CARAFENaive # noqa: E402, isort:skip +from mmdet.ops.carafe import carafe, carafe_naive # noqa: E402, isort:skip feat = torch.randn(2, 64, 3, 3, requires_grad=True, device='cuda:0').double() mask = torch.randn( @@ -19,7 +18,7 @@ print(test) print('Gradcheck for carafe naive...') -test = gradcheck(CARAFENAIVE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4) +test = gradcheck(CARAFENaive(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4) print(test) feat = torch.randn(
{"golden_diff": "diff --git a/mmdet/ops/carafe/grad_check.py b/mmdet/ops/carafe/grad_check.py\n--- a/mmdet/ops/carafe/grad_check.py\n+++ b/mmdet/ops/carafe/grad_check.py\n@@ -6,9 +6,8 @@\n from torch.autograd import gradcheck\n \n sys.path.append(osp.abspath(osp.join(__file__, '../../')))\n-from mmdet.ops.carafe import CARAFENAIVE # noqa: E402, isort:skip\n-from mmdet.ops.carafe import carafe_naive # noqa: E402, isort:skip\n-from mmdet.ops.carafe import carafe, CARAFE # noqa: E402, isort:skip\n+from mmdet.ops.carafe import CARAFE, CARAFENaive # noqa: E402, isort:skip\n+from mmdet.ops.carafe import carafe, carafe_naive # noqa: E402, isort:skip\n \n feat = torch.randn(2, 64, 3, 3, requires_grad=True, device='cuda:0').double()\n mask = torch.randn(\n@@ -19,7 +18,7 @@\n print(test)\n \n print('Gradcheck for carafe naive...')\n-test = gradcheck(CARAFENAIVE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)\n+test = gradcheck(CARAFENaive(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)\n print(test)\n \n feat = torch.randn(\n", "issue": "ImportError: cannot import name 'CARAFENAIVE' from 'mmdet.ops.carafe'\nThe module name 'CARAFENAIVE' in file 'mmdet.ops.carafe.grad_check.py' shoud be 'CARAFENaive'. When I run this command 'python mmdet/ops/carafe/grad_check.py', the following error is reported: ImportError: cannot import name 'CARAFENAIVE' from 'mmdet.ops.carafe'.\n", "before_files": [{"content": "import os.path as osp\nimport sys\n\nimport mmcv\nimport torch\nfrom torch.autograd import gradcheck\n\nsys.path.append(osp.abspath(osp.join(__file__, '../../')))\nfrom mmdet.ops.carafe import CARAFENAIVE # noqa: E402, isort:skip\nfrom mmdet.ops.carafe import carafe_naive # noqa: E402, isort:skip\nfrom mmdet.ops.carafe import carafe, CARAFE # noqa: E402, isort:skip\n\nfeat = torch.randn(2, 64, 3, 3, requires_grad=True, device='cuda:0').double()\nmask = torch.randn(\n 2, 100, 6, 6, requires_grad=True, device='cuda:0').sigmoid().double()\n\nprint('Gradcheck for carafe...')\ntest = gradcheck(CARAFE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)\nprint(test)\n\nprint('Gradcheck for carafe naive...')\ntest = gradcheck(CARAFENAIVE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)\nprint(test)\n\nfeat = torch.randn(\n 2, 1024, 100, 100, requires_grad=True, device='cuda:0').float()\nmask = torch.randn(\n 2, 25, 200, 200, requires_grad=True, device='cuda:0').sigmoid().float()\nloop_num = 500\n\ntime_forward = 0\ntime_backward = 0\nbar = mmcv.ProgressBar(loop_num)\ntimer = mmcv.Timer()\nfor i in range(loop_num):\n x = carafe(feat.clone(), mask.clone(), 5, 1, 2)\n torch.cuda.synchronize()\n time_forward += timer.since_last_check()\n x.sum().backward(retain_graph=True)\n torch.cuda.synchronize()\n time_backward += timer.since_last_check()\n bar.update()\nprint('\\nCARAFE time forward: {} ms/iter | time backward: {} ms/iter'.format(\n (time_forward + 1e-3) * 1e3 / loop_num,\n (time_backward + 1e-3) * 1e3 / loop_num))\n\ntime_naive_forward = 0\ntime_naive_backward = 0\nbar = mmcv.ProgressBar(loop_num)\ntimer = mmcv.Timer()\nfor i in range(loop_num):\n x = carafe_naive(feat.clone(), mask.clone(), 5, 1, 2)\n torch.cuda.synchronize()\n time_naive_forward += timer.since_last_check()\n x.sum().backward(retain_graph=True)\n torch.cuda.synchronize()\n time_naive_backward += timer.since_last_check()\n bar.update()\nprint('\\nCARAFE naive time forward: {} ms/iter | time backward: {} ms/iter'.\n format((time_naive_forward + 1e-3) * 1e3 / loop_num,\n (time_naive_backward + 1e-3) * 1e3 / loop_num))\n", "path": "mmdet/ops/carafe/grad_check.py"}]}
1,475
382
gh_patches_debug_9269
rasdani/github-patches
git_diff
autogluon__autogluon-2915
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update scikit-learn-intelex version - [ ] Check if scikit-learn-intelex can be upgraded. </issue> <code> [start of tabular/setup.py] 1 #!/usr/bin/env python 2 ########################### 3 # This code block is a HACK (!), but is necessary to avoid code duplication. Do NOT alter these lines. 4 import os 5 from setuptools import setup 6 import importlib.util 7 filepath = os.path.abspath(os.path.dirname(__file__)) 8 filepath_import = os.path.join(filepath, '..', 'core', 'src', 'autogluon', 'core', '_setup_utils.py') 9 spec = importlib.util.spec_from_file_location("ag_min_dependencies", filepath_import) 10 ag = importlib.util.module_from_spec(spec) 11 # Identical to `from autogluon.core import _setup_utils as ag`, but works without `autogluon.core` being installed. 12 spec.loader.exec_module(ag) 13 ########################### 14 15 import sys 16 17 version = ag.load_version_file() 18 version = ag.update_version(version) 19 20 submodule = 'tabular' 21 install_requires = [ 22 # version ranges added in ag.get_dependency_version_ranges() 23 'numpy', # version range defined in `core/_setup_utils.py` 24 'scipy', # version range defined in `core/_setup_utils.py` 25 'pandas', # version range defined in `core/_setup_utils.py` 26 'scikit-learn', # version range defined in `core/_setup_utils.py` 27 'networkx', # version range defined in `core/_setup_utils.py` 28 f'{ag.PACKAGE_NAME}.core=={version}', 29 f'{ag.PACKAGE_NAME}.features=={version}', 30 ] 31 32 extras_require = { 33 'lightgbm': [ 34 'lightgbm>=3.3,<3.4', 35 ], 36 'catboost': [ 37 'catboost>=1.0,<1.2', 38 ], 39 # FIXME: Debug why xgboost 1.6 has 4x+ slower inference on multiclass datasets compared to 1.4 40 # It is possibly only present on MacOS, haven't tested linux. 41 # XGBoost made API breaking changes in 1.6 with custom metric and callback support, so we don't support older versions. 42 'xgboost': [ 43 'xgboost>=1.6,<1.8', 44 ], 45 'fastai': [ 46 'torch>=1.9,<1.14', 47 'fastai>=2.3.1,<2.8', 48 ], 49 'ray': [ 50 f'{ag.PACKAGE_NAME}.core[all]=={version}', 51 ], 52 'skex': [ 53 'scikit-learn-intelex>=2021.6,<2021.8', 54 ], 55 'imodels': [ 56 'imodels>=1.3.10,<1.4.0', # 1.3.8/1.3.9 either remove/renamed attribute `complexity_` causing failures. https://github.com/csinva/imodels/issues/147 57 ], 58 'vowpalwabbit': [ 59 # FIXME: 9.5+ causes VW to save an empty model which always predicts 0. Confirmed on MacOS (Intel CPU). Unknown how to fix. 60 'vowpalwabbit>=9,<9.5', 61 ], 62 'skl2onnx': [ 63 'skl2onnx>=1.13.0,<1.14.0', 64 # For macOS, there isn't a onnxruntime-gpu package installed with skl2onnx. 65 # Therefore, we install onnxruntime explicitly here just for macOS. 66 'onnxruntime>=1.13.0,<1.14.0' 67 ] if sys.platform == 'darwin' else [ 68 'skl2onnx>=1.13.0,<1.14.0' 69 ] 70 } 71 72 all_requires = [] 73 # TODO: Consider adding 'skex' to 'all' 74 for extra_package in ['lightgbm', 'catboost', 'xgboost', 'fastai', 'ray']: 75 all_requires += extras_require[extra_package] 76 all_requires = list(set(all_requires)) 77 extras_require['all'] = all_requires 78 79 80 test_requires = [] 81 for test_package in ['imodels', 'vowpalwabbit', 'skl2onnx']: 82 test_requires += extras_require[test_package] 83 extras_require['tests'] = test_requires 84 install_requires = ag.get_dependency_version_ranges(install_requires) 85 86 if __name__ == '__main__': 87 ag.create_version_file(version=version, submodule=submodule) 88 setup_args = ag.default_setup_args(version=version, submodule=submodule) 89 setup( 90 install_requires=install_requires, 91 extras_require=extras_require, 92 **setup_args, 93 ) 94 [end of tabular/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tabular/setup.py b/tabular/setup.py --- a/tabular/setup.py +++ b/tabular/setup.py @@ -50,7 +50,8 @@ f'{ag.PACKAGE_NAME}.core[all]=={version}', ], 'skex': [ - 'scikit-learn-intelex>=2021.6,<2021.8', + # Note: 2021.7 released on Sep 2022, version 2022.x doesn't exist (went directly from 2021.7 to 2023.0) + 'scikit-learn-intelex>=2021.7,<2023.1', ], 'imodels': [ 'imodels>=1.3.10,<1.4.0', # 1.3.8/1.3.9 either remove/renamed attribute `complexity_` causing failures. https://github.com/csinva/imodels/issues/147
{"golden_diff": "diff --git a/tabular/setup.py b/tabular/setup.py\n--- a/tabular/setup.py\n+++ b/tabular/setup.py\n@@ -50,7 +50,8 @@\n f'{ag.PACKAGE_NAME}.core[all]=={version}',\n ],\n 'skex': [\n- 'scikit-learn-intelex>=2021.6,<2021.8',\n+ # Note: 2021.7 released on Sep 2022, version 2022.x doesn't exist (went directly from 2021.7 to 2023.0)\n+ 'scikit-learn-intelex>=2021.7,<2023.1',\n ],\n 'imodels': [\n 'imodels>=1.3.10,<1.4.0', # 1.3.8/1.3.9 either remove/renamed attribute `complexity_` causing failures. https://github.com/csinva/imodels/issues/147\n", "issue": "Update scikit-learn-intelex version\n- [ ] Check if scikit-learn-intelex can be upgraded.\n", "before_files": [{"content": "#!/usr/bin/env python\n###########################\n# This code block is a HACK (!), but is necessary to avoid code duplication. Do NOT alter these lines.\nimport os\nfrom setuptools import setup\nimport importlib.util\nfilepath = os.path.abspath(os.path.dirname(__file__))\nfilepath_import = os.path.join(filepath, '..', 'core', 'src', 'autogluon', 'core', '_setup_utils.py')\nspec = importlib.util.spec_from_file_location(\"ag_min_dependencies\", filepath_import)\nag = importlib.util.module_from_spec(spec)\n# Identical to `from autogluon.core import _setup_utils as ag`, but works without `autogluon.core` being installed.\nspec.loader.exec_module(ag)\n###########################\n\nimport sys\n\nversion = ag.load_version_file()\nversion = ag.update_version(version)\n\nsubmodule = 'tabular'\ninstall_requires = [\n # version ranges added in ag.get_dependency_version_ranges()\n 'numpy', # version range defined in `core/_setup_utils.py`\n 'scipy', # version range defined in `core/_setup_utils.py`\n 'pandas', # version range defined in `core/_setup_utils.py`\n 'scikit-learn', # version range defined in `core/_setup_utils.py`\n 'networkx', # version range defined in `core/_setup_utils.py`\n f'{ag.PACKAGE_NAME}.core=={version}',\n f'{ag.PACKAGE_NAME}.features=={version}',\n]\n\nextras_require = {\n 'lightgbm': [\n 'lightgbm>=3.3,<3.4',\n ],\n 'catboost': [\n 'catboost>=1.0,<1.2',\n ],\n # FIXME: Debug why xgboost 1.6 has 4x+ slower inference on multiclass datasets compared to 1.4\n # It is possibly only present on MacOS, haven't tested linux.\n # XGBoost made API breaking changes in 1.6 with custom metric and callback support, so we don't support older versions.\n 'xgboost': [\n 'xgboost>=1.6,<1.8',\n ],\n 'fastai': [\n 'torch>=1.9,<1.14',\n 'fastai>=2.3.1,<2.8',\n ],\n 'ray': [\n f'{ag.PACKAGE_NAME}.core[all]=={version}',\n ],\n 'skex': [\n 'scikit-learn-intelex>=2021.6,<2021.8',\n ],\n 'imodels': [\n 'imodels>=1.3.10,<1.4.0', # 1.3.8/1.3.9 either remove/renamed attribute `complexity_` causing failures. https://github.com/csinva/imodels/issues/147\n ],\n 'vowpalwabbit': [\n # FIXME: 9.5+ causes VW to save an empty model which always predicts 0. Confirmed on MacOS (Intel CPU). Unknown how to fix.\n 'vowpalwabbit>=9,<9.5',\n ],\n 'skl2onnx': [\n 'skl2onnx>=1.13.0,<1.14.0',\n # For macOS, there isn't a onnxruntime-gpu package installed with skl2onnx.\n # Therefore, we install onnxruntime explicitly here just for macOS.\n 'onnxruntime>=1.13.0,<1.14.0'\n ] if sys.platform == 'darwin' else [\n 'skl2onnx>=1.13.0,<1.14.0'\n ]\n}\n\nall_requires = []\n# TODO: Consider adding 'skex' to 'all'\nfor extra_package in ['lightgbm', 'catboost', 'xgboost', 'fastai', 'ray']:\n all_requires += extras_require[extra_package]\nall_requires = list(set(all_requires))\nextras_require['all'] = all_requires\n\n\ntest_requires = []\nfor test_package in ['imodels', 'vowpalwabbit', 'skl2onnx']:\n test_requires += extras_require[test_package]\nextras_require['tests'] = test_requires\ninstall_requires = ag.get_dependency_version_ranges(install_requires)\n\nif __name__ == '__main__':\n ag.create_version_file(version=version, submodule=submodule)\n setup_args = ag.default_setup_args(version=version, submodule=submodule)\n setup(\n install_requires=install_requires,\n extras_require=extras_require,\n **setup_args,\n )\n", "path": "tabular/setup.py"}]}
1,738
238
gh_patches_debug_2274
rasdani/github-patches
git_diff
svthalia__concrexit-1844
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Event (registration) status message in the API ### Is your feature request related to a problem? Please describe. Currently, the event status messages (like 'you cannot cancel your registration without having to pay a fine') are hardcoded and whenever we update them, we must also update the app ### Describe the solution you'd like Put the message in the API ### Additional context Also checkout #1381 </issue> <code> [start of website/events/api/v2/serializers/event.py] 1 from rest_framework import serializers 2 3 from activemembers.api.v2.serializers.member_group import MemberGroupSerializer 4 from announcements.api.v2.serializers import SlideSerializer 5 from documents.api.v2.serializers.document import DocumentSerializer 6 from events import services 7 from events.api.v2.serializers.event_registration import EventRegistrationSerializer 8 from events.models import Event, EventRegistration 9 from thaliawebsite.api.v2.serializers import CleanedHTMLSerializer 10 from utils.snippets import create_google_maps_url 11 12 13 class EventSerializer(serializers.ModelSerializer): 14 """Serializer for events.""" 15 16 class Meta: 17 model = Event 18 fields = ( 19 "pk", 20 "title", 21 "description", 22 "start", 23 "end", 24 "category", 25 "registration_start", 26 "registration_end", 27 "cancel_deadline", 28 "optional_registrations", 29 "location", 30 "price", 31 "fine", 32 "num_participants", 33 "max_participants", 34 "no_registration_message", 35 "has_fields", 36 "food_event", 37 "maps_url", 38 "user_permissions", 39 "user_registration", 40 "organiser", 41 "slide", 42 "documents", 43 ) 44 45 description = CleanedHTMLSerializer() 46 organiser = MemberGroupSerializer() 47 user_registration = serializers.SerializerMethodField("_user_registration") 48 num_participants = serializers.SerializerMethodField("_num_participants") 49 maps_url = serializers.SerializerMethodField("_maps_url") 50 price = serializers.DecimalField(max_digits=5, decimal_places=2) 51 fine = serializers.DecimalField(max_digits=5, decimal_places=2) 52 slide = SlideSerializer() 53 documents = DocumentSerializer(many=True) 54 user_permissions = serializers.SerializerMethodField("_user_permissions") 55 56 def _user_registration(self, instance): 57 try: 58 if self.context["request"].member: 59 reg = instance.eventregistration_set.get( 60 member=self.context["request"].member, date_cancelled=None 61 ) 62 return EventRegistrationSerializer( 63 reg, 64 context=self.context, 65 fields=("pk", "present", "queue_position", "date", "payment"), 66 ).data 67 except EventRegistration.DoesNotExist: 68 pass 69 return None 70 71 def _num_participants(self, instance): 72 if ( 73 instance.max_participants 74 and instance.participants.count() > instance.max_participants 75 ): 76 return instance.max_participants 77 return instance.participants.count() 78 79 def _user_permissions(self, instance): 80 member = self.context["request"].member 81 return services.event_permissions(member, instance) 82 83 def _maps_url(self, instance): 84 return create_google_maps_url(instance.map_location, zoom=13, size="450x250") 85 [end of website/events/api/v2/serializers/event.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py --- a/website/events/api/v2/serializers/event.py +++ b/website/events/api/v2/serializers/event.py @@ -32,6 +32,7 @@ "num_participants", "max_participants", "no_registration_message", + "cancel_too_late_message", "has_fields", "food_event", "maps_url",
{"golden_diff": "diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py\n--- a/website/events/api/v2/serializers/event.py\n+++ b/website/events/api/v2/serializers/event.py\n@@ -32,6 +32,7 @@\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n+ \"cancel_too_late_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n", "issue": "Event (registration) status message in the API\n### Is your feature request related to a problem? Please describe.\r\nCurrently, the event status messages (like 'you cannot cancel your registration without having to pay a fine') are hardcoded and whenever we update them, we must also update the app\r\n\r\n### Describe the solution you'd like\r\nPut the message in the API\r\n\r\n### Additional context\r\nAlso checkout #1381 \n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group import MemberGroupSerializer\nfrom announcements.api.v2.serializers import SlideSerializer\nfrom documents.api.v2.serializers.document import DocumentSerializer\nfrom events import services\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.models import Event, EventRegistration\nfrom thaliawebsite.api.v2.serializers import CleanedHTMLSerializer\nfrom utils.snippets import create_google_maps_url\n\n\nclass EventSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for events.\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"organiser\",\n \"slide\",\n \"documents\",\n )\n\n description = CleanedHTMLSerializer()\n organiser = MemberGroupSerializer()\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n maps_url = serializers.SerializerMethodField(\"_maps_url\")\n price = serializers.DecimalField(max_digits=5, decimal_places=2)\n fine = serializers.DecimalField(max_digits=5, decimal_places=2)\n slide = SlideSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n\n def _user_registration(self, instance):\n try:\n if self.context[\"request\"].member:\n reg = instance.eventregistration_set.get(\n member=self.context[\"request\"].member, date_cancelled=None\n )\n return EventRegistrationSerializer(\n reg,\n context=self.context,\n fields=(\"pk\", \"present\", \"queue_position\", \"date\", \"payment\"),\n ).data\n except EventRegistration.DoesNotExist:\n pass\n return None\n\n def _num_participants(self, instance):\n if (\n instance.max_participants\n and instance.participants.count() > instance.max_participants\n ):\n return instance.max_participants\n return instance.participants.count()\n\n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance)\n\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n", "path": "website/events/api/v2/serializers/event.py"}]}
1,359
112
gh_patches_debug_28
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1889
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Run tests on Windows in CI </issue> <code> [start of docs/getting_started/flask_example.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # flask_example.py 16 import flask 17 import requests 18 19 from opentelemetry import trace 20 from opentelemetry.instrumentation.flask import FlaskInstrumentor 21 from opentelemetry.instrumentation.requests import RequestsInstrumentor 22 from opentelemetry.sdk.trace import TracerProvider 23 from opentelemetry.sdk.trace.export import ( 24 BatchSpanProcessor, 25 ConsoleSpanExporter, 26 ) 27 28 trace.set_tracer_provider(TracerProvider()) 29 trace.get_tracer_provider().add_span_processor( 30 BatchSpanProcessor(ConsoleSpanExporter()) 31 ) 32 33 app = flask.Flask(__name__) 34 FlaskInstrumentor().instrument_app(app) 35 RequestsInstrumentor().instrument() 36 37 tracer = trace.get_tracer(__name__) 38 39 40 @app.route("/") 41 def hello(): 42 with tracer.start_as_current_span("example-request"): 43 requests.get("http://www.example.com") 44 return "hello" 45 46 47 app.run(debug=True, port=5000) 48 [end of docs/getting_started/flask_example.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/getting_started/flask_example.py b/docs/getting_started/flask_example.py --- a/docs/getting_started/flask_example.py +++ b/docs/getting_started/flask_example.py @@ -44,4 +44,4 @@ return "hello" -app.run(debug=True, port=5000) +app.run(port=5000)
{"golden_diff": "diff --git a/docs/getting_started/flask_example.py b/docs/getting_started/flask_example.py\n--- a/docs/getting_started/flask_example.py\n+++ b/docs/getting_started/flask_example.py\n@@ -44,4 +44,4 @@\n return \"hello\"\n \n \n-app.run(debug=True, port=5000)\n+app.run(port=5000)\n", "issue": "Run tests on Windows in CI\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# flask_example.py\nimport flask\nimport requests\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.flask import FlaskInstrumentor\nfrom opentelemetry.instrumentation.requests import RequestsInstrumentor\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n BatchSpanProcessor,\n ConsoleSpanExporter,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntrace.get_tracer_provider().add_span_processor(\n BatchSpanProcessor(ConsoleSpanExporter())\n)\n\napp = flask.Flask(__name__)\nFlaskInstrumentor().instrument_app(app)\nRequestsInstrumentor().instrument()\n\ntracer = trace.get_tracer(__name__)\n\n\[email protected](\"/\")\ndef hello():\n with tracer.start_as_current_span(\"example-request\"):\n requests.get(\"http://www.example.com\")\n return \"hello\"\n\n\napp.run(debug=True, port=5000)\n", "path": "docs/getting_started/flask_example.py"}]}
952
85
gh_patches_debug_9216
rasdani/github-patches
git_diff
interlegis__sapl-2102
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Audiências Públicas sem possibilidade de Edição Ao criar uma Audiência Pública e salva-la, não aparecem os metadados da matéria legislativa inseridas no preenchimento. Ao clicar em Editar, só aparece o título da audiência criada. grato </issue> <code> [start of sapl/audiencia/forms.py] 1 from django import forms 2 from django.core.exceptions import ObjectDoesNotExist, ValidationError 3 from django.db import transaction 4 from django.utils.translation import ugettext_lazy as _ 5 from sapl.audiencia.models import AudienciaPublica, TipoAudienciaPublica 6 from sapl.materia.models import MateriaLegislativa, TipoMateriaLegislativa 7 from sapl.utils import timezone 8 9 class AudienciaForm(forms.ModelForm): 10 11 data_atual = timezone.now() 12 13 tipo = forms.ModelChoiceField(required=True, 14 label='Tipo de Audiência Pública', 15 queryset=TipoAudienciaPublica.objects.all().order_by('nome')) 16 17 tipo_materia = forms.ModelChoiceField( 18 label=_('Tipo Matéria'), 19 required=True, 20 queryset=TipoMateriaLegislativa.objects.all(), 21 empty_label='Selecione', 22 ) 23 24 numero_materia = forms.CharField( 25 label='Número Matéria', required=True) 26 27 ano_materia = forms.CharField( 28 label='Ano Matéria', 29 initial=int(data_atual.year), 30 required=True) 31 32 class Meta: 33 model = AudienciaPublica 34 fields = ['tipo', 'numero', 'nome', 35 'tema', 'data', 'hora_inicio', 'hora_fim', 36 'observacao', 'audiencia_cancelada', 'url_audio', 37 'url_video', 'upload_pauta', 'upload_ata', 38 'upload_anexo', 'tipo_materia', 'numero_materia', 39 'ano_materia'] 40 41 42 def __init__(self, **kwargs): 43 super(AudienciaForm, self).__init__(**kwargs) 44 45 tipos = [] 46 47 if not self.fields['tipo'].queryset: 48 tipos.append(TipoAudienciaPublica.objects.create(nome='Audiência Pública', tipo='A')) 49 tipos.append(TipoAudienciaPublica.objects.create(nome='Plebiscito', tipo='P')) 50 tipos.append(TipoAudienciaPublica.objects.create(nome='Referendo', tipo='R')) 51 tipos.append(TipoAudienciaPublica.objects.create(nome='Iniciativa Popular', tipo='I')) 52 53 for t in tipos: 54 t.save() 55 56 57 def clean(self): 58 cleaned_data = super(AudienciaForm, self).clean() 59 if not self.is_valid(): 60 return cleaned_data 61 62 try: 63 materia = MateriaLegislativa.objects.get( 64 numero=self.cleaned_data['numero_materia'], 65 ano=self.cleaned_data['ano_materia'], 66 tipo=self.cleaned_data['tipo_materia']) 67 except ObjectDoesNotExist: 68 msg = _('A matéria a ser inclusa não existe no cadastro' 69 ' de matérias legislativas.') 70 raise ValidationError(msg) 71 else: 72 cleaned_data['materia'] = materia 73 74 if self.cleaned_data['hora_inicio'] and self.cleaned_data['hora_fim']: 75 if (self.cleaned_data['hora_fim'] < 76 self.cleaned_data['hora_inicio']): 77 msg = _('A hora de fim não pode ser anterior a hora de ínicio') 78 raise ValidationError(msg) 79 80 return self.cleaned_data 81 82 @transaction.atomic() 83 def save(self, commit=True): 84 audiencia = super(AudienciaForm, self).save(commit) 85 return audiencia [end of sapl/audiencia/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sapl/audiencia/forms.py b/sapl/audiencia/forms.py --- a/sapl/audiencia/forms.py +++ b/sapl/audiencia/forms.py @@ -77,9 +77,11 @@ msg = _('A hora de fim não pode ser anterior a hora de ínicio') raise ValidationError(msg) - return self.cleaned_data + return cleaned_data @transaction.atomic() def save(self, commit=True): - audiencia = super(AudienciaForm, self).save(commit) + audiencia = super(AudienciaForm, self).save(False) + audiencia.materia = self.cleaned_data['materia'] + audiencia.save() return audiencia \ No newline at end of file
{"golden_diff": "diff --git a/sapl/audiencia/forms.py b/sapl/audiencia/forms.py\n--- a/sapl/audiencia/forms.py\n+++ b/sapl/audiencia/forms.py\n@@ -77,9 +77,11 @@\n msg = _('A hora de fim n\u00e3o pode ser anterior a hora de \u00ednicio')\n raise ValidationError(msg)\n \n- return self.cleaned_data\n+ return cleaned_data\n \n @transaction.atomic()\n def save(self, commit=True):\n- audiencia = super(AudienciaForm, self).save(commit)\n+ audiencia = super(AudienciaForm, self).save(False)\n+ audiencia.materia = self.cleaned_data['materia']\n+ audiencia.save()\n return audiencia\n\\ No newline at end of file\n", "issue": "Audi\u00eancias P\u00fablicas sem possibilidade de Edi\u00e7\u00e3o\nAo criar uma Audi\u00eancia P\u00fablica e salva-la, n\u00e3o aparecem os metadados da mat\u00e9ria legislativa inseridas no preenchimento. \r\nAo clicar em Editar, s\u00f3 aparece o t\u00edtulo da audi\u00eancia criada.\r\ngrato\n", "before_files": [{"content": "from django import forms\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.db import transaction\nfrom django.utils.translation import ugettext_lazy as _\nfrom sapl.audiencia.models import AudienciaPublica, TipoAudienciaPublica\nfrom sapl.materia.models import MateriaLegislativa, TipoMateriaLegislativa\nfrom sapl.utils import timezone\n\nclass AudienciaForm(forms.ModelForm):\n\n data_atual = timezone.now()\n\n tipo = forms.ModelChoiceField(required=True,\n label='Tipo de Audi\u00eancia P\u00fablica',\n queryset=TipoAudienciaPublica.objects.all().order_by('nome'))\n\n tipo_materia = forms.ModelChoiceField(\n label=_('Tipo Mat\u00e9ria'),\n required=True,\n queryset=TipoMateriaLegislativa.objects.all(),\n empty_label='Selecione',\n )\n\n numero_materia = forms.CharField(\n label='N\u00famero Mat\u00e9ria', required=True)\n\n ano_materia = forms.CharField(\n label='Ano Mat\u00e9ria',\n initial=int(data_atual.year),\n required=True)\n\n class Meta:\n model = AudienciaPublica\n fields = ['tipo', 'numero', 'nome',\n 'tema', 'data', 'hora_inicio', 'hora_fim',\n 'observacao', 'audiencia_cancelada', 'url_audio',\n 'url_video', 'upload_pauta', 'upload_ata',\n 'upload_anexo', 'tipo_materia', 'numero_materia',\n 'ano_materia']\n\n\n def __init__(self, **kwargs):\n super(AudienciaForm, self).__init__(**kwargs)\n\n tipos = []\n\n if not self.fields['tipo'].queryset:\n tipos.append(TipoAudienciaPublica.objects.create(nome='Audi\u00eancia P\u00fablica', tipo='A'))\n tipos.append(TipoAudienciaPublica.objects.create(nome='Plebiscito', tipo='P'))\n tipos.append(TipoAudienciaPublica.objects.create(nome='Referendo', tipo='R'))\n tipos.append(TipoAudienciaPublica.objects.create(nome='Iniciativa Popular', tipo='I'))\n\n for t in tipos:\n t.save()\n\n\n def clean(self):\n cleaned_data = super(AudienciaForm, self).clean()\n if not self.is_valid():\n return cleaned_data\n\n try:\n materia = MateriaLegislativa.objects.get(\n numero=self.cleaned_data['numero_materia'],\n ano=self.cleaned_data['ano_materia'],\n tipo=self.cleaned_data['tipo_materia'])\n except ObjectDoesNotExist:\n msg = _('A mat\u00e9ria a ser inclusa n\u00e3o existe no cadastro'\n ' de mat\u00e9rias legislativas.')\n raise ValidationError(msg)\n else:\n cleaned_data['materia'] = materia\n\n if self.cleaned_data['hora_inicio'] and self.cleaned_data['hora_fim']:\n if (self.cleaned_data['hora_fim'] <\n self.cleaned_data['hora_inicio']):\n msg = _('A hora de fim n\u00e3o pode ser anterior a hora de \u00ednicio')\n raise ValidationError(msg)\n\n return self.cleaned_data\n\n @transaction.atomic()\n def save(self, commit=True):\n audiencia = super(AudienciaForm, self).save(commit)\n return audiencia", "path": "sapl/audiencia/forms.py"}]}
1,467
170
gh_patches_debug_31210
rasdani/github-patches
git_diff
ansible__ansible-lint-480
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> E202 (no leading zero for octal permission) False positive # Issue Type - Bug report # Ansible and Ansible Lint details - ansible-lint installation method: Ansible Galaxy use of ansible-lint # Desired Behaviour ansible-lint provides correct reason to following the lint # Actual Behaviour (Bug report only) Please give some details of what is actually happening. Include a [minimum complete verifiable example](http://stackoverflow.com/help/mcve) with: - playbook - output of running ansible-lint - if you're getting a stack trace, output of `ansible-playbook --syntax-check playbook` "Numeric file permissions without leading zero can behave in unexpected ways. See http://docs.ansible.com/ansible/file_module.html" - https://docs.ansible.com/ansible-lint/rules/default_rules.html ./galaxy/downloads/tmp2REDBx/tasks/main-tasks.yml:4: [E202] Octal file permissions must contain leading zero ``` # Lets create the configuration first... # avoid locking ourself out. - name: Create firewalld config directories file: dest: "{{ item }}" state: directory mode: "750" with_items: - /etc/firewalld - /etc/firewalld/zones notify: firewalld__reload ``` It is not correct that "750" is mis-interpreted. "750" is a string, and is correctly interpreted as the octal permission number `750` i.e. `rwxr-x---`. Personally I have been using the string "750", rather than remember that YAML supports octal numeric literals using a leading zero. </issue> <code> [start of lib/ansiblelint/rules/OctalPermissionsRule.py] 1 # Copyright (c) 2013-2014 Will Thames <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to deal 5 # in the Software without restriction, including without limitation the rights 6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 # copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 # THE SOFTWARE. 20 21 from ansiblelint import AnsibleLintRule 22 import re 23 import six 24 25 26 class OctalPermissionsRule(AnsibleLintRule): 27 id = '202' 28 shortdesc = 'Octal file permissions must contain leading zero' 29 description = ( 30 'Numeric file permissions without leading zero can behave ' 31 'in unexpected ways. See ' 32 'http://docs.ansible.com/ansible/file_module.html' 33 ) 34 severity = 'VERY_HIGH' 35 tags = ['formatting', 'ANSIBLE0009'] 36 version_added = 'historic' 37 38 _modules = ['assemble', 'copy', 'file', 'ini_file', 'lineinfile', 39 'replace', 'synchronize', 'template', 'unarchive'] 40 41 mode_regex = re.compile(r'^\s*[0-9]+\s*$') 42 valid_mode_regex = re.compile(r'^\s*0[0-7]{3,4}\s*$') 43 44 def is_invalid_permission(self, mode): 45 # sensible file permission modes don't 46 # have write bit set when read bit is 47 # not set and don't have execute bit set 48 # when user execute bit is not set. 49 # also, user permissions are more generous than 50 # group permissions and user and group permissions 51 # are more generous than world permissions 52 53 other_write_without_read = (mode % 8 and mode % 8 < 4 and 54 not (mode % 8 == 1 and (mode >> 6) % 2 == 1)) 55 group_write_without_read = ((mode >> 3) % 8 and (mode >> 3) % 8 < 4 and 56 not ((mode >> 3) % 8 == 1 and (mode >> 6) % 2 == 1)) 57 user_write_without_read = ((mode >> 6) % 8 and (mode >> 6) % 8 < 4 and 58 not (mode >> 6) % 8 == 1) 59 other_more_generous_than_group = mode % 8 > (mode >> 3) % 8 60 other_more_generous_than_user = mode % 8 > (mode >> 6) % 8 61 group_more_generous_than_user = (mode >> 3) % 8 > (mode >> 6) % 8 62 63 return (other_write_without_read or 64 group_write_without_read or 65 user_write_without_read or 66 other_more_generous_than_group or 67 other_more_generous_than_user or 68 group_more_generous_than_user) 69 70 def matchtask(self, file, task): 71 if task["action"]["__ansible_module__"] in self._modules: 72 mode = task['action'].get('mode', None) 73 if isinstance(mode, six.string_types) and self.mode_regex.match(mode): 74 return not self.valid_mode_regex.match(mode) 75 if isinstance(mode, int): 76 return self.is_invalid_permission(mode) 77 [end of lib/ansiblelint/rules/OctalPermissionsRule.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/ansiblelint/rules/OctalPermissionsRule.py b/lib/ansiblelint/rules/OctalPermissionsRule.py --- a/lib/ansiblelint/rules/OctalPermissionsRule.py +++ b/lib/ansiblelint/rules/OctalPermissionsRule.py @@ -19,13 +19,12 @@ # THE SOFTWARE. from ansiblelint import AnsibleLintRule -import re import six class OctalPermissionsRule(AnsibleLintRule): id = '202' - shortdesc = 'Octal file permissions must contain leading zero' + shortdesc = 'Octal file permissions must contain leading zero or be a string' description = ( 'Numeric file permissions without leading zero can behave ' 'in unexpected ways. See ' @@ -38,9 +37,6 @@ _modules = ['assemble', 'copy', 'file', 'ini_file', 'lineinfile', 'replace', 'synchronize', 'template', 'unarchive'] - mode_regex = re.compile(r'^\s*[0-9]+\s*$') - valid_mode_regex = re.compile(r'^\s*0[0-7]{3,4}\s*$') - def is_invalid_permission(self, mode): # sensible file permission modes don't # have write bit set when read bit is @@ -70,7 +66,9 @@ def matchtask(self, file, task): if task["action"]["__ansible_module__"] in self._modules: mode = task['action'].get('mode', None) - if isinstance(mode, six.string_types) and self.mode_regex.match(mode): - return not self.valid_mode_regex.match(mode) + + if isinstance(mode, six.string_types): + return False + if isinstance(mode, int): return self.is_invalid_permission(mode)
{"golden_diff": "diff --git a/lib/ansiblelint/rules/OctalPermissionsRule.py b/lib/ansiblelint/rules/OctalPermissionsRule.py\n--- a/lib/ansiblelint/rules/OctalPermissionsRule.py\n+++ b/lib/ansiblelint/rules/OctalPermissionsRule.py\n@@ -19,13 +19,12 @@\n # THE SOFTWARE.\n \n from ansiblelint import AnsibleLintRule\n-import re\n import six\n \n \n class OctalPermissionsRule(AnsibleLintRule):\n id = '202'\n- shortdesc = 'Octal file permissions must contain leading zero'\n+ shortdesc = 'Octal file permissions must contain leading zero or be a string'\n description = (\n 'Numeric file permissions without leading zero can behave '\n 'in unexpected ways. See '\n@@ -38,9 +37,6 @@\n _modules = ['assemble', 'copy', 'file', 'ini_file', 'lineinfile',\n 'replace', 'synchronize', 'template', 'unarchive']\n \n- mode_regex = re.compile(r'^\\s*[0-9]+\\s*$')\n- valid_mode_regex = re.compile(r'^\\s*0[0-7]{3,4}\\s*$')\n-\n def is_invalid_permission(self, mode):\n # sensible file permission modes don't\n # have write bit set when read bit is\n@@ -70,7 +66,9 @@\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] in self._modules:\n mode = task['action'].get('mode', None)\n- if isinstance(mode, six.string_types) and self.mode_regex.match(mode):\n- return not self.valid_mode_regex.match(mode)\n+\n+ if isinstance(mode, six.string_types):\n+ return False\n+\n if isinstance(mode, int):\n return self.is_invalid_permission(mode)\n", "issue": "E202 (no leading zero for octal permission) False positive\n# Issue Type\r\n- Bug report\r\n\r\n# Ansible and Ansible Lint details\r\n\r\n- ansible-lint installation method: Ansible Galaxy use of ansible-lint\r\n\r\n# Desired Behaviour\r\n\r\nansible-lint provides correct reason to following the lint\r\n\r\n# Actual Behaviour (Bug report only)\r\n\r\nPlease give some details of what is actually happening.\r\nInclude a [minimum complete verifiable example](http://stackoverflow.com/help/mcve)\r\nwith:\r\n- playbook\r\n- output of running ansible-lint\r\n- if you're getting a stack trace, output of\r\n `ansible-playbook --syntax-check playbook`\r\n\r\n\"Numeric file permissions without leading zero can behave in unexpected ways. See http://docs.ansible.com/ansible/file_module.html\" - https://docs.ansible.com/ansible-lint/rules/default_rules.html\r\n\r\n./galaxy/downloads/tmp2REDBx/tasks/main-tasks.yml:4: [E202] Octal file permissions must contain leading zero\r\n\r\n```\r\n# Lets create the configuration first...\r\n# avoid locking ourself out.\r\n\r\n- name: Create firewalld config directories\r\n file:\r\n dest: \"{{ item }}\"\r\n state: directory\r\n mode: \"750\"\r\n with_items:\r\n - /etc/firewalld\r\n - /etc/firewalld/zones\r\n notify: firewalld__reload\r\n```\r\n\r\nIt is not correct that \"750\" is mis-interpreted. \"750\" is a string, and is correctly interpreted as the octal permission number `750` i.e. `rwxr-x---`.\r\n\r\nPersonally I have been using the string \"750\", rather than remember that YAML supports octal numeric literals using a leading zero.\n", "before_files": [{"content": "# Copyright (c) 2013-2014 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom ansiblelint import AnsibleLintRule\nimport re\nimport six\n\n\nclass OctalPermissionsRule(AnsibleLintRule):\n id = '202'\n shortdesc = 'Octal file permissions must contain leading zero'\n description = (\n 'Numeric file permissions without leading zero can behave '\n 'in unexpected ways. See '\n 'http://docs.ansible.com/ansible/file_module.html'\n )\n severity = 'VERY_HIGH'\n tags = ['formatting', 'ANSIBLE0009']\n version_added = 'historic'\n\n _modules = ['assemble', 'copy', 'file', 'ini_file', 'lineinfile',\n 'replace', 'synchronize', 'template', 'unarchive']\n\n mode_regex = re.compile(r'^\\s*[0-9]+\\s*$')\n valid_mode_regex = re.compile(r'^\\s*0[0-7]{3,4}\\s*$')\n\n def is_invalid_permission(self, mode):\n # sensible file permission modes don't\n # have write bit set when read bit is\n # not set and don't have execute bit set\n # when user execute bit is not set.\n # also, user permissions are more generous than\n # group permissions and user and group permissions\n # are more generous than world permissions\n\n other_write_without_read = (mode % 8 and mode % 8 < 4 and\n not (mode % 8 == 1 and (mode >> 6) % 2 == 1))\n group_write_without_read = ((mode >> 3) % 8 and (mode >> 3) % 8 < 4 and\n not ((mode >> 3) % 8 == 1 and (mode >> 6) % 2 == 1))\n user_write_without_read = ((mode >> 6) % 8 and (mode >> 6) % 8 < 4 and\n not (mode >> 6) % 8 == 1)\n other_more_generous_than_group = mode % 8 > (mode >> 3) % 8\n other_more_generous_than_user = mode % 8 > (mode >> 6) % 8\n group_more_generous_than_user = (mode >> 3) % 8 > (mode >> 6) % 8\n\n return (other_write_without_read or\n group_write_without_read or\n user_write_without_read or\n other_more_generous_than_group or\n other_more_generous_than_user or\n group_more_generous_than_user)\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] in self._modules:\n mode = task['action'].get('mode', None)\n if isinstance(mode, six.string_types) and self.mode_regex.match(mode):\n return not self.valid_mode_regex.match(mode)\n if isinstance(mode, int):\n return self.is_invalid_permission(mode)\n", "path": "lib/ansiblelint/rules/OctalPermissionsRule.py"}]}
1,943
399
gh_patches_debug_32269
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-491
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add mypy plugin for strawberry.union We have a function to create union types and also add name and description that are used when generating the GraphQL schema[1]. Unfortunately MyPy complains when using the annotation, since it doesn't recognise it as a type, here's an example: ```python ExampleUnion = strawberry.union( "ExampleUnion", types=( ABC, CDE, ), ) @strawberry.mutation def abc(phone_number: str) -> ExampleUnion: # ... ``` Error: ``` Variable "api.mutations.abc.ExampleUnion" is not valid as a type ``` [1] This is why we can't use typing.Union directly, as we can't pass arguments to it </issue> <code> [start of strawberry/ext/mypy_plugin.py] 1 from typing import Callable, Optional 2 3 from mypy.plugin import AnalyzeTypeContext, ClassDefContext, Plugin 4 from mypy.plugins import dataclasses 5 from mypy.types import Type 6 7 8 def lazy_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type: 9 type_name = ctx.type.args[0] 10 type_ = ctx.api.analyze_type(type_name) 11 12 return type_ 13 14 15 def private_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type: 16 type_name = ctx.type.args[0] 17 type_ = ctx.api.analyze_type(type_name) 18 19 return type_ 20 21 22 class StrawberryPlugin(Plugin): 23 def get_type_analyze_hook(self, fullname: str): 24 if fullname == "strawberry.lazy_type.LazyType": 25 return lazy_type_analyze_callback 26 27 if any( 28 name in fullname 29 for name in {"strawberry.private.Private", "strawberry.Private"} 30 ): 31 return private_type_analyze_callback 32 33 return None 34 35 def get_class_decorator_hook( 36 self, fullname: str 37 ) -> Optional[Callable[[ClassDefContext], None]]: 38 if any( 39 strawberry_decorator in fullname 40 for strawberry_decorator in { 41 "strawberry.type", 42 "strawberry.federation.type", 43 "strawberry.input", 44 "strawberry.interface", 45 } 46 ): 47 return dataclasses.dataclass_class_maker_callback 48 return None 49 50 51 def plugin(version: str): 52 return StrawberryPlugin 53 [end of strawberry/ext/mypy_plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strawberry/ext/mypy_plugin.py b/strawberry/ext/mypy_plugin.py --- a/strawberry/ext/mypy_plugin.py +++ b/strawberry/ext/mypy_plugin.py @@ -1,8 +1,23 @@ from typing import Callable, Optional -from mypy.plugin import AnalyzeTypeContext, ClassDefContext, Plugin +from mypy.nodes import ( + GDEF, + Expression, + IndexExpr, + NameExpr, + SymbolTableNode, + TupleExpr, + TypeAlias, +) +from mypy.plugin import ( + AnalyzeTypeContext, + ClassDefContext, + DynamicClassDefContext, + Plugin, + SemanticAnalyzerPluginInterface, +) from mypy.plugins import dataclasses -from mypy.types import Type +from mypy.types import Type, UnionType def lazy_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type: @@ -19,7 +34,48 @@ return type_ +def _get_type_for_expr(expr: Expression, api: SemanticAnalyzerPluginInterface): + if isinstance(expr, NameExpr): + return api.named_type(expr.name) + + if isinstance(expr, IndexExpr): + type_ = _get_type_for_expr(expr.base, api) + type_.args = [_get_type_for_expr(expr.index, api)] + + return type_ + + raise ValueError(f"Unsupported expression f{type(expr)}") + + +def union_hook(ctx: DynamicClassDefContext) -> None: + types = ctx.call.args[1] + + if isinstance(types, TupleExpr): + type_ = UnionType(tuple(_get_type_for_expr(x, ctx.api) for x in types.items)) + + type_alias = TypeAlias( + type_, + fullname=ctx.api.qualified_name(ctx.name), + line=ctx.call.line, + column=ctx.call.column, + ) + + ctx.api.add_symbol_table_node( + ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False) + ) + + class StrawberryPlugin(Plugin): + def get_dynamic_class_hook( + self, fullname: str + ) -> Optional[Callable[[DynamicClassDefContext], None]]: + # TODO: investigate why we need this instead of `strawberry.union.union` on CI + # we have the same issue in the other hooks + if "strawberry.union" in fullname: + return union_hook + + return None + def get_type_analyze_hook(self, fullname: str): if fullname == "strawberry.lazy_type.LazyType": return lazy_type_analyze_callback
{"golden_diff": "diff --git a/strawberry/ext/mypy_plugin.py b/strawberry/ext/mypy_plugin.py\n--- a/strawberry/ext/mypy_plugin.py\n+++ b/strawberry/ext/mypy_plugin.py\n@@ -1,8 +1,23 @@\n from typing import Callable, Optional\n \n-from mypy.plugin import AnalyzeTypeContext, ClassDefContext, Plugin\n+from mypy.nodes import (\n+ GDEF,\n+ Expression,\n+ IndexExpr,\n+ NameExpr,\n+ SymbolTableNode,\n+ TupleExpr,\n+ TypeAlias,\n+)\n+from mypy.plugin import (\n+ AnalyzeTypeContext,\n+ ClassDefContext,\n+ DynamicClassDefContext,\n+ Plugin,\n+ SemanticAnalyzerPluginInterface,\n+)\n from mypy.plugins import dataclasses\n-from mypy.types import Type\n+from mypy.types import Type, UnionType\n \n \n def lazy_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:\n@@ -19,7 +34,48 @@\n return type_\n \n \n+def _get_type_for_expr(expr: Expression, api: SemanticAnalyzerPluginInterface):\n+ if isinstance(expr, NameExpr):\n+ return api.named_type(expr.name)\n+\n+ if isinstance(expr, IndexExpr):\n+ type_ = _get_type_for_expr(expr.base, api)\n+ type_.args = [_get_type_for_expr(expr.index, api)]\n+\n+ return type_\n+\n+ raise ValueError(f\"Unsupported expression f{type(expr)}\")\n+\n+\n+def union_hook(ctx: DynamicClassDefContext) -> None:\n+ types = ctx.call.args[1]\n+\n+ if isinstance(types, TupleExpr):\n+ type_ = UnionType(tuple(_get_type_for_expr(x, ctx.api) for x in types.items))\n+\n+ type_alias = TypeAlias(\n+ type_,\n+ fullname=ctx.api.qualified_name(ctx.name),\n+ line=ctx.call.line,\n+ column=ctx.call.column,\n+ )\n+\n+ ctx.api.add_symbol_table_node(\n+ ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)\n+ )\n+\n+\n class StrawberryPlugin(Plugin):\n+ def get_dynamic_class_hook(\n+ self, fullname: str\n+ ) -> Optional[Callable[[DynamicClassDefContext], None]]:\n+ # TODO: investigate why we need this instead of `strawberry.union.union` on CI\n+ # we have the same issue in the other hooks\n+ if \"strawberry.union\" in fullname:\n+ return union_hook\n+\n+ return None\n+\n def get_type_analyze_hook(self, fullname: str):\n if fullname == \"strawberry.lazy_type.LazyType\":\n return lazy_type_analyze_callback\n", "issue": "Add mypy plugin for strawberry.union\nWe have a function to create union types and also add name and description that are used when generating the GraphQL schema[1].\r\n\r\nUnfortunately MyPy complains when using the annotation, since it doesn't recognise it as a type, here's an example:\r\n\r\n```python\r\nExampleUnion = strawberry.union(\r\n \"ExampleUnion\",\r\n types=(\r\n ABC,\r\n CDE,\r\n ),\r\n)\r\n\r\[email protected]\r\ndef abc(phone_number: str) -> ExampleUnion:\r\n # ...\r\n```\r\n\r\nError:\r\n\r\n```\r\nVariable \"api.mutations.abc.ExampleUnion\" is not valid as a type\r\n```\r\n\r\n[1] This is why we can't use typing.Union directly, as we can't pass arguments to it\n", "before_files": [{"content": "from typing import Callable, Optional\n\nfrom mypy.plugin import AnalyzeTypeContext, ClassDefContext, Plugin\nfrom mypy.plugins import dataclasses\nfrom mypy.types import Type\n\n\ndef lazy_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:\n type_name = ctx.type.args[0]\n type_ = ctx.api.analyze_type(type_name)\n\n return type_\n\n\ndef private_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:\n type_name = ctx.type.args[0]\n type_ = ctx.api.analyze_type(type_name)\n\n return type_\n\n\nclass StrawberryPlugin(Plugin):\n def get_type_analyze_hook(self, fullname: str):\n if fullname == \"strawberry.lazy_type.LazyType\":\n return lazy_type_analyze_callback\n\n if any(\n name in fullname\n for name in {\"strawberry.private.Private\", \"strawberry.Private\"}\n ):\n return private_type_analyze_callback\n\n return None\n\n def get_class_decorator_hook(\n self, fullname: str\n ) -> Optional[Callable[[ClassDefContext], None]]:\n if any(\n strawberry_decorator in fullname\n for strawberry_decorator in {\n \"strawberry.type\",\n \"strawberry.federation.type\",\n \"strawberry.input\",\n \"strawberry.interface\",\n }\n ):\n return dataclasses.dataclass_class_maker_callback\n return None\n\n\ndef plugin(version: str):\n return StrawberryPlugin\n", "path": "strawberry/ext/mypy_plugin.py"}]}
1,111
595
gh_patches_debug_4568
rasdani/github-patches
git_diff
medtagger__MedTagger-466
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add information about Dataset to Tasks endpoint ## Current Behavior Tasks endpoint returns list of all Tasks **without** information about their Datasets. ## Expected Behavior Each Task returned by this endpoint should also contain information about its Dataset. ## Tasks to do - [ ] Add Dataset key to the Task returned by Tasks endpoint. ## Additional comment Dataset key will be fine only if there will be an endpoint that will return all Datasets available in MedTagger. </issue> <code> [start of backend/medtagger/api/tasks/serializers.py] 1 """Module responsible for storage of serializers used in Tasks endpoints.""" 2 from flask_restplus import fields 3 4 from medtagger.api import api 5 from medtagger.definitions import LabelTool 6 7 out__label_tag = api.model('Label Tag model', { 8 'key': fields.String(), 9 'name': fields.String(), 10 'actions_ids': fields.List(fields.Integer(), 11 attribute=lambda label_tag: [action.id for action in label_tag.actions]), 12 'tools': fields.List(fields.String(), description='Available tools for Label Tag', 13 enum=[tool.name for tool in LabelTool], 14 attribute=lambda label_tag: [tool.name for tool in label_tag.tools]), 15 }) 16 17 in__label_tag = api.model('Label Tag model', { 18 'key': fields.String(), 19 'name': fields.String(), 20 'actions_ids': fields.List(fields.Integer()), 21 'tools': fields.List(fields.String(), description='Available tools for Label Tag', 22 enum=[tool.name for tool in LabelTool]), 23 }) 24 25 out__task = api.model('Task model', { 26 'key': fields.String(), 27 'name': fields.String(), 28 'image_path': fields.String(), 29 'tags': fields.List(fields.Nested(out__label_tag), attribute='available_tags'), 30 }) 31 32 in__task = api.model('New Task model', { 33 'key': fields.String(), 34 'name': fields.String(), 35 'image_path': fields.String(), 36 'datasets_keys': fields.List(fields.String()), 37 'tags': fields.List(fields.Nested(in__label_tag), attribute='available_tags'), 38 }) 39 [end of backend/medtagger/api/tasks/serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend/medtagger/api/tasks/serializers.py b/backend/medtagger/api/tasks/serializers.py --- a/backend/medtagger/api/tasks/serializers.py +++ b/backend/medtagger/api/tasks/serializers.py @@ -27,6 +27,7 @@ 'name': fields.String(), 'image_path': fields.String(), 'tags': fields.List(fields.Nested(out__label_tag), attribute='available_tags'), + 'datasets_keys': fields.List(fields.String(), attribute=lambda task: [dataset.key for dataset in task.datasets]), }) in__task = api.model('New Task model', {
{"golden_diff": "diff --git a/backend/medtagger/api/tasks/serializers.py b/backend/medtagger/api/tasks/serializers.py\n--- a/backend/medtagger/api/tasks/serializers.py\n+++ b/backend/medtagger/api/tasks/serializers.py\n@@ -27,6 +27,7 @@\n 'name': fields.String(),\n 'image_path': fields.String(),\n 'tags': fields.List(fields.Nested(out__label_tag), attribute='available_tags'),\n+ 'datasets_keys': fields.List(fields.String(), attribute=lambda task: [dataset.key for dataset in task.datasets]),\n })\n \n in__task = api.model('New Task model', {\n", "issue": "Add information about Dataset to Tasks endpoint\n## Current Behavior\r\n\r\nTasks endpoint returns list of all Tasks **without** information about their Datasets.\r\n\r\n## Expected Behavior\r\n\r\nEach Task returned by this endpoint should also contain information about its Dataset.\r\n\r\n## Tasks to do\r\n\r\n- [ ] Add Dataset key to the Task returned by Tasks endpoint.\r\n\r\n## Additional comment\r\n\r\nDataset key will be fine only if there will be an endpoint that will return all Datasets available in MedTagger.\n", "before_files": [{"content": "\"\"\"Module responsible for storage of serializers used in Tasks endpoints.\"\"\"\nfrom flask_restplus import fields\n\nfrom medtagger.api import api\nfrom medtagger.definitions import LabelTool\n\nout__label_tag = api.model('Label Tag model', {\n 'key': fields.String(),\n 'name': fields.String(),\n 'actions_ids': fields.List(fields.Integer(),\n attribute=lambda label_tag: [action.id for action in label_tag.actions]),\n 'tools': fields.List(fields.String(), description='Available tools for Label Tag',\n enum=[tool.name for tool in LabelTool],\n attribute=lambda label_tag: [tool.name for tool in label_tag.tools]),\n})\n\nin__label_tag = api.model('Label Tag model', {\n 'key': fields.String(),\n 'name': fields.String(),\n 'actions_ids': fields.List(fields.Integer()),\n 'tools': fields.List(fields.String(), description='Available tools for Label Tag',\n enum=[tool.name for tool in LabelTool]),\n})\n\nout__task = api.model('Task model', {\n 'key': fields.String(),\n 'name': fields.String(),\n 'image_path': fields.String(),\n 'tags': fields.List(fields.Nested(out__label_tag), attribute='available_tags'),\n})\n\nin__task = api.model('New Task model', {\n 'key': fields.String(),\n 'name': fields.String(),\n 'image_path': fields.String(),\n 'datasets_keys': fields.List(fields.String()),\n 'tags': fields.List(fields.Nested(in__label_tag), attribute='available_tags'),\n})\n", "path": "backend/medtagger/api/tasks/serializers.py"}]}
1,033
141
gh_patches_debug_29120
rasdani/github-patches
git_diff
OCA__social-262
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [10.0] mail_sendrid, Error: No module named simplejson > File ".../addons/oca-social/mail_sendgrid/controllers/json_request.py", line 4, in <module> > import simplejson > ImportError: No module named simplejson I put `simplejson` into file requirements.txt and it solved this issue. </issue> <code> [start of mail_sendgrid/controllers/json_request.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2016-2017 Compassion CH (http://www.compassion.ch) 3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). 4 import simplejson 5 6 from odoo.http import JsonRequest, Root, Response 7 8 # Monkeypatch type of request rooter to use RESTJsonRequest 9 old_get_request = Root.get_request 10 11 12 def get_request(self, httprequest): 13 if (httprequest.mimetype == "application/json" and 14 httprequest.environ['PATH_INFO'].startswith('/mail')): 15 return RESTJsonRequest(httprequest) 16 return old_get_request(self, httprequest) 17 18 19 Root.get_request = get_request 20 21 22 class RESTJsonRequest(JsonRequest): 23 """ Special RestJson Handler to enable receiving lists in JSON 24 body 25 """ 26 def __init__(self, *args): 27 try: 28 super(RESTJsonRequest, self).__init__(*args) 29 except AttributeError: 30 # The JSON may contain a list 31 self.params = dict() 32 self.context = dict(self.session.context) 33 34 def _json_response(self, result=None, error=None): 35 response = {} 36 if error is not None: 37 response['error'] = error 38 if result is not None: 39 response['result'] = result 40 41 mime = 'application/json' 42 body = simplejson.dumps(response) 43 44 return Response( 45 body, headers=[('Content-Type', mime), 46 ('Content-Length', len(body))]) 47 [end of mail_sendgrid/controllers/json_request.py] [start of mail_sendgrid/__manifest__.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2015-2017 Compassion CH (http://www.compassion.ch) 3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). 4 { 5 'name': 'SendGrid', 6 'version': '10.0.1.0.0', 7 'category': 'Social Network', 8 'author': 'Compassion CH, Odoo Community Association (OCA)', 9 'license': 'AGPL-3', 10 'website': 'https://github.com/OCA/social', 11 'depends': ['mail_tracking'], 12 'data': [ 13 'security/ir.model.access.csv', 14 'views/sendgrid_email_view.xml', 15 'views/sendgrid_template_view.xml', 16 'views/mail_compose_message_view.xml', 17 'views/email_template_view.xml', 18 ], 19 'demo': [], 20 'installable': True, 21 'auto_install': False, 22 'external_dependencies': { 23 'python': ['sendgrid'], 24 }, 25 } 26 [end of mail_sendgrid/__manifest__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mail_sendgrid/__manifest__.py b/mail_sendgrid/__manifest__.py --- a/mail_sendgrid/__manifest__.py +++ b/mail_sendgrid/__manifest__.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- -# Copyright 2015-2017 Compassion CH (http://www.compassion.ch) +# Copyright 2015-2018 Compassion CH (http://www.compassion.ch) # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). { 'name': 'SendGrid', - 'version': '10.0.1.0.0', + 'version': '10.0.1.0.1', 'category': 'Social Network', 'author': 'Compassion CH, Odoo Community Association (OCA)', 'license': 'AGPL-3', @@ -20,6 +20,6 @@ 'installable': True, 'auto_install': False, 'external_dependencies': { - 'python': ['sendgrid'], + 'python': ['sendgrid', 'simplejson'], }, } diff --git a/mail_sendgrid/controllers/json_request.py b/mail_sendgrid/controllers/json_request.py --- a/mail_sendgrid/controllers/json_request.py +++ b/mail_sendgrid/controllers/json_request.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright 2016-2017 Compassion CH (http://www.compassion.ch) # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). -import simplejson - +import logging from odoo.http import JsonRequest, Root, Response # Monkeypatch type of request rooter to use RESTJsonRequest old_get_request = Root.get_request +_logger = logging.getLogger(__name__) + +try: + import simplejson +except ImportError: + _logger.error("Please install simplejson tu use mail_sendgrid module") + _logger.debug("ImportError details:", exc_info=True) def get_request(self, httprequest):
{"golden_diff": "diff --git a/mail_sendgrid/__manifest__.py b/mail_sendgrid/__manifest__.py\n--- a/mail_sendgrid/__manifest__.py\n+++ b/mail_sendgrid/__manifest__.py\n@@ -1,9 +1,9 @@\n # -*- coding: utf-8 -*-\n-# Copyright 2015-2017 Compassion CH (http://www.compassion.ch)\n+# Copyright 2015-2018 Compassion CH (http://www.compassion.ch)\n # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n {\n 'name': 'SendGrid',\n- 'version': '10.0.1.0.0',\n+ 'version': '10.0.1.0.1',\n 'category': 'Social Network',\n 'author': 'Compassion CH, Odoo Community Association (OCA)',\n 'license': 'AGPL-3',\n@@ -20,6 +20,6 @@\n 'installable': True,\n 'auto_install': False,\n 'external_dependencies': {\n- 'python': ['sendgrid'],\n+ 'python': ['sendgrid', 'simplejson'],\n },\n }\ndiff --git a/mail_sendgrid/controllers/json_request.py b/mail_sendgrid/controllers/json_request.py\n--- a/mail_sendgrid/controllers/json_request.py\n+++ b/mail_sendgrid/controllers/json_request.py\n@@ -1,12 +1,18 @@\n # -*- coding: utf-8 -*-\n # Copyright 2016-2017 Compassion CH (http://www.compassion.ch)\n # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n-import simplejson\n-\n+import logging\n from odoo.http import JsonRequest, Root, Response\n \n # Monkeypatch type of request rooter to use RESTJsonRequest\n old_get_request = Root.get_request\n+_logger = logging.getLogger(__name__)\n+\n+try:\n+ import simplejson\n+except ImportError:\n+ _logger.error(\"Please install simplejson tu use mail_sendgrid module\")\n+ _logger.debug(\"ImportError details:\", exc_info=True)\n \n \n def get_request(self, httprequest):\n", "issue": "[10.0] mail_sendrid, Error: No module named simplejson\n> File \".../addons/oca-social/mail_sendgrid/controllers/json_request.py\", line 4, in <module>\r\n> import simplejson\r\n> ImportError: No module named simplejson\r\n\r\nI put `simplejson` into file requirements.txt and it solved this issue.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016-2017 Compassion CH (http://www.compassion.ch)\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\nimport simplejson\n\nfrom odoo.http import JsonRequest, Root, Response\n\n# Monkeypatch type of request rooter to use RESTJsonRequest\nold_get_request = Root.get_request\n\n\ndef get_request(self, httprequest):\n if (httprequest.mimetype == \"application/json\" and\n httprequest.environ['PATH_INFO'].startswith('/mail')):\n return RESTJsonRequest(httprequest)\n return old_get_request(self, httprequest)\n\n\nRoot.get_request = get_request\n\n\nclass RESTJsonRequest(JsonRequest):\n \"\"\" Special RestJson Handler to enable receiving lists in JSON\n body\n \"\"\"\n def __init__(self, *args):\n try:\n super(RESTJsonRequest, self).__init__(*args)\n except AttributeError:\n # The JSON may contain a list\n self.params = dict()\n self.context = dict(self.session.context)\n\n def _json_response(self, result=None, error=None):\n response = {}\n if error is not None:\n response['error'] = error\n if result is not None:\n response['result'] = result\n\n mime = 'application/json'\n body = simplejson.dumps(response)\n\n return Response(\n body, headers=[('Content-Type', mime),\n ('Content-Length', len(body))])\n", "path": "mail_sendgrid/controllers/json_request.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright 2015-2017 Compassion CH (http://www.compassion.ch)\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n{\n 'name': 'SendGrid',\n 'version': '10.0.1.0.0',\n 'category': 'Social Network',\n 'author': 'Compassion CH, Odoo Community Association (OCA)',\n 'license': 'AGPL-3',\n 'website': 'https://github.com/OCA/social',\n 'depends': ['mail_tracking'],\n 'data': [\n 'security/ir.model.access.csv',\n 'views/sendgrid_email_view.xml',\n 'views/sendgrid_template_view.xml',\n 'views/mail_compose_message_view.xml',\n 'views/email_template_view.xml',\n ],\n 'demo': [],\n 'installable': True,\n 'auto_install': False,\n 'external_dependencies': {\n 'python': ['sendgrid'],\n },\n}\n", "path": "mail_sendgrid/__manifest__.py"}]}
1,308
472
gh_patches_debug_27142
rasdani/github-patches
git_diff
Netflix__lemur-302
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Chain Certificate is not exporting Hi Team, While extracting .p12 formatted file (which was exported from Lemur) we cannot find the Chain file on the same. Could you please let us know if we need to perform any additional step to download the .p12 along with the chain. Thanks, Akash John </issue> <code> [start of lemur/plugins/lemur_openssl/plugin.py] 1 """ 2 .. module: lemur.plugins.lemur_openssl.plugin 3 :platform: Unix 4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more 5 :license: Apache, see LICENSE for more details. 6 7 .. moduleauthor:: Kevin Glisson <[email protected]> 8 """ 9 import subprocess 10 11 from flask import current_app 12 13 from lemur.utils import mktempfile, mktemppath 14 from lemur.plugins.bases import ExportPlugin 15 from lemur.plugins import lemur_openssl as openssl 16 from lemur.common.utils import get_psuedo_random_string 17 18 19 def run_process(command): 20 """ 21 Runs a given command with pOpen and wraps some 22 error handling around it. 23 :param command: 24 :return: 25 """ 26 p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 27 current_app.logger.debug(command) 28 stdout, stderr = p.communicate() 29 30 if p.returncode != 0: 31 current_app.logger.debug(" ".join(command)) 32 current_app.logger.error(stderr) 33 raise Exception(stderr) 34 35 36 def create_pkcs12(cert, p12_tmp, key, alias, passphrase): 37 """ 38 Creates a pkcs12 formated file. 39 :param cert: 40 :param jks_tmp: 41 :param key: 42 :param alias: 43 :param passphrase: 44 """ 45 with mktempfile() as key_tmp: 46 with open(key_tmp, 'w') as f: 47 f.write(key) 48 49 # Create PKCS12 keystore from private key and public certificate 50 with mktempfile() as cert_tmp: 51 with open(cert_tmp, 'w') as f: 52 f.write(cert) 53 54 run_process([ 55 "openssl", 56 "pkcs12", 57 "-export", 58 "-name", alias, 59 "-in", cert_tmp, 60 "-inkey", key_tmp, 61 "-out", p12_tmp, 62 "-password", "pass:{}".format(passphrase) 63 ]) 64 65 66 class OpenSSLExportPlugin(ExportPlugin): 67 title = 'OpenSSL' 68 slug = 'openssl-export' 69 description = 'Is a loose interface to openssl and support various formats' 70 version = openssl.VERSION 71 72 author = 'Kevin Glisson' 73 author_url = 'https://github.com/netflix/lemur' 74 75 options = [ 76 { 77 'name': 'type', 78 'type': 'select', 79 'required': True, 80 'available': ['PKCS12 (.p12)'], 81 'helpMessage': 'Choose the format you wish to export', 82 }, 83 { 84 'name': 'passphrase', 85 'type': 'str', 86 'required': False, 87 'helpMessage': 'If no passphrase is given one will be generated for you, we highly recommend this. Minimum length is 8.', 88 'validation': '' 89 }, 90 { 91 'name': 'alias', 92 'type': 'str', 93 'required': False, 94 'helpMessage': 'Enter the alias you wish to use for the keystore.', 95 } 96 ] 97 98 def export(self, body, chain, key, options, **kwargs): 99 """ 100 Generates a Java Keystore or Truststore 101 102 :param key: 103 :param chain: 104 :param body: 105 :param options: 106 :param kwargs: 107 """ 108 if self.get_option('passphrase', options): 109 passphrase = self.get_option('passphrase', options) 110 else: 111 passphrase = get_psuedo_random_string() 112 113 if self.get_option('alias', options): 114 alias = self.get_option('alias', options) 115 else: 116 alias = "blah" 117 118 type = self.get_option('type', options) 119 120 with mktemppath() as output_tmp: 121 if type == 'PKCS12 (.p12)': 122 create_pkcs12(body, output_tmp, key, alias, passphrase) 123 extension = "p12" 124 else: 125 raise Exception("Unable to export, unsupported type: {0}".format(type)) 126 127 with open(output_tmp, 'rb') as f: 128 raw = f.read() 129 130 return extension, passphrase, raw 131 [end of lemur/plugins/lemur_openssl/plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lemur/plugins/lemur_openssl/plugin.py b/lemur/plugins/lemur_openssl/plugin.py --- a/lemur/plugins/lemur_openssl/plugin.py +++ b/lemur/plugins/lemur_openssl/plugin.py @@ -33,11 +33,12 @@ raise Exception(stderr) -def create_pkcs12(cert, p12_tmp, key, alias, passphrase): +def create_pkcs12(cert, chain, p12_tmp, key, alias, passphrase): """ Creates a pkcs12 formated file. :param cert: - :param jks_tmp: + :param chain: + :param p12_tmp: :param key: :param alias: :param passphrase: @@ -49,7 +50,7 @@ # Create PKCS12 keystore from private key and public certificate with mktempfile() as cert_tmp: with open(cert_tmp, 'w') as f: - f.write(cert) + f.writelines([cert + "\n", chain + "\n"]) run_process([ "openssl", @@ -119,7 +120,7 @@ with mktemppath() as output_tmp: if type == 'PKCS12 (.p12)': - create_pkcs12(body, output_tmp, key, alias, passphrase) + create_pkcs12(body, chain, output_tmp, key, alias, passphrase) extension = "p12" else: raise Exception("Unable to export, unsupported type: {0}".format(type))
{"golden_diff": "diff --git a/lemur/plugins/lemur_openssl/plugin.py b/lemur/plugins/lemur_openssl/plugin.py\n--- a/lemur/plugins/lemur_openssl/plugin.py\n+++ b/lemur/plugins/lemur_openssl/plugin.py\n@@ -33,11 +33,12 @@\n raise Exception(stderr)\n \n \n-def create_pkcs12(cert, p12_tmp, key, alias, passphrase):\n+def create_pkcs12(cert, chain, p12_tmp, key, alias, passphrase):\n \"\"\"\n Creates a pkcs12 formated file.\n :param cert:\n- :param jks_tmp:\n+ :param chain:\n+ :param p12_tmp:\n :param key:\n :param alias:\n :param passphrase:\n@@ -49,7 +50,7 @@\n # Create PKCS12 keystore from private key and public certificate\n with mktempfile() as cert_tmp:\n with open(cert_tmp, 'w') as f:\n- f.write(cert)\n+ f.writelines([cert + \"\\n\", chain + \"\\n\"])\n \n run_process([\n \"openssl\",\n@@ -119,7 +120,7 @@\n \n with mktemppath() as output_tmp:\n if type == 'PKCS12 (.p12)':\n- create_pkcs12(body, output_tmp, key, alias, passphrase)\n+ create_pkcs12(body, chain, output_tmp, key, alias, passphrase)\n extension = \"p12\"\n else:\n raise Exception(\"Unable to export, unsupported type: {0}\".format(type))\n", "issue": "Chain Certificate is not exporting\nHi Team,\n\nWhile extracting .p12 formatted file (which was exported from Lemur) we cannot find the Chain file on the same. Could you please let us know if we need to perform any additional step to download the .p12 along with the chain. \n\nThanks,\nAkash John\n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.plugins.lemur_openssl.plugin\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nimport subprocess\n\nfrom flask import current_app\n\nfrom lemur.utils import mktempfile, mktemppath\nfrom lemur.plugins.bases import ExportPlugin\nfrom lemur.plugins import lemur_openssl as openssl\nfrom lemur.common.utils import get_psuedo_random_string\n\n\ndef run_process(command):\n \"\"\"\n Runs a given command with pOpen and wraps some\n error handling around it.\n :param command:\n :return:\n \"\"\"\n p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n current_app.logger.debug(command)\n stdout, stderr = p.communicate()\n\n if p.returncode != 0:\n current_app.logger.debug(\" \".join(command))\n current_app.logger.error(stderr)\n raise Exception(stderr)\n\n\ndef create_pkcs12(cert, p12_tmp, key, alias, passphrase):\n \"\"\"\n Creates a pkcs12 formated file.\n :param cert:\n :param jks_tmp:\n :param key:\n :param alias:\n :param passphrase:\n \"\"\"\n with mktempfile() as key_tmp:\n with open(key_tmp, 'w') as f:\n f.write(key)\n\n # Create PKCS12 keystore from private key and public certificate\n with mktempfile() as cert_tmp:\n with open(cert_tmp, 'w') as f:\n f.write(cert)\n\n run_process([\n \"openssl\",\n \"pkcs12\",\n \"-export\",\n \"-name\", alias,\n \"-in\", cert_tmp,\n \"-inkey\", key_tmp,\n \"-out\", p12_tmp,\n \"-password\", \"pass:{}\".format(passphrase)\n ])\n\n\nclass OpenSSLExportPlugin(ExportPlugin):\n title = 'OpenSSL'\n slug = 'openssl-export'\n description = 'Is a loose interface to openssl and support various formats'\n version = openssl.VERSION\n\n author = 'Kevin Glisson'\n author_url = 'https://github.com/netflix/lemur'\n\n options = [\n {\n 'name': 'type',\n 'type': 'select',\n 'required': True,\n 'available': ['PKCS12 (.p12)'],\n 'helpMessage': 'Choose the format you wish to export',\n },\n {\n 'name': 'passphrase',\n 'type': 'str',\n 'required': False,\n 'helpMessage': 'If no passphrase is given one will be generated for you, we highly recommend this. Minimum length is 8.',\n 'validation': ''\n },\n {\n 'name': 'alias',\n 'type': 'str',\n 'required': False,\n 'helpMessage': 'Enter the alias you wish to use for the keystore.',\n }\n ]\n\n def export(self, body, chain, key, options, **kwargs):\n \"\"\"\n Generates a Java Keystore or Truststore\n\n :param key:\n :param chain:\n :param body:\n :param options:\n :param kwargs:\n \"\"\"\n if self.get_option('passphrase', options):\n passphrase = self.get_option('passphrase', options)\n else:\n passphrase = get_psuedo_random_string()\n\n if self.get_option('alias', options):\n alias = self.get_option('alias', options)\n else:\n alias = \"blah\"\n\n type = self.get_option('type', options)\n\n with mktemppath() as output_tmp:\n if type == 'PKCS12 (.p12)':\n create_pkcs12(body, output_tmp, key, alias, passphrase)\n extension = \"p12\"\n else:\n raise Exception(\"Unable to export, unsupported type: {0}\".format(type))\n\n with open(output_tmp, 'rb') as f:\n raw = f.read()\n\n return extension, passphrase, raw\n", "path": "lemur/plugins/lemur_openssl/plugin.py"}]}
1,798
366
gh_patches_debug_21335
rasdani/github-patches
git_diff
bridgecrewio__checkov-5189
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [CKV_AZURE_6] AKS API Server White Tests Failing with Correct Code **Describe the issue** [CKV_AZURE_6](https://github.com/bridgecrewio/checkov/tree/master/checkov/arm/checks/resource/AKSApiServerAuthorizedIpRanges.py) This check should trigger when an API Server whitelist IP isn't found in the TF code. **Examples** Please share an example code sample (in the IaC of your choice) + the expected outcomes. Sample (Pre 3.39.0 Provider Version): ``` resource "azurerm_kubernetes_cluster" "aks_k2" { name = var.cluster_name location = azurerm_resource_group.rg_aks.location resource_group_name = azurerm_resource_group.rg_aks.name sku_tier = var.sku_tier dns_prefix = var.dns_name api_server_authorized_ip_ranges = [my_ip_list] } ``` Sample (Post 3.39.0): ``` resource "azurerm_kubernetes_cluster" "aks_k2" { name = var.cluster_name location = azurerm_resource_group.rg_aks.location resource_group_name = azurerm_resource_group.rg_aks.name sku_tier = var.sku_tier dns_prefix = var.dns_name api_server_access_profile { authorized_ip_ranges = [my_ip_list] } } ``` Both have expected outcome of passing this test, as we list 4 IP's for whitelisting. We are failing tests ![image](https://github.com/bridgecrewio/checkov/assets/6209424/5a7a32d7-d9bb-4759-b7f1-32e206b4bd70) **Version (please complete the following information):** - Checkov Version: checkov-2.3.272 </issue> <code> [start of checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py] 1 from __future__ import annotations 2 3 from typing import Any 4 5 from checkov.common.models.consts import ANY_VALUE 6 from checkov.common.models.enums import CheckCategories, CheckResult 7 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 8 9 10 class AKSApiServerAuthorizedIpRanges(BaseResourceValueCheck): 11 def __init__(self) -> None: 12 name = "Ensure AKS has an API Server Authorized IP Ranges enabled" 13 id = "CKV_AZURE_6" 14 supported_resources = ("azurerm_kubernetes_cluster",) 15 categories = (CheckCategories.KUBERNETES,) 16 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 17 18 def get_inspected_key(self) -> str: 19 return "api_server_authorized_ip_ranges/[0]" 20 21 def get_expected_value(self) -> Any: 22 return ANY_VALUE 23 24 def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult: 25 # can't be set for private cluster 26 private_cluster_enabled = conf.get("private_cluster_enabled", [False])[0] 27 if private_cluster_enabled: 28 return CheckResult.PASSED 29 return super().scan_resource_conf(conf) 30 31 32 check = AKSApiServerAuthorizedIpRanges() 33 [end of checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py b/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py --- a/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py +++ b/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py @@ -16,7 +16,7 @@ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) def get_inspected_key(self) -> str: - return "api_server_authorized_ip_ranges/[0]" + return "api_server_access_profile/[0]/authorized_ip_ranges/[0]" def get_expected_value(self) -> Any: return ANY_VALUE @@ -26,6 +26,12 @@ private_cluster_enabled = conf.get("private_cluster_enabled", [False])[0] if private_cluster_enabled: return CheckResult.PASSED + + # provider version <=3.38.0 + api_server = conf.get("api_server_authorized_ip_ranges") + if api_server and isinstance(api_server, list) and api_server[0]: + return CheckResult.PASSED + return super().scan_resource_conf(conf)
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py b/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py\n--- a/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py\n+++ b/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py\n@@ -16,7 +16,7 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def get_inspected_key(self) -> str:\n- return \"api_server_authorized_ip_ranges/[0]\"\n+ return \"api_server_access_profile/[0]/authorized_ip_ranges/[0]\"\n \n def get_expected_value(self) -> Any:\n return ANY_VALUE\n@@ -26,6 +26,12 @@\n private_cluster_enabled = conf.get(\"private_cluster_enabled\", [False])[0]\n if private_cluster_enabled:\n return CheckResult.PASSED\n+\n+ # provider version <=3.38.0\n+ api_server = conf.get(\"api_server_authorized_ip_ranges\")\n+ if api_server and isinstance(api_server, list) and api_server[0]:\n+ return CheckResult.PASSED\n+\n return super().scan_resource_conf(conf)\n", "issue": "[CKV_AZURE_6] AKS API Server White Tests Failing with Correct Code\n**Describe the issue**\r\n[CKV_AZURE_6](https://github.com/bridgecrewio/checkov/tree/master/checkov/arm/checks/resource/AKSApiServerAuthorizedIpRanges.py)\r\n\r\nThis check should trigger when an API Server whitelist IP isn't found in the TF code. \r\n\r\n**Examples**\r\nPlease share an example code sample (in the IaC of your choice) + the expected outcomes.\r\n\r\nSample (Pre 3.39.0 Provider Version):\r\n```\r\nresource \"azurerm_kubernetes_cluster\" \"aks_k2\" {\r\n name = var.cluster_name\r\n location = azurerm_resource_group.rg_aks.location\r\n resource_group_name = azurerm_resource_group.rg_aks.name\r\n sku_tier = var.sku_tier\r\n dns_prefix = var.dns_name\r\n api_server_authorized_ip_ranges = [my_ip_list]\r\n}\r\n```\r\nSample (Post 3.39.0):\r\n```\r\nresource \"azurerm_kubernetes_cluster\" \"aks_k2\" {\r\n name = var.cluster_name\r\n location = azurerm_resource_group.rg_aks.location\r\n resource_group_name = azurerm_resource_group.rg_aks.name\r\n sku_tier = var.sku_tier\r\n dns_prefix = var.dns_name\r\n api_server_access_profile {\r\n authorized_ip_ranges = [my_ip_list]\r\n }\r\n}\r\n```\r\n\r\nBoth have expected outcome of passing this test, as we list 4 IP's for whitelisting.\r\nWe are failing tests\r\n![image](https://github.com/bridgecrewio/checkov/assets/6209424/5a7a32d7-d9bb-4759-b7f1-32e206b4bd70)\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version: checkov-2.3.272\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass AKSApiServerAuthorizedIpRanges(BaseResourceValueCheck):\n def __init__(self) -> None:\n name = \"Ensure AKS has an API Server Authorized IP Ranges enabled\"\n id = \"CKV_AZURE_6\"\n supported_resources = (\"azurerm_kubernetes_cluster\",)\n categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self) -> str:\n return \"api_server_authorized_ip_ranges/[0]\"\n\n def get_expected_value(self) -> Any:\n return ANY_VALUE\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n # can't be set for private cluster\n private_cluster_enabled = conf.get(\"private_cluster_enabled\", [False])[0]\n if private_cluster_enabled:\n return CheckResult.PASSED\n return super().scan_resource_conf(conf)\n\n\ncheck = AKSApiServerAuthorizedIpRanges()\n", "path": "checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py"}]}
1,318
282
gh_patches_debug_48523
rasdani/github-patches
git_diff
meltano__meltano-6488
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support Node v16 Currently building the Meltano UI with Node 16 results in a crash while building libsass. [That library is deprecated](https://sass-lang.com/blog/libsass-is-deprecated), so we should switch to using Dart-sass instead. CC @alexmarple </issue> <code> [start of scripts/alembic_freeze.py] 1 #!/usr/bin/env python3 2 3 """Script to freeze the Meltano database - executed by the Makefile.""" 4 5 from __future__ import annotations 6 7 from alembic.script import ScriptDirectory 8 9 from meltano.migrations import LOCK_PATH, MIGRATION_DIR 10 11 scripts = ScriptDirectory(str(MIGRATION_DIR)) 12 13 with LOCK_PATH.open("w") as lock: 14 HEAD = scripts.get_current_head() 15 lock.write(HEAD) 16 17 print(f"Meltano database frozen at {HEAD}.") 18 [end of scripts/alembic_freeze.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/alembic_freeze.py b/scripts/alembic_freeze.py --- a/scripts/alembic_freeze.py +++ b/scripts/alembic_freeze.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -"""Script to freeze the Meltano database - executed by the Makefile.""" +"""Script to freeze the Meltano database - executed by GitHub CI.""" from __future__ import annotations
{"golden_diff": "diff --git a/scripts/alembic_freeze.py b/scripts/alembic_freeze.py\n--- a/scripts/alembic_freeze.py\n+++ b/scripts/alembic_freeze.py\n@@ -1,6 +1,6 @@\n #!/usr/bin/env python3\n \n-\"\"\"Script to freeze the Meltano database - executed by the Makefile.\"\"\"\n+\"\"\"Script to freeze the Meltano database - executed by GitHub CI.\"\"\"\n \n from __future__ import annotations\n", "issue": "Support Node v16\nCurrently building the Meltano UI with Node 16 results in a crash while building libsass. [That library is deprecated](https://sass-lang.com/blog/libsass-is-deprecated), so we should switch to using Dart-sass instead.\r\n\r\nCC @alexmarple \n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Script to freeze the Meltano database - executed by the Makefile.\"\"\"\n\nfrom __future__ import annotations\n\nfrom alembic.script import ScriptDirectory\n\nfrom meltano.migrations import LOCK_PATH, MIGRATION_DIR\n\nscripts = ScriptDirectory(str(MIGRATION_DIR))\n\nwith LOCK_PATH.open(\"w\") as lock:\n HEAD = scripts.get_current_head()\n lock.write(HEAD)\n\nprint(f\"Meltano database frozen at {HEAD}.\")\n", "path": "scripts/alembic_freeze.py"}]}
738
104
gh_patches_debug_2776
rasdani/github-patches
git_diff
sunpy__sunpy-1505
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> EIT data fails with wcsaxes The wcs information in the EIT header is not being identified as celestial axes by wcslib (inside astropy.wcs). This means that wcs is not detecting them as angular axes properly so therefore the set to arcsecond formatting is failing. </issue> <code> [start of sunpy/visualization/wcsaxes_compat.py] 1 # -*- coding: utf-8 -*- 2 """ 3 Helpers and Functions to make WCSAxes work in SunPy 4 """ 5 import warnings 6 7 import matplotlib.pyplot as plt 8 9 import astropy.units as u 10 11 try: 12 import wcsaxes 13 HAVE_WCSAXES = True 14 15 except ImportError: 16 HAVE_WCSAXES = False 17 warnings.warn("SunPy plotting is improved by installing the WCSAxes module: http://wcsaxes.readthedocs.org/en/latest/index.html") 18 19 FORCE_NO_WCSAXES = False 20 21 __all__ = ['HAVE_WCSAXES', 'is_wcsaxes', 'FORCE_NO_WCSAXES'] 22 23 def is_wcsaxes(axes): 24 """ 25 Test a matplotlib Axes object to see if it is an instance of WCSAxes 26 27 Parameters 28 ---------- 29 axes : matplotlib Axes Object 30 Axes to test 31 32 Returns 33 ------- 34 result : bool 35 Result of the test 36 """ 37 38 if HAVE_WCSAXES and not FORCE_NO_WCSAXES: 39 return isinstance(axes, wcsaxes.WCSAxes) 40 else: 41 return False 42 43 44 def gca_wcs(wcs, fig=None): 45 """ 46 Get the current axes, and return a WCSAxes if possible 47 """ 48 49 if not fig: 50 fig = plt.gcf() 51 52 if not len(fig.get_axes()): 53 if HAVE_WCSAXES and not FORCE_NO_WCSAXES: 54 ax = plt.gca(projection=wcs) 55 else: 56 ax = plt.gca() 57 58 else: 59 ax = plt.gca() 60 61 return ax 62 63 def get_world_transform(axes): 64 if is_wcsaxes(axes): 65 transform = axes.get_transform('world') 66 else: 67 transform = axes.transData 68 69 return transform 70 71 def default_wcs_grid(axes): 72 """ 73 Apply some default wcsaxes grid formatting 74 """ 75 if not isinstance(axes, wcsaxes.WCSAxes): 76 raise TypeError("This axes is not a WCSAxes") 77 78 x = axes.coords[0] 79 y = axes.coords[1] 80 81 x.set_ticks(color='white') 82 y.set_ticks(color='white') 83 84 x.set_ticks_position('bl') 85 y.set_ticks_position('bl') 86 87 x.set_major_formatter('s.s') 88 y.set_major_formatter('s.s') 89 90 axes.coords.grid(color='white', alpha=0.6) 91 92 def wcsaxes_heliographic_overlay(axes): 93 """ 94 Draw a heliographic overlay using wcsaxes 95 """ 96 overlay = axes.get_coords_overlay('heliographicstonyhurst') 97 98 lon = overlay[0] 99 lat = overlay[1] 100 101 lon.coord_wrap = 180 102 lon.set_major_formatter('dd') 103 104 lon.set_axislabel('Solar Longitude') 105 lat.set_axislabel('Solar Latitude') 106 107 lon.set_ticks_position('tr') 108 lat.set_ticks_position('tr') 109 110 lon.set_ticks(spacing=10. * u.deg, color='white') 111 lat.set_ticks(spacing=10. * u.deg, color='white') 112 113 overlay.grid(color='white', alpha=0.5) 114 115 return overlay 116 [end of sunpy/visualization/wcsaxes_compat.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sunpy/visualization/wcsaxes_compat.py b/sunpy/visualization/wcsaxes_compat.py --- a/sunpy/visualization/wcsaxes_compat.py +++ b/sunpy/visualization/wcsaxes_compat.py @@ -84,6 +84,11 @@ x.set_ticks_position('bl') y.set_ticks_position('bl') + if x.coord_type != 'longitude': + x.set_coord_type('longitude', coord_wrap=180.) + if y.coord_type != 'latitude': + y.set_coord_type('latitude') + x.set_major_formatter('s.s') y.set_major_formatter('s.s')
{"golden_diff": "diff --git a/sunpy/visualization/wcsaxes_compat.py b/sunpy/visualization/wcsaxes_compat.py\n--- a/sunpy/visualization/wcsaxes_compat.py\n+++ b/sunpy/visualization/wcsaxes_compat.py\n@@ -84,6 +84,11 @@\n x.set_ticks_position('bl')\n y.set_ticks_position('bl')\n \n+ if x.coord_type != 'longitude':\n+ x.set_coord_type('longitude', coord_wrap=180.)\n+ if y.coord_type != 'latitude':\n+ y.set_coord_type('latitude')\n+\n x.set_major_formatter('s.s')\n y.set_major_formatter('s.s')\n", "issue": "EIT data fails with wcsaxes\nThe wcs information in the EIT header is not being identified as celestial axes by wcslib (inside astropy.wcs). This means that wcs is not detecting them as angular axes properly so therefore the set to arcsecond formatting is failing.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nHelpers and Functions to make WCSAxes work in SunPy\n\"\"\"\nimport warnings\n\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\ntry:\n import wcsaxes\n HAVE_WCSAXES = True\n\nexcept ImportError:\n HAVE_WCSAXES = False\n warnings.warn(\"SunPy plotting is improved by installing the WCSAxes module: http://wcsaxes.readthedocs.org/en/latest/index.html\")\n\nFORCE_NO_WCSAXES = False\n\n__all__ = ['HAVE_WCSAXES', 'is_wcsaxes', 'FORCE_NO_WCSAXES']\n\ndef is_wcsaxes(axes):\n \"\"\"\n Test a matplotlib Axes object to see if it is an instance of WCSAxes\n\n Parameters\n ----------\n axes : matplotlib Axes Object\n Axes to test\n\n Returns\n -------\n result : bool\n Result of the test\n \"\"\"\n\n if HAVE_WCSAXES and not FORCE_NO_WCSAXES:\n return isinstance(axes, wcsaxes.WCSAxes)\n else:\n return False\n\n\ndef gca_wcs(wcs, fig=None):\n \"\"\"\n Get the current axes, and return a WCSAxes if possible\n \"\"\"\n\n if not fig:\n fig = plt.gcf()\n\n if not len(fig.get_axes()):\n if HAVE_WCSAXES and not FORCE_NO_WCSAXES:\n ax = plt.gca(projection=wcs)\n else:\n ax = plt.gca()\n\n else:\n ax = plt.gca()\n\n return ax\n\ndef get_world_transform(axes):\n if is_wcsaxes(axes):\n transform = axes.get_transform('world')\n else:\n transform = axes.transData\n\n return transform\n\ndef default_wcs_grid(axes):\n \"\"\"\n Apply some default wcsaxes grid formatting\n \"\"\"\n if not isinstance(axes, wcsaxes.WCSAxes):\n raise TypeError(\"This axes is not a WCSAxes\")\n\n x = axes.coords[0]\n y = axes.coords[1]\n\n x.set_ticks(color='white')\n y.set_ticks(color='white')\n\n x.set_ticks_position('bl')\n y.set_ticks_position('bl')\n\n x.set_major_formatter('s.s')\n y.set_major_formatter('s.s')\n\n axes.coords.grid(color='white', alpha=0.6)\n\ndef wcsaxes_heliographic_overlay(axes):\n \"\"\"\n Draw a heliographic overlay using wcsaxes\n \"\"\"\n overlay = axes.get_coords_overlay('heliographicstonyhurst')\n\n lon = overlay[0]\n lat = overlay[1]\n\n lon.coord_wrap = 180\n lon.set_major_formatter('dd')\n\n lon.set_axislabel('Solar Longitude')\n lat.set_axislabel('Solar Latitude')\n\n lon.set_ticks_position('tr')\n lat.set_ticks_position('tr')\n\n lon.set_ticks(spacing=10. * u.deg, color='white')\n lat.set_ticks(spacing=10. * u.deg, color='white')\n\n overlay.grid(color='white', alpha=0.5)\n\n return overlay\n", "path": "sunpy/visualization/wcsaxes_compat.py"}]}
1,519
147
gh_patches_debug_29580
rasdani/github-patches
git_diff
mit-ll-responsible-ai__hydra-zen-175
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Inconsistent static analysis via pyright ```python x = [1, 2, 3] make_config(a=[1, 2, 3]) make_config(a=x) # pyright marks this as invalid based on our annotations ``` This seems like a pyright issue, but we might consider revising annotations to accommodate </issue> <code> [start of src/hydra_zen/typing/_implementations.py] 1 # Copyright (c) 2021 Massachusetts Institute of Technology 2 # SPDX-License-Identifier: MIT 3 4 from dataclasses import Field 5 from enum import Enum 6 from pathlib import Path 7 from typing import ( 8 Any, 9 Callable, 10 Counter, 11 Deque, 12 Dict, 13 FrozenSet, 14 Generic, 15 List, 16 NewType, 17 Set, 18 Tuple, 19 TypeVar, 20 Union, 21 ) 22 23 from omegaconf import DictConfig, ListConfig 24 from typing_extensions import Protocol, runtime_checkable 25 26 __all__ = [ 27 "Just", 28 "Builds", 29 "PartialBuilds", 30 "Partial", 31 "Importable", 32 "SupportedPrimitive", 33 ] 34 35 36 _T = TypeVar("_T", covariant=True) 37 38 39 class Partial(Generic[_T]): 40 func: Callable[..., _T] 41 args: Tuple[Any, ...] 42 keywords: Dict[str, Any] 43 44 def __init__( 45 self, func: Callable[..., _T], *args: Any, **kwargs: Any 46 ) -> None: # pragma: no cover 47 ... 48 49 def __call__(self, *args: Any, **kwargs: Any) -> _T: # pragma: no cover 50 ... 51 52 53 InterpStr = NewType("InterpStr", str) 54 55 Importable = TypeVar("Importable") 56 57 58 class _DataClass(Protocol): # pragma: no cover 59 # doesn't provide __init__, __getattribute__, etc. 60 __dataclass_fields__: Dict[str, Field] 61 62 63 class DataClass(_DataClass, Protocol): # pragma: no cover 64 def __init__(self, *args, **kwargs) -> None: 65 ... 66 67 def __getattribute__(self, name: str) -> Any: 68 ... 69 70 def __setattr__(self, name: str, value: Any) -> None: 71 ... 72 73 74 @runtime_checkable 75 class Builds(DataClass, Protocol[_T]): # pragma: no cover 76 77 _target_: str 78 79 80 @runtime_checkable 81 class Just(Builds, Protocol[_T]): # pragma: no cover 82 path: str # interpolated string for importing obj 83 _target_: str = "hydra_zen.funcs.get_obj" 84 85 86 @runtime_checkable 87 class PartialBuilds(Builds, Protocol[_T]): # pragma: no cover 88 _target_: str = "hydra_zen.funcs.zen_processing" 89 _zen_target: str 90 _zen_partial: bool = True 91 92 93 @runtime_checkable 94 class HasTarget(Protocol): # pragma: no cover 95 _target_: str 96 97 98 @runtime_checkable 99 class HasPartialTarget(Protocol): # pragma: no cover 100 _zen_partial: bool = True 101 102 103 _HydraPrimitive = Union[ 104 bool, 105 None, 106 int, 107 float, 108 str, 109 ] 110 111 _SupportedPrimitive = Union[ 112 _HydraPrimitive, 113 ListConfig, 114 DictConfig, 115 type, 116 Callable, 117 Enum, 118 _DataClass, 119 complex, 120 Path, 121 range, 122 ] 123 124 SupportedPrimitive = Union[ 125 _SupportedPrimitive, 126 Dict[_HydraPrimitive, "SupportedPrimitive"], 127 Counter[_HydraPrimitive], 128 Set["SupportedPrimitive"], 129 FrozenSet["SupportedPrimitive"], 130 Deque["SupportedPrimitive"], 131 List["SupportedPrimitive"], 132 Tuple["SupportedPrimitive", ...], 133 ] 134 [end of src/hydra_zen/typing/_implementations.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/hydra_zen/typing/_implementations.py b/src/hydra_zen/typing/_implementations.py --- a/src/hydra_zen/typing/_implementations.py +++ b/src/hydra_zen/typing/_implementations.py @@ -7,21 +7,19 @@ from typing import ( Any, Callable, - Counter, - Deque, Dict, FrozenSet, Generic, - List, + Mapping, NewType, - Set, + Sequence, Tuple, TypeVar, Union, ) from omegaconf import DictConfig, ListConfig -from typing_extensions import Protocol, runtime_checkable +from typing_extensions import Protocol, TypedDict, runtime_checkable __all__ = [ "Just", @@ -33,6 +31,10 @@ ] +class EmptyDict(TypedDict): + pass + + _T = TypeVar("_T", covariant=True) @@ -119,15 +121,21 @@ complex, Path, range, + set, + EmptyDict, # not covered by Mapping[..., ...] ] SupportedPrimitive = Union[ _SupportedPrimitive, - Dict[_HydraPrimitive, "SupportedPrimitive"], - Counter[_HydraPrimitive], - Set["SupportedPrimitive"], FrozenSet["SupportedPrimitive"], - Deque["SupportedPrimitive"], - List["SupportedPrimitive"], - Tuple["SupportedPrimitive", ...], + # Even thought this is redundant with Sequence, it seems to + # be needed for pyright to do proper checking of tuple contents + Tuple["SupportedPrimitive"], + # Mutable generic containers need to be invariant, so + # we have to settle for Sequence/Mapping. While this + # is overly permissive in terms of sequence-type, it + # at least affords quality checking of sequence content + Sequence["SupportedPrimitive"], + # Mapping is covariant only in value + Mapping[Any, "SupportedPrimitive"], ]
{"golden_diff": "diff --git a/src/hydra_zen/typing/_implementations.py b/src/hydra_zen/typing/_implementations.py\n--- a/src/hydra_zen/typing/_implementations.py\n+++ b/src/hydra_zen/typing/_implementations.py\n@@ -7,21 +7,19 @@\n from typing import (\n Any,\n Callable,\n- Counter,\n- Deque,\n Dict,\n FrozenSet,\n Generic,\n- List,\n+ Mapping,\n NewType,\n- Set,\n+ Sequence,\n Tuple,\n TypeVar,\n Union,\n )\n \n from omegaconf import DictConfig, ListConfig\n-from typing_extensions import Protocol, runtime_checkable\n+from typing_extensions import Protocol, TypedDict, runtime_checkable\n \n __all__ = [\n \"Just\",\n@@ -33,6 +31,10 @@\n ]\n \n \n+class EmptyDict(TypedDict):\n+ pass\n+\n+\n _T = TypeVar(\"_T\", covariant=True)\n \n \n@@ -119,15 +121,21 @@\n complex,\n Path,\n range,\n+ set,\n+ EmptyDict, # not covered by Mapping[..., ...]\n ]\n \n SupportedPrimitive = Union[\n _SupportedPrimitive,\n- Dict[_HydraPrimitive, \"SupportedPrimitive\"],\n- Counter[_HydraPrimitive],\n- Set[\"SupportedPrimitive\"],\n FrozenSet[\"SupportedPrimitive\"],\n- Deque[\"SupportedPrimitive\"],\n- List[\"SupportedPrimitive\"],\n- Tuple[\"SupportedPrimitive\", ...],\n+ # Even thought this is redundant with Sequence, it seems to\n+ # be needed for pyright to do proper checking of tuple contents\n+ Tuple[\"SupportedPrimitive\"],\n+ # Mutable generic containers need to be invariant, so\n+ # we have to settle for Sequence/Mapping. While this\n+ # is overly permissive in terms of sequence-type, it\n+ # at least affords quality checking of sequence content\n+ Sequence[\"SupportedPrimitive\"],\n+ # Mapping is covariant only in value\n+ Mapping[Any, \"SupportedPrimitive\"],\n ]\n", "issue": "Inconsistent static analysis via pyright\n```python\r\nx = [1, 2, 3]\r\nmake_config(a=[1, 2, 3])\r\nmake_config(a=x) # pyright marks this as invalid based on our annotations\r\n```\r\n\r\nThis seems like a pyright issue, but we might consider revising annotations to accommodate\n", "before_files": [{"content": "# Copyright (c) 2021 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\nfrom dataclasses import Field\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import (\n Any,\n Callable,\n Counter,\n Deque,\n Dict,\n FrozenSet,\n Generic,\n List,\n NewType,\n Set,\n Tuple,\n TypeVar,\n Union,\n)\n\nfrom omegaconf import DictConfig, ListConfig\nfrom typing_extensions import Protocol, runtime_checkable\n\n__all__ = [\n \"Just\",\n \"Builds\",\n \"PartialBuilds\",\n \"Partial\",\n \"Importable\",\n \"SupportedPrimitive\",\n]\n\n\n_T = TypeVar(\"_T\", covariant=True)\n\n\nclass Partial(Generic[_T]):\n func: Callable[..., _T]\n args: Tuple[Any, ...]\n keywords: Dict[str, Any]\n\n def __init__(\n self, func: Callable[..., _T], *args: Any, **kwargs: Any\n ) -> None: # pragma: no cover\n ...\n\n def __call__(self, *args: Any, **kwargs: Any) -> _T: # pragma: no cover\n ...\n\n\nInterpStr = NewType(\"InterpStr\", str)\n\nImportable = TypeVar(\"Importable\")\n\n\nclass _DataClass(Protocol): # pragma: no cover\n # doesn't provide __init__, __getattribute__, etc.\n __dataclass_fields__: Dict[str, Field]\n\n\nclass DataClass(_DataClass, Protocol): # pragma: no cover\n def __init__(self, *args, **kwargs) -> None:\n ...\n\n def __getattribute__(self, name: str) -> Any:\n ...\n\n def __setattr__(self, name: str, value: Any) -> None:\n ...\n\n\n@runtime_checkable\nclass Builds(DataClass, Protocol[_T]): # pragma: no cover\n\n _target_: str\n\n\n@runtime_checkable\nclass Just(Builds, Protocol[_T]): # pragma: no cover\n path: str # interpolated string for importing obj\n _target_: str = \"hydra_zen.funcs.get_obj\"\n\n\n@runtime_checkable\nclass PartialBuilds(Builds, Protocol[_T]): # pragma: no cover\n _target_: str = \"hydra_zen.funcs.zen_processing\"\n _zen_target: str\n _zen_partial: bool = True\n\n\n@runtime_checkable\nclass HasTarget(Protocol): # pragma: no cover\n _target_: str\n\n\n@runtime_checkable\nclass HasPartialTarget(Protocol): # pragma: no cover\n _zen_partial: bool = True\n\n\n_HydraPrimitive = Union[\n bool,\n None,\n int,\n float,\n str,\n]\n\n_SupportedPrimitive = Union[\n _HydraPrimitive,\n ListConfig,\n DictConfig,\n type,\n Callable,\n Enum,\n _DataClass,\n complex,\n Path,\n range,\n]\n\nSupportedPrimitive = Union[\n _SupportedPrimitive,\n Dict[_HydraPrimitive, \"SupportedPrimitive\"],\n Counter[_HydraPrimitive],\n Set[\"SupportedPrimitive\"],\n FrozenSet[\"SupportedPrimitive\"],\n Deque[\"SupportedPrimitive\"],\n List[\"SupportedPrimitive\"],\n Tuple[\"SupportedPrimitive\", ...],\n]\n", "path": "src/hydra_zen/typing/_implementations.py"}]}
1,648
454
gh_patches_debug_44429
rasdani/github-patches
git_diff
pytorch__ignite-408
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove `activation` argument from AveragePrecision and ROC_AUC I propose to remove `activation` argument from `AveragePrecision` and `ROC_AUC`: https://github.com/pytorch/ignite/blob/862ab0073e461a32bf5b53fa015f88c143ae9079/ignite/contrib/metrics/roc_auc.py#L23-L28 to be coherent with `Accuracy` et friends and the usage of `output_transform` to apply the binarization/activation : https://github.com/pytorch/ignite/blob/862ab0073e461a32bf5b53fa015f88c143ae9079/ignite/metrics/accuracy.py#L81-L93 </issue> <code> [start of ignite/contrib/metrics/roc_auc.py] 1 from functools import partial 2 from ignite.metrics import EpochMetric 3 4 5 def roc_auc_compute_fn(y_preds, y_targets, activation=None): 6 try: 7 from sklearn.metrics import roc_auc_score 8 except ImportError: 9 raise RuntimeError("This contrib module requires sklearn to be installed.") 10 11 y_true = y_targets.numpy() 12 if activation is not None: 13 y_preds = activation(y_preds) 14 y_pred = y_preds.numpy() 15 return roc_auc_score(y_true, y_pred) 16 17 18 class ROC_AUC(EpochMetric): 19 """Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC) 20 accumulating predictions and the ground-truth during an epoch and applying 21 `sklearn.metrics.roc_auc_score <http://scikit-learn.org/stable/modules/generated/ 22 sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_ . 23 24 Args: 25 activation (callable, optional): optional function to apply on prediction tensors, 26 e.g. `activation=torch.sigmoid` to transform logits. 27 output_transform (callable, optional): a callable that is used to transform the 28 :class:`~ignite.engine.Engine`'s `process_function`'s output into the 29 form expected by the metric. This can be useful if, for example, you have a multi-output model and 30 you want to compute the metric with respect to one of the outputs. 31 32 """ 33 def __init__(self, activation=None, output_transform=lambda x: x): 34 super(ROC_AUC, self).__init__(partial(roc_auc_compute_fn, activation=activation), 35 output_transform=output_transform) 36 [end of ignite/contrib/metrics/roc_auc.py] [start of ignite/contrib/metrics/average_precision.py] 1 from functools import partial 2 from ignite.metrics import EpochMetric 3 4 5 def average_precision_compute_fn(y_preds, y_targets, activation=None): 6 try: 7 from sklearn.metrics import average_precision_score 8 except ImportError: 9 raise RuntimeError("This contrib module requires sklearn to be installed.") 10 11 y_true = y_targets.numpy() 12 if activation is not None: 13 y_preds = activation(y_preds) 14 y_pred = y_preds.numpy() 15 return average_precision_score(y_true, y_pred) 16 17 18 class AveragePrecision(EpochMetric): 19 """Computes Average Precision accumulating predictions and the ground-truth during an epoch 20 and applying `sklearn.metrics.average_precision_score <http://scikit-learn.org/stable/modules/generated/ 21 sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ . 22 23 Args: 24 activation (callable, optional): optional function to apply on prediction tensors, 25 e.g. `activation=torch.sigmoid` to transform logits. 26 output_transform (callable, optional): a callable that is used to transform the 27 :class:`~ignite.engine.Engine`'s `process_function`'s output into the 28 form expected by the metric. This can be useful if, for example, you have a multi-output model and 29 you want to compute the metric with respect to one of the outputs. 30 31 """ 32 def __init__(self, activation=None, output_transform=lambda x: x): 33 super(AveragePrecision, self).__init__(partial(average_precision_compute_fn, activation=activation), 34 output_transform=output_transform) 35 [end of ignite/contrib/metrics/average_precision.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/contrib/metrics/average_precision.py b/ignite/contrib/metrics/average_precision.py --- a/ignite/contrib/metrics/average_precision.py +++ b/ignite/contrib/metrics/average_precision.py @@ -1,16 +1,13 @@ -from functools import partial from ignite.metrics import EpochMetric -def average_precision_compute_fn(y_preds, y_targets, activation=None): +def average_precision_compute_fn(y_preds, y_targets): try: from sklearn.metrics import average_precision_score except ImportError: raise RuntimeError("This contrib module requires sklearn to be installed.") y_true = y_targets.numpy() - if activation is not None: - y_preds = activation(y_preds) y_pred = y_preds.numpy() return average_precision_score(y_true, y_pred) @@ -21,14 +18,23 @@ sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ . Args: - activation (callable, optional): optional function to apply on prediction tensors, - e.g. `activation=torch.sigmoid` to transform logits. output_transform (callable, optional): a callable that is used to transform the :class:`~ignite.engine.Engine`'s `process_function`'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. + AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or + confidence values. To apply an activation to y_pred, use output_transform as shown below: + + .. code-block:: python + + def activated_output_transform(output): + y_pred, y = output + y_pred = torch.softmax(y_pred) + return y_pred, y + + avg_precision = AveragePrecision(activated_output_transform) + """ def __init__(self, activation=None, output_transform=lambda x: x): - super(AveragePrecision, self).__init__(partial(average_precision_compute_fn, activation=activation), - output_transform=output_transform) + super(AveragePrecision, self).__init__(average_precision_compute_fn, output_transform=output_transform) diff --git a/ignite/contrib/metrics/roc_auc.py b/ignite/contrib/metrics/roc_auc.py --- a/ignite/contrib/metrics/roc_auc.py +++ b/ignite/contrib/metrics/roc_auc.py @@ -1,16 +1,13 @@ -from functools import partial from ignite.metrics import EpochMetric -def roc_auc_compute_fn(y_preds, y_targets, activation=None): +def roc_auc_compute_fn(y_preds, y_targets): try: from sklearn.metrics import roc_auc_score except ImportError: raise RuntimeError("This contrib module requires sklearn to be installed.") y_true = y_targets.numpy() - if activation is not None: - y_preds = activation(y_preds) y_pred = y_preds.numpy() return roc_auc_score(y_true, y_pred) @@ -22,14 +19,23 @@ sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_ . Args: - activation (callable, optional): optional function to apply on prediction tensors, - e.g. `activation=torch.sigmoid` to transform logits. output_transform (callable, optional): a callable that is used to transform the :class:`~ignite.engine.Engine`'s `process_function`'s output into the form expected by the metric. This can be useful if, for example, you have a multi-output model and you want to compute the metric with respect to one of the outputs. + ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence + values. To apply an activation to y_pred, use output_transform as shown below: + + .. code-block:: python + + def activated_output_transform(output): + y_pred, y = output + y_pred = torch.sigmoid(y_pred) + return y_pred, y + + roc_auc = ROC_AUC(activated_output_transform) + """ - def __init__(self, activation=None, output_transform=lambda x: x): - super(ROC_AUC, self).__init__(partial(roc_auc_compute_fn, activation=activation), - output_transform=output_transform) + def __init__(self, output_transform=lambda x: x): + super(ROC_AUC, self).__init__(roc_auc_compute_fn, output_transform=output_transform)
{"golden_diff": "diff --git a/ignite/contrib/metrics/average_precision.py b/ignite/contrib/metrics/average_precision.py\n--- a/ignite/contrib/metrics/average_precision.py\n+++ b/ignite/contrib/metrics/average_precision.py\n@@ -1,16 +1,13 @@\n-from functools import partial\n from ignite.metrics import EpochMetric\n \n \n-def average_precision_compute_fn(y_preds, y_targets, activation=None):\n+def average_precision_compute_fn(y_preds, y_targets):\n try:\n from sklearn.metrics import average_precision_score\n except ImportError:\n raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n \n y_true = y_targets.numpy()\n- if activation is not None:\n- y_preds = activation(y_preds)\n y_pred = y_preds.numpy()\n return average_precision_score(y_true, y_pred)\n \n@@ -21,14 +18,23 @@\n sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ .\n \n Args:\n- activation (callable, optional): optional function to apply on prediction tensors,\n- e.g. `activation=torch.sigmoid` to transform logits.\n output_transform (callable, optional): a callable that is used to transform the\n :class:`~ignite.engine.Engine`'s `process_function`'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n \n+ AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or\n+ confidence values. To apply an activation to y_pred, use output_transform as shown below:\n+\n+ .. code-block:: python\n+\n+ def activated_output_transform(output):\n+ y_pred, y = output\n+ y_pred = torch.softmax(y_pred)\n+ return y_pred, y\n+\n+ avg_precision = AveragePrecision(activated_output_transform)\n+\n \"\"\"\n def __init__(self, activation=None, output_transform=lambda x: x):\n- super(AveragePrecision, self).__init__(partial(average_precision_compute_fn, activation=activation),\n- output_transform=output_transform)\n+ super(AveragePrecision, self).__init__(average_precision_compute_fn, output_transform=output_transform)\ndiff --git a/ignite/contrib/metrics/roc_auc.py b/ignite/contrib/metrics/roc_auc.py\n--- a/ignite/contrib/metrics/roc_auc.py\n+++ b/ignite/contrib/metrics/roc_auc.py\n@@ -1,16 +1,13 @@\n-from functools import partial\n from ignite.metrics import EpochMetric\n \n \n-def roc_auc_compute_fn(y_preds, y_targets, activation=None):\n+def roc_auc_compute_fn(y_preds, y_targets):\n try:\n from sklearn.metrics import roc_auc_score\n except ImportError:\n raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n \n y_true = y_targets.numpy()\n- if activation is not None:\n- y_preds = activation(y_preds)\n y_pred = y_preds.numpy()\n return roc_auc_score(y_true, y_pred)\n \n@@ -22,14 +19,23 @@\n sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_ .\n \n Args:\n- activation (callable, optional): optional function to apply on prediction tensors,\n- e.g. `activation=torch.sigmoid` to transform logits.\n output_transform (callable, optional): a callable that is used to transform the\n :class:`~ignite.engine.Engine`'s `process_function`'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n \n+ ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence\n+ values. To apply an activation to y_pred, use output_transform as shown below:\n+\n+ .. code-block:: python\n+\n+ def activated_output_transform(output):\n+ y_pred, y = output\n+ y_pred = torch.sigmoid(y_pred)\n+ return y_pred, y\n+\n+ roc_auc = ROC_AUC(activated_output_transform)\n+\n \"\"\"\n- def __init__(self, activation=None, output_transform=lambda x: x):\n- super(ROC_AUC, self).__init__(partial(roc_auc_compute_fn, activation=activation),\n- output_transform=output_transform)\n+ def __init__(self, output_transform=lambda x: x):\n+ super(ROC_AUC, self).__init__(roc_auc_compute_fn, output_transform=output_transform)\n", "issue": "Remove `activation` argument from AveragePrecision and ROC_AUC\nI propose to remove `activation` argument from `AveragePrecision` and `ROC_AUC`:\r\nhttps://github.com/pytorch/ignite/blob/862ab0073e461a32bf5b53fa015f88c143ae9079/ignite/contrib/metrics/roc_auc.py#L23-L28\r\nto be coherent with `Accuracy` et friends and the usage of `output_transform` to apply the binarization/activation :\r\nhttps://github.com/pytorch/ignite/blob/862ab0073e461a32bf5b53fa015f88c143ae9079/ignite/metrics/accuracy.py#L81-L93\r\n\r\n\n", "before_files": [{"content": "from functools import partial\nfrom ignite.metrics import EpochMetric\n\n\ndef roc_auc_compute_fn(y_preds, y_targets, activation=None):\n try:\n from sklearn.metrics import roc_auc_score\n except ImportError:\n raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n\n y_true = y_targets.numpy()\n if activation is not None:\n y_preds = activation(y_preds)\n y_pred = y_preds.numpy()\n return roc_auc_score(y_true, y_pred)\n\n\nclass ROC_AUC(EpochMetric):\n \"\"\"Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC)\n accumulating predictions and the ground-truth during an epoch and applying\n `sklearn.metrics.roc_auc_score <http://scikit-learn.org/stable/modules/generated/\n sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_ .\n\n Args:\n activation (callable, optional): optional function to apply on prediction tensors,\n e.g. `activation=torch.sigmoid` to transform logits.\n output_transform (callable, optional): a callable that is used to transform the\n :class:`~ignite.engine.Engine`'s `process_function`'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n\n \"\"\"\n def __init__(self, activation=None, output_transform=lambda x: x):\n super(ROC_AUC, self).__init__(partial(roc_auc_compute_fn, activation=activation),\n output_transform=output_transform)\n", "path": "ignite/contrib/metrics/roc_auc.py"}, {"content": "from functools import partial\nfrom ignite.metrics import EpochMetric\n\n\ndef average_precision_compute_fn(y_preds, y_targets, activation=None):\n try:\n from sklearn.metrics import average_precision_score\n except ImportError:\n raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n\n y_true = y_targets.numpy()\n if activation is not None:\n y_preds = activation(y_preds)\n y_pred = y_preds.numpy()\n return average_precision_score(y_true, y_pred)\n\n\nclass AveragePrecision(EpochMetric):\n \"\"\"Computes Average Precision accumulating predictions and the ground-truth during an epoch\n and applying `sklearn.metrics.average_precision_score <http://scikit-learn.org/stable/modules/generated/\n sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ .\n\n Args:\n activation (callable, optional): optional function to apply on prediction tensors,\n e.g. `activation=torch.sigmoid` to transform logits.\n output_transform (callable, optional): a callable that is used to transform the\n :class:`~ignite.engine.Engine`'s `process_function`'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n\n \"\"\"\n def __init__(self, activation=None, output_transform=lambda x: x):\n super(AveragePrecision, self).__init__(partial(average_precision_compute_fn, activation=activation),\n output_transform=output_transform)\n", "path": "ignite/contrib/metrics/average_precision.py"}]}
1,540
1,013
gh_patches_debug_21131
rasdani/github-patches
git_diff
pypi__warehouse-2023
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Search by topic broken ? Browsing packages, then choosing the Internet / WWW9HTTP / Browser topics gives no result: https://pypi.org/search/?q=&o=&c=Topic+%3A%3A+Internet+%3A%3A+WWW%2FHTTP+%3A%3A+Browsers There should be at least the [mechanoid package](https://pypi.org/project/mechanoid/) Using firefox 50.1:0 on Ubuntu 16.04 </issue> <code> [start of warehouse/cli/search/reindex.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import binascii 14 import os 15 16 import click 17 18 from elasticsearch.helpers import parallel_bulk 19 from sqlalchemy.orm import lazyload, joinedload, load_only 20 21 from warehouse.cli.search import search 22 from warehouse.db import Session 23 from warehouse.packaging.models import Release, Project 24 from warehouse.packaging.search import Project as ProjectDocType 25 from warehouse.search import get_index 26 from warehouse.utils.db import windowed_query 27 28 29 def _project_docs(db): 30 releases = ( 31 db.query(Release) 32 .options(load_only( 33 "summary", "description", "author", 34 "author_email", "maintainer", "maintainer_email", 35 "home_page", "download_url", "keywords", "platform", 36 "created")) 37 .options(lazyload("*"), 38 (joinedload(Release.project) 39 .load_only("normalized_name", "name") 40 .joinedload(Project.releases) 41 .load_only("version", "is_prerelease")), 42 joinedload(Release._classifiers).load_only("classifier")) 43 .distinct(Release.name) 44 .order_by(Release.name, Release._pypi_ordering.desc()) 45 ) 46 for release in windowed_query(releases, Release.name, 1000): 47 p = ProjectDocType.from_db(release) 48 p.full_clean() 49 yield p.to_dict(include_meta=True) 50 51 52 @search.command() 53 @click.pass_obj 54 def reindex(config, **kwargs): 55 """ 56 Recreate the Search Index. 57 """ 58 client = config.registry["elasticsearch.client"] 59 db = Session(bind=config.registry["sqlalchemy.engine"]) 60 number_of_replicas = config.registry.get("elasticsearch.replicas", 0) 61 refresh_interval = config.registry.get("elasticsearch.interval", "1s") 62 63 # We use a randomly named index so that we can do a zero downtime reindex. 64 # Essentially we'll use a randomly named index which we will use until all 65 # of the data has been reindexed, at which point we'll point an alias at 66 # our randomly named index, and then delete the old randomly named index. 67 68 # Create the new index and associate all of our doc types with it. 69 index_base = config.registry["elasticsearch.index"] 70 random_token = binascii.hexlify(os.urandom(5)).decode("ascii") 71 new_index_name = "{}-{}".format(index_base, random_token) 72 doc_types = config.registry.get("search.doc_types", set()) 73 74 # Create the new index with zero replicas and index refreshes disabled 75 # while we are bulk indexing. 76 new_index = get_index( 77 new_index_name, 78 doc_types, 79 using=client, 80 shards=config.registry.get("elasticsearch.shards", 1), 81 replicas=0, 82 interval="-1", 83 ) 84 85 # From this point on, if any error occurs, we want to be able to delete our 86 # in progress index. 87 try: 88 db.execute("SET statement_timeout = '600s'") 89 90 for _ in parallel_bulk(client, _project_docs(db)): 91 pass 92 except: 93 new_index.delete() 94 raise 95 finally: 96 db.rollback() 97 db.close() 98 99 # Now that we've finished indexing all of our data we can optimize it and 100 # update the replicas and refresh intervals. 101 client.indices.forcemerge(index=new_index_name) 102 client.indices.put_settings( 103 index=new_index_name, 104 body={ 105 "index": { 106 "number_of_replicas": number_of_replicas, 107 "refresh_interval": refresh_interval, 108 } 109 } 110 ) 111 112 # Point the alias at our new randomly named index and delete the old index. 113 if client.indices.exists_alias(name=index_base): 114 to_delete = set() 115 actions = [] 116 for name in client.indices.get_alias(name=index_base): 117 to_delete.add(name) 118 actions.append({"remove": {"index": name, "alias": index_base}}) 119 actions.append({"add": {"index": new_index_name, "alias": index_base}}) 120 client.indices.update_aliases({"actions": actions}) 121 client.indices.delete(",".join(to_delete)) 122 else: 123 client.indices.put_alias(name=index_base, index=new_index_name) 124 [end of warehouse/cli/search/reindex.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/cli/search/reindex.py b/warehouse/cli/search/reindex.py --- a/warehouse/cli/search/reindex.py +++ b/warehouse/cli/search/reindex.py @@ -70,6 +70,7 @@ random_token = binascii.hexlify(os.urandom(5)).decode("ascii") new_index_name = "{}-{}".format(index_base, random_token) doc_types = config.registry.get("search.doc_types", set()) + shards = config.registry.get("elasticsearch.shards", 1) # Create the new index with zero replicas and index refreshes disabled # while we are bulk indexing. @@ -77,10 +78,11 @@ new_index_name, doc_types, using=client, - shards=config.registry.get("elasticsearch.shards", 1), + shards=shards, replicas=0, interval="-1", ) + new_index.create(wait_for_active_shards=shards) # From this point on, if any error occurs, we want to be able to delete our # in progress index.
{"golden_diff": "diff --git a/warehouse/cli/search/reindex.py b/warehouse/cli/search/reindex.py\n--- a/warehouse/cli/search/reindex.py\n+++ b/warehouse/cli/search/reindex.py\n@@ -70,6 +70,7 @@\n random_token = binascii.hexlify(os.urandom(5)).decode(\"ascii\")\n new_index_name = \"{}-{}\".format(index_base, random_token)\n doc_types = config.registry.get(\"search.doc_types\", set())\n+ shards = config.registry.get(\"elasticsearch.shards\", 1)\n \n # Create the new index with zero replicas and index refreshes disabled\n # while we are bulk indexing.\n@@ -77,10 +78,11 @@\n new_index_name,\n doc_types,\n using=client,\n- shards=config.registry.get(\"elasticsearch.shards\", 1),\n+ shards=shards,\n replicas=0,\n interval=\"-1\",\n )\n+ new_index.create(wait_for_active_shards=shards)\n \n # From this point on, if any error occurs, we want to be able to delete our\n # in progress index.\n", "issue": "Search by topic broken ?\nBrowsing packages, then choosing the Internet / WWW9HTTP / Browser topics gives no result:\r\n\r\nhttps://pypi.org/search/?q=&o=&c=Topic+%3A%3A+Internet+%3A%3A+WWW%2FHTTP+%3A%3A+Browsers\r\n\r\nThere should be at least the [mechanoid package](https://pypi.org/project/mechanoid/)\r\n\r\nUsing firefox 50.1:0 on Ubuntu 16.04\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport os\n\nimport click\n\nfrom elasticsearch.helpers import parallel_bulk\nfrom sqlalchemy.orm import lazyload, joinedload, load_only\n\nfrom warehouse.cli.search import search\nfrom warehouse.db import Session\nfrom warehouse.packaging.models import Release, Project\nfrom warehouse.packaging.search import Project as ProjectDocType\nfrom warehouse.search import get_index\nfrom warehouse.utils.db import windowed_query\n\n\ndef _project_docs(db):\n releases = (\n db.query(Release)\n .options(load_only(\n \"summary\", \"description\", \"author\",\n \"author_email\", \"maintainer\", \"maintainer_email\",\n \"home_page\", \"download_url\", \"keywords\", \"platform\",\n \"created\"))\n .options(lazyload(\"*\"),\n (joinedload(Release.project)\n .load_only(\"normalized_name\", \"name\")\n .joinedload(Project.releases)\n .load_only(\"version\", \"is_prerelease\")),\n joinedload(Release._classifiers).load_only(\"classifier\"))\n .distinct(Release.name)\n .order_by(Release.name, Release._pypi_ordering.desc())\n )\n for release in windowed_query(releases, Release.name, 1000):\n p = ProjectDocType.from_db(release)\n p.full_clean()\n yield p.to_dict(include_meta=True)\n\n\[email protected]()\[email protected]_obj\ndef reindex(config, **kwargs):\n \"\"\"\n Recreate the Search Index.\n \"\"\"\n client = config.registry[\"elasticsearch.client\"]\n db = Session(bind=config.registry[\"sqlalchemy.engine\"])\n number_of_replicas = config.registry.get(\"elasticsearch.replicas\", 0)\n refresh_interval = config.registry.get(\"elasticsearch.interval\", \"1s\")\n\n # We use a randomly named index so that we can do a zero downtime reindex.\n # Essentially we'll use a randomly named index which we will use until all\n # of the data has been reindexed, at which point we'll point an alias at\n # our randomly named index, and then delete the old randomly named index.\n\n # Create the new index and associate all of our doc types with it.\n index_base = config.registry[\"elasticsearch.index\"]\n random_token = binascii.hexlify(os.urandom(5)).decode(\"ascii\")\n new_index_name = \"{}-{}\".format(index_base, random_token)\n doc_types = config.registry.get(\"search.doc_types\", set())\n\n # Create the new index with zero replicas and index refreshes disabled\n # while we are bulk indexing.\n new_index = get_index(\n new_index_name,\n doc_types,\n using=client,\n shards=config.registry.get(\"elasticsearch.shards\", 1),\n replicas=0,\n interval=\"-1\",\n )\n\n # From this point on, if any error occurs, we want to be able to delete our\n # in progress index.\n try:\n db.execute(\"SET statement_timeout = '600s'\")\n\n for _ in parallel_bulk(client, _project_docs(db)):\n pass\n except:\n new_index.delete()\n raise\n finally:\n db.rollback()\n db.close()\n\n # Now that we've finished indexing all of our data we can optimize it and\n # update the replicas and refresh intervals.\n client.indices.forcemerge(index=new_index_name)\n client.indices.put_settings(\n index=new_index_name,\n body={\n \"index\": {\n \"number_of_replicas\": number_of_replicas,\n \"refresh_interval\": refresh_interval,\n }\n }\n )\n\n # Point the alias at our new randomly named index and delete the old index.\n if client.indices.exists_alias(name=index_base):\n to_delete = set()\n actions = []\n for name in client.indices.get_alias(name=index_base):\n to_delete.add(name)\n actions.append({\"remove\": {\"index\": name, \"alias\": index_base}})\n actions.append({\"add\": {\"index\": new_index_name, \"alias\": index_base}})\n client.indices.update_aliases({\"actions\": actions})\n client.indices.delete(\",\".join(to_delete))\n else:\n client.indices.put_alias(name=index_base, index=new_index_name)\n", "path": "warehouse/cli/search/reindex.py"}]}
1,915
244
gh_patches_debug_29581
rasdani/github-patches
git_diff
svthalia__concrexit-2709
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Food API: AttributeError: 'Event' object has no attribute 'member_registration' Sentry Issue: [CONCREXIT-KG](https://sentry.io/organizations/thalia/issues/3768057031/?referrer=github_integration) ``` AttributeError: 'Event' object has no attribute 'member_registration' (11 additional frame(s) were not displayed) ... File "rest_framework/serializers.py", line 253, in data self._data = self.to_representation(self.instance) File "rest_framework/serializers.py", line 522, in to_representation ret[field.field_name] = field.to_representation(attribute) File "rest_framework/serializers.py", line 522, in to_representation ret[field.field_name] = field.to_representation(attribute) File "rest_framework/fields.py", line 1838, in to_representation return method(value) File "events/api/v2/serializers/event.py", line 83, in _registration_status if self.context["request"].member and len(instance.member_registration) > 0: ``` </issue> <code> [start of website/pizzas/api/v2/views.py] 1 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope 2 from rest_framework import filters as framework_filters 3 from rest_framework import status 4 from rest_framework.generics import ( 5 CreateAPIView, 6 DestroyAPIView, 7 ListAPIView, 8 RetrieveAPIView, 9 UpdateAPIView, 10 get_object_or_404, 11 ) 12 from rest_framework.response import Response 13 14 from payments.exceptions import PaymentError 15 from payments.services import delete_payment 16 from pizzas.api.v2 import filters 17 from pizzas.api.v2.serializers import ( 18 FoodOrderCreateSerializer, 19 FoodOrderSerializer, 20 FoodOrderUpdateSerializer, 21 ProductSerializer, 22 ) 23 from pizzas.api.v2.serializers.food_event import FoodEventSerializer 24 from pizzas.models import FoodEvent, FoodOrder, Product 25 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod 26 27 28 class FoodEventListView(ListAPIView): 29 """Returns an overview of all food events.""" 30 31 serializer_class = FoodEventSerializer 32 queryset = FoodEvent.objects.all() 33 filter_backends = ( 34 framework_filters.OrderingFilter, 35 filters.FoodEventDateFilterBackend, 36 ) 37 ordering_fields = ("start", "end") 38 permission_classes = [ 39 IsAuthenticatedOrTokenHasScope, 40 ] 41 required_scopes = ["food:read"] 42 43 44 class FoodEventDetailView(RetrieveAPIView): 45 """Returns one single food event.""" 46 47 serializer_class = FoodEventSerializer 48 queryset = FoodEvent.objects.all() 49 permission_classes = [ 50 IsAuthenticatedOrTokenHasScope, 51 ] 52 required_scopes = ["food:read"] 53 54 55 class FoodEventProductsListView(ListAPIView): 56 """Returns an overview of all products.""" 57 58 serializer_class = ProductSerializer 59 queryset = Product.available_products.all() 60 filter_backends = (framework_filters.SearchFilter,) 61 search_fields = ("name",) 62 permission_classes = [ 63 IsAuthenticatedOrTokenHasScope, 64 ] 65 required_scopes = ["food:read"] 66 67 68 class FoodEventOrderDetailView( 69 RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView 70 ): 71 """Returns details of a food order.""" 72 73 permission_classes = [ 74 IsAuthenticatedOrTokenHasScopeForMethod, 75 ] 76 required_scopes_per_method = { 77 "GET": ["food:read"], 78 "POST": ["food:order"], 79 "PUT": ["food:order"], 80 "PATCH": ["food:order"], 81 "DELETE": ["food:order"], 82 } 83 84 def get_serializer_class(self): 85 if self.request.method.lower() == "get": 86 return FoodOrderSerializer 87 if self.request.method.lower() == "post": 88 return FoodOrderCreateSerializer 89 return FoodOrderUpdateSerializer 90 91 def get_queryset(self): 92 return FoodOrder.objects.filter(food_event=self.food_event) 93 94 def get_object(self): 95 queryset = self.filter_queryset(self.get_queryset()) 96 obj = get_object_or_404(queryset, member=self.request.member) 97 98 # May raise a permission denied 99 self.check_object_permissions(self.request, obj) 100 101 return obj 102 103 def dispatch(self, request, *args, **kwargs): 104 self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get("pk")) 105 try: 106 return super().dispatch(request, *args, **kwargs) 107 except PaymentError as e: 108 return Response( 109 str(e), 110 status=status.HTTP_403_FORBIDDEN, 111 ) 112 113 def update(self, request, *args, **kwargs): 114 instance = self.get_object() 115 116 if instance.payment: 117 delete_payment(instance, member=request.member, ignore_change_window=True) 118 119 super().update(request, *args, **kwargs) 120 121 return Response( 122 FoodOrderSerializer(instance, context=self.get_serializer_context()).data 123 ) 124 125 def create(self, request, *args, **kwargs): 126 serializer = self.get_serializer(data=request.data) 127 serializer.is_valid(raise_exception=True) 128 self.perform_create(serializer) 129 return Response( 130 FoodOrderSerializer( 131 serializer.instance, context=self.get_serializer_context() 132 ).data, 133 status=status.HTTP_201_CREATED, 134 ) 135 [end of website/pizzas/api/v2/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/pizzas/api/v2/views.py b/website/pizzas/api/v2/views.py --- a/website/pizzas/api/v2/views.py +++ b/website/pizzas/api/v2/views.py @@ -1,3 +1,5 @@ +from django.db.models import Prefetch + from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope from rest_framework import filters as framework_filters from rest_framework import status @@ -11,6 +13,7 @@ ) from rest_framework.response import Response +from events.models.event_registration import EventRegistration from payments.exceptions import PaymentError from payments.services import delete_payment from pizzas.api.v2 import filters @@ -45,12 +48,25 @@ """Returns one single food event.""" serializer_class = FoodEventSerializer - queryset = FoodEvent.objects.all() permission_classes = [ IsAuthenticatedOrTokenHasScope, ] required_scopes = ["food:read"] + def get_queryset(self): + events = FoodEvent.objects.all() + if self.request.member: + events = events.prefetch_related( + Prefetch( + "event__eventregistration_set", + to_attr="member_registration", + queryset=EventRegistration.objects.filter( + member=self.request.member + ).select_properties("queue_position"), + ) + ) + return events + class FoodEventProductsListView(ListAPIView): """Returns an overview of all products."""
{"golden_diff": "diff --git a/website/pizzas/api/v2/views.py b/website/pizzas/api/v2/views.py\n--- a/website/pizzas/api/v2/views.py\n+++ b/website/pizzas/api/v2/views.py\n@@ -1,3 +1,5 @@\n+from django.db.models import Prefetch\n+\n from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\n from rest_framework import filters as framework_filters\n from rest_framework import status\n@@ -11,6 +13,7 @@\n )\n from rest_framework.response import Response\n \n+from events.models.event_registration import EventRegistration\n from payments.exceptions import PaymentError\n from payments.services import delete_payment\n from pizzas.api.v2 import filters\n@@ -45,12 +48,25 @@\n \"\"\"Returns one single food event.\"\"\"\n \n serializer_class = FoodEventSerializer\n- queryset = FoodEvent.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"food:read\"]\n \n+ def get_queryset(self):\n+ events = FoodEvent.objects.all()\n+ if self.request.member:\n+ events = events.prefetch_related(\n+ Prefetch(\n+ \"event__eventregistration_set\",\n+ to_attr=\"member_registration\",\n+ queryset=EventRegistration.objects.filter(\n+ member=self.request.member\n+ ).select_properties(\"queue_position\"),\n+ )\n+ )\n+ return events\n+\n \n class FoodEventProductsListView(ListAPIView):\n \"\"\"Returns an overview of all products.\"\"\"\n", "issue": "Food API: AttributeError: 'Event' object has no attribute 'member_registration'\nSentry Issue: [CONCREXIT-KG](https://sentry.io/organizations/thalia/issues/3768057031/?referrer=github_integration)\n\n```\nAttributeError: 'Event' object has no attribute 'member_registration'\n(11 additional frame(s) were not displayed)\n...\n File \"rest_framework/serializers.py\", line 253, in data\n self._data = self.to_representation(self.instance)\n File \"rest_framework/serializers.py\", line 522, in to_representation\n ret[field.field_name] = field.to_representation(attribute)\n File \"rest_framework/serializers.py\", line 522, in to_representation\n ret[field.field_name] = field.to_representation(attribute)\n File \"rest_framework/fields.py\", line 1838, in to_representation\n return method(value)\n File \"events/api/v2/serializers/event.py\", line 83, in _registration_status\n if self.context[\"request\"].member and len(instance.member_registration) > 0:\n```\n", "before_files": [{"content": "from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters as framework_filters\nfrom rest_framework import status\nfrom rest_framework.generics import (\n CreateAPIView,\n DestroyAPIView,\n ListAPIView,\n RetrieveAPIView,\n UpdateAPIView,\n get_object_or_404,\n)\nfrom rest_framework.response import Response\n\nfrom payments.exceptions import PaymentError\nfrom payments.services import delete_payment\nfrom pizzas.api.v2 import filters\nfrom pizzas.api.v2.serializers import (\n FoodOrderCreateSerializer,\n FoodOrderSerializer,\n FoodOrderUpdateSerializer,\n ProductSerializer,\n)\nfrom pizzas.api.v2.serializers.food_event import FoodEventSerializer\nfrom pizzas.models import FoodEvent, FoodOrder, Product\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass FoodEventListView(ListAPIView):\n \"\"\"Returns an overview of all food events.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.FoodEventDateFilterBackend,\n )\n ordering_fields = (\"start\", \"end\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventDetailView(RetrieveAPIView):\n \"\"\"Returns one single food event.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventProductsListView(ListAPIView):\n \"\"\"Returns an overview of all products.\"\"\"\n\n serializer_class = ProductSerializer\n queryset = Product.available_products.all()\n filter_backends = (framework_filters.SearchFilter,)\n search_fields = (\"name\",)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventOrderDetailView(\n RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView\n):\n \"\"\"Returns details of a food order.\"\"\"\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"food:read\"],\n \"POST\": [\"food:order\"],\n \"PUT\": [\"food:order\"],\n \"PATCH\": [\"food:order\"],\n \"DELETE\": [\"food:order\"],\n }\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"get\":\n return FoodOrderSerializer\n if self.request.method.lower() == \"post\":\n return FoodOrderCreateSerializer\n return FoodOrderUpdateSerializer\n\n def get_queryset(self):\n return FoodOrder.objects.filter(food_event=self.food_event)\n\n def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = get_object_or_404(queryset, member=self.request.member)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj\n\n def dispatch(self, request, *args, **kwargs):\n self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get(\"pk\"))\n try:\n return super().dispatch(request, *args, **kwargs)\n except PaymentError as e:\n return Response(\n str(e),\n status=status.HTTP_403_FORBIDDEN,\n )\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n\n if instance.payment:\n delete_payment(instance, member=request.member, ignore_change_window=True)\n\n super().update(request, *args, **kwargs)\n\n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data\n )\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response(\n FoodOrderSerializer(\n serializer.instance, context=self.get_serializer_context()\n ).data,\n status=status.HTTP_201_CREATED,\n )\n", "path": "website/pizzas/api/v2/views.py"}]}
1,955
326
gh_patches_debug_19964
rasdani/github-patches
git_diff
matrix-org__synapse-6151
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Uploading a too large file: wrong error code When uploading a too large file, synapse responds with a 413 (OK) but with errcode M_UNKNOWN. According to the [spec](https://matrix.org/docs/spec/client_server/latest#post-matrix-media-r0-upload), it should be "M_TOO_LARGE" Received responseCode: 413 Received responseBody: `{"errcode":"M_UNKNOWN","error":"Upload request body is too large"}` </issue> <code> [start of synapse/rest/media/v1/upload_resource.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2014-2016 OpenMarket Ltd 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 import logging 17 18 from twisted.web.server import NOT_DONE_YET 19 20 from synapse.api.errors import SynapseError 21 from synapse.http.server import ( 22 DirectServeResource, 23 respond_with_json, 24 wrap_json_request_handler, 25 ) 26 from synapse.http.servlet import parse_string 27 28 logger = logging.getLogger(__name__) 29 30 31 class UploadResource(DirectServeResource): 32 isLeaf = True 33 34 def __init__(self, hs, media_repo): 35 super().__init__() 36 37 self.media_repo = media_repo 38 self.filepaths = media_repo.filepaths 39 self.store = hs.get_datastore() 40 self.clock = hs.get_clock() 41 self.server_name = hs.hostname 42 self.auth = hs.get_auth() 43 self.max_upload_size = hs.config.max_upload_size 44 self.clock = hs.get_clock() 45 46 def render_OPTIONS(self, request): 47 respond_with_json(request, 200, {}, send_cors=True) 48 return NOT_DONE_YET 49 50 @wrap_json_request_handler 51 async def _async_render_POST(self, request): 52 requester = await self.auth.get_user_by_req(request) 53 # TODO: The checks here are a bit late. The content will have 54 # already been uploaded to a tmp file at this point 55 content_length = request.getHeader(b"Content-Length").decode("ascii") 56 if content_length is None: 57 raise SynapseError(msg="Request must specify a Content-Length", code=400) 58 if int(content_length) > self.max_upload_size: 59 raise SynapseError(msg="Upload request body is too large", code=413) 60 61 upload_name = parse_string(request, b"filename", encoding=None) 62 if upload_name: 63 try: 64 upload_name = upload_name.decode("utf8") 65 except UnicodeDecodeError: 66 raise SynapseError( 67 msg="Invalid UTF-8 filename parameter: %r" % (upload_name), code=400 68 ) 69 70 headers = request.requestHeaders 71 72 if headers.hasHeader(b"Content-Type"): 73 media_type = headers.getRawHeaders(b"Content-Type")[0].decode("ascii") 74 else: 75 raise SynapseError(msg="Upload request missing 'Content-Type'", code=400) 76 77 # if headers.hasHeader(b"Content-Disposition"): 78 # disposition = headers.getRawHeaders(b"Content-Disposition")[0] 79 # TODO(markjh): parse content-dispostion 80 81 content_uri = await self.media_repo.create_content( 82 media_type, upload_name, request.content, content_length, requester.user 83 ) 84 85 logger.info("Uploaded content with URI %r", content_uri) 86 87 respond_with_json(request, 200, {"content_uri": content_uri}, send_cors=True) 88 [end of synapse/rest/media/v1/upload_resource.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -17,7 +17,7 @@ from twisted.web.server import NOT_DONE_YET -from synapse.api.errors import SynapseError +from synapse.api.errors import Codes, SynapseError from synapse.http.server import ( DirectServeResource, respond_with_json, @@ -56,7 +56,11 @@ if content_length is None: raise SynapseError(msg="Request must specify a Content-Length", code=400) if int(content_length) > self.max_upload_size: - raise SynapseError(msg="Upload request body is too large", code=413) + raise SynapseError( + msg="Upload request body is too large", + code=413, + errcode=Codes.TOO_LARGE, + ) upload_name = parse_string(request, b"filename", encoding=None) if upload_name:
{"golden_diff": "diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py\n--- a/synapse/rest/media/v1/upload_resource.py\n+++ b/synapse/rest/media/v1/upload_resource.py\n@@ -17,7 +17,7 @@\n \n from twisted.web.server import NOT_DONE_YET\n \n-from synapse.api.errors import SynapseError\n+from synapse.api.errors import Codes, SynapseError\n from synapse.http.server import (\n DirectServeResource,\n respond_with_json,\n@@ -56,7 +56,11 @@\n if content_length is None:\n raise SynapseError(msg=\"Request must specify a Content-Length\", code=400)\n if int(content_length) > self.max_upload_size:\n- raise SynapseError(msg=\"Upload request body is too large\", code=413)\n+ raise SynapseError(\n+ msg=\"Upload request body is too large\",\n+ code=413,\n+ errcode=Codes.TOO_LARGE,\n+ )\n \n upload_name = parse_string(request, b\"filename\", encoding=None)\n if upload_name:\n", "issue": "Uploading a too large file: wrong error code\nWhen uploading a too large file, synapse responds with a 413 (OK) but with errcode M_UNKNOWN. According to the [spec](https://matrix.org/docs/spec/client_server/latest#post-matrix-media-r0-upload), it should be \"M_TOO_LARGE\"\r\n\r\nReceived responseCode: 413\r\nReceived responseBody:\r\n`{\"errcode\":\"M_UNKNOWN\",\"error\":\"Upload request body is too large\"}`\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom twisted.web.server import NOT_DONE_YET\n\nfrom synapse.api.errors import SynapseError\nfrom synapse.http.server import (\n DirectServeResource,\n respond_with_json,\n wrap_json_request_handler,\n)\nfrom synapse.http.servlet import parse_string\n\nlogger = logging.getLogger(__name__)\n\n\nclass UploadResource(DirectServeResource):\n isLeaf = True\n\n def __init__(self, hs, media_repo):\n super().__init__()\n\n self.media_repo = media_repo\n self.filepaths = media_repo.filepaths\n self.store = hs.get_datastore()\n self.clock = hs.get_clock()\n self.server_name = hs.hostname\n self.auth = hs.get_auth()\n self.max_upload_size = hs.config.max_upload_size\n self.clock = hs.get_clock()\n\n def render_OPTIONS(self, request):\n respond_with_json(request, 200, {}, send_cors=True)\n return NOT_DONE_YET\n\n @wrap_json_request_handler\n async def _async_render_POST(self, request):\n requester = await self.auth.get_user_by_req(request)\n # TODO: The checks here are a bit late. The content will have\n # already been uploaded to a tmp file at this point\n content_length = request.getHeader(b\"Content-Length\").decode(\"ascii\")\n if content_length is None:\n raise SynapseError(msg=\"Request must specify a Content-Length\", code=400)\n if int(content_length) > self.max_upload_size:\n raise SynapseError(msg=\"Upload request body is too large\", code=413)\n\n upload_name = parse_string(request, b\"filename\", encoding=None)\n if upload_name:\n try:\n upload_name = upload_name.decode(\"utf8\")\n except UnicodeDecodeError:\n raise SynapseError(\n msg=\"Invalid UTF-8 filename parameter: %r\" % (upload_name), code=400\n )\n\n headers = request.requestHeaders\n\n if headers.hasHeader(b\"Content-Type\"):\n media_type = headers.getRawHeaders(b\"Content-Type\")[0].decode(\"ascii\")\n else:\n raise SynapseError(msg=\"Upload request missing 'Content-Type'\", code=400)\n\n # if headers.hasHeader(b\"Content-Disposition\"):\n # disposition = headers.getRawHeaders(b\"Content-Disposition\")[0]\n # TODO(markjh): parse content-dispostion\n\n content_uri = await self.media_repo.create_content(\n media_type, upload_name, request.content, content_length, requester.user\n )\n\n logger.info(\"Uploaded content with URI %r\", content_uri)\n\n respond_with_json(request, 200, {\"content_uri\": content_uri}, send_cors=True)\n", "path": "synapse/rest/media/v1/upload_resource.py"}]}
1,539
249
gh_patches_debug_5760
rasdani/github-patches
git_diff
NVIDIA__NVFlare-363
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Users are not warned when running poc command </issue> <code> [start of nvflare/lighter/poc.py] 1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import argparse 16 import os 17 import pathlib 18 import shutil 19 20 21 def clone_client(num_clients: int): 22 current_path = os.getcwd() 23 poc_folder = os.path.join(current_path, "poc") 24 src_folder = os.path.join(poc_folder, "client") 25 for index in range(1, num_clients + 1): 26 dst_folder = os.path.join(poc_folder, f"site-{index}") 27 shutil.copytree(src_folder, dst_folder) 28 start_sh = open(os.path.join(dst_folder, "startup", "start.sh"), "rt") 29 content = start_sh.read() 30 start_sh.close() 31 content = content.replace("NNN", f"{index}") 32 with open(os.path.join(dst_folder, "startup", "start.sh"), "wt") as f: 33 f.write(content) 34 shutil.rmtree(src_folder) 35 36 37 def main(): 38 parser = argparse.ArgumentParser() 39 parser.add_argument("-n", "--num_clients", type=int, default=1, help="number of client folders to create") 40 41 args = parser.parse_args() 42 43 file_dir_path = pathlib.Path(__file__).parent.absolute() 44 poc_zip_path = file_dir_path.parent / "poc.zip" 45 poc_folder_path = file_dir_path.parent / "poc" 46 answer = input("This will delete poc folder in current directory and create a new one. Is it OK to proceed? (y/N) ") 47 if answer.strip().upper() == "Y": 48 dest_poc_folder = os.path.join(os.getcwd(), "poc") 49 shutil.rmtree(dest_poc_folder, ignore_errors=True) 50 try: 51 shutil.unpack_archive(poc_zip_path) 52 except shutil.ReadError: 53 print(f"poc.zip not found at {poc_zip_path}, try to use template poc folder") 54 try: 55 shutil.copytree(poc_folder_path, dest_poc_folder) 56 except BaseException: 57 print(f"Unable to copy poc folder from {poc_folder_path}. Exit") 58 exit(1) 59 for root, dirs, files in os.walk(dest_poc_folder): 60 for file in files: 61 if file.endswith(".sh"): 62 os.chmod(os.path.join(root, file), 0o755) 63 clone_client(args.num_clients) 64 print("Successfully creating poc folder. Please read poc/Readme.rst for user guide.") 65 66 67 if __name__ == "__main__": 68 main() 69 [end of nvflare/lighter/poc.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nvflare/lighter/poc.py b/nvflare/lighter/poc.py --- a/nvflare/lighter/poc.py +++ b/nvflare/lighter/poc.py @@ -62,6 +62,7 @@ os.chmod(os.path.join(root, file), 0o755) clone_client(args.num_clients) print("Successfully creating poc folder. Please read poc/Readme.rst for user guide.") + print("\n\nWARNING:\n******* Files generated by this poc command are NOT intended for production environments.") if __name__ == "__main__":
{"golden_diff": "diff --git a/nvflare/lighter/poc.py b/nvflare/lighter/poc.py\n--- a/nvflare/lighter/poc.py\n+++ b/nvflare/lighter/poc.py\n@@ -62,6 +62,7 @@\n os.chmod(os.path.join(root, file), 0o755)\n clone_client(args.num_clients)\n print(\"Successfully creating poc folder. Please read poc/Readme.rst for user guide.\")\n+ print(\"\\n\\nWARNING:\\n******* Files generated by this poc command are NOT intended for production environments.\")\n \n \n if __name__ == \"__main__\":\n", "issue": "Users are not warned when running poc command\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport pathlib\nimport shutil\n\n\ndef clone_client(num_clients: int):\n current_path = os.getcwd()\n poc_folder = os.path.join(current_path, \"poc\")\n src_folder = os.path.join(poc_folder, \"client\")\n for index in range(1, num_clients + 1):\n dst_folder = os.path.join(poc_folder, f\"site-{index}\")\n shutil.copytree(src_folder, dst_folder)\n start_sh = open(os.path.join(dst_folder, \"startup\", \"start.sh\"), \"rt\")\n content = start_sh.read()\n start_sh.close()\n content = content.replace(\"NNN\", f\"{index}\")\n with open(os.path.join(dst_folder, \"startup\", \"start.sh\"), \"wt\") as f:\n f.write(content)\n shutil.rmtree(src_folder)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-n\", \"--num_clients\", type=int, default=1, help=\"number of client folders to create\")\n\n args = parser.parse_args()\n\n file_dir_path = pathlib.Path(__file__).parent.absolute()\n poc_zip_path = file_dir_path.parent / \"poc.zip\"\n poc_folder_path = file_dir_path.parent / \"poc\"\n answer = input(\"This will delete poc folder in current directory and create a new one. Is it OK to proceed? (y/N) \")\n if answer.strip().upper() == \"Y\":\n dest_poc_folder = os.path.join(os.getcwd(), \"poc\")\n shutil.rmtree(dest_poc_folder, ignore_errors=True)\n try:\n shutil.unpack_archive(poc_zip_path)\n except shutil.ReadError:\n print(f\"poc.zip not found at {poc_zip_path}, try to use template poc folder\")\n try:\n shutil.copytree(poc_folder_path, dest_poc_folder)\n except BaseException:\n print(f\"Unable to copy poc folder from {poc_folder_path}. Exit\")\n exit(1)\n for root, dirs, files in os.walk(dest_poc_folder):\n for file in files:\n if file.endswith(\".sh\"):\n os.chmod(os.path.join(root, file), 0o755)\n clone_client(args.num_clients)\n print(\"Successfully creating poc folder. Please read poc/Readme.rst for user guide.\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "nvflare/lighter/poc.py"}]}
1,332
135
gh_patches_debug_14571
rasdani/github-patches
git_diff
wagtail__wagtail-1811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> wagtailsearch.views.frontend needs updating to use non-deprecated search method Currently using Page.search, which is deprecated in Wagtail 1.2 - should be updated to use the QuerySet-based mechanism instead. https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailsearch/views/frontend.py#L41 </issue> <code> [start of wagtail/wagtailsearch/views/frontend.py] 1 from django.conf import settings 2 from django.shortcuts import render 3 from django.http import JsonResponse 4 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger 5 6 from wagtail.wagtailcore import models 7 from wagtail.wagtailsearch.models import Query 8 9 10 def search( 11 request, 12 template=None, 13 template_ajax=None, 14 results_per_page=10, 15 use_json=False, 16 json_attrs=['title', 'url'], 17 show_unpublished=False, 18 search_title_only=False, 19 extra_filters={}, 20 path=None): 21 22 # Get default templates 23 if template is None: 24 if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE'): 25 template = settings.WAGTAILSEARCH_RESULTS_TEMPLATE 26 else: 27 template = 'wagtailsearch/search_results.html' 28 29 if template_ajax is None: 30 if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX'): 31 template_ajax = settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX 32 else: 33 template_ajax = template 34 35 # Get query string and page from GET paramters 36 query_string = request.GET.get('q', '') 37 page = request.GET.get('page', request.GET.get('p', 1)) 38 39 # Search 40 if query_string != '': 41 search_results = models.Page.search( 42 query_string, 43 show_unpublished=show_unpublished, 44 search_title_only=search_title_only, 45 extra_filters=extra_filters, 46 path=path if path else request.site.root_page.path 47 ) 48 49 # Get query object 50 query = Query.get(query_string) 51 52 # Add hit 53 query.add_hit() 54 55 # Pagination 56 paginator = Paginator(search_results, results_per_page) 57 try: 58 search_results = paginator.page(page) 59 except PageNotAnInteger: 60 search_results = paginator.page(1) 61 except EmptyPage: 62 search_results = paginator.page(paginator.num_pages) 63 else: 64 query = None 65 search_results = None 66 67 if use_json: 68 # Return a json response 69 if search_results: 70 search_results_json = [] 71 for result in search_results: 72 result_specific = result.specific 73 74 search_results_json.append(dict( 75 (attr, getattr(result_specific, attr)) 76 for attr in json_attrs 77 if hasattr(result_specific, attr) 78 )) 79 80 return JsonResponse(search_results_json, safe=False) 81 else: 82 return JsonResponse([], safe=False) 83 else: # Render a template 84 if request.is_ajax() and template_ajax: 85 template = template_ajax 86 87 return render(request, template, dict( 88 query_string=query_string, 89 search_results=search_results, 90 is_ajax=request.is_ajax(), 91 query=query 92 )) 93 [end of wagtail/wagtailsearch/views/frontend.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailsearch/views/frontend.py b/wagtail/wagtailsearch/views/frontend.py --- a/wagtail/wagtailsearch/views/frontend.py +++ b/wagtail/wagtailsearch/views/frontend.py @@ -38,13 +38,18 @@ # Search if query_string != '': - search_results = models.Page.search( - query_string, - show_unpublished=show_unpublished, - search_title_only=search_title_only, - extra_filters=extra_filters, - path=path if path else request.site.root_page.path - ) + pages = models.Page.objects.filter(path__startswith=(path or request.site.root_page.path)) + + if not show_unpublished: + pages = pages.live() + + if extra_filters: + pages = pages.filter(**extra_filters) + + if search_title_only: + search_results = pages.search(query_string, fields=['title']) + else: + search_results = pages.search(query_string) # Get query object query = Query.get(query_string)
{"golden_diff": "diff --git a/wagtail/wagtailsearch/views/frontend.py b/wagtail/wagtailsearch/views/frontend.py\n--- a/wagtail/wagtailsearch/views/frontend.py\n+++ b/wagtail/wagtailsearch/views/frontend.py\n@@ -38,13 +38,18 @@\n \n # Search\n if query_string != '':\n- search_results = models.Page.search(\n- query_string,\n- show_unpublished=show_unpublished,\n- search_title_only=search_title_only,\n- extra_filters=extra_filters,\n- path=path if path else request.site.root_page.path\n- )\n+ pages = models.Page.objects.filter(path__startswith=(path or request.site.root_page.path))\n+\n+ if not show_unpublished:\n+ pages = pages.live()\n+\n+ if extra_filters:\n+ pages = pages.filter(**extra_filters)\n+\n+ if search_title_only:\n+ search_results = pages.search(query_string, fields=['title'])\n+ else:\n+ search_results = pages.search(query_string)\n \n # Get query object\n query = Query.get(query_string)\n", "issue": "wagtailsearch.views.frontend needs updating to use non-deprecated search method\nCurrently using Page.search, which is deprecated in Wagtail 1.2 - should be updated to use the QuerySet-based mechanism instead.\n\nhttps://github.com/torchbox/wagtail/blob/master/wagtail/wagtailsearch/views/frontend.py#L41\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom wagtail.wagtailcore import models\nfrom wagtail.wagtailsearch.models import Query\n\n\ndef search(\n request,\n template=None,\n template_ajax=None,\n results_per_page=10,\n use_json=False,\n json_attrs=['title', 'url'],\n show_unpublished=False,\n search_title_only=False,\n extra_filters={},\n path=None):\n\n # Get default templates\n if template is None:\n if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE'):\n template = settings.WAGTAILSEARCH_RESULTS_TEMPLATE\n else:\n template = 'wagtailsearch/search_results.html'\n\n if template_ajax is None:\n if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX'):\n template_ajax = settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX\n else:\n template_ajax = template\n\n # Get query string and page from GET paramters\n query_string = request.GET.get('q', '')\n page = request.GET.get('page', request.GET.get('p', 1))\n\n # Search\n if query_string != '':\n search_results = models.Page.search(\n query_string,\n show_unpublished=show_unpublished,\n search_title_only=search_title_only,\n extra_filters=extra_filters,\n path=path if path else request.site.root_page.path\n )\n\n # Get query object\n query = Query.get(query_string)\n\n # Add hit\n query.add_hit()\n\n # Pagination\n paginator = Paginator(search_results, results_per_page)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n else:\n query = None\n search_results = None\n\n if use_json:\n # Return a json response\n if search_results:\n search_results_json = []\n for result in search_results:\n result_specific = result.specific\n\n search_results_json.append(dict(\n (attr, getattr(result_specific, attr))\n for attr in json_attrs\n if hasattr(result_specific, attr)\n ))\n\n return JsonResponse(search_results_json, safe=False)\n else:\n return JsonResponse([], safe=False)\n else: # Render a template\n if request.is_ajax() and template_ajax:\n template = template_ajax\n\n return render(request, template, dict(\n query_string=query_string,\n search_results=search_results,\n is_ajax=request.is_ajax(),\n query=query\n ))\n", "path": "wagtail/wagtailsearch/views/frontend.py"}]}
1,364
239
gh_patches_debug_61667
rasdani/github-patches
git_diff
pallets__click-2714
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Docs wrongly links PRs and Issues to flask ![image](https://github.com/pallets/click/assets/13086194/cc68ea55-7c69-4e24-a2dd-f296d54a9c61) Environment: - Python version: N/A - Click version: N/A Docs wrongly links PRs and Issues to flask ![image](https://github.com/pallets/click/assets/13086194/cc68ea55-7c69-4e24-a2dd-f296d54a9c61) Environment: - Python version: N/A - Click version: N/A </issue> <code> [start of docs/conf.py] 1 from pallets_sphinx_themes import get_version 2 from pallets_sphinx_themes import ProjectLink 3 4 # Project -------------------------------------------------------------- 5 6 project = "Click" 7 copyright = "2014 Pallets" 8 author = "Pallets" 9 release, version = get_version("Click") 10 11 # General -------------------------------------------------------------- 12 13 default_role = "code" 14 extensions = [ 15 "sphinx.ext.autodoc", 16 "sphinx.ext.extlinks", 17 "sphinx.ext.intersphinx", 18 "sphinx_tabs.tabs", 19 "sphinxcontrib.log_cabinet", 20 "pallets_sphinx_themes", 21 ] 22 autodoc_member_order = "bysource" 23 autodoc_typehints = "description" 24 autodoc_preserve_defaults = True 25 extlinks = { 26 "issue": ("https://github.com/pallets/flask/issues/%s", "#%s"), 27 "pr": ("https://github.com/pallets/flask/pull/%s", "#%s"), 28 } 29 intersphinx_mapping = { 30 "python": ("https://docs.python.org/3/", None), 31 } 32 33 # HTML ----------------------------------------------------------------- 34 35 html_theme = "click" 36 html_theme_options = {"index_sidebar_logo": False} 37 html_context = { 38 "project_links": [ 39 ProjectLink("Donate", "https://palletsprojects.com/donate"), 40 ProjectLink("PyPI Releases", "https://pypi.org/project/click/"), 41 ProjectLink("Source Code", "https://github.com/pallets/click/"), 42 ProjectLink("Issue Tracker", "https://github.com/pallets/click/issues/"), 43 ProjectLink("Chat", "https://discord.gg/pallets"), 44 ] 45 } 46 html_sidebars = { 47 "index": ["project.html", "localtoc.html", "searchbox.html", "ethicalads.html"], 48 "**": ["localtoc.html", "relations.html", "searchbox.html", "ethicalads.html"], 49 } 50 singlehtml_sidebars = {"index": ["project.html", "localtoc.html", "ethicalads.html"]} 51 html_static_path = ["_static"] 52 html_favicon = "_static/click-icon.png" 53 html_logo = "_static/click-logo-sidebar.png" 54 html_title = f"Click Documentation ({version})" 55 html_show_sourcelink = False 56 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -23,8 +23,8 @@ autodoc_typehints = "description" autodoc_preserve_defaults = True extlinks = { - "issue": ("https://github.com/pallets/flask/issues/%s", "#%s"), - "pr": ("https://github.com/pallets/flask/pull/%s", "#%s"), + "issue": ("https://github.com/pallets/click/issues/%s", "#%s"), + "pr": ("https://github.com/pallets/click/pull/%s", "#%s"), } intersphinx_mapping = { "python": ("https://docs.python.org/3/", None),
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -23,8 +23,8 @@\n autodoc_typehints = \"description\"\n autodoc_preserve_defaults = True\n extlinks = {\n- \"issue\": (\"https://github.com/pallets/flask/issues/%s\", \"#%s\"),\n- \"pr\": (\"https://github.com/pallets/flask/pull/%s\", \"#%s\"),\n+ \"issue\": (\"https://github.com/pallets/click/issues/%s\", \"#%s\"),\n+ \"pr\": (\"https://github.com/pallets/click/pull/%s\", \"#%s\"),\n }\n intersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n", "issue": "Docs wrongly links PRs and Issues to flask\n![image](https://github.com/pallets/click/assets/13086194/cc68ea55-7c69-4e24-a2dd-f296d54a9c61)\r\n\r\nEnvironment:\r\n\r\n- Python version: N/A\r\n- Click version: N/A\r\n\nDocs wrongly links PRs and Issues to flask\n![image](https://github.com/pallets/click/assets/13086194/cc68ea55-7c69-4e24-a2dd-f296d54a9c61)\r\n\r\nEnvironment:\r\n\r\n- Python version: N/A\r\n- Click version: N/A\r\n\n", "before_files": [{"content": "from pallets_sphinx_themes import get_version\nfrom pallets_sphinx_themes import ProjectLink\n\n# Project --------------------------------------------------------------\n\nproject = \"Click\"\ncopyright = \"2014 Pallets\"\nauthor = \"Pallets\"\nrelease, version = get_version(\"Click\")\n\n# General --------------------------------------------------------------\n\ndefault_role = \"code\"\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.intersphinx\",\n \"sphinx_tabs.tabs\",\n \"sphinxcontrib.log_cabinet\",\n \"pallets_sphinx_themes\",\n]\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\nautodoc_preserve_defaults = True\nextlinks = {\n \"issue\": (\"https://github.com/pallets/flask/issues/%s\", \"#%s\"),\n \"pr\": (\"https://github.com/pallets/flask/pull/%s\", \"#%s\"),\n}\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n}\n\n# HTML -----------------------------------------------------------------\n\nhtml_theme = \"click\"\nhtml_theme_options = {\"index_sidebar_logo\": False}\nhtml_context = {\n \"project_links\": [\n ProjectLink(\"Donate\", \"https://palletsprojects.com/donate\"),\n ProjectLink(\"PyPI Releases\", \"https://pypi.org/project/click/\"),\n ProjectLink(\"Source Code\", \"https://github.com/pallets/click/\"),\n ProjectLink(\"Issue Tracker\", \"https://github.com/pallets/click/issues/\"),\n ProjectLink(\"Chat\", \"https://discord.gg/pallets\"),\n ]\n}\nhtml_sidebars = {\n \"index\": [\"project.html\", \"localtoc.html\", \"searchbox.html\", \"ethicalads.html\"],\n \"**\": [\"localtoc.html\", \"relations.html\", \"searchbox.html\", \"ethicalads.html\"],\n}\nsinglehtml_sidebars = {\"index\": [\"project.html\", \"localtoc.html\", \"ethicalads.html\"]}\nhtml_static_path = [\"_static\"]\nhtml_favicon = \"_static/click-icon.png\"\nhtml_logo = \"_static/click-logo-sidebar.png\"\nhtml_title = f\"Click Documentation ({version})\"\nhtml_show_sourcelink = False\n", "path": "docs/conf.py"}]}
1,273
174
gh_patches_debug_1188
rasdani/github-patches
git_diff
spack__spack-18268
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Installation issue: dbus (missing libsm dependency) <!-- Thanks for taking the time to report this build failure. To proceed with the report please: 1. Title the issue "Installation issue: <name-of-the-package>". 2. Provide the information required below. We encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! --> I am trying to install visit, and I am hitting an error when it tries to install dbus. This appears to be due to dbus depending on libSM (and through that libuuid), but not declaring that dependency in Spack. So in my build of visit, the libuuid dependency is picked up and set to use the spack installed libuuid via some other package visit depends on, but dbus ends up using the system installed libSM, and there is a mismatch between the two. But the dbus package should not be linking against system libSM. ### Steps to reproduce the issue I am trying to install visit, and I am hitting an error when it tries to install dbus. This appears to be spack install [email protected]%[email protected] ^[email protected] eventually aborts with CCLD dbus-run-session /lib/../lib64/libSM.so: undefined reference to `uuid_unparse_lower@UUID_1.0' /lib/../lib64/libSM.so: undefined reference to `uuid_generate@UUID_1.0' collect2: error: ld returned 1 exit status Error appears due to the attempt to link the system /lib64/libSM.so ### Information on your system spack debug report * **Spack:** 0.14.2 * **Python:** 2.7.16 * **Platform:** linux-rhel7-broadwell ### Additional information [spack-build-env.txt](https://github.com/spack/spack/files/5125717/spack-build-env.txt) [spack-build-out.txt](https://github.com/spack/spack/files/5125718/spack-build-out.txt) No maintainers for dbus ### General information <!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. --> - [x ] I have run `spack debug report` and reported the version of Spack/Python/Platform - [x] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers - [x ] I have uploaded the build log and environment files - [ x] I have searched the issues of this repo and believe this is not a duplicate </issue> <code> [start of var/spack/repos/builtin/packages/dbus/package.py] 1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack import * 7 8 9 class Dbus(Package): 10 """D-Bus is a message bus system, a simple way for applications to 11 talk to one another. D-Bus supplies both a system daemon (for 12 events such new hardware device printer queue ) and a 13 per-user-login-session daemon (for general IPC needs among user 14 applications). Also, the message bus is built on top of a 15 general one-to-one message passing framework, which can be used 16 by any two applications to communicate directly (without going 17 through the message bus daemon).""" 18 19 homepage = "http://dbus.freedesktop.org/" 20 url = "http://dbus.freedesktop.org/releases/dbus/dbus-1.8.8.tar.gz" 21 22 version('1.12.8', sha256='e2dc99e7338303393b6663a98320aba6a63421bcdaaf571c8022f815e5896eb3') 23 version('1.11.2', sha256='5abc4c57686fa82669ad0039830788f9b03fdc4fff487f0ccf6c9d56ba2645c9') 24 version('1.9.0', sha256='38ebc695b5cbbd239e0f149aa5d5395f0051a0fec1b74f21ff2921b22a31c171') 25 version('1.8.8', sha256='dfab263649a979d0fff64a30cac374891a8e9940350e41f3bbd7679af32bd1fd') 26 version('1.8.6', sha256='eded83ca007b719f32761e60fd8b9ffd0f5796a4caf455b01b5a5ef740ebd23f') 27 version('1.8.4', sha256='3ef63dc8d0111042071ee7f7bafa0650c6ce2d7be957ef0b7ec269495a651ff8') 28 version('1.8.2', sha256='5689f7411165adc953f37974e276a3028db94447c76e8dd92efe910c6d3bae08') 29 30 depends_on('pkgconfig', type='build') 31 depends_on('expat') 32 depends_on('glib') 33 34 def install(self, spec, prefix): 35 configure( 36 "--prefix=%s" % prefix, 37 "--disable-systemd", 38 "--disable-launchd") 39 make() 40 make("install") 41 42 # dbus needs a machine id generated after install 43 dbus_uuidgen = Executable(join_path(prefix.bin, 'dbus-uuidgen')) 44 dbus_uuidgen('--ensure') 45 [end of var/spack/repos/builtin/packages/dbus/package.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/var/spack/repos/builtin/packages/dbus/package.py b/var/spack/repos/builtin/packages/dbus/package.py --- a/var/spack/repos/builtin/packages/dbus/package.py +++ b/var/spack/repos/builtin/packages/dbus/package.py @@ -30,6 +30,7 @@ depends_on('pkgconfig', type='build') depends_on('expat') depends_on('glib') + depends_on('libsm') def install(self, spec, prefix): configure(
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/dbus/package.py b/var/spack/repos/builtin/packages/dbus/package.py\n--- a/var/spack/repos/builtin/packages/dbus/package.py\n+++ b/var/spack/repos/builtin/packages/dbus/package.py\n@@ -30,6 +30,7 @@\n depends_on('pkgconfig', type='build')\n depends_on('expat')\n depends_on('glib')\n+ depends_on('libsm')\n \n def install(self, spec, prefix):\n configure(\n", "issue": "Installation issue: dbus (missing libsm dependency)\n\r\n<!-- Thanks for taking the time to report this build failure. To proceed with the report please:\r\n\r\n1. Title the issue \"Installation issue: <name-of-the-package>\".\r\n2. Provide the information required below.\r\n\r\nWe encourage you to try, as much as possible, to reduce your problem to the minimal example that still reproduces the issue. That would help us a lot in fixing it quickly and effectively! -->\r\n\r\nI am trying to install visit, and I am hitting an error when it tries to install dbus. This appears\r\nto be due to dbus depending on libSM (and through that libuuid), but not declaring that dependency in Spack. So in my build of visit, the libuuid dependency is picked up and set to use the spack installed libuuid via some other package visit depends on, but dbus ends up using the system installed libSM, and there is a mismatch between the two. But the dbus\r\npackage should not be linking against system libSM.\r\n\r\n### Steps to reproduce the issue\r\nI am trying to install visit, and I am hitting an error when it tries to install dbus. This appears\r\nto be \r\nspack install [email protected]%[email protected] ^[email protected]\r\neventually aborts with\r\n CCLD dbus-run-session\r\n/lib/../lib64/libSM.so: undefined reference to `uuid_unparse_lower@UUID_1.0'\r\n/lib/../lib64/libSM.so: undefined reference to `uuid_generate@UUID_1.0'\r\ncollect2: error: ld returned 1 exit status\r\n\r\nError appears due to the attempt to link the system /lib64/libSM.so\r\n\r\n### Information on your system\r\nspack debug report\r\n* **Spack:** 0.14.2\r\n* **Python:** 2.7.16\r\n* **Platform:** linux-rhel7-broadwell\r\n\r\n### Additional information\r\n[spack-build-env.txt](https://github.com/spack/spack/files/5125717/spack-build-env.txt)\r\n[spack-build-out.txt](https://github.com/spack/spack/files/5125718/spack-build-out.txt)\r\n\r\nNo maintainers for dbus\r\n\r\n### General information\r\n\r\n<!-- These boxes can be checked by replacing [ ] with [x] or by clicking them after submitting the issue. -->\r\n- [x ] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [x] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers\r\n- [x ] I have uploaded the build log and environment files\r\n- [ x] I have searched the issues of this repo and believe this is not a duplicate\r\n\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Dbus(Package):\n \"\"\"D-Bus is a message bus system, a simple way for applications to\n talk to one another. D-Bus supplies both a system daemon (for\n events such new hardware device printer queue ) and a\n per-user-login-session daemon (for general IPC needs among user\n applications). Also, the message bus is built on top of a\n general one-to-one message passing framework, which can be used\n by any two applications to communicate directly (without going\n through the message bus daemon).\"\"\"\n\n homepage = \"http://dbus.freedesktop.org/\"\n url = \"http://dbus.freedesktop.org/releases/dbus/dbus-1.8.8.tar.gz\"\n\n version('1.12.8', sha256='e2dc99e7338303393b6663a98320aba6a63421bcdaaf571c8022f815e5896eb3')\n version('1.11.2', sha256='5abc4c57686fa82669ad0039830788f9b03fdc4fff487f0ccf6c9d56ba2645c9')\n version('1.9.0', sha256='38ebc695b5cbbd239e0f149aa5d5395f0051a0fec1b74f21ff2921b22a31c171')\n version('1.8.8', sha256='dfab263649a979d0fff64a30cac374891a8e9940350e41f3bbd7679af32bd1fd')\n version('1.8.6', sha256='eded83ca007b719f32761e60fd8b9ffd0f5796a4caf455b01b5a5ef740ebd23f')\n version('1.8.4', sha256='3ef63dc8d0111042071ee7f7bafa0650c6ce2d7be957ef0b7ec269495a651ff8')\n version('1.8.2', sha256='5689f7411165adc953f37974e276a3028db94447c76e8dd92efe910c6d3bae08')\n\n depends_on('pkgconfig', type='build')\n depends_on('expat')\n depends_on('glib')\n\n def install(self, spec, prefix):\n configure(\n \"--prefix=%s\" % prefix,\n \"--disable-systemd\",\n \"--disable-launchd\")\n make()\n make(\"install\")\n\n # dbus needs a machine id generated after install\n dbus_uuidgen = Executable(join_path(prefix.bin, 'dbus-uuidgen'))\n dbus_uuidgen('--ensure')\n", "path": "var/spack/repos/builtin/packages/dbus/package.py"}]}
2,039
115
gh_patches_debug_23335
rasdani/github-patches
git_diff
Cog-Creators__Red-DiscordBot-2919
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Mod] Bot doesn't record name changes correctly # Other bugs #### What were you trying to do? Check past names using `[p]names` command #### What were you expecting to happen? Get past names #### What actually happened? I didn't get any names, because we're checking for it in wrong event (`on_member_update` instead of `on_user_update`). #### How can we reproduce this issue? 1. Load mod cog 2. Change username 3. Use `[p]names` on yourself. </issue> <code> [start of redbot/cogs/mod/events.py] 1 import logging 2 from datetime import datetime 3 from collections import defaultdict, deque 4 5 import discord 6 from redbot.core import i18n, modlog, commands 7 from redbot.core.utils.mod import is_mod_or_superior 8 from .abc import MixinMeta 9 10 _ = i18n.Translator("Mod", __file__) 11 log = logging.getLogger("red.mod") 12 13 14 class Events(MixinMeta): 15 """ 16 This is a mixin for the core mod cog 17 Has a bunch of things split off to here. 18 """ 19 20 async def check_duplicates(self, message): 21 guild = message.guild 22 author = message.author 23 24 guild_cache = self.cache.get(guild.id, None) 25 if guild_cache is None: 26 repeats = await self.settings.guild(guild).delete_repeats() 27 if repeats == -1: 28 return False 29 guild_cache = self.cache[guild.id] = defaultdict(lambda: deque(maxlen=repeats)) 30 31 if not message.content: 32 return False 33 34 guild_cache[author].append(message.content) 35 msgs = guild_cache[author] 36 if len(msgs) == msgs.maxlen and len(set(msgs)) == 1: 37 try: 38 await message.delete() 39 return True 40 except discord.HTTPException: 41 pass 42 return False 43 44 async def check_mention_spam(self, message): 45 guild = message.guild 46 author = message.author 47 48 max_mentions = await self.settings.guild(guild).ban_mention_spam() 49 if max_mentions: 50 mentions = set(message.mentions) 51 if len(mentions) >= max_mentions: 52 try: 53 await guild.ban(author, reason=_("Mention spam (Autoban)")) 54 except discord.HTTPException: 55 log.info( 56 "Failed to ban member for mention spam in server {}.".format(guild.id) 57 ) 58 else: 59 try: 60 await modlog.create_case( 61 self.bot, 62 guild, 63 message.created_at, 64 "ban", 65 author, 66 guild.me, 67 _("Mention spam (Autoban)"), 68 until=None, 69 channel=None, 70 ) 71 except RuntimeError as e: 72 print(e) 73 return False 74 return True 75 return False 76 77 @commands.Cog.listener() 78 async def on_message(self, message): 79 author = message.author 80 if message.guild is None or self.bot.user == author: 81 return 82 valid_user = isinstance(author, discord.Member) and not author.bot 83 if not valid_user: 84 return 85 86 # Bots and mods or superior are ignored from the filter 87 mod_or_superior = await is_mod_or_superior(self.bot, obj=author) 88 if mod_or_superior: 89 return 90 # As are anyone configured to be 91 if await self.bot.is_automod_immune(message): 92 return 93 deleted = await self.check_duplicates(message) 94 if not deleted: 95 await self.check_mention_spam(message) 96 97 @commands.Cog.listener() 98 async def on_member_update(self, before: discord.Member, after: discord.Member): 99 if before.name != after.name: 100 async with self.settings.user(before).past_names() as name_list: 101 while None in name_list: # clean out null entries from a bug 102 name_list.remove(None) 103 if after.name in name_list: 104 # Ensure order is maintained without duplicates occuring 105 name_list.remove(after.name) 106 name_list.append(after.name) 107 while len(name_list) > 20: 108 name_list.pop(0) 109 110 if before.nick != after.nick and after.nick is not None: 111 async with self.settings.member(before).past_nicks() as nick_list: 112 while None in nick_list: # clean out null entries from a bug 113 nick_list.remove(None) 114 if after.nick in nick_list: 115 nick_list.remove(after.nick) 116 nick_list.append(after.nick) 117 while len(nick_list) > 20: 118 nick_list.pop(0) 119 [end of redbot/cogs/mod/events.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redbot/cogs/mod/events.py b/redbot/cogs/mod/events.py --- a/redbot/cogs/mod/events.py +++ b/redbot/cogs/mod/events.py @@ -95,7 +95,7 @@ await self.check_mention_spam(message) @commands.Cog.listener() - async def on_member_update(self, before: discord.Member, after: discord.Member): + async def on_user_update(self, before: discord.User, after: discord.User): if before.name != after.name: async with self.settings.user(before).past_names() as name_list: while None in name_list: # clean out null entries from a bug @@ -107,6 +107,8 @@ while len(name_list) > 20: name_list.pop(0) + @commands.Cog.listener() + async def on_member_update(self, before: discord.Member, after: discord.Member): if before.nick != after.nick and after.nick is not None: async with self.settings.member(before).past_nicks() as nick_list: while None in nick_list: # clean out null entries from a bug
{"golden_diff": "diff --git a/redbot/cogs/mod/events.py b/redbot/cogs/mod/events.py\n--- a/redbot/cogs/mod/events.py\n+++ b/redbot/cogs/mod/events.py\n@@ -95,7 +95,7 @@\n await self.check_mention_spam(message)\n \n @commands.Cog.listener()\n- async def on_member_update(self, before: discord.Member, after: discord.Member):\n+ async def on_user_update(self, before: discord.User, after: discord.User):\n if before.name != after.name:\n async with self.settings.user(before).past_names() as name_list:\n while None in name_list: # clean out null entries from a bug\n@@ -107,6 +107,8 @@\n while len(name_list) > 20:\n name_list.pop(0)\n \n+ @commands.Cog.listener()\n+ async def on_member_update(self, before: discord.Member, after: discord.Member):\n if before.nick != after.nick and after.nick is not None:\n async with self.settings.member(before).past_nicks() as nick_list:\n while None in nick_list: # clean out null entries from a bug\n", "issue": "[Mod] Bot doesn't record name changes correctly\n# Other bugs\r\n\r\n#### What were you trying to do?\r\n\r\nCheck past names using `[p]names` command\r\n\r\n#### What were you expecting to happen?\r\n\r\nGet past names\r\n\r\n#### What actually happened?\r\n\r\nI didn't get any names, because we're checking for it in wrong event (`on_member_update` instead of `on_user_update`).\r\n\r\n#### How can we reproduce this issue?\r\n\r\n1. Load mod cog\r\n2. Change username\r\n3. Use `[p]names` on yourself.\r\n\n", "before_files": [{"content": "import logging\nfrom datetime import datetime\nfrom collections import defaultdict, deque\n\nimport discord\nfrom redbot.core import i18n, modlog, commands\nfrom redbot.core.utils.mod import is_mod_or_superior\nfrom .abc import MixinMeta\n\n_ = i18n.Translator(\"Mod\", __file__)\nlog = logging.getLogger(\"red.mod\")\n\n\nclass Events(MixinMeta):\n \"\"\"\n This is a mixin for the core mod cog\n Has a bunch of things split off to here.\n \"\"\"\n\n async def check_duplicates(self, message):\n guild = message.guild\n author = message.author\n\n guild_cache = self.cache.get(guild.id, None)\n if guild_cache is None:\n repeats = await self.settings.guild(guild).delete_repeats()\n if repeats == -1:\n return False\n guild_cache = self.cache[guild.id] = defaultdict(lambda: deque(maxlen=repeats))\n\n if not message.content:\n return False\n\n guild_cache[author].append(message.content)\n msgs = guild_cache[author]\n if len(msgs) == msgs.maxlen and len(set(msgs)) == 1:\n try:\n await message.delete()\n return True\n except discord.HTTPException:\n pass\n return False\n\n async def check_mention_spam(self, message):\n guild = message.guild\n author = message.author\n\n max_mentions = await self.settings.guild(guild).ban_mention_spam()\n if max_mentions:\n mentions = set(message.mentions)\n if len(mentions) >= max_mentions:\n try:\n await guild.ban(author, reason=_(\"Mention spam (Autoban)\"))\n except discord.HTTPException:\n log.info(\n \"Failed to ban member for mention spam in server {}.\".format(guild.id)\n )\n else:\n try:\n await modlog.create_case(\n self.bot,\n guild,\n message.created_at,\n \"ban\",\n author,\n guild.me,\n _(\"Mention spam (Autoban)\"),\n until=None,\n channel=None,\n )\n except RuntimeError as e:\n print(e)\n return False\n return True\n return False\n\n @commands.Cog.listener()\n async def on_message(self, message):\n author = message.author\n if message.guild is None or self.bot.user == author:\n return\n valid_user = isinstance(author, discord.Member) and not author.bot\n if not valid_user:\n return\n\n # Bots and mods or superior are ignored from the filter\n mod_or_superior = await is_mod_or_superior(self.bot, obj=author)\n if mod_or_superior:\n return\n # As are anyone configured to be\n if await self.bot.is_automod_immune(message):\n return\n deleted = await self.check_duplicates(message)\n if not deleted:\n await self.check_mention_spam(message)\n\n @commands.Cog.listener()\n async def on_member_update(self, before: discord.Member, after: discord.Member):\n if before.name != after.name:\n async with self.settings.user(before).past_names() as name_list:\n while None in name_list: # clean out null entries from a bug\n name_list.remove(None)\n if after.name in name_list:\n # Ensure order is maintained without duplicates occuring\n name_list.remove(after.name)\n name_list.append(after.name)\n while len(name_list) > 20:\n name_list.pop(0)\n\n if before.nick != after.nick and after.nick is not None:\n async with self.settings.member(before).past_nicks() as nick_list:\n while None in nick_list: # clean out null entries from a bug\n nick_list.remove(None)\n if after.nick in nick_list:\n nick_list.remove(after.nick)\n nick_list.append(after.nick)\n while len(nick_list) > 20:\n nick_list.pop(0)\n", "path": "redbot/cogs/mod/events.py"}]}
1,751
256
gh_patches_debug_38252
rasdani/github-patches
git_diff
doccano__doccano-1261
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> No way to restrict text classification labels to exactly one label to assign Most classification tasks require exactly one label for each instance. This is also true for most text classification tasks, for example with sentiment classificaiton, and the possible labels negative, neutral, positive, each instance should receive one of the three labels, assigning e.g. both neutral and positive would make not sense. Yet the text classification task in doccano still does not allow to restrict assignment to a single label, annotators are free to assign as many labels as they want, including all of them! This limits the use of doccano for text classification tasks rather severely. The option to allow for any number of labels (0 to all of them) would still be good to have for multilabel classification tasks (e.g. assigning topics), but that is a much rarer annotation task in general. </issue> <code> [start of app/api/views/annotation.py] 1 from django.shortcuts import get_object_or_404 2 from rest_framework import generics, status 3 from rest_framework.exceptions import ValidationError 4 from rest_framework.permissions import IsAuthenticated 5 from rest_framework.response import Response 6 from rest_framework.views import APIView 7 8 from ..models import Document, Project 9 from ..permissions import (IsAnnotationApprover, IsInProjectOrAdmin, 10 IsOwnAnnotation, IsProjectAdmin) 11 from ..serializers import ApproverSerializer 12 13 14 class AnnotationList(generics.ListCreateAPIView): 15 pagination_class = None 16 permission_classes = [IsAuthenticated & IsInProjectOrAdmin] 17 swagger_schema = None 18 19 def get_serializer_class(self): 20 project = get_object_or_404(Project, pk=self.kwargs['project_id']) 21 self.serializer_class = project.get_annotation_serializer() 22 return self.serializer_class 23 24 def get_queryset(self): 25 project = get_object_or_404(Project, pk=self.kwargs['project_id']) 26 model = project.get_annotation_class() 27 queryset = model.objects.filter(document=self.kwargs['doc_id']) 28 if not project.collaborative_annotation: 29 queryset = queryset.filter(user=self.request.user) 30 return queryset 31 32 def create(self, request, *args, **kwargs): 33 self.check_single_class_classification(self.kwargs['project_id'], self.kwargs['doc_id'], request.user) 34 request.data['document'] = self.kwargs['doc_id'] 35 return super().create(request, args, kwargs) 36 37 def perform_create(self, serializer): 38 serializer.save(document_id=self.kwargs['doc_id'], user=self.request.user) 39 40 def delete(self, request, *args, **kwargs): 41 queryset = self.get_queryset() 42 queryset.all().delete() 43 return Response(status=status.HTTP_204_NO_CONTENT) 44 45 @staticmethod 46 def check_single_class_classification(project_id, doc_id, user): 47 project = get_object_or_404(Project, pk=project_id) 48 if not project.single_class_classification: 49 return 50 51 model = project.get_annotation_class() 52 annotations = model.objects.filter(document_id=doc_id) 53 if not project.collaborative_annotation: 54 annotations = annotations.filter(user=user) 55 56 if annotations.exists(): 57 raise ValidationError('requested to create duplicate annotation for single-class-classification project') 58 59 60 class AnnotationDetail(generics.RetrieveUpdateDestroyAPIView): 61 lookup_url_kwarg = 'annotation_id' 62 swagger_schema = None 63 64 def get_permissions(self): 65 project = get_object_or_404(Project, pk=self.kwargs['project_id']) 66 if project.collaborative_annotation: 67 self.permission_classes = [IsAuthenticated & IsInProjectOrAdmin] 68 else: 69 self.permission_classes = [IsAuthenticated & IsInProjectOrAdmin & IsOwnAnnotation] 70 return super().get_permissions() 71 72 def get_serializer_class(self): 73 project = get_object_or_404(Project, pk=self.kwargs['project_id']) 74 self.serializer_class = project.get_annotation_serializer() 75 return self.serializer_class 76 77 def get_queryset(self): 78 project = get_object_or_404(Project, pk=self.kwargs['project_id']) 79 model = project.get_annotation_class() 80 self.queryset = model.objects.all() 81 return self.queryset 82 83 84 class ApproveLabelsAPI(APIView): 85 permission_classes = [IsAuthenticated & (IsAnnotationApprover | IsProjectAdmin)] 86 87 def post(self, request, *args, **kwargs): 88 approved = self.request.data.get('approved', True) 89 document = get_object_or_404(Document, pk=self.kwargs['doc_id']) 90 document.annotations_approved_by = self.request.user if approved else None 91 document.save() 92 return Response(ApproverSerializer(document).data) 93 [end of app/api/views/annotation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/views/annotation.py b/app/api/views/annotation.py --- a/app/api/views/annotation.py +++ b/app/api/views/annotation.py @@ -1,6 +1,5 @@ from django.shortcuts import get_object_or_404 from rest_framework import generics, status -from rest_framework.exceptions import ValidationError from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from rest_framework.views import APIView @@ -16,21 +15,24 @@ permission_classes = [IsAuthenticated & IsInProjectOrAdmin] swagger_schema = None + @property + def project(self): + return get_object_or_404(Project, pk=self.kwargs['project_id']) + def get_serializer_class(self): - project = get_object_or_404(Project, pk=self.kwargs['project_id']) - self.serializer_class = project.get_annotation_serializer() + self.serializer_class = self.project.get_annotation_serializer() return self.serializer_class def get_queryset(self): - project = get_object_or_404(Project, pk=self.kwargs['project_id']) - model = project.get_annotation_class() + model = self.project.get_annotation_class() queryset = model.objects.filter(document=self.kwargs['doc_id']) - if not project.collaborative_annotation: + if not self.project.collaborative_annotation: queryset = queryset.filter(user=self.request.user) return queryset def create(self, request, *args, **kwargs): - self.check_single_class_classification(self.kwargs['project_id'], self.kwargs['doc_id'], request.user) + if self.project.single_class_classification: + self.get_queryset().delete() request.data['document'] = self.kwargs['doc_id'] return super().create(request, args, kwargs) @@ -42,20 +44,6 @@ queryset.all().delete() return Response(status=status.HTTP_204_NO_CONTENT) - @staticmethod - def check_single_class_classification(project_id, doc_id, user): - project = get_object_or_404(Project, pk=project_id) - if not project.single_class_classification: - return - - model = project.get_annotation_class() - annotations = model.objects.filter(document_id=doc_id) - if not project.collaborative_annotation: - annotations = annotations.filter(user=user) - - if annotations.exists(): - raise ValidationError('requested to create duplicate annotation for single-class-classification project') - class AnnotationDetail(generics.RetrieveUpdateDestroyAPIView): lookup_url_kwarg = 'annotation_id'
{"golden_diff": "diff --git a/app/api/views/annotation.py b/app/api/views/annotation.py\n--- a/app/api/views/annotation.py\n+++ b/app/api/views/annotation.py\n@@ -1,6 +1,5 @@\n from django.shortcuts import get_object_or_404\n from rest_framework import generics, status\n-from rest_framework.exceptions import ValidationError\n from rest_framework.permissions import IsAuthenticated\n from rest_framework.response import Response\n from rest_framework.views import APIView\n@@ -16,21 +15,24 @@\n permission_classes = [IsAuthenticated & IsInProjectOrAdmin]\n swagger_schema = None\n \n+ @property\n+ def project(self):\n+ return get_object_or_404(Project, pk=self.kwargs['project_id'])\n+\n def get_serializer_class(self):\n- project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n- self.serializer_class = project.get_annotation_serializer()\n+ self.serializer_class = self.project.get_annotation_serializer()\n return self.serializer_class\n \n def get_queryset(self):\n- project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n- model = project.get_annotation_class()\n+ model = self.project.get_annotation_class()\n queryset = model.objects.filter(document=self.kwargs['doc_id'])\n- if not project.collaborative_annotation:\n+ if not self.project.collaborative_annotation:\n queryset = queryset.filter(user=self.request.user)\n return queryset\n \n def create(self, request, *args, **kwargs):\n- self.check_single_class_classification(self.kwargs['project_id'], self.kwargs['doc_id'], request.user)\n+ if self.project.single_class_classification:\n+ self.get_queryset().delete()\n request.data['document'] = self.kwargs['doc_id']\n return super().create(request, args, kwargs)\n \n@@ -42,20 +44,6 @@\n queryset.all().delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n \n- @staticmethod\n- def check_single_class_classification(project_id, doc_id, user):\n- project = get_object_or_404(Project, pk=project_id)\n- if not project.single_class_classification:\n- return\n-\n- model = project.get_annotation_class()\n- annotations = model.objects.filter(document_id=doc_id)\n- if not project.collaborative_annotation:\n- annotations = annotations.filter(user=user)\n-\n- if annotations.exists():\n- raise ValidationError('requested to create duplicate annotation for single-class-classification project')\n-\n \n class AnnotationDetail(generics.RetrieveUpdateDestroyAPIView):\n lookup_url_kwarg = 'annotation_id'\n", "issue": "No way to restrict text classification labels to exactly one label to assign\nMost classification tasks require exactly one label for each instance. This is also true for most text classification tasks, for example with sentiment classificaiton, and the possible labels negative, neutral, positive, each instance should receive one of the three labels, assigning e.g. both neutral and positive would make not sense.\r\n\r\nYet the text classification task in doccano still does not allow to restrict assignment to a single label, annotators are free to assign as many labels as they want, including all of them!\r\n\r\nThis limits the use of doccano for text classification tasks rather severely. The option to allow for any number of labels (0 to all of them) would still be good to have for multilabel classification tasks (e.g. assigning topics), but that is a much rarer annotation task in general. \n", "before_files": [{"content": "from django.shortcuts import get_object_or_404\nfrom rest_framework import generics, status\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom ..models import Document, Project\nfrom ..permissions import (IsAnnotationApprover, IsInProjectOrAdmin,\n IsOwnAnnotation, IsProjectAdmin)\nfrom ..serializers import ApproverSerializer\n\n\nclass AnnotationList(generics.ListCreateAPIView):\n pagination_class = None\n permission_classes = [IsAuthenticated & IsInProjectOrAdmin]\n swagger_schema = None\n\n def get_serializer_class(self):\n project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n self.serializer_class = project.get_annotation_serializer()\n return self.serializer_class\n\n def get_queryset(self):\n project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n model = project.get_annotation_class()\n queryset = model.objects.filter(document=self.kwargs['doc_id'])\n if not project.collaborative_annotation:\n queryset = queryset.filter(user=self.request.user)\n return queryset\n\n def create(self, request, *args, **kwargs):\n self.check_single_class_classification(self.kwargs['project_id'], self.kwargs['doc_id'], request.user)\n request.data['document'] = self.kwargs['doc_id']\n return super().create(request, args, kwargs)\n\n def perform_create(self, serializer):\n serializer.save(document_id=self.kwargs['doc_id'], user=self.request.user)\n\n def delete(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n queryset.all().delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @staticmethod\n def check_single_class_classification(project_id, doc_id, user):\n project = get_object_or_404(Project, pk=project_id)\n if not project.single_class_classification:\n return\n\n model = project.get_annotation_class()\n annotations = model.objects.filter(document_id=doc_id)\n if not project.collaborative_annotation:\n annotations = annotations.filter(user=user)\n\n if annotations.exists():\n raise ValidationError('requested to create duplicate annotation for single-class-classification project')\n\n\nclass AnnotationDetail(generics.RetrieveUpdateDestroyAPIView):\n lookup_url_kwarg = 'annotation_id'\n swagger_schema = None\n\n def get_permissions(self):\n project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n if project.collaborative_annotation:\n self.permission_classes = [IsAuthenticated & IsInProjectOrAdmin]\n else:\n self.permission_classes = [IsAuthenticated & IsInProjectOrAdmin & IsOwnAnnotation]\n return super().get_permissions()\n\n def get_serializer_class(self):\n project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n self.serializer_class = project.get_annotation_serializer()\n return self.serializer_class\n\n def get_queryset(self):\n project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n model = project.get_annotation_class()\n self.queryset = model.objects.all()\n return self.queryset\n\n\nclass ApproveLabelsAPI(APIView):\n permission_classes = [IsAuthenticated & (IsAnnotationApprover | IsProjectAdmin)]\n\n def post(self, request, *args, **kwargs):\n approved = self.request.data.get('approved', True)\n document = get_object_or_404(Document, pk=self.kwargs['doc_id'])\n document.annotations_approved_by = self.request.user if approved else None\n document.save()\n return Response(ApproverSerializer(document).data)\n", "path": "app/api/views/annotation.py"}]}
1,670
562
gh_patches_debug_380
rasdani/github-patches
git_diff
dotkom__onlineweb4-1931
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SSO base template should extend the base template ## What kind of an issue is this? - [x] Bug report ## What is the expected behaviour? The template for the SSO app should extend the base template so we don't have to maintain multiple base templates. ## What is the current behaviour? It's a custom template, which looks copy/pasted from the base template. </issue> <code> [start of apps/sso/views.py] 1 # -*- encoding: utf-8 -*- 2 3 import logging 4 5 from django.contrib.auth.decorators import login_required 6 from django.shortcuts import render 7 from oauth2_provider.views.base import AuthorizationView as DefaultAuthorizationView # flake8: noqa 8 from oauth2_provider.views.base import RevokeTokenView, TokenView 9 10 _log = logging.getLogger('SSO') 11 12 13 @login_required 14 def index(request): 15 """ 16 This is the main SSO view 17 """ 18 19 context = {} 20 21 return render(request, 'sso/index.html', context) 22 23 24 class AuthorizationView(DefaultAuthorizationView): 25 template_name = 'sso/authorize.html' 26 [end of apps/sso/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/sso/views.py b/apps/sso/views.py --- a/apps/sso/views.py +++ b/apps/sso/views.py @@ -18,7 +18,7 @@ context = {} - return render(request, 'sso/index.html', context) + return render(request, 'sso/authorize.html', context) class AuthorizationView(DefaultAuthorizationView):
{"golden_diff": "diff --git a/apps/sso/views.py b/apps/sso/views.py\n--- a/apps/sso/views.py\n+++ b/apps/sso/views.py\n@@ -18,7 +18,7 @@\n \n context = {}\n \n- return render(request, 'sso/index.html', context)\n+ return render(request, 'sso/authorize.html', context)\n \n \n class AuthorizationView(DefaultAuthorizationView):\n", "issue": "SSO base template should extend the base template\n## What kind of an issue is this?\r\n\r\n- [x] Bug report\r\n\r\n\r\n## What is the expected behaviour?\r\n\r\nThe template for the SSO app should extend the base template so we don't have to maintain multiple base templates.\r\n\r\n\r\n## What is the current behaviour?\r\n\r\nIt's a custom template, which looks copy/pasted from the base template.\r\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n\nimport logging\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom oauth2_provider.views.base import AuthorizationView as DefaultAuthorizationView # flake8: noqa\nfrom oauth2_provider.views.base import RevokeTokenView, TokenView\n\n_log = logging.getLogger('SSO')\n\n\n@login_required\ndef index(request):\n \"\"\"\n This is the main SSO view\n \"\"\"\n\n context = {}\n\n return render(request, 'sso/index.html', context)\n\n\nclass AuthorizationView(DefaultAuthorizationView):\n template_name = 'sso/authorize.html'\n", "path": "apps/sso/views.py"}]}
800
88
gh_patches_debug_9208
rasdani/github-patches
git_diff
plotly__dash-1643
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Missing classifiers for Python 3.8/3.9 Python 3.8 and 3.9 are missing from the classifiers in `setup.py`: https://github.com/plotly/dash/blob/358c5089c929b2e99996f9d4ee6ec634f65437fe/setup.py#L55-L63 But there is no restriction to 3.7 or below in `python_requires`: https://github.com/plotly/dash/blob/358c5089c929b2e99996f9d4ee6ec634f65437fe/setup.py#L29 Anecdotally, I've been using Dash on Python 3.9 with no issues. Reporting as an issue as I can't see any mention of Python 3.8 and 3.9 compatibility so far! </issue> <code> [start of setup.py] 1 import io 2 from setuptools import setup, find_packages 3 4 main_ns = {} 5 exec(open("dash/version.py").read(), main_ns) # pylint: disable=exec-used 6 7 8 def read_req_file(req_type): 9 with open("requires-{}.txt".format(req_type)) as fp: 10 requires = (line.strip() for line in fp) 11 return [req for req in requires if req and not req.startswith("#")] 12 13 14 setup( 15 name="dash", 16 version=main_ns["__version__"], 17 author="Chris Parmer", 18 author_email="[email protected]", 19 packages=find_packages(exclude=["tests*"]), 20 include_package_data=True, 21 license="MIT", 22 description=( 23 "A Python framework for building reactive web-apps. " 24 "Developed by Plotly." 25 ), 26 long_description=io.open("README.md", encoding="utf-8").read(), 27 long_description_content_type="text/markdown", 28 install_requires=read_req_file("install"), 29 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*", 30 extras_require={ 31 "dev": read_req_file("dev"), 32 "testing": read_req_file("testing"), 33 }, 34 entry_points={ 35 "console_scripts": [ 36 "dash-generate-components = " 37 "dash.development.component_generator:cli", 38 "renderer = dash.development.build_process:renderer", 39 ], 40 "pytest11": ["dash = dash.testing.plugin"], 41 }, 42 url="https://plotly.com/dash", 43 classifiers=[ 44 "Development Status :: 5 - Production/Stable", 45 "Environment :: Web Environment", 46 "Framework :: Dash", 47 "Framework :: Flask", 48 "Intended Audience :: Developers", 49 "Intended Audience :: Education", 50 "Intended Audience :: Financial and Insurance Industry", 51 "Intended Audience :: Healthcare Industry", 52 "Intended Audience :: Manufacturing", 53 "Intended Audience :: Science/Research", 54 "License :: OSI Approved :: MIT License", 55 "Programming Language :: Python", 56 "Programming Language :: Python :: 2", 57 "Programming Language :: Python :: 2.7", 58 "Programming Language :: Python :: 3", 59 "Programming Language :: Python :: 3.3", 60 "Programming Language :: Python :: 3.4", 61 "Programming Language :: Python :: 3.5", 62 "Programming Language :: Python :: 3.6", 63 "Programming Language :: Python :: 3.7", 64 "Topic :: Database :: Front-Ends", 65 "Topic :: Office/Business :: Financial :: Spreadsheet", 66 "Topic :: Scientific/Engineering :: Visualization", 67 "Topic :: Software Development :: Libraries :: Application Frameworks", 68 "Topic :: Software Development :: Widget Sets", 69 ], 70 ) 71 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -61,6 +61,8 @@ "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", "Topic :: Database :: Front-Ends", "Topic :: Office/Business :: Financial :: Spreadsheet", "Topic :: Scientific/Engineering :: Visualization",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,8 @@\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n", "issue": "Missing classifiers for Python 3.8/3.9\nPython 3.8 and 3.9 are missing from the classifiers in `setup.py`:\r\n\r\nhttps://github.com/plotly/dash/blob/358c5089c929b2e99996f9d4ee6ec634f65437fe/setup.py#L55-L63\r\n\r\nBut there is no restriction to 3.7 or below in `python_requires`:\r\nhttps://github.com/plotly/dash/blob/358c5089c929b2e99996f9d4ee6ec634f65437fe/setup.py#L29\r\n\r\nAnecdotally, I've been using Dash on Python 3.9 with no issues. Reporting as an issue as I can't see any mention of Python 3.8 and 3.9 compatibility so far!\n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open(\"dash/version.py\").read(), main_ns) # pylint: disable=exec-used\n\n\ndef read_req_file(req_type):\n with open(\"requires-{}.txt\".format(req_type)) as fp:\n requires = (line.strip() for line in fp)\n return [req for req in requires if req and not req.startswith(\"#\")]\n\n\nsetup(\n name=\"dash\",\n version=main_ns[\"__version__\"],\n author=\"Chris Parmer\",\n author_email=\"[email protected]\",\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n license=\"MIT\",\n description=(\n \"A Python framework for building reactive web-apps. \"\n \"Developed by Plotly.\"\n ),\n long_description=io.open(\"README.md\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n install_requires=read_req_file(\"install\"),\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*\",\n extras_require={\n \"dev\": read_req_file(\"dev\"),\n \"testing\": read_req_file(\"testing\"),\n },\n entry_points={\n \"console_scripts\": [\n \"dash-generate-components = \"\n \"dash.development.component_generator:cli\",\n \"renderer = dash.development.build_process:renderer\",\n ],\n \"pytest11\": [\"dash = dash.testing.plugin\"],\n },\n url=\"https://plotly.com/dash\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Dash\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Manufacturing\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Widget Sets\",\n ],\n)\n", "path": "setup.py"}]}
1,460
128
gh_patches_debug_13492
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2642
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider sheetz is broken During the global build at 2021-08-11-14-42-19, spider **sheetz** failed with **526 features** and **1 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/logs/sheetz.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/sheetz.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/sheetz.geojson)) </issue> <code> [start of locations/spiders/sheetz.py] 1 import json 2 import re 3 import scrapy 4 from locations.items import GeojsonPointItem 5 6 7 class SheetzSpider(scrapy.Spider): 8 name = "sheetz" 9 item_attributes = {'brand': "Sheetz"} 10 allowed_domains = ["orderz.sheetz.com"] 11 start_urls = ( 12 "https://orderz.sheetz.com/sas/store", 13 ) 14 15 def parse(self, response): 16 stores = json.loads(response.body_as_unicode()) 17 18 for store in stores: 19 properties = { 20 'addr_full': store['address'], 21 'city': store['city'], 22 'state': store['state'], 23 'postcode': store['zip'], 24 'ref': store['storeNumber'], 25 'phone': store.get('phone'), 26 'website': 'https://orderz.sheetz.com/#/main/location/store/'+store['storeNumber'], 27 'lat': float(store['latitude']), 28 'lon': float(store['longitude']), 29 'opening_hours': '24/7' if store['open24x7'] else None, 30 'extras': { 31 'amenity:chargingstation': store['evCharger'], 32 'amenity:fuel': True, 33 'atm': store['atm'], 34 'car_wash': store['carWash'], 35 'fax': store['fax'] if 'fax' in store else None, 36 'fuel:diesel': store['diesel'], 37 'fuel:e15': store['e15'], 38 'fuel:e85': store['e85'], 39 'fuel:kerosene': store['kerosene'], 40 'fuel:propane': store['propane'], 41 } 42 } 43 44 yield GeojsonPointItem(**properties) 45 [end of locations/spiders/sheetz.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/sheetz.py b/locations/spiders/sheetz.py --- a/locations/spiders/sheetz.py +++ b/locations/spiders/sheetz.py @@ -24,8 +24,8 @@ 'ref': store['storeNumber'], 'phone': store.get('phone'), 'website': 'https://orderz.sheetz.com/#/main/location/store/'+store['storeNumber'], - 'lat': float(store['latitude']), - 'lon': float(store['longitude']), + 'lat': store['latitude'], + 'lon': store['longitude'], 'opening_hours': '24/7' if store['open24x7'] else None, 'extras': { 'amenity:chargingstation': store['evCharger'],
{"golden_diff": "diff --git a/locations/spiders/sheetz.py b/locations/spiders/sheetz.py\n--- a/locations/spiders/sheetz.py\n+++ b/locations/spiders/sheetz.py\n@@ -24,8 +24,8 @@\n 'ref': store['storeNumber'],\n 'phone': store.get('phone'),\n 'website': 'https://orderz.sheetz.com/#/main/location/store/'+store['storeNumber'],\n- 'lat': float(store['latitude']),\n- 'lon': float(store['longitude']),\n+ 'lat': store['latitude'],\n+ 'lon': store['longitude'],\n 'opening_hours': '24/7' if store['open24x7'] else None,\n 'extras': {\n 'amenity:chargingstation': store['evCharger'],\n", "issue": "Spider sheetz is broken\nDuring the global build at 2021-08-11-14-42-19, spider **sheetz** failed with **526 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/logs/sheetz.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/sheetz.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/sheetz.geojson))\n", "before_files": [{"content": "import json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\n\nclass SheetzSpider(scrapy.Spider):\n name = \"sheetz\"\n item_attributes = {'brand': \"Sheetz\"}\n allowed_domains = [\"orderz.sheetz.com\"]\n start_urls = (\n \"https://orderz.sheetz.com/sas/store\",\n )\n\n def parse(self, response):\n stores = json.loads(response.body_as_unicode())\n\n for store in stores:\n properties = {\n 'addr_full': store['address'],\n 'city': store['city'],\n 'state': store['state'],\n 'postcode': store['zip'],\n 'ref': store['storeNumber'],\n 'phone': store.get('phone'),\n 'website': 'https://orderz.sheetz.com/#/main/location/store/'+store['storeNumber'],\n 'lat': float(store['latitude']),\n 'lon': float(store['longitude']),\n 'opening_hours': '24/7' if store['open24x7'] else None,\n 'extras': {\n 'amenity:chargingstation': store['evCharger'],\n 'amenity:fuel': True,\n 'atm': store['atm'],\n 'car_wash': store['carWash'],\n 'fax': store['fax'] if 'fax' in store else None,\n 'fuel:diesel': store['diesel'],\n 'fuel:e15': store['e15'],\n 'fuel:e85': store['e85'],\n 'fuel:kerosene': store['kerosene'],\n 'fuel:propane': store['propane'],\n }\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/sheetz.py"}]}
1,171
178
gh_patches_debug_5235
rasdani/github-patches
git_diff
rasterio__rasterio-618
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> YCbCr JPEG-in-TIFF breaks rio-info One creates a YCbCr JPEG-in-TIFF with GDAL using `photometric=YCbCr` and `compress=JPEG` options. But reading the TIFFs tags to get the compression method returns "YCbCr JPEG", a value that's not in `rasterio.enums.Compression`. Reference: http://www.gdal.org/frmt_gtiff.html Solution: normalize "YCbCr JPEG" to "JPEG" and add source color space to rio-info's output. </issue> <code> [start of rasterio/enums.py] 1 2 from enum import Enum, IntEnum 3 4 5 class ColorInterp(IntEnum): 6 undefined=0 7 grey=1 8 gray=1 9 palette=2 10 red=3 11 green=4 12 blue=5 13 alpha=6 14 hue=7 15 saturation=8 16 lightness=9 17 cyan=10 18 magenta=11 19 yellow=12 20 black=13 21 22 23 class Resampling(Enum): 24 nearest='NEAREST' 25 gauss='GAUSS' 26 cubic='CUBIC' 27 average='AVERAGE' 28 mode='MODE' 29 average_magphase='AVERAGE_MAGPHASE' 30 none='NONE' 31 32 33 class Compression(Enum): 34 jpeg='JPEG' 35 lzw='LZW' 36 packbits='PACKBITS' 37 deflate='DEFLATE' 38 ccittrle='CCITTRLE' 39 ccittfax3='CCITTFAX3' 40 ccittfax4='CCITTFAX4' 41 lzma='LZMA' 42 none='NONE' 43 44 45 class Interleaving(Enum): 46 pixel='PIXEL' 47 line='LINE' 48 band='BAND' 49 50 51 class MaskFlags(IntEnum): 52 all_valid=1 53 per_dataset=2 54 alpha=4 55 nodata=8 56 [end of rasterio/enums.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rasterio/enums.py b/rasterio/enums.py --- a/rasterio/enums.py +++ b/rasterio/enums.py @@ -18,6 +18,9 @@ magenta=11 yellow=12 black=13 + Y=14 + Cb=15 + Cr=16 class Resampling(Enum): @@ -53,3 +56,14 @@ per_dataset=2 alpha=4 nodata=8 + + +class PhotometricInterp(Enum): + black='MINISBLACK' + white='MINISWHITE' + rgb='RGB' + cmyk='CMYK' + ycbcr='YCbCr' + cielab='CIELAB' + icclab='ICCLAB' + itulab='ITULAB'
{"golden_diff": "diff --git a/rasterio/enums.py b/rasterio/enums.py\n--- a/rasterio/enums.py\n+++ b/rasterio/enums.py\n@@ -18,6 +18,9 @@\n magenta=11\n yellow=12\n black=13\n+ Y=14\n+ Cb=15\n+ Cr=16\n \n \n class Resampling(Enum):\n@@ -53,3 +56,14 @@\n per_dataset=2\n alpha=4\n nodata=8\n+\n+\n+class PhotometricInterp(Enum):\n+ black='MINISBLACK'\n+ white='MINISWHITE'\n+ rgb='RGB'\n+ cmyk='CMYK'\n+ ycbcr='YCbCr'\n+ cielab='CIELAB'\n+ icclab='ICCLAB'\n+ itulab='ITULAB'\n", "issue": "YCbCr JPEG-in-TIFF breaks rio-info\nOne creates a YCbCr JPEG-in-TIFF with GDAL using `photometric=YCbCr` and `compress=JPEG` options. But reading the TIFFs tags to get the compression method returns \"YCbCr JPEG\", a value that's not in `rasterio.enums.Compression`.\n\nReference: http://www.gdal.org/frmt_gtiff.html\n\nSolution: normalize \"YCbCr JPEG\" to \"JPEG\" and add source color space to rio-info's output.\n\n", "before_files": [{"content": "\nfrom enum import Enum, IntEnum\n\n\nclass ColorInterp(IntEnum):\n undefined=0\n grey=1\n gray=1\n palette=2\n red=3\n green=4\n blue=5\n alpha=6\n hue=7\n saturation=8\n lightness=9\n cyan=10\n magenta=11\n yellow=12\n black=13\n\n\nclass Resampling(Enum):\n nearest='NEAREST'\n gauss='GAUSS'\n cubic='CUBIC'\n average='AVERAGE'\n mode='MODE'\n average_magphase='AVERAGE_MAGPHASE'\n none='NONE'\n\n\nclass Compression(Enum):\n jpeg='JPEG'\n lzw='LZW'\n packbits='PACKBITS'\n deflate='DEFLATE'\n ccittrle='CCITTRLE'\n ccittfax3='CCITTFAX3'\n ccittfax4='CCITTFAX4'\n lzma='LZMA'\n none='NONE'\n\n\nclass Interleaving(Enum):\n pixel='PIXEL'\n line='LINE'\n band='BAND'\n\n\nclass MaskFlags(IntEnum):\n all_valid=1\n per_dataset=2\n alpha=4\n nodata=8\n", "path": "rasterio/enums.py"}]}
1,038
208
gh_patches_debug_19931
rasdani/github-patches
git_diff
CiviWiki__OpenCiviWiki-1089
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Migration of frontend_views ### Idea summary Elaboration of issue #1070 ### Further details As mentioned in issue #1070, we need to migrate from ```py url(r"^") ``` to ```py path() ``` And for frontend_views the following points need to be kept in mind - [ ] usage of django.urls module for path and include - [ ] Including the following views using the include method - [ ] About_view - [ ] support_us_view - [ ] how_it_works_view - [ ] user_profile - [ ] issue_thread - [ ] base_view - [ ] civi2csv - [ ] use path for all of the above mentioned urls - [ ] Use the same name for reverse match. </issue> <code> [start of project/frontend_views/urls.py] 1 from django.conf.urls import url 2 from . import views as v 3 4 urlpatterns = [ 5 url(r"^about$", v.about_view, name="about"), 6 url(r"^support_us$", v.support_us_view, name="support us"), 7 url(r"^howitworks$", v.how_it_works_view, name="how it works"), 8 url(r"^profile/(?P<username>[a-zA-Z0-9-_]*)$", v.user_profile, name="profile"), 9 url(r"^profile/rep/(?P<username>\d+)$", v.user_profile, name="profile"), 10 url(r"^thread/(?P<thread_id>\w+)$", v.issue_thread, name="issue thread"), 11 url(r"^profile$", v.user_profile, name="default_profile"), 12 url(r"^$", v.base_view, name="base"), 13 url(r"^thread/(?P<thread_id>\w+)/csv$", v.civi2csv, name="civi2csv"), 14 ] 15 [end of project/frontend_views/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/project/frontend_views/urls.py b/project/frontend_views/urls.py --- a/project/frontend_views/urls.py +++ b/project/frontend_views/urls.py @@ -1,14 +1,13 @@ -from django.conf.urls import url -from . import views as v +from django.urls import path +from frontend_views import views urlpatterns = [ - url(r"^about$", v.about_view, name="about"), - url(r"^support_us$", v.support_us_view, name="support us"), - url(r"^howitworks$", v.how_it_works_view, name="how it works"), - url(r"^profile/(?P<username>[a-zA-Z0-9-_]*)$", v.user_profile, name="profile"), - url(r"^profile/rep/(?P<username>\d+)$", v.user_profile, name="profile"), - url(r"^thread/(?P<thread_id>\w+)$", v.issue_thread, name="issue thread"), - url(r"^profile$", v.user_profile, name="default_profile"), - url(r"^$", v.base_view, name="base"), - url(r"^thread/(?P<thread_id>\w+)/csv$", v.civi2csv, name="civi2csv"), + path("about/", views.about_view, name="about"), + path("support_us/", views.support_us_view, name="support us"), + path("howitworks/", views.how_it_works_view, name="how it works"), + path("profile/<str:username>/", views.user_profile, name="profile"), + path("thread/<int:thread_id>/", views.issue_thread, name="issue thread"), + path("profile/", views.user_profile, name="default_profile"), + path("", views.base_view, name="base"), + path("thread/<int:thread_id>/csv/", views.civi2csv, name="civi2csv"), ]
{"golden_diff": "diff --git a/project/frontend_views/urls.py b/project/frontend_views/urls.py\n--- a/project/frontend_views/urls.py\n+++ b/project/frontend_views/urls.py\n@@ -1,14 +1,13 @@\n-from django.conf.urls import url\n-from . import views as v\n+from django.urls import path\n+from frontend_views import views\n \n urlpatterns = [\n- url(r\"^about$\", v.about_view, name=\"about\"),\n- url(r\"^support_us$\", v.support_us_view, name=\"support us\"),\n- url(r\"^howitworks$\", v.how_it_works_view, name=\"how it works\"),\n- url(r\"^profile/(?P<username>[a-zA-Z0-9-_]*)$\", v.user_profile, name=\"profile\"),\n- url(r\"^profile/rep/(?P<username>\\d+)$\", v.user_profile, name=\"profile\"),\n- url(r\"^thread/(?P<thread_id>\\w+)$\", v.issue_thread, name=\"issue thread\"),\n- url(r\"^profile$\", v.user_profile, name=\"default_profile\"),\n- url(r\"^$\", v.base_view, name=\"base\"),\n- url(r\"^thread/(?P<thread_id>\\w+)/csv$\", v.civi2csv, name=\"civi2csv\"),\n+ path(\"about/\", views.about_view, name=\"about\"),\n+ path(\"support_us/\", views.support_us_view, name=\"support us\"),\n+ path(\"howitworks/\", views.how_it_works_view, name=\"how it works\"),\n+ path(\"profile/<str:username>/\", views.user_profile, name=\"profile\"),\n+ path(\"thread/<int:thread_id>/\", views.issue_thread, name=\"issue thread\"),\n+ path(\"profile/\", views.user_profile, name=\"default_profile\"),\n+ path(\"\", views.base_view, name=\"base\"),\n+ path(\"thread/<int:thread_id>/csv/\", views.civi2csv, name=\"civi2csv\"),\n ]\n", "issue": "Migration of frontend_views \n### Idea summary\n\nElaboration of issue #1070\n\n### Further details\n\nAs mentioned in issue #1070, we need to migrate from\r\n```py\r\nurl(r\"^\")\r\n```\r\nto \r\n```py\r\npath()\r\n```\r\nAnd for frontend_views the following points need to be kept in mind\r\n- [ ] usage of django.urls module for path and include\r\n- [ ] Including the following views using the include method\r\n - [ ] About_view\r\n - [ ] support_us_view\r\n - [ ] how_it_works_view\r\n - [ ] user_profile\r\n - [ ] issue_thread\r\n - [ ] base_view\r\n - [ ] civi2csv\r\n - [ ] use path for all of the above mentioned urls\r\n - [ ] Use the same name for reverse match. \n", "before_files": [{"content": "from django.conf.urls import url\nfrom . import views as v\n\nurlpatterns = [\n url(r\"^about$\", v.about_view, name=\"about\"),\n url(r\"^support_us$\", v.support_us_view, name=\"support us\"),\n url(r\"^howitworks$\", v.how_it_works_view, name=\"how it works\"),\n url(r\"^profile/(?P<username>[a-zA-Z0-9-_]*)$\", v.user_profile, name=\"profile\"),\n url(r\"^profile/rep/(?P<username>\\d+)$\", v.user_profile, name=\"profile\"),\n url(r\"^thread/(?P<thread_id>\\w+)$\", v.issue_thread, name=\"issue thread\"),\n url(r\"^profile$\", v.user_profile, name=\"default_profile\"),\n url(r\"^$\", v.base_view, name=\"base\"),\n url(r\"^thread/(?P<thread_id>\\w+)/csv$\", v.civi2csv, name=\"civi2csv\"),\n]\n", "path": "project/frontend_views/urls.py"}]}
941
420
gh_patches_debug_36414
rasdani/github-patches
git_diff
facebookresearch__hydra-1695
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> strict flag was removed from the compose API without a prior deprecation Strict config composition functionality has become the default in Hydra 1.0 (See https://hydra.cc/docs/upgrades/0.11_to_1.0/strict_mode_flag_deprecated). This flag was completely removed in Hydra 1.1.0. Unfortunately, the Compose API strict flag was not deprecated and was thus an avoidable breaking change. A followup PR will re-introduce the strict flag to the Compose API as a deprecated flag. That flag will be removed in the major version of Hydra. </issue> <code> [start of hydra/experimental/compose.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 # DEPRECATED: remove in 1.2 3 import warnings 4 from typing import List, Optional 5 6 from omegaconf import DictConfig 7 8 9 def compose( 10 config_name: Optional[str] = None, 11 overrides: List[str] = [], 12 return_hydra_config: bool = False, 13 ) -> DictConfig: 14 from hydra import compose as real_compose 15 16 warnings.warn( 17 category=UserWarning, 18 message="hydra.experimental.compose() is no longer experimental." 19 " Use hydra.compose()", 20 ) 21 return real_compose( 22 config_name=config_name, 23 overrides=overrides, 24 return_hydra_config=return_hydra_config, 25 ) 26 [end of hydra/experimental/compose.py] [start of hydra/compose.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 from typing import List, Optional 3 4 from omegaconf import DictConfig, open_dict 5 6 from hydra.core.global_hydra import GlobalHydra 7 from hydra.types import RunMode 8 9 10 def compose( 11 config_name: Optional[str] = None, 12 overrides: List[str] = [], 13 return_hydra_config: bool = False, 14 ) -> DictConfig: 15 """ 16 :param config_name: the name of the config 17 (usually the file name without the .yaml extension) 18 :param overrides: list of overrides for config file 19 :param return_hydra_config: True to return the hydra config node in the result 20 :return: the composed config 21 """ 22 assert ( 23 GlobalHydra().is_initialized() 24 ), "GlobalHydra is not initialized, use @hydra.main() or call one of the hydra initialization methods first" 25 26 gh = GlobalHydra.instance() 27 assert gh.hydra is not None 28 cfg = gh.hydra.compose_config( 29 config_name=config_name, 30 overrides=overrides, 31 run_mode=RunMode.RUN, 32 from_shell=False, 33 with_log_configuration=False, 34 ) 35 assert isinstance(cfg, DictConfig) 36 37 if not return_hydra_config: 38 if "hydra" in cfg: 39 with open_dict(cfg): 40 del cfg["hydra"] 41 return cfg 42 [end of hydra/compose.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hydra/compose.py b/hydra/compose.py --- a/hydra/compose.py +++ b/hydra/compose.py @@ -1,7 +1,9 @@ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import warnings +from textwrap import dedent from typing import List, Optional -from omegaconf import DictConfig, open_dict +from omegaconf import DictConfig, OmegaConf, open_dict from hydra.core.global_hydra import GlobalHydra from hydra.types import RunMode @@ -11,12 +13,14 @@ config_name: Optional[str] = None, overrides: List[str] = [], return_hydra_config: bool = False, + strict: Optional[bool] = None, ) -> DictConfig: """ :param config_name: the name of the config (usually the file name without the .yaml extension) :param overrides: list of overrides for config file :param return_hydra_config: True to return the hydra config node in the result + :param strict: DEPRECATED. If true, returned config has struct mode disabled. :return: the composed config """ assert ( @@ -38,4 +42,18 @@ if "hydra" in cfg: with open_dict(cfg): del cfg["hydra"] + + if strict is not None: + # DEPRECATED: remove in 1.2 + warnings.warn( + dedent( + """\ + + The strict flag in the compose API is deprecated and will be removed in the next version of Hydra. + See https://hydra.cc/docs/upgrades/0.11_to_1.0/strict_mode_flag_deprecated for more info. + """ + ) + ) + OmegaConf.set_struct(cfg, strict) + return cfg diff --git a/hydra/experimental/compose.py b/hydra/experimental/compose.py --- a/hydra/experimental/compose.py +++ b/hydra/experimental/compose.py @@ -10,6 +10,7 @@ config_name: Optional[str] = None, overrides: List[str] = [], return_hydra_config: bool = False, + strict: Optional[bool] = None, ) -> DictConfig: from hydra import compose as real_compose @@ -22,4 +23,5 @@ config_name=config_name, overrides=overrides, return_hydra_config=return_hydra_config, + strict=strict, )
{"golden_diff": "diff --git a/hydra/compose.py b/hydra/compose.py\n--- a/hydra/compose.py\n+++ b/hydra/compose.py\n@@ -1,7 +1,9 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n+import warnings\n+from textwrap import dedent\n from typing import List, Optional\n \n-from omegaconf import DictConfig, open_dict\n+from omegaconf import DictConfig, OmegaConf, open_dict\n \n from hydra.core.global_hydra import GlobalHydra\n from hydra.types import RunMode\n@@ -11,12 +13,14 @@\n config_name: Optional[str] = None,\n overrides: List[str] = [],\n return_hydra_config: bool = False,\n+ strict: Optional[bool] = None,\n ) -> DictConfig:\n \"\"\"\n :param config_name: the name of the config\n (usually the file name without the .yaml extension)\n :param overrides: list of overrides for config file\n :param return_hydra_config: True to return the hydra config node in the result\n+ :param strict: DEPRECATED. If true, returned config has struct mode disabled.\n :return: the composed config\n \"\"\"\n assert (\n@@ -38,4 +42,18 @@\n if \"hydra\" in cfg:\n with open_dict(cfg):\n del cfg[\"hydra\"]\n+\n+ if strict is not None:\n+ # DEPRECATED: remove in 1.2\n+ warnings.warn(\n+ dedent(\n+ \"\"\"\\\n+\n+ The strict flag in the compose API is deprecated and will be removed in the next version of Hydra.\n+ See https://hydra.cc/docs/upgrades/0.11_to_1.0/strict_mode_flag_deprecated for more info.\n+ \"\"\"\n+ )\n+ )\n+ OmegaConf.set_struct(cfg, strict)\n+\n return cfg\ndiff --git a/hydra/experimental/compose.py b/hydra/experimental/compose.py\n--- a/hydra/experimental/compose.py\n+++ b/hydra/experimental/compose.py\n@@ -10,6 +10,7 @@\n config_name: Optional[str] = None,\n overrides: List[str] = [],\n return_hydra_config: bool = False,\n+ strict: Optional[bool] = None,\n ) -> DictConfig:\n from hydra import compose as real_compose\n \n@@ -22,4 +23,5 @@\n config_name=config_name,\n overrides=overrides,\n return_hydra_config=return_hydra_config,\n+ strict=strict,\n )\n", "issue": "strict flag was removed from the compose API without a prior deprecation\nStrict config composition functionality has become the default in Hydra 1.0 (See https://hydra.cc/docs/upgrades/0.11_to_1.0/strict_mode_flag_deprecated).\r\n\r\nThis flag was completely removed in Hydra 1.1.0.\r\nUnfortunately, the Compose API strict flag was not deprecated and was thus an avoidable breaking change.\r\n\r\nA followup PR will re-introduce the strict flag to the Compose API as a deprecated flag. That flag will be removed in the major version of Hydra.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# DEPRECATED: remove in 1.2\nimport warnings\nfrom typing import List, Optional\n\nfrom omegaconf import DictConfig\n\n\ndef compose(\n config_name: Optional[str] = None,\n overrides: List[str] = [],\n return_hydra_config: bool = False,\n) -> DictConfig:\n from hydra import compose as real_compose\n\n warnings.warn(\n category=UserWarning,\n message=\"hydra.experimental.compose() is no longer experimental.\"\n \" Use hydra.compose()\",\n )\n return real_compose(\n config_name=config_name,\n overrides=overrides,\n return_hydra_config=return_hydra_config,\n )\n", "path": "hydra/experimental/compose.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import List, Optional\n\nfrom omegaconf import DictConfig, open_dict\n\nfrom hydra.core.global_hydra import GlobalHydra\nfrom hydra.types import RunMode\n\n\ndef compose(\n config_name: Optional[str] = None,\n overrides: List[str] = [],\n return_hydra_config: bool = False,\n) -> DictConfig:\n \"\"\"\n :param config_name: the name of the config\n (usually the file name without the .yaml extension)\n :param overrides: list of overrides for config file\n :param return_hydra_config: True to return the hydra config node in the result\n :return: the composed config\n \"\"\"\n assert (\n GlobalHydra().is_initialized()\n ), \"GlobalHydra is not initialized, use @hydra.main() or call one of the hydra initialization methods first\"\n\n gh = GlobalHydra.instance()\n assert gh.hydra is not None\n cfg = gh.hydra.compose_config(\n config_name=config_name,\n overrides=overrides,\n run_mode=RunMode.RUN,\n from_shell=False,\n with_log_configuration=False,\n )\n assert isinstance(cfg, DictConfig)\n\n if not return_hydra_config:\n if \"hydra\" in cfg:\n with open_dict(cfg):\n del cfg[\"hydra\"]\n return cfg\n", "path": "hydra/compose.py"}]}
1,265
583
gh_patches_debug_22393
rasdani/github-patches
git_diff
pyload__pyload-1508
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [SkipRev] broken - cannot import name SkipDownload SkipRev plugin failed to import with following error: Errore durante l'importazione SkipRev: cannot import name SkipDownload I already tried following, without success: http://forum.pyload.org/viewtopic.php?f=7&t=4335 Debian GNU/Linux 7.8 (wheezy) x64 Python 2.7.3 pyLoad 0.4.9 SkipRev 0.30 </issue> <code> [start of module/plugins/hooks/SkipRev.py] 1 # -*- coding: utf-8 -*- 2 3 import re 4 import urllib 5 import urlparse 6 7 from types import MethodType 8 9 from module.PyFile import PyFile 10 from module.plugins.internal.Hook import Hook 11 from module.plugins.internal.Plugin import SkipDownload 12 13 14 class SkipRev(Hook): 15 __name__ = "SkipRev" 16 __type__ = "hook" 17 __version__ = "0.30" 18 19 __config__ = [("mode" , "Auto;Manual", "Choose recovery archives to skip" , "Auto"), 20 ("revtokeep", "int" , "Number of recovery archives to keep for package", 0 )] 21 22 __description__ = """Skip recovery archives (.rev)""" 23 __license__ = "GPLv3" 24 __authors__ = [("Walter Purcaro", "[email protected]")] 25 26 27 interval = 0 #@TODO: Remove in 0.4.10 28 29 30 def setup(self): 31 self.info = {} #@TODO: Remove in 0.4.10 32 33 34 @staticmethod 35 def _setup(self): 36 self.pyfile.plugin._setup() 37 if self.pyfile.hasStatus("skipped"): 38 raise SkipDownload(self.pyfile.statusname or self.pyfile.pluginname) 39 40 41 def _name(self, pyfile): 42 if hasattr(pyfile.pluginmodule, "getInfo"): #@NOTE: getInfo is deprecated in 0.4.10 43 return pyfile.pluginmodule.getInfo([pyfile.url]).next()[0] 44 else: 45 self.logWarning("Unable to grab file name") 46 return urlparse.urlparse(urllib.unquote(pyfile.url)).path.split('/')[-1] 47 48 49 def _pyfile(self, link): 50 return PyFile(self.core.files, 51 link.fid, 52 link.url, 53 link.name, 54 link.size, 55 link.status, 56 link.error, 57 link.plugin, 58 link.packageID, 59 link.order) 60 61 62 def downloadPreparing(self, pyfile): 63 name = self._name(pyfile) 64 65 if pyfile.statusname is _("unskipped") or not name.endswith(".rev") or not ".part" in name: 66 return 67 68 revtokeep = -1 if self.getConfig('mode') == "Auto" else self.getConfig('revtokeep') 69 70 if revtokeep: 71 status_list = (1, 4, 8, 9, 14) if revtokeep < 0 else (1, 3, 4, 8, 9, 14) 72 pyname = re.compile(r'%s\.part\d+\.rev$' % name.rsplit('.', 2)[0].replace('.', '\.')) 73 74 queued = [True for link in self.core.api.getPackageData(pyfile.package().id).links \ 75 if link.status not in status_list and pyname.match(link.name)].count(True) 76 77 if not queued or queued < revtokeep: #: keep one rev at least in auto mode 78 return 79 80 pyfile.setCustomStatus("SkipRev", "skipped") 81 82 if not hasattr(pyfile.plugin, "_setup"): 83 # Work-around: inject status checker inside the preprocessing routine of the plugin 84 pyfile.plugin._setup = pyfile.plugin.setup 85 pyfile.plugin.setup = MethodType(self._setup, pyfile.plugin) 86 87 88 def downloadFailed(self, pyfile): 89 #: Check if pyfile is still "failed", 90 # maybe might has been restarted in meantime 91 if pyfile.status != 8 or pyfile.name.rsplit('.', 1)[-1].strip() not in ("rar", "rev"): 92 return 93 94 revtokeep = -1 if self.getConfig('mode') == "Auto" else self.getConfig('revtokeep') 95 96 if not revtokeep: 97 return 98 99 pyname = re.compile(r'%s\.part\d+\.rev$' % pyfile.name.rsplit('.', 2)[0].replace('.', '\.')) 100 101 for link in self.core.api.getPackageData(pyfile.package().id).links: 102 if link.status is 4 and pyname.match(link.name): 103 pylink = self._pyfile(link) 104 105 if revtokeep > -1 or pyfile.name.endswith(".rev"): 106 pylink.setStatus("queued") 107 else: 108 pylink.setCustomStatus(_("unskipped"), "queued") 109 110 self.core.files.save() 111 pylink.release() 112 return 113 [end of module/plugins/hooks/SkipRev.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/module/plugins/hooks/SkipRev.py b/module/plugins/hooks/SkipRev.py --- a/module/plugins/hooks/SkipRev.py +++ b/module/plugins/hooks/SkipRev.py @@ -8,13 +8,13 @@ from module.PyFile import PyFile from module.plugins.internal.Hook import Hook -from module.plugins.internal.Plugin import SkipDownload +from module.plugins.internal.Plugin import Skip class SkipRev(Hook): __name__ = "SkipRev" __type__ = "hook" - __version__ = "0.30" + __version__ = "0.31" __config__ = [("mode" , "Auto;Manual", "Choose recovery archives to skip" , "Auto"), ("revtokeep", "int" , "Number of recovery archives to keep for package", 0 )] @@ -35,7 +35,7 @@ def _setup(self): self.pyfile.plugin._setup() if self.pyfile.hasStatus("skipped"): - raise SkipDownload(self.pyfile.statusname or self.pyfile.pluginname) + raise Skip(self.pyfile.statusname or self.pyfile.pluginname) def _name(self, pyfile):
{"golden_diff": "diff --git a/module/plugins/hooks/SkipRev.py b/module/plugins/hooks/SkipRev.py\n--- a/module/plugins/hooks/SkipRev.py\n+++ b/module/plugins/hooks/SkipRev.py\n@@ -8,13 +8,13 @@\n \n from module.PyFile import PyFile\n from module.plugins.internal.Hook import Hook\n-from module.plugins.internal.Plugin import SkipDownload\n+from module.plugins.internal.Plugin import Skip\n \n \n class SkipRev(Hook):\n __name__ = \"SkipRev\"\n __type__ = \"hook\"\n- __version__ = \"0.30\"\n+ __version__ = \"0.31\"\n \n __config__ = [(\"mode\" , \"Auto;Manual\", \"Choose recovery archives to skip\" , \"Auto\"),\n (\"revtokeep\", \"int\" , \"Number of recovery archives to keep for package\", 0 )]\n@@ -35,7 +35,7 @@\n def _setup(self):\n self.pyfile.plugin._setup()\n if self.pyfile.hasStatus(\"skipped\"):\n- raise SkipDownload(self.pyfile.statusname or self.pyfile.pluginname)\n+ raise Skip(self.pyfile.statusname or self.pyfile.pluginname)\n \n \n def _name(self, pyfile):\n", "issue": "[SkipRev] broken - cannot import name SkipDownload\nSkipRev plugin failed to import with following error:\nErrore durante l'importazione SkipRev: cannot import name SkipDownload\n\nI already tried following, without success: http://forum.pyload.org/viewtopic.php?f=7&t=4335\n\nDebian GNU/Linux 7.8 (wheezy) x64\nPython 2.7.3\npyLoad 0.4.9\nSkipRev 0.30\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\nimport urllib\nimport urlparse\n\nfrom types import MethodType\n\nfrom module.PyFile import PyFile\nfrom module.plugins.internal.Hook import Hook\nfrom module.plugins.internal.Plugin import SkipDownload\n\n\nclass SkipRev(Hook):\n __name__ = \"SkipRev\"\n __type__ = \"hook\"\n __version__ = \"0.30\"\n\n __config__ = [(\"mode\" , \"Auto;Manual\", \"Choose recovery archives to skip\" , \"Auto\"),\n (\"revtokeep\", \"int\" , \"Number of recovery archives to keep for package\", 0 )]\n\n __description__ = \"\"\"Skip recovery archives (.rev)\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Walter Purcaro\", \"[email protected]\")]\n\n\n interval = 0 #@TODO: Remove in 0.4.10\n\n\n def setup(self):\n self.info = {} #@TODO: Remove in 0.4.10\n\n\n @staticmethod\n def _setup(self):\n self.pyfile.plugin._setup()\n if self.pyfile.hasStatus(\"skipped\"):\n raise SkipDownload(self.pyfile.statusname or self.pyfile.pluginname)\n\n\n def _name(self, pyfile):\n if hasattr(pyfile.pluginmodule, \"getInfo\"): #@NOTE: getInfo is deprecated in 0.4.10\n return pyfile.pluginmodule.getInfo([pyfile.url]).next()[0]\n else:\n self.logWarning(\"Unable to grab file name\")\n return urlparse.urlparse(urllib.unquote(pyfile.url)).path.split('/')[-1]\n\n\n def _pyfile(self, link):\n return PyFile(self.core.files,\n link.fid,\n link.url,\n link.name,\n link.size,\n link.status,\n link.error,\n link.plugin,\n link.packageID,\n link.order)\n\n\n def downloadPreparing(self, pyfile):\n name = self._name(pyfile)\n\n if pyfile.statusname is _(\"unskipped\") or not name.endswith(\".rev\") or not \".part\" in name:\n return\n\n revtokeep = -1 if self.getConfig('mode') == \"Auto\" else self.getConfig('revtokeep')\n\n if revtokeep:\n status_list = (1, 4, 8, 9, 14) if revtokeep < 0 else (1, 3, 4, 8, 9, 14)\n pyname = re.compile(r'%s\\.part\\d+\\.rev$' % name.rsplit('.', 2)[0].replace('.', '\\.'))\n\n queued = [True for link in self.core.api.getPackageData(pyfile.package().id).links \\\n if link.status not in status_list and pyname.match(link.name)].count(True)\n\n if not queued or queued < revtokeep: #: keep one rev at least in auto mode\n return\n\n pyfile.setCustomStatus(\"SkipRev\", \"skipped\")\n\n if not hasattr(pyfile.plugin, \"_setup\"):\n # Work-around: inject status checker inside the preprocessing routine of the plugin\n pyfile.plugin._setup = pyfile.plugin.setup\n pyfile.plugin.setup = MethodType(self._setup, pyfile.plugin)\n\n\n def downloadFailed(self, pyfile):\n #: Check if pyfile is still \"failed\",\n # maybe might has been restarted in meantime\n if pyfile.status != 8 or pyfile.name.rsplit('.', 1)[-1].strip() not in (\"rar\", \"rev\"):\n return\n\n revtokeep = -1 if self.getConfig('mode') == \"Auto\" else self.getConfig('revtokeep')\n\n if not revtokeep:\n return\n\n pyname = re.compile(r'%s\\.part\\d+\\.rev$' % pyfile.name.rsplit('.', 2)[0].replace('.', '\\.'))\n\n for link in self.core.api.getPackageData(pyfile.package().id).links:\n if link.status is 4 and pyname.match(link.name):\n pylink = self._pyfile(link)\n\n if revtokeep > -1 or pyfile.name.endswith(\".rev\"):\n pylink.setStatus(\"queued\")\n else:\n pylink.setCustomStatus(_(\"unskipped\"), \"queued\")\n\n self.core.files.save()\n pylink.release()\n return\n", "path": "module/plugins/hooks/SkipRev.py"}]}
1,845
273
gh_patches_debug_19087
rasdani/github-patches
git_diff
cloudtools__troposphere-869
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Validation error in CodeCommit Trigger It appears that the validation for the CodeCommit Trigger class does not currently allow the use of any intrinsic functions for the `Events` property, and only accepts a list of hard coded values. https://github.com/cloudtools/troposphere/blob/45582eb1d21a6cc9cfa608f626d8acbf0317f37f/troposphere/codecommit.py#L18-L32 We are trying to allow for the dynamic selection of the values with a parameter, but encounter errors when attempting to use a `Ref`. A snippet of our trigger definition, and the error encountered is below: ``` repo_trigger1 = codecommit.Trigger( Name = Ref(trigger_1_name), CustomData = Ref(trigger_1_custom_data), DestinationArn = Ref(trigger_1_destination_arn), Branches = Ref(trigger_1_branches), Events = Ref(trigger_1_events), ) ``` We are able to successfully generate the template when changing the `Events` assignment to: ```Events = ["all"],``` I believe we just need to check if the value is one of the Helper functions before iterating through the events. I will try to get a fix pushed up for review. </issue> <code> [start of troposphere/codecommit.py] 1 # Copyright (c) 2016, Mark Peek <[email protected]> 2 # All rights reserved. 3 # 4 # See LICENSE file for full license. 5 6 from . import AWSObject, AWSProperty 7 8 9 class Trigger(AWSProperty): 10 props = { 11 'Branches': ([basestring], False), 12 'CustomData': (basestring, False), 13 'DestinationArn': (basestring, False), 14 'Events': ([basestring], False), 15 'Name': (basestring, False), 16 } 17 18 def validate(self): 19 valid = [ 20 'all', 21 'createReference', 22 'deleteReference', 23 'updateReference', 24 ] 25 events = self.properties.get('Events') 26 if events: 27 if 'all' in events and len(events) != 1: 28 raise ValueError('Trigger events: all must be used alone') 29 else: 30 for e in events: 31 if e not in valid: 32 raise ValueError('Trigger: invalid event %s' % e) 33 34 35 class Repository(AWSObject): 36 resource_type = "AWS::CodeCommit::Repository" 37 38 props = { 39 'RepositoryDescription': (basestring, False), 40 'RepositoryName': (basestring, True), 41 'Triggers': ([Trigger], False), 42 } 43 [end of troposphere/codecommit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/troposphere/codecommit.py b/troposphere/codecommit.py --- a/troposphere/codecommit.py +++ b/troposphere/codecommit.py @@ -3,7 +3,7 @@ # # See LICENSE file for full license. -from . import AWSObject, AWSProperty +from . import AWSHelperFn, AWSObject, AWSProperty class Trigger(AWSProperty): @@ -23,12 +23,12 @@ 'updateReference', ] events = self.properties.get('Events') - if events: + if events and not isinstance(events, AWSHelperFn): if 'all' in events and len(events) != 1: raise ValueError('Trigger events: all must be used alone') else: for e in events: - if e not in valid: + if e not in valid and not isinstance(e, AWSHelperFn): raise ValueError('Trigger: invalid event %s' % e)
{"golden_diff": "diff --git a/troposphere/codecommit.py b/troposphere/codecommit.py\n--- a/troposphere/codecommit.py\n+++ b/troposphere/codecommit.py\n@@ -3,7 +3,7 @@\n #\n # See LICENSE file for full license.\n \n-from . import AWSObject, AWSProperty\n+from . import AWSHelperFn, AWSObject, AWSProperty\n \n \n class Trigger(AWSProperty):\n@@ -23,12 +23,12 @@\n 'updateReference',\n ]\n events = self.properties.get('Events')\n- if events:\n+ if events and not isinstance(events, AWSHelperFn):\n if 'all' in events and len(events) != 1:\n raise ValueError('Trigger events: all must be used alone')\n else:\n for e in events:\n- if e not in valid:\n+ if e not in valid and not isinstance(e, AWSHelperFn):\n raise ValueError('Trigger: invalid event %s' % e)\n", "issue": "Validation error in CodeCommit Trigger\nIt appears that the validation for the CodeCommit Trigger class does not currently allow the use of any intrinsic functions for the `Events` property, and only accepts a list of hard coded values.\r\n\r\nhttps://github.com/cloudtools/troposphere/blob/45582eb1d21a6cc9cfa608f626d8acbf0317f37f/troposphere/codecommit.py#L18-L32\r\n\r\nWe are trying to allow for the dynamic selection of the values with a parameter, but encounter errors when attempting to use a `Ref`. A snippet of our trigger definition, and the error encountered is below:\r\n\r\n```\r\nrepo_trigger1 = codecommit.Trigger(\r\n Name = Ref(trigger_1_name),\r\n CustomData = Ref(trigger_1_custom_data),\r\n DestinationArn = Ref(trigger_1_destination_arn),\r\n Branches = Ref(trigger_1_branches),\r\n Events = Ref(trigger_1_events),\r\n )\r\n```\r\nWe are able to successfully generate the template when changing the `Events` assignment to:\r\n\r\n```Events = [\"all\"],```\r\n\r\nI believe we just need to check if the value is one of the Helper functions before iterating through the events. I will try to get a fix pushed up for review.\n", "before_files": [{"content": "# Copyright (c) 2016, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\n\n\nclass Trigger(AWSProperty):\n props = {\n 'Branches': ([basestring], False),\n 'CustomData': (basestring, False),\n 'DestinationArn': (basestring, False),\n 'Events': ([basestring], False),\n 'Name': (basestring, False),\n }\n\n def validate(self):\n valid = [\n 'all',\n 'createReference',\n 'deleteReference',\n 'updateReference',\n ]\n events = self.properties.get('Events')\n if events:\n if 'all' in events and len(events) != 1:\n raise ValueError('Trigger events: all must be used alone')\n else:\n for e in events:\n if e not in valid:\n raise ValueError('Trigger: invalid event %s' % e)\n\n\nclass Repository(AWSObject):\n resource_type = \"AWS::CodeCommit::Repository\"\n\n props = {\n 'RepositoryDescription': (basestring, False),\n 'RepositoryName': (basestring, True),\n 'Triggers': ([Trigger], False),\n }\n", "path": "troposphere/codecommit.py"}]}
1,160
210
gh_patches_debug_6298
rasdani/github-patches
git_diff
vispy__vispy-1389
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Camera API documentation missing I could not find a list of available cameras in the docs: http://vispy.org/scene.html?highlight=cameras#module-vispy.scene.cameras </issue> <code> [start of vispy/scene/cameras/__init__.py] 1 # -*- coding: utf-8 -*- 2 # Copyright (c) Vispy Development Team. All Rights Reserved. 3 # Distributed under the (new) BSD License. See LICENSE.txt for more info. 4 """ 5 Cameras are responsible for determining which part of a scene is displayed 6 in a viewbox and for handling user input to change the view. 7 8 Several Camera subclasses are available to customize the projection of the 9 scene such as 3D perspective and orthographic projections, 2D 10 scale/translation, and other specialty cameras. A variety of user interaction 11 styles are available for each camera including arcball, turntable, 12 first-person, and pan/zoom interactions. 13 14 Internally, Cameras work by setting the transform of a SubScene object such 15 that a certain part of the scene is mapped to the bounding rectangle of the 16 ViewBox. 17 """ 18 from ._base import make_camera # noqa 19 from .base_camera import BaseCamera # noqa 20 from .panzoom import PanZoomCamera # noqa 21 from .arcball import ArcballCamera # noqa 22 from .turntable import TurntableCamera # noqa 23 from .fly import FlyCamera # noqa 24 from .magnify import MagnifyCamera, Magnify1DCamera # noqa 25 [end of vispy/scene/cameras/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vispy/scene/cameras/__init__.py b/vispy/scene/cameras/__init__.py --- a/vispy/scene/cameras/__init__.py +++ b/vispy/scene/cameras/__init__.py @@ -15,6 +15,9 @@ that a certain part of the scene is mapped to the bounding rectangle of the ViewBox. """ +__all__ = ['ArcballCamera', 'BaseCamera', 'FlyCamera', 'MagnifyCamera', + 'Magnify1DCamera', 'PanZoomCamera', 'TurntableCamera'] + from ._base import make_camera # noqa from .base_camera import BaseCamera # noqa from .panzoom import PanZoomCamera # noqa
{"golden_diff": "diff --git a/vispy/scene/cameras/__init__.py b/vispy/scene/cameras/__init__.py\n--- a/vispy/scene/cameras/__init__.py\n+++ b/vispy/scene/cameras/__init__.py\n@@ -15,6 +15,9 @@\n that a certain part of the scene is mapped to the bounding rectangle of the \n ViewBox.\n \"\"\"\n+__all__ = ['ArcballCamera', 'BaseCamera', 'FlyCamera', 'MagnifyCamera',\n+ 'Magnify1DCamera', 'PanZoomCamera', 'TurntableCamera']\n+\n from ._base import make_camera # noqa\n from .base_camera import BaseCamera # noqa\n from .panzoom import PanZoomCamera # noqa\n", "issue": "Camera API documentation missing\nI could not find a list of available cameras in the docs:\n\nhttp://vispy.org/scene.html?highlight=cameras#module-vispy.scene.cameras\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"\nCameras are responsible for determining which part of a scene is displayed\nin a viewbox and for handling user input to change the view.\n\nSeveral Camera subclasses are available to customize the projection of the \nscene such as 3D perspective and orthographic projections, 2D \nscale/translation, and other specialty cameras. A variety of user interaction\nstyles are available for each camera including arcball, turntable, \nfirst-person, and pan/zoom interactions.\n\nInternally, Cameras work by setting the transform of a SubScene object such \nthat a certain part of the scene is mapped to the bounding rectangle of the \nViewBox.\n\"\"\"\nfrom ._base import make_camera # noqa\nfrom .base_camera import BaseCamera # noqa\nfrom .panzoom import PanZoomCamera # noqa\nfrom .arcball import ArcballCamera # noqa\nfrom .turntable import TurntableCamera # noqa\nfrom .fly import FlyCamera # noqa\nfrom .magnify import MagnifyCamera, Magnify1DCamera # noqa\n", "path": "vispy/scene/cameras/__init__.py"}]}
885
169
gh_patches_debug_34075
rasdani/github-patches
git_diff
scikit-hep__awkward-2102
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> behaviors with decorated mixin methods cannot be pickled ### Version of Awkward Array main ### Description and code to reproduce The mixin machinery introduces a closure, which `pickle` can't serialise. </issue> <code> [start of src/awkward/behaviors/mixins.py] 1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE 2 3 4 import sys 5 6 import awkward as ak 7 8 9 def mixin_class(registry, name=None): 10 """ 11 Args: 12 registry (dict): The destination behavior mapping registry. Typically, 13 this would be the global registry #ak.behavior, but one may wish 14 to register methods in an alternative way. 15 name (str): The name to assign to the behaviour class. 16 17 This decorator can be used to register a behavior mixin class. 18 19 Any inherited behaviors will automatically be made available to the decorated 20 class. 21 22 See the "Mixin decorators" section of #ak.behavior for further details. 23 """ 24 25 def register(cls): 26 cls_name = cls.__name__ 27 if name is None: 28 behavior_name = cls_name 29 else: 30 behavior_name = name 31 32 record = type( 33 cls_name + "Record", 34 (cls, ak.highlevel.Record), 35 {"__module__": cls.__module__}, 36 ) 37 setattr(sys.modules[cls.__module__], cls_name + "Record", record) 38 registry[behavior_name] = record 39 array = type( 40 cls_name + "Array", 41 (cls, ak.highlevel.Array), 42 {"__module__": cls.__module__}, 43 ) 44 setattr(sys.modules[cls.__module__], cls_name + "Array", array) 45 registry["*", behavior_name] = array 46 for basecls in cls.mro(): 47 for method in basecls.__dict__.values(): 48 if hasattr(method, "_awkward_mixin"): 49 ufunc, rhs, transpose = method._awkward_mixin 50 if rhs is None: 51 registry.setdefault((ufunc, behavior_name), method) 52 continue 53 for rhs_name in list(rhs) + [behavior_name]: 54 registry.setdefault((ufunc, behavior_name, rhs_name), method) 55 if transpose is not None and rhs_name != behavior_name: 56 registry.setdefault( 57 (ufunc, rhs_name, behavior_name), transpose 58 ) 59 if basecls.__name__ in rhs: 60 rhs.add(behavior_name) 61 return cls 62 63 return register 64 65 66 def mixin_class_method(ufunc, rhs=None, *, transpose=True): 67 """ 68 Args: 69 ufunc (numpy.ufunc): A universal function (or NEP18 callable) that is 70 hooked in Awkward Array, i.e. it can be the first argument of a behavior. 71 rhs (Set[type] or None): Set of right-hand side argument types, optional 72 if wrapping a unary function. The left-hand side is expected to 73 always be `self` of the parent class. 74 transpose (bool): If true, automatically create a transpose signature 75 (only makes sense for binary ufuncs). 76 77 This decorator can be used to register a mixin class method. 78 79 Using this decorator ensures that derived classes that are declared with the 80 #ak.mixin_class decorator will also have the behaviors that this class has. 81 """ 82 83 def register(method): 84 if not isinstance(rhs, (set, type(None))): 85 raise ak._errors.wrap_error( 86 ValueError("expected a set of right-hand-side argument types") 87 ) 88 if transpose and rhs is not None: 89 90 def transposed(left, right): 91 return method(right, left) 92 93 # make a copy of rhs, we will edit it later 94 method._awkward_mixin = (ufunc, set(rhs), transposed) 95 else: 96 method._awkward_mixin = (ufunc, rhs, None) 97 return method 98 99 return register 100 [end of src/awkward/behaviors/mixins.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/awkward/behaviors/mixins.py b/src/awkward/behaviors/mixins.py --- a/src/awkward/behaviors/mixins.py +++ b/src/awkward/behaviors/mixins.py @@ -1,6 +1,7 @@ # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE +import functools import sys import awkward as ak @@ -63,6 +64,10 @@ return register +def _call_transposed(func, left, right): + return func(right, left) + + def mixin_class_method(ufunc, rhs=None, *, transpose=True): """ Args: @@ -70,7 +75,8 @@ hooked in Awkward Array, i.e. it can be the first argument of a behavior. rhs (Set[type] or None): Set of right-hand side argument types, optional if wrapping a unary function. The left-hand side is expected to - always be `self` of the parent class. + always be `self` of the parent class. The current class is implicitly + included in this set. transpose (bool): If true, automatically create a transpose signature (only makes sense for binary ufuncs). @@ -86,12 +92,13 @@ ValueError("expected a set of right-hand-side argument types") ) if transpose and rhs is not None: - - def transposed(left, right): - return method(right, left) - # make a copy of rhs, we will edit it later - method._awkward_mixin = (ufunc, set(rhs), transposed) + # use partial & a module-scoped function so that this is pickleable + method._awkward_mixin = ( + ufunc, + set(rhs), + functools.partial(_call_transposed, method), + ) else: method._awkward_mixin = (ufunc, rhs, None) return method
{"golden_diff": "diff --git a/src/awkward/behaviors/mixins.py b/src/awkward/behaviors/mixins.py\n--- a/src/awkward/behaviors/mixins.py\n+++ b/src/awkward/behaviors/mixins.py\n@@ -1,6 +1,7 @@\n # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n \n \n+import functools\n import sys\n \n import awkward as ak\n@@ -63,6 +64,10 @@\n return register\n \n \n+def _call_transposed(func, left, right):\n+ return func(right, left)\n+\n+\n def mixin_class_method(ufunc, rhs=None, *, transpose=True):\n \"\"\"\n Args:\n@@ -70,7 +75,8 @@\n hooked in Awkward Array, i.e. it can be the first argument of a behavior.\n rhs (Set[type] or None): Set of right-hand side argument types, optional\n if wrapping a unary function. The left-hand side is expected to\n- always be `self` of the parent class.\n+ always be `self` of the parent class. The current class is implicitly\n+ included in this set.\n transpose (bool): If true, automatically create a transpose signature\n (only makes sense for binary ufuncs).\n \n@@ -86,12 +92,13 @@\n ValueError(\"expected a set of right-hand-side argument types\")\n )\n if transpose and rhs is not None:\n-\n- def transposed(left, right):\n- return method(right, left)\n-\n # make a copy of rhs, we will edit it later\n- method._awkward_mixin = (ufunc, set(rhs), transposed)\n+ # use partial & a module-scoped function so that this is pickleable\n+ method._awkward_mixin = (\n+ ufunc,\n+ set(rhs),\n+ functools.partial(_call_transposed, method),\n+ )\n else:\n method._awkward_mixin = (ufunc, rhs, None)\n return method\n", "issue": "behaviors with decorated mixin methods cannot be pickled\n### Version of Awkward Array\n\nmain\n\n### Description and code to reproduce\n\nThe mixin machinery introduces a closure, which `pickle` can't serialise.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n\nimport sys\n\nimport awkward as ak\n\n\ndef mixin_class(registry, name=None):\n \"\"\"\n Args:\n registry (dict): The destination behavior mapping registry. Typically,\n this would be the global registry #ak.behavior, but one may wish\n to register methods in an alternative way.\n name (str): The name to assign to the behaviour class.\n\n This decorator can be used to register a behavior mixin class.\n\n Any inherited behaviors will automatically be made available to the decorated\n class.\n\n See the \"Mixin decorators\" section of #ak.behavior for further details.\n \"\"\"\n\n def register(cls):\n cls_name = cls.__name__\n if name is None:\n behavior_name = cls_name\n else:\n behavior_name = name\n\n record = type(\n cls_name + \"Record\",\n (cls, ak.highlevel.Record),\n {\"__module__\": cls.__module__},\n )\n setattr(sys.modules[cls.__module__], cls_name + \"Record\", record)\n registry[behavior_name] = record\n array = type(\n cls_name + \"Array\",\n (cls, ak.highlevel.Array),\n {\"__module__\": cls.__module__},\n )\n setattr(sys.modules[cls.__module__], cls_name + \"Array\", array)\n registry[\"*\", behavior_name] = array\n for basecls in cls.mro():\n for method in basecls.__dict__.values():\n if hasattr(method, \"_awkward_mixin\"):\n ufunc, rhs, transpose = method._awkward_mixin\n if rhs is None:\n registry.setdefault((ufunc, behavior_name), method)\n continue\n for rhs_name in list(rhs) + [behavior_name]:\n registry.setdefault((ufunc, behavior_name, rhs_name), method)\n if transpose is not None and rhs_name != behavior_name:\n registry.setdefault(\n (ufunc, rhs_name, behavior_name), transpose\n )\n if basecls.__name__ in rhs:\n rhs.add(behavior_name)\n return cls\n\n return register\n\n\ndef mixin_class_method(ufunc, rhs=None, *, transpose=True):\n \"\"\"\n Args:\n ufunc (numpy.ufunc): A universal function (or NEP18 callable) that is\n hooked in Awkward Array, i.e. it can be the first argument of a behavior.\n rhs (Set[type] or None): Set of right-hand side argument types, optional\n if wrapping a unary function. The left-hand side is expected to\n always be `self` of the parent class.\n transpose (bool): If true, automatically create a transpose signature\n (only makes sense for binary ufuncs).\n\n This decorator can be used to register a mixin class method.\n\n Using this decorator ensures that derived classes that are declared with the\n #ak.mixin_class decorator will also have the behaviors that this class has.\n \"\"\"\n\n def register(method):\n if not isinstance(rhs, (set, type(None))):\n raise ak._errors.wrap_error(\n ValueError(\"expected a set of right-hand-side argument types\")\n )\n if transpose and rhs is not None:\n\n def transposed(left, right):\n return method(right, left)\n\n # make a copy of rhs, we will edit it later\n method._awkward_mixin = (ufunc, set(rhs), transposed)\n else:\n method._awkward_mixin = (ufunc, rhs, None)\n return method\n\n return register\n", "path": "src/awkward/behaviors/mixins.py"}]}
1,563
461
gh_patches_debug_14398
rasdani/github-patches
git_diff
akvo__akvo-rsr-3803
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> /related_project/ issue Attempted to POST the following data: {"relation":"1","project":"8012","related_iati_id":"23232"} The response is 400: `related_project: ["This field is required."]` However, we want to allow a selection of external project: <img width="1440" alt="Screenshot 2019-08-16 10 16 11" src="https://user-images.githubusercontent.com/1336477/63150230-effe3400-c00e-11e9-8aff-dc162686751a.png"> </issue> <code> [start of akvo/rest/serializers/related_project.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 from akvo.rsr.models import RelatedProject 9 10 from .rsr_serializer import BaseRSRSerializer 11 12 from rest_framework import serializers 13 14 15 class RelatedProjectRawSerializer(BaseRSRSerializer): 16 17 class Meta: 18 model = RelatedProject 19 fields = '__all__' 20 21 22 class RelatedProjectSerializer(RelatedProjectRawSerializer): 23 24 related_project_name = serializers.ReadOnlyField(source='related_project.title') 25 related_project_show_link = serializers.ReadOnlyField() 26 relation_label = serializers.ReadOnlyField(source='iati_relation_unicode') 27 [end of akvo/rest/serializers/related_project.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/serializers/related_project.py b/akvo/rest/serializers/related_project.py --- a/akvo/rest/serializers/related_project.py +++ b/akvo/rest/serializers/related_project.py @@ -5,7 +5,7 @@ # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. -from akvo.rsr.models import RelatedProject +from akvo.rsr.models import RelatedProject, Project from .rsr_serializer import BaseRSRSerializer @@ -14,6 +14,9 @@ class RelatedProjectRawSerializer(BaseRSRSerializer): + related_project = serializers.PrimaryKeyRelatedField( + allow_null=True, queryset=Project.objects.all(), required=False, default=None) + class Meta: model = RelatedProject fields = '__all__'
{"golden_diff": "diff --git a/akvo/rest/serializers/related_project.py b/akvo/rest/serializers/related_project.py\n--- a/akvo/rest/serializers/related_project.py\n+++ b/akvo/rest/serializers/related_project.py\n@@ -5,7 +5,7 @@\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \n \n-from akvo.rsr.models import RelatedProject\n+from akvo.rsr.models import RelatedProject, Project\n \n from .rsr_serializer import BaseRSRSerializer\n \n@@ -14,6 +14,9 @@\n \n class RelatedProjectRawSerializer(BaseRSRSerializer):\n \n+ related_project = serializers.PrimaryKeyRelatedField(\n+ allow_null=True, queryset=Project.objects.all(), required=False, default=None)\n+\n class Meta:\n model = RelatedProject\n fields = '__all__'\n", "issue": "/related_project/ issue\nAttempted to POST the following data:\r\n{\"relation\":\"1\",\"project\":\"8012\",\"related_iati_id\":\"23232\"}\r\n\r\nThe response is 400:\r\n`related_project: [\"This field is required.\"]`\r\n\r\nHowever, we want to allow a selection of external project:\r\n<img width=\"1440\" alt=\"Screenshot 2019-08-16 10 16 11\" src=\"https://user-images.githubusercontent.com/1336477/63150230-effe3400-c00e-11e9-8aff-dc162686751a.png\">\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import RelatedProject\n\nfrom .rsr_serializer import BaseRSRSerializer\n\nfrom rest_framework import serializers\n\n\nclass RelatedProjectRawSerializer(BaseRSRSerializer):\n\n class Meta:\n model = RelatedProject\n fields = '__all__'\n\n\nclass RelatedProjectSerializer(RelatedProjectRawSerializer):\n\n related_project_name = serializers.ReadOnlyField(source='related_project.title')\n related_project_show_link = serializers.ReadOnlyField()\n relation_label = serializers.ReadOnlyField(source='iati_relation_unicode')\n", "path": "akvo/rest/serializers/related_project.py"}]}
933
194
gh_patches_debug_15838
rasdani/github-patches
git_diff
wagtail__wagtail-4730
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AppRegistryNotReady("Apps aren't loaded yet.") ### After updating wagtail from 2.1 to 2.2 I get an AppRegistryNotReady error when migrating. Removing **from .sitemap_generator import Sitemap** from **wagtail/contrib/sitemaps/__init__.py** fixed the problem. Maybe it has something to do with the User model; I use a custom one. </issue> <code> [start of wagtail/contrib/sitemaps/sitemap_generator.py] 1 import warnings 2 3 from django.contrib.sitemaps import Sitemap as DjangoSitemap 4 5 from wagtail.core.models import Site 6 from wagtail.core.utils import accepts_kwarg 7 from wagtail.utils.deprecation import RemovedInWagtail24Warning 8 9 10 class Sitemap(DjangoSitemap): 11 12 def __init__(self, request=None): 13 self.request = request 14 15 def location(self, obj): 16 return obj.get_full_url(self.request) 17 18 def lastmod(self, obj): 19 # fall back on latest_revision_created_at if last_published_at is null 20 # (for backwards compatibility from before last_published_at was added) 21 return (obj.last_published_at or obj.latest_revision_created_at) 22 23 def get_wagtail_site(self): 24 site = getattr(self.request, 'site', None) 25 if site is None: 26 return Site.objects.select_related( 27 'root_page' 28 ).get(is_default_site=True) 29 return site 30 31 def items(self): 32 return ( 33 self.get_wagtail_site() 34 .root_page 35 .get_descendants(inclusive=True) 36 .live() 37 .public() 38 .order_by('path') 39 .specific()) 40 41 def _urls(self, page, protocol, domain): 42 urls = [] 43 last_mods = set() 44 45 for item in self.paginator.page(page).object_list: 46 47 if not accepts_kwarg(item.get_sitemap_urls, 'request'): 48 warnings.warn( 49 "%s.get_sitemap_urls() must be updated to accept an optional " 50 "'request' keyword argument" % type(item).__name__, 51 category=RemovedInWagtail24Warning) 52 53 url_info_items = item.get_sitemap_urls() 54 else: 55 url_info_items = item.get_sitemap_urls(self.request) 56 57 for url_info in url_info_items: 58 urls.append(url_info) 59 last_mods.add(url_info.get('lastmod')) 60 61 # last_mods might be empty if the whole site is private 62 if last_mods and None not in last_mods: 63 self.latest_lastmod = max(last_mods) 64 return urls 65 [end of wagtail/contrib/sitemaps/sitemap_generator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/contrib/sitemaps/sitemap_generator.py b/wagtail/contrib/sitemaps/sitemap_generator.py --- a/wagtail/contrib/sitemaps/sitemap_generator.py +++ b/wagtail/contrib/sitemaps/sitemap_generator.py @@ -2,7 +2,6 @@ from django.contrib.sitemaps import Sitemap as DjangoSitemap -from wagtail.core.models import Site from wagtail.core.utils import accepts_kwarg from wagtail.utils.deprecation import RemovedInWagtail24Warning @@ -23,6 +22,7 @@ def get_wagtail_site(self): site = getattr(self.request, 'site', None) if site is None: + from wagtail.core.models import Site return Site.objects.select_related( 'root_page' ).get(is_default_site=True)
{"golden_diff": "diff --git a/wagtail/contrib/sitemaps/sitemap_generator.py b/wagtail/contrib/sitemaps/sitemap_generator.py\n--- a/wagtail/contrib/sitemaps/sitemap_generator.py\n+++ b/wagtail/contrib/sitemaps/sitemap_generator.py\n@@ -2,7 +2,6 @@\n \n from django.contrib.sitemaps import Sitemap as DjangoSitemap\n \n-from wagtail.core.models import Site\n from wagtail.core.utils import accepts_kwarg\n from wagtail.utils.deprecation import RemovedInWagtail24Warning\n \n@@ -23,6 +22,7 @@\n def get_wagtail_site(self):\n site = getattr(self.request, 'site', None)\n if site is None:\n+ from wagtail.core.models import Site\n return Site.objects.select_related(\n 'root_page'\n ).get(is_default_site=True)\n", "issue": "AppRegistryNotReady(\"Apps aren't loaded yet.\")\n### After updating wagtail from 2.1 to 2.2 I get an AppRegistryNotReady error when migrating.\r\n\r\nRemoving **from .sitemap_generator import Sitemap**\r\nfrom **wagtail/contrib/sitemaps/__init__.py**\r\nfixed the problem.\r\n\r\nMaybe it has something to do with the User model; I use a custom one.\n", "before_files": [{"content": "import warnings\n\nfrom django.contrib.sitemaps import Sitemap as DjangoSitemap\n\nfrom wagtail.core.models import Site\nfrom wagtail.core.utils import accepts_kwarg\nfrom wagtail.utils.deprecation import RemovedInWagtail24Warning\n\n\nclass Sitemap(DjangoSitemap):\n\n def __init__(self, request=None):\n self.request = request\n\n def location(self, obj):\n return obj.get_full_url(self.request)\n\n def lastmod(self, obj):\n # fall back on latest_revision_created_at if last_published_at is null\n # (for backwards compatibility from before last_published_at was added)\n return (obj.last_published_at or obj.latest_revision_created_at)\n\n def get_wagtail_site(self):\n site = getattr(self.request, 'site', None)\n if site is None:\n return Site.objects.select_related(\n 'root_page'\n ).get(is_default_site=True)\n return site\n\n def items(self):\n return (\n self.get_wagtail_site()\n .root_page\n .get_descendants(inclusive=True)\n .live()\n .public()\n .order_by('path')\n .specific())\n\n def _urls(self, page, protocol, domain):\n urls = []\n last_mods = set()\n\n for item in self.paginator.page(page).object_list:\n\n if not accepts_kwarg(item.get_sitemap_urls, 'request'):\n warnings.warn(\n \"%s.get_sitemap_urls() must be updated to accept an optional \"\n \"'request' keyword argument\" % type(item).__name__,\n category=RemovedInWagtail24Warning)\n\n url_info_items = item.get_sitemap_urls()\n else:\n url_info_items = item.get_sitemap_urls(self.request)\n\n for url_info in url_info_items:\n urls.append(url_info)\n last_mods.add(url_info.get('lastmod'))\n\n # last_mods might be empty if the whole site is private\n if last_mods and None not in last_mods:\n self.latest_lastmod = max(last_mods)\n return urls\n", "path": "wagtail/contrib/sitemaps/sitemap_generator.py"}]}
1,202
189
gh_patches_debug_14105
rasdani/github-patches
git_diff
interactions-py__interactions.py-1169
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Cannot pop from Cache during dispatch. ### Describe the bug. When running functions leading to delete events, for example `channel.purge` the cache encounters a KeyError and kills the whole Process. This is caused by the implementation of #482 ### List the steps. n/A ### What you expected. n/A ### What you saw. ``` Websocket have raised an exception, closing. Traceback (most recent call last): File "C:\Users\\Desktop\PycharmProjects\library\interactions\client\bot.py", line 440, in _login await self._websocket.run() File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\gateway\client.py", line 279, in run await self._handle_stream(msg) File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\gateway\client.py", line 332, in _handle_stream self._dispatch_event(event, data) File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\gateway\client.py", line 562, in _dispatch_event _message_cache.pop(message_id) File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\cache.py", line 131, in pop return self.values.pop(key, default) File "C:\Users\\Desktop\PycharmProjects\library\interactions\utils\dict_caches.py", line 39, in __getitem__ self.move_to_end(key) KeyError: Snowflake(1040316644695756912) Process finished with exit code 0 ``` ### What version of the library did you use? unstable ### Version specification The unstable unstable version ### Code of Conduct - [X] I agree to follow the contribution requirements. [BUG] Cannot pop from Cache during dispatch. ### Describe the bug. When running functions leading to delete events, for example `channel.purge` the cache encounters a KeyError and kills the whole Process. This is caused by the implementation of #482 ### List the steps. n/A ### What you expected. n/A ### What you saw. ``` Websocket have raised an exception, closing. Traceback (most recent call last): File "C:\Users\\Desktop\PycharmProjects\library\interactions\client\bot.py", line 440, in _login await self._websocket.run() File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\gateway\client.py", line 279, in run await self._handle_stream(msg) File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\gateway\client.py", line 332, in _handle_stream self._dispatch_event(event, data) File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\gateway\client.py", line 562, in _dispatch_event _message_cache.pop(message_id) File "C:\Users\\Desktop\PycharmProjects\library\interactions\api\cache.py", line 131, in pop return self.values.pop(key, default) File "C:\Users\\Desktop\PycharmProjects\library\interactions\utils\dict_caches.py", line 39, in __getitem__ self.move_to_end(key) KeyError: Snowflake(1040316644695756912) Process finished with exit code 0 ``` ### What version of the library did you use? unstable ### Version specification The unstable unstable version ### Code of Conduct - [X] I agree to follow the contribution requirements. </issue> <code> [start of interactions/utils/dict_caches.py] 1 from collections import OrderedDict 2 from typing import Generic, TypeVar 3 4 __all__ = ("FIFODict", "LRUDict") 5 6 _KT = TypeVar("_KT") 7 _VT = TypeVar("_VT") 8 9 10 class FIFODict(OrderedDict, Generic[_KT, _VT]): 11 """A dictionary that removes the old keys if over the item limit""" 12 13 def __init__(self, *args, max_items: int = float("inf"), **kwargs): 14 if max_items < 0: 15 raise RuntimeError("You cannot set max_items to negative numbers.") 16 17 super().__init__(*args, **kwargs) 18 self._max_items = max_items 19 20 def __setitem__(self, key: _KT, value: _VT): 21 super().__setitem__(key, value) 22 23 # Prevent buildup over time 24 while len(self) > self._max_items: 25 del self[next(iter(self))] 26 27 28 class LRUDict(OrderedDict, Generic[_KT, _VT]): 29 """A dictionary that removes the value that was the least recently used if over the item limit""" 30 31 def __init__(self, *args, max_items: int = float("inf"), **kwargs): 32 if max_items < 0: 33 raise RuntimeError("You cannot set max_items to negative numbers.") 34 35 super().__init__(*args, **kwargs) 36 self._max_items = max_items 37 38 def __getitem__(self, key: _KT) -> _VT: 39 self.move_to_end(key) 40 return super().__getitem__(key) 41 42 def __setitem__(self, key: _KT, value: _VT): 43 super().__setitem__(key, value) 44 45 # Prevent buildup over time 46 while len(self) > self._max_items: 47 del self[next(iter(self))] 48 [end of interactions/utils/dict_caches.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/interactions/utils/dict_caches.py b/interactions/utils/dict_caches.py --- a/interactions/utils/dict_caches.py +++ b/interactions/utils/dict_caches.py @@ -1,6 +1,8 @@ from collections import OrderedDict from typing import Generic, TypeVar +from .missing import MISSING + __all__ = ("FIFODict", "LRUDict") _KT = TypeVar("_KT") @@ -45,3 +47,14 @@ # Prevent buildup over time while len(self) > self._max_items: del self[next(iter(self))] + + __marker = object() + + def pop(self, key: _KT, default: _VT = __marker) -> _VT: + if key in self: + result = self[key] + del self[key] + return result + if default is MISSING: + raise KeyError(key) + return default
{"golden_diff": "diff --git a/interactions/utils/dict_caches.py b/interactions/utils/dict_caches.py\n--- a/interactions/utils/dict_caches.py\n+++ b/interactions/utils/dict_caches.py\n@@ -1,6 +1,8 @@\n from collections import OrderedDict\n from typing import Generic, TypeVar\n \n+from .missing import MISSING\n+\n __all__ = (\"FIFODict\", \"LRUDict\")\n \n _KT = TypeVar(\"_KT\")\n@@ -45,3 +47,14 @@\n # Prevent buildup over time\n while len(self) > self._max_items:\n del self[next(iter(self))]\n+\n+ __marker = object()\n+\n+ def pop(self, key: _KT, default: _VT = __marker) -> _VT:\n+ if key in self:\n+ result = self[key]\n+ del self[key]\n+ return result\n+ if default is MISSING:\n+ raise KeyError(key)\n+ return default\n", "issue": "[BUG] Cannot pop from Cache during dispatch.\n### Describe the bug.\r\n\r\nWhen running functions leading to delete events, for example `channel.purge` the cache encounters a KeyError and kills the whole Process.\r\n\r\nThis is caused by the implementation of #482\r\n\r\n### List the steps.\r\n\r\nn/A\r\n\r\n### What you expected.\r\n\r\nn/A\r\n\r\n### What you saw.\r\n```\r\nWebsocket have raised an exception, closing.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\client\\bot.py\", line 440, in _login\r\n await self._websocket.run()\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\api\\gateway\\client.py\", line 279, in run\r\n await self._handle_stream(msg)\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\api\\gateway\\client.py\", line 332, in _handle_stream\r\n self._dispatch_event(event, data)\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\api\\gateway\\client.py\", line 562, in _dispatch_event\r\n _message_cache.pop(message_id)\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\api\\cache.py\", line 131, in pop\r\n return self.values.pop(key, default)\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\utils\\dict_caches.py\", line 39, in __getitem__\r\n self.move_to_end(key)\r\nKeyError: Snowflake(1040316644695756912)\r\n\r\nProcess finished with exit code 0\r\n```\r\n\r\n### What version of the library did you use?\r\n\r\nunstable\r\n\r\n### Version specification\r\n\r\nThe unstable unstable version\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the contribution requirements.\n[BUG] Cannot pop from Cache during dispatch.\n### Describe the bug.\r\n\r\nWhen running functions leading to delete events, for example `channel.purge` the cache encounters a KeyError and kills the whole Process.\r\n\r\nThis is caused by the implementation of #482\r\n\r\n### List the steps.\r\n\r\nn/A\r\n\r\n### What you expected.\r\n\r\nn/A\r\n\r\n### What you saw.\r\n```\r\nWebsocket have raised an exception, closing.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\client\\bot.py\", line 440, in _login\r\n await self._websocket.run()\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\api\\gateway\\client.py\", line 279, in run\r\n await self._handle_stream(msg)\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\api\\gateway\\client.py\", line 332, in _handle_stream\r\n self._dispatch_event(event, data)\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\api\\gateway\\client.py\", line 562, in _dispatch_event\r\n _message_cache.pop(message_id)\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\api\\cache.py\", line 131, in pop\r\n return self.values.pop(key, default)\r\n File \"C:\\Users\\\\Desktop\\PycharmProjects\\library\\interactions\\utils\\dict_caches.py\", line 39, in __getitem__\r\n self.move_to_end(key)\r\nKeyError: Snowflake(1040316644695756912)\r\n\r\nProcess finished with exit code 0\r\n```\r\n\r\n### What version of the library did you use?\r\n\r\nunstable\r\n\r\n### Version specification\r\n\r\nThe unstable unstable version\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the contribution requirements.\n", "before_files": [{"content": "from collections import OrderedDict\nfrom typing import Generic, TypeVar\n\n__all__ = (\"FIFODict\", \"LRUDict\")\n\n_KT = TypeVar(\"_KT\")\n_VT = TypeVar(\"_VT\")\n\n\nclass FIFODict(OrderedDict, Generic[_KT, _VT]):\n \"\"\"A dictionary that removes the old keys if over the item limit\"\"\"\n\n def __init__(self, *args, max_items: int = float(\"inf\"), **kwargs):\n if max_items < 0:\n raise RuntimeError(\"You cannot set max_items to negative numbers.\")\n\n super().__init__(*args, **kwargs)\n self._max_items = max_items\n\n def __setitem__(self, key: _KT, value: _VT):\n super().__setitem__(key, value)\n\n # Prevent buildup over time\n while len(self) > self._max_items:\n del self[next(iter(self))]\n\n\nclass LRUDict(OrderedDict, Generic[_KT, _VT]):\n \"\"\"A dictionary that removes the value that was the least recently used if over the item limit\"\"\"\n\n def __init__(self, *args, max_items: int = float(\"inf\"), **kwargs):\n if max_items < 0:\n raise RuntimeError(\"You cannot set max_items to negative numbers.\")\n\n super().__init__(*args, **kwargs)\n self._max_items = max_items\n\n def __getitem__(self, key: _KT) -> _VT:\n self.move_to_end(key)\n return super().__getitem__(key)\n\n def __setitem__(self, key: _KT, value: _VT):\n super().__setitem__(key, value)\n\n # Prevent buildup over time\n while len(self) > self._max_items:\n del self[next(iter(self))]\n", "path": "interactions/utils/dict_caches.py"}]}
1,857
217
gh_patches_debug_21734
rasdani/github-patches
git_diff
marshmallow-code__webargs-99
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Refactor tests The tests can be DRY'd up significantly. I suggest the following: - [ ] Implement the same HTTP API across all the supported frameworks - [ ] Use webtest to test endpoints; create a base test class that tests common functionality </issue> <code> [start of tasks.py] 1 # -*- coding: utf-8 -*- 2 import os 3 import sys 4 import webbrowser 5 6 from invoke import task, run 7 8 docs_dir = 'docs' 9 build_dir = os.path.join(docs_dir, '_build') 10 11 @task 12 def test(coverage=False, browse=False): 13 flake() 14 import pytest 15 args = [] 16 if coverage: 17 args.extend(['--cov=webargs', '--cov-report=term', '--cov-report=html']) 18 19 if sys.version_info < (3, 4, 1): 20 args.append('--ignore={0}'.format(os.path.join('tests', 'test_aiohttp'))) 21 retcode = pytest.main(args) 22 if coverage and browse: 23 webbrowser.open_new_tab(os.path.join('htmlcov', 'index.html')) 24 sys.exit(retcode) 25 26 @task 27 def flake(): 28 """Run flake8 on codebase.""" 29 cmd = 'flake8 .' 30 if sys.version_info < (3, 4, 1): 31 excludes = [ 32 os.path.join('tests', 'test_aiohttp'), 33 os.path.join('webargs', 'async.py'), 34 os.path.join('webargs', 'aiohttpparser.py'), 35 os.path.join('examples', 'annotations_example.py'), 36 'build', 37 ] 38 cmd += ' --exclude={0}'.format(','.join(excludes)) 39 run(cmd, echo=True) 40 41 @task 42 def clean(): 43 run("rm -rf build") 44 run("rm -rf dist") 45 run("rm -rf webargs.egg-info") 46 clean_docs() 47 print("Cleaned up.") 48 49 @task 50 def readme(browse=False): 51 run('rst2html.py README.rst > README.html') 52 if browse: 53 webbrowser.open_new_tab('README.html') 54 55 @task 56 def clean_docs(): 57 run("rm -rf %s" % build_dir) 58 59 @task 60 def browse_docs(): 61 path = os.path.join(build_dir, 'index.html') 62 webbrowser.open_new_tab(path) 63 64 @task 65 def docs(clean=False, browse=False, watch=False): 66 """Build the docs.""" 67 if clean: 68 clean_docs() 69 run("sphinx-build %s %s" % (docs_dir, build_dir), echo=True) 70 if browse: 71 browse_docs() 72 if watch: 73 watch_docs() 74 75 @task 76 def watch_docs(): 77 """Run build the docs when a file changes.""" 78 try: 79 import sphinx_autobuild # noqa 80 except ImportError: 81 print('ERROR: watch task requires the sphinx_autobuild package.') 82 print('Install it with:') 83 print(' pip install sphinx-autobuild') 84 sys.exit(1) 85 run('sphinx-autobuild {0} {1} --watch {2}'.format( 86 docs_dir, build_dir, 'webargs'), echo=True, pty=True) 87 88 @task 89 def publish(test=False): 90 """Publish to the cheeseshop.""" 91 clean() 92 if test: 93 run('python setup.py register -r test sdist bdist_wheel', echo=True) 94 run('twine upload dist/* -r test', echo=True) 95 else: 96 run('python setup.py register sdist bdist_wheel', echo=True) 97 run('twine upload dist/*', echo=True) 98 [end of tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tasks.py b/tasks.py --- a/tasks.py +++ b/tasks.py @@ -17,7 +17,7 @@ args.extend(['--cov=webargs', '--cov-report=term', '--cov-report=html']) if sys.version_info < (3, 4, 1): - args.append('--ignore={0}'.format(os.path.join('tests', 'test_aiohttp'))) + args.append('--ignore={0}'.format(os.path.join('tests', 'test_aiohttpparser.py'))) retcode = pytest.main(args) if coverage and browse: webbrowser.open_new_tab(os.path.join('htmlcov', 'index.html')) @@ -29,7 +29,8 @@ cmd = 'flake8 .' if sys.version_info < (3, 4, 1): excludes = [ - os.path.join('tests', 'test_aiohttp'), + os.path.join('tests', 'apps', 'aiohttp_app.py'), + os.path.join('tests', 'test_aiohttparser.py'), os.path.join('webargs', 'async.py'), os.path.join('webargs', 'aiohttpparser.py'), os.path.join('examples', 'annotations_example.py'),
{"golden_diff": "diff --git a/tasks.py b/tasks.py\n--- a/tasks.py\n+++ b/tasks.py\n@@ -17,7 +17,7 @@\n args.extend(['--cov=webargs', '--cov-report=term', '--cov-report=html'])\n \n if sys.version_info < (3, 4, 1):\n- args.append('--ignore={0}'.format(os.path.join('tests', 'test_aiohttp')))\n+ args.append('--ignore={0}'.format(os.path.join('tests', 'test_aiohttpparser.py')))\n retcode = pytest.main(args)\n if coverage and browse:\n webbrowser.open_new_tab(os.path.join('htmlcov', 'index.html'))\n@@ -29,7 +29,8 @@\n cmd = 'flake8 .'\n if sys.version_info < (3, 4, 1):\n excludes = [\n- os.path.join('tests', 'test_aiohttp'),\n+ os.path.join('tests', 'apps', 'aiohttp_app.py'),\n+ os.path.join('tests', 'test_aiohttparser.py'),\n os.path.join('webargs', 'async.py'),\n os.path.join('webargs', 'aiohttpparser.py'),\n os.path.join('examples', 'annotations_example.py'),\n", "issue": "Refactor tests\nThe tests can be DRY'd up significantly. I suggest the following:\n- [ ] Implement the same HTTP API across all the supported frameworks\n- [ ] Use webtest to test endpoints; create a base test class that tests common functionality\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport webbrowser\n\nfrom invoke import task, run\n\ndocs_dir = 'docs'\nbuild_dir = os.path.join(docs_dir, '_build')\n\n@task\ndef test(coverage=False, browse=False):\n flake()\n import pytest\n args = []\n if coverage:\n args.extend(['--cov=webargs', '--cov-report=term', '--cov-report=html'])\n\n if sys.version_info < (3, 4, 1):\n args.append('--ignore={0}'.format(os.path.join('tests', 'test_aiohttp')))\n retcode = pytest.main(args)\n if coverage and browse:\n webbrowser.open_new_tab(os.path.join('htmlcov', 'index.html'))\n sys.exit(retcode)\n\n@task\ndef flake():\n \"\"\"Run flake8 on codebase.\"\"\"\n cmd = 'flake8 .'\n if sys.version_info < (3, 4, 1):\n excludes = [\n os.path.join('tests', 'test_aiohttp'),\n os.path.join('webargs', 'async.py'),\n os.path.join('webargs', 'aiohttpparser.py'),\n os.path.join('examples', 'annotations_example.py'),\n 'build',\n ]\n cmd += ' --exclude={0}'.format(','.join(excludes))\n run(cmd, echo=True)\n\n@task\ndef clean():\n run(\"rm -rf build\")\n run(\"rm -rf dist\")\n run(\"rm -rf webargs.egg-info\")\n clean_docs()\n print(\"Cleaned up.\")\n\n@task\ndef readme(browse=False):\n run('rst2html.py README.rst > README.html')\n if browse:\n webbrowser.open_new_tab('README.html')\n\n@task\ndef clean_docs():\n run(\"rm -rf %s\" % build_dir)\n\n@task\ndef browse_docs():\n path = os.path.join(build_dir, 'index.html')\n webbrowser.open_new_tab(path)\n\n@task\ndef docs(clean=False, browse=False, watch=False):\n \"\"\"Build the docs.\"\"\"\n if clean:\n clean_docs()\n run(\"sphinx-build %s %s\" % (docs_dir, build_dir), echo=True)\n if browse:\n browse_docs()\n if watch:\n watch_docs()\n\n@task\ndef watch_docs():\n \"\"\"Run build the docs when a file changes.\"\"\"\n try:\n import sphinx_autobuild # noqa\n except ImportError:\n print('ERROR: watch task requires the sphinx_autobuild package.')\n print('Install it with:')\n print(' pip install sphinx-autobuild')\n sys.exit(1)\n run('sphinx-autobuild {0} {1} --watch {2}'.format(\n docs_dir, build_dir, 'webargs'), echo=True, pty=True)\n\n@task\ndef publish(test=False):\n \"\"\"Publish to the cheeseshop.\"\"\"\n clean()\n if test:\n run('python setup.py register -r test sdist bdist_wheel', echo=True)\n run('twine upload dist/* -r test', echo=True)\n else:\n run('python setup.py register sdist bdist_wheel', echo=True)\n run('twine upload dist/*', echo=True)\n", "path": "tasks.py"}]}
1,475
276
gh_patches_debug_12492
rasdani/github-patches
git_diff
TheAlgorithms__Python-9161
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> issue with permute_recursive ### What would you like to share? Your code looks mostly correct, but there's one issue in the `permute_recursive` function due to the modification of the `nums` list. Lists in Python are mutable, and when you use `nums.pop(0)`, it modifies the original `nums` list. This can lead to incorrect results and even an infinite loop. To fix this, you should pass a copy of the `nums` list to the recursive function. Here's the corrected `permute_recursive` function: def permute_recursive(nums: list[int]) -> list[list[int]]: """ Return all permutations. >>> permute_recursive([1, 2, 3]) [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]] """ result: list[list[int]] = [] if len(nums) == 0: return [[]] for _ in range(len(nums)): n = nums.pop(0) permutations = permute_recursive(nums[:]) # Make a copy of nums for perm in permutations: perm.append(n) result.extend(permutations) nums.append(n) return result ``` With this modification, your code should work correctly for both `permute_recursive` and `permute_backtrack`. ### Additional information _No response_ </issue> <code> [start of data_structures/arrays/permutations.py] 1 def permute_recursive(nums: list[int]) -> list[list[int]]: 2 """ 3 Return all permutations. 4 5 >>> permute_recursive([1, 2, 3]) 6 [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]] 7 """ 8 result: list[list[int]] = [] 9 if len(nums) == 0: 10 return [[]] 11 for _ in range(len(nums)): 12 n = nums.pop(0) 13 permutations = permute_recursive(nums) 14 for perm in permutations: 15 perm.append(n) 16 result.extend(permutations) 17 nums.append(n) 18 return result 19 20 21 def permute_backtrack(nums: list[int]) -> list[list[int]]: 22 """ 23 Return all permutations of the given list. 24 25 >>> permute_backtrack([1, 2, 3]) 26 [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]] 27 """ 28 29 def backtrack(start: int) -> None: 30 if start == len(nums) - 1: 31 output.append(nums[:]) 32 else: 33 for i in range(start, len(nums)): 34 nums[start], nums[i] = nums[i], nums[start] 35 backtrack(start + 1) 36 nums[start], nums[i] = nums[i], nums[start] # backtrack 37 38 output: list[list[int]] = [] 39 backtrack(0) 40 return output 41 42 43 if __name__ == "__main__": 44 import doctest 45 46 res = permute_backtrack([1, 2, 3]) 47 print(res) 48 doctest.testmod() 49 [end of data_structures/arrays/permutations.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/data_structures/arrays/permutations.py b/data_structures/arrays/permutations.py --- a/data_structures/arrays/permutations.py +++ b/data_structures/arrays/permutations.py @@ -10,7 +10,7 @@ return [[]] for _ in range(len(nums)): n = nums.pop(0) - permutations = permute_recursive(nums) + permutations = permute_recursive(nums.copy()) for perm in permutations: perm.append(n) result.extend(permutations) @@ -43,6 +43,6 @@ if __name__ == "__main__": import doctest - res = permute_backtrack([1, 2, 3]) - print(res) + result = permute_backtrack([1, 2, 3]) + print(result) doctest.testmod()
{"golden_diff": "diff --git a/data_structures/arrays/permutations.py b/data_structures/arrays/permutations.py\n--- a/data_structures/arrays/permutations.py\n+++ b/data_structures/arrays/permutations.py\n@@ -10,7 +10,7 @@\n return [[]]\n for _ in range(len(nums)):\n n = nums.pop(0)\n- permutations = permute_recursive(nums)\n+ permutations = permute_recursive(nums.copy())\n for perm in permutations:\n perm.append(n)\n result.extend(permutations)\n@@ -43,6 +43,6 @@\n if __name__ == \"__main__\":\n import doctest\n \n- res = permute_backtrack([1, 2, 3])\n- print(res)\n+ result = permute_backtrack([1, 2, 3])\n+ print(result)\n doctest.testmod()\n", "issue": "issue with permute_recursive\n### What would you like to share?\n\nYour code looks mostly correct, but there's one issue in the `permute_recursive` function due to the modification of the `nums` list. Lists in Python are mutable, and when you use `nums.pop(0)`, it modifies the original `nums` list. This can lead to incorrect results and even an infinite loop.\r\n\r\nTo fix this, you should pass a copy of the `nums` list to the recursive function. Here's the corrected `permute_recursive` function:\r\n\r\n\r\ndef permute_recursive(nums: list[int]) -> list[list[int]]:\r\n \"\"\"\r\n Return all permutations.\r\n\r\n >>> permute_recursive([1, 2, 3])\r\n [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]\r\n \"\"\"\r\n result: list[list[int]] = []\r\n if len(nums) == 0:\r\n return [[]]\r\n for _ in range(len(nums)):\r\n n = nums.pop(0)\r\n permutations = permute_recursive(nums[:]) # Make a copy of nums\r\n for perm in permutations:\r\n perm.append(n)\r\n result.extend(permutations)\r\n nums.append(n)\r\n return result\r\n```\r\n\r\nWith this modification, your code should work correctly for both `permute_recursive` and `permute_backtrack`.\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "def permute_recursive(nums: list[int]) -> list[list[int]]:\n \"\"\"\n Return all permutations.\n\n >>> permute_recursive([1, 2, 3])\n [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]\n \"\"\"\n result: list[list[int]] = []\n if len(nums) == 0:\n return [[]]\n for _ in range(len(nums)):\n n = nums.pop(0)\n permutations = permute_recursive(nums)\n for perm in permutations:\n perm.append(n)\n result.extend(permutations)\n nums.append(n)\n return result\n\n\ndef permute_backtrack(nums: list[int]) -> list[list[int]]:\n \"\"\"\n Return all permutations of the given list.\n\n >>> permute_backtrack([1, 2, 3])\n [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]\n \"\"\"\n\n def backtrack(start: int) -> None:\n if start == len(nums) - 1:\n output.append(nums[:])\n else:\n for i in range(start, len(nums)):\n nums[start], nums[i] = nums[i], nums[start]\n backtrack(start + 1)\n nums[start], nums[i] = nums[i], nums[start] # backtrack\n\n output: list[list[int]] = []\n backtrack(0)\n return output\n\n\nif __name__ == \"__main__\":\n import doctest\n\n res = permute_backtrack([1, 2, 3])\n print(res)\n doctest.testmod()\n", "path": "data_structures/arrays/permutations.py"}]}
1,358
188
gh_patches_debug_7516
rasdani/github-patches
git_diff
pre-commit__pre-commit-624
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> commit-msg stage does not work Everything works as expected when running just `pre-commit install`, then hooks work. But when running `pre-commit install -t commit-msg` `IOError` happens, since template could not be found. Here's the detailed information. ## Env - `python2.7` - `pipenv 7.3.7` - `pre-commit 1.1.1` Actually tested with both `python2` and `python3`. ## Configuration ```yaml - repo: local hooks: - id: gitlint name: gitlint entry: "bash -c 'gitlint lint'" language: system stages: [commit-msg] - id: pytest name: pytest entry: "bash -c 'python manage.py test'" language: system - id: safety name: safety entry: "bash -c 'safety check'" language: system ``` ## Output ``` » pre-commit install -t commit-msg Running in migration mode with existing hooks at /Users/sobolev/Desktop/test/.git/hooks/commit-msg.legacy Use -f to use only pre-commit. An unexpected error has occurred: IOError: [Errno 2] No such file or directory: '/Users/sobolev/.virtualenvs/test-p4WySO70/lib/python2.7/site-packages/pre_commit/resources/commit-msg-tmpl' Check the log at /Users/sobolev/.cache/pre-commit/pre-commit.log ``` When I do `ls /Users/sobolev/.virtualenvs/test-p4WySO70/lib/python2.7/site-packages/pre_commit/resources/commit-msg-tmpl` that's what is see: ``` (test-p4WySO70) ~/Desktop/test master ✗ ✚ 2 ⚡ » ls /Users/sobolev/.virtualenvs/test-p4WySO70/lib/python2.7/site-packages/pre_commit/resources empty_template pre-push-tmpl ruby-build.tar.gz hook-tmpl rbenv.tar.gz ruby-download.tar.gz ``` </issue> <code> [start of setup.py] 1 from setuptools import find_packages 2 from setuptools import setup 3 4 5 setup( 6 name='pre_commit', 7 description=( 8 'A framework for managing and maintaining multi-language pre-commit ' 9 'hooks.' 10 ), 11 url='https://github.com/pre-commit/pre-commit', 12 version='1.1.1', 13 14 author='Anthony Sottile', 15 author_email='[email protected]', 16 17 platforms='linux', 18 classifiers=[ 19 'License :: OSI Approved :: MIT License', 20 'Programming Language :: Python :: 2', 21 'Programming Language :: Python :: 2.7', 22 'Programming Language :: Python :: 3', 23 'Programming Language :: Python :: 3.5', 24 'Programming Language :: Python :: 3.6', 25 'Programming Language :: Python :: Implementation :: CPython', 26 'Programming Language :: Python :: Implementation :: PyPy', 27 ], 28 29 packages=find_packages(exclude=('tests*', 'testing*')), 30 package_data={ 31 'pre_commit': [ 32 'resources/hook-tmpl', 33 'resources/pre-push-tmpl', 34 'resources/rbenv.tar.gz', 35 'resources/ruby-build.tar.gz', 36 'resources/ruby-download.tar.gz', 37 'resources/empty_template/*', 38 'resources/empty_template/.npmignore', 39 ], 40 }, 41 install_requires=[ 42 'aspy.yaml', 43 'cached-property', 44 'identify>=1.0.0', 45 'nodeenv>=0.11.1', 46 'pyyaml', 47 'six', 48 'virtualenv', 49 ], 50 entry_points={ 51 'console_scripts': [ 52 'pre-commit = pre_commit.main:main', 53 'pre-commit-validate-config = pre_commit.clientlib:validate_config_main', # noqa 54 'pre-commit-validate-manifest = pre_commit.clientlib:validate_manifest_main', # noqa 55 ], 56 }, 57 ) 58 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -29,11 +29,7 @@ packages=find_packages(exclude=('tests*', 'testing*')), package_data={ 'pre_commit': [ - 'resources/hook-tmpl', - 'resources/pre-push-tmpl', - 'resources/rbenv.tar.gz', - 'resources/ruby-build.tar.gz', - 'resources/ruby-download.tar.gz', + 'resources/*', 'resources/empty_template/*', 'resources/empty_template/.npmignore', ],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,11 +29,7 @@\n packages=find_packages(exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n- 'resources/hook-tmpl',\n- 'resources/pre-push-tmpl',\n- 'resources/rbenv.tar.gz',\n- 'resources/ruby-build.tar.gz',\n- 'resources/ruby-download.tar.gz',\n+ 'resources/*',\n 'resources/empty_template/*',\n 'resources/empty_template/.npmignore',\n ],\n", "issue": "commit-msg stage does not work\nEverything works as expected when running just `pre-commit install`, then hooks work.\r\nBut when running `pre-commit install -t commit-msg` `IOError` happens, since template could not be found.\r\n\r\nHere's the detailed information.\r\n\r\n## Env\r\n\r\n- `python2.7`\r\n- `pipenv 7.3.7`\r\n- `pre-commit 1.1.1`\r\n\r\nActually tested with both `python2` and `python3`.\r\n\r\n## Configuration\r\n\r\n```yaml\r\n- repo: local\r\n hooks:\r\n - id: gitlint\r\n name: gitlint\r\n entry: \"bash -c 'gitlint lint'\"\r\n language: system\r\n stages: [commit-msg]\r\n\r\n - id: pytest\r\n name: pytest\r\n entry: \"bash -c 'python manage.py test'\"\r\n language: system\r\n\r\n - id: safety\r\n name: safety\r\n entry: \"bash -c 'safety check'\"\r\n language: system\r\n```\r\n\r\n## Output\r\n\r\n```\r\n\u00bb pre-commit install -t commit-msg\r\nRunning in migration mode with existing hooks at /Users/sobolev/Desktop/test/.git/hooks/commit-msg.legacy\r\nUse -f to use only pre-commit.\r\nAn unexpected error has occurred: IOError: [Errno 2] No such file or directory: '/Users/sobolev/.virtualenvs/test-p4WySO70/lib/python2.7/site-packages/pre_commit/resources/commit-msg-tmpl'\r\nCheck the log at /Users/sobolev/.cache/pre-commit/pre-commit.log\r\n```\r\n\r\nWhen I do `ls /Users/sobolev/.virtualenvs/test-p4WySO70/lib/python2.7/site-packages/pre_commit/resources/commit-msg-tmpl` that's what is see:\r\n\r\n```\r\n(test-p4WySO70) ~/Desktop/test master \u2717 \u271a 2 \u26a1\r\n\u00bb ls /Users/sobolev/.virtualenvs/test-p4WySO70/lib/python2.7/site-packages/pre_commit/resources \r\nempty_template pre-push-tmpl ruby-build.tar.gz\r\nhook-tmpl rbenv.tar.gz ruby-download.tar.gz\r\n```\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='1.1.1',\n\n author='Anthony Sottile',\n author_email='[email protected]',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages(exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/hook-tmpl',\n 'resources/pre-push-tmpl',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n 'resources/empty_template/*',\n 'resources/empty_template/.npmignore',\n ],\n },\n install_requires=[\n 'aspy.yaml',\n 'cached-property',\n 'identify>=1.0.0',\n 'nodeenv>=0.11.1',\n 'pyyaml',\n 'six',\n 'virtualenv',\n ],\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'pre-commit-validate-config = pre_commit.clientlib:validate_config_main', # noqa\n 'pre-commit-validate-manifest = pre_commit.clientlib:validate_manifest_main', # noqa\n ],\n },\n)\n", "path": "setup.py"}]}
1,497
131
gh_patches_debug_37403
rasdani/github-patches
git_diff
streamlink__streamlink-1670
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [plugin issue] plugin.vidio.com / can't handle stream URLs anymore - [x] This is a bug report. - [ ] This is a feature request. - [ ] This is a plugin (improvement) request. - [ ] I have read the contribution guidelines. ### Description plugin handling of https://www.vidio.com/live URls not working anymore due to changes at provided stream structure ### Expected / Actual behavior streamlink -l debug www.vidio.com/live/665-rcti-tv-stream [cli][debug] OS: Windows 10 [cli][debug] Python: 3.5.2 [cli][debug] Streamlink: 0.12.1 [cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.47.0) [cli][info] Found matching plugin vidio for URL www.vidio.com/live/665-rcti-tv-stream [plugin.vidio][debug] HLS URL: https://kmklive-lh.akamaihd.net/i/rcti_ta_regular@94478/master.m3u8 error: Unable to open URL: https://kmklive-lh.akamaihd.net/i/rcti_ta_regular@94478/master.m3u8 (403 Client Error: Forbidden for url: https://kmklive-lh.akamaihd.net/i/rcti_ta_regular@94478/master.m3u8) </issue> <code> [start of src/streamlink/plugins/vidio.py] 1 ''' 2 Plugin for vidio.com 3 - https://www.vidio.com/live/5075-dw-tv-stream 4 - https://www.vidio.com/watch/766861-5-rekor-fantastis-zidane-bersama-real-madrid 5 ''' 6 import re 7 8 from streamlink.plugin import Plugin 9 from streamlink.plugin.api import http 10 from streamlink.stream import HLSStream 11 12 _url_re = re.compile(r"https?://(?:www\.)?vidio\.com/(?:en/)?(?P<type>live|watch)/(?P<id>\d+)-(?P<name>[^/?#&]+)") 13 _playlist_re = re.compile(r'''hls-url=["'](?P<url>[^"']+)["']''') 14 15 16 class Vidio(Plugin): 17 @classmethod 18 def can_handle_url(cls, url): 19 return _url_re.match(url) 20 21 def _get_streams(self): 22 res = http.get(self.url) 23 24 match = _playlist_re.search(res.text) 25 if match is None: 26 return 27 28 url = match.group('url') 29 30 if url: 31 self.logger.debug('HLS URL: {0}'.format(url)) 32 for s in HLSStream.parse_variant_playlist(self.session, url).items(): 33 yield s 34 35 36 __plugin__ = Vidio 37 [end of src/streamlink/plugins/vidio.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/vidio.py b/src/streamlink/plugins/vidio.py --- a/src/streamlink/plugins/vidio.py +++ b/src/streamlink/plugins/vidio.py @@ -1,36 +1,61 @@ -''' +""" Plugin for vidio.com - https://www.vidio.com/live/5075-dw-tv-stream - https://www.vidio.com/watch/766861-5-rekor-fantastis-zidane-bersama-real-madrid -''' +""" import re from streamlink.plugin import Plugin -from streamlink.plugin.api import http +from streamlink.plugin.api import http, useragents, validate from streamlink.stream import HLSStream - -_url_re = re.compile(r"https?://(?:www\.)?vidio\.com/(?:en/)?(?P<type>live|watch)/(?P<id>\d+)-(?P<name>[^/?#&]+)") -_playlist_re = re.compile(r'''hls-url=["'](?P<url>[^"']+)["']''') +from streamlink.utils import parse_json class Vidio(Plugin): + _url_re = re.compile(r"https?://(?:www\.)?vidio\.com/(?:en/)?(?P<type>live|watch)/(?P<id>\d+)-(?P<name>[^/?#&]+)") + _playlist_re = re.compile(r'''hls-url=["'](?P<url>[^"']+)["']''') + _data_id_re = re.compile(r'''meta\s+data-id=["'](?P<id>[^"']+)["']''') + + csrf_tokens_url = "https://www.vidio.com/csrf_tokens" + tokens_url = "https://www.vidio.com/live/{id}/tokens" + token_schema = validate.Schema(validate.transform(parse_json), + {"token": str}, + validate.get("token")) + @classmethod def can_handle_url(cls, url): - return _url_re.match(url) + return cls._url_re.match(url) + + def get_csrf_tokens(self): + return http.get(self.csrf_tokens_url, + schema=self.token_schema) + + def get_url_tokens(self, stream_id): + self.logger.debug("Getting stream tokens") + csrf_token = self.get_csrf_tokens() + return http.post(self.tokens_url.format(id=stream_id), + files={"authenticity_token": (None, csrf_token)}, + headers={"User-Agent": useragents.CHROME, + "Referer": self.url}, + schema=self.token_schema) def _get_streams(self): res = http.get(self.url) - match = _playlist_re.search(res.text) - if match is None: - return + plmatch = self._playlist_re.search(res.text) + idmatch = self._data_id_re.search(res.text) + + hls_url = plmatch and plmatch.group("url") + stream_id = idmatch and idmatch.group("id") - url = match.group('url') + tokens = self.get_url_tokens(stream_id) - if url: - self.logger.debug('HLS URL: {0}'.format(url)) - for s in HLSStream.parse_variant_playlist(self.session, url).items(): - yield s + if hls_url: + self.logger.debug("HLS URL: {0}".format(hls_url)) + self.logger.debug("Tokens: {0}".format(tokens)) + return HLSStream.parse_variant_playlist(self.session, hls_url+"?"+tokens, + headers={"User-Agent": useragents.CHROME, + "Referer": self.url}) __plugin__ = Vidio
{"golden_diff": "diff --git a/src/streamlink/plugins/vidio.py b/src/streamlink/plugins/vidio.py\n--- a/src/streamlink/plugins/vidio.py\n+++ b/src/streamlink/plugins/vidio.py\n@@ -1,36 +1,61 @@\n-'''\n+\"\"\"\n Plugin for vidio.com\n - https://www.vidio.com/live/5075-dw-tv-stream\n - https://www.vidio.com/watch/766861-5-rekor-fantastis-zidane-bersama-real-madrid\n-'''\n+\"\"\"\n import re\n \n from streamlink.plugin import Plugin\n-from streamlink.plugin.api import http\n+from streamlink.plugin.api import http, useragents, validate\n from streamlink.stream import HLSStream\n-\n-_url_re = re.compile(r\"https?://(?:www\\.)?vidio\\.com/(?:en/)?(?P<type>live|watch)/(?P<id>\\d+)-(?P<name>[^/?#&]+)\")\n-_playlist_re = re.compile(r'''hls-url=[\"'](?P<url>[^\"']+)[\"']''')\n+from streamlink.utils import parse_json\n \n \n class Vidio(Plugin):\n+ _url_re = re.compile(r\"https?://(?:www\\.)?vidio\\.com/(?:en/)?(?P<type>live|watch)/(?P<id>\\d+)-(?P<name>[^/?#&]+)\")\n+ _playlist_re = re.compile(r'''hls-url=[\"'](?P<url>[^\"']+)[\"']''')\n+ _data_id_re = re.compile(r'''meta\\s+data-id=[\"'](?P<id>[^\"']+)[\"']''')\n+\n+ csrf_tokens_url = \"https://www.vidio.com/csrf_tokens\"\n+ tokens_url = \"https://www.vidio.com/live/{id}/tokens\"\n+ token_schema = validate.Schema(validate.transform(parse_json),\n+ {\"token\": str},\n+ validate.get(\"token\"))\n+\n @classmethod\n def can_handle_url(cls, url):\n- return _url_re.match(url)\n+ return cls._url_re.match(url)\n+\n+ def get_csrf_tokens(self):\n+ return http.get(self.csrf_tokens_url,\n+ schema=self.token_schema)\n+\n+ def get_url_tokens(self, stream_id):\n+ self.logger.debug(\"Getting stream tokens\")\n+ csrf_token = self.get_csrf_tokens()\n+ return http.post(self.tokens_url.format(id=stream_id),\n+ files={\"authenticity_token\": (None, csrf_token)},\n+ headers={\"User-Agent\": useragents.CHROME,\n+ \"Referer\": self.url},\n+ schema=self.token_schema)\n \n def _get_streams(self):\n res = http.get(self.url)\n \n- match = _playlist_re.search(res.text)\n- if match is None:\n- return\n+ plmatch = self._playlist_re.search(res.text)\n+ idmatch = self._data_id_re.search(res.text)\n+\n+ hls_url = plmatch and plmatch.group(\"url\")\n+ stream_id = idmatch and idmatch.group(\"id\")\n \n- url = match.group('url')\n+ tokens = self.get_url_tokens(stream_id)\n \n- if url:\n- self.logger.debug('HLS URL: {0}'.format(url))\n- for s in HLSStream.parse_variant_playlist(self.session, url).items():\n- yield s\n+ if hls_url:\n+ self.logger.debug(\"HLS URL: {0}\".format(hls_url))\n+ self.logger.debug(\"Tokens: {0}\".format(tokens))\n+ return HLSStream.parse_variant_playlist(self.session, hls_url+\"?\"+tokens,\n+ headers={\"User-Agent\": useragents.CHROME,\n+ \"Referer\": self.url})\n \n \n __plugin__ = Vidio\n", "issue": "[plugin issue] plugin.vidio.com / can't handle stream URLs anymore\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [ ] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n### Description\r\nplugin handling of https://www.vidio.com/live URls not working anymore due to changes at provided stream structure\r\n\r\n### Expected / Actual behavior\r\nstreamlink -l debug www.vidio.com/live/665-rcti-tv-stream\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.5.2\r\n[cli][debug] Streamlink: 0.12.1\r\n[cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.47.0)\r\n[cli][info] Found matching plugin vidio for URL www.vidio.com/live/665-rcti-tv-stream\r\n[plugin.vidio][debug] HLS URL: https://kmklive-lh.akamaihd.net/i/rcti_ta_regular@94478/master.m3u8\r\nerror: Unable to open URL: https://kmklive-lh.akamaihd.net/i/rcti_ta_regular@94478/master.m3u8 (403 Client Error: Forbidden for url: https://kmklive-lh.akamaihd.net/i/rcti_ta_regular@94478/master.m3u8)\r\n\n", "before_files": [{"content": "'''\nPlugin for vidio.com\n- https://www.vidio.com/live/5075-dw-tv-stream\n- https://www.vidio.com/watch/766861-5-rekor-fantastis-zidane-bersama-real-madrid\n'''\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream\n\n_url_re = re.compile(r\"https?://(?:www\\.)?vidio\\.com/(?:en/)?(?P<type>live|watch)/(?P<id>\\d+)-(?P<name>[^/?#&]+)\")\n_playlist_re = re.compile(r'''hls-url=[\"'](?P<url>[^\"']+)[\"']''')\n\n\nclass Vidio(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n res = http.get(self.url)\n\n match = _playlist_re.search(res.text)\n if match is None:\n return\n\n url = match.group('url')\n\n if url:\n self.logger.debug('HLS URL: {0}'.format(url))\n for s in HLSStream.parse_variant_playlist(self.session, url).items():\n yield s\n\n\n__plugin__ = Vidio\n", "path": "src/streamlink/plugins/vidio.py"}]}
1,241
836
gh_patches_debug_7782
rasdani/github-patches
git_diff
microsoft__ptvsd-818
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> test_path_names_uppercase_enabled fails ``` 2018-09-18T18:50:20.6706273Z ====================================================================== 2018-09-18T18:50:20.6706627Z FAIL [0.001s]: test_path_names_uppercase_enabled (tests.ptvsd.test_pathutils.PathUtilTests) 2018-09-18T18:50:20.6706979Z ---------------------------------------------------------------------- 2018-09-18T18:50:20.6707253Z Traceback (most recent call last): 2018-09-18T18:50:20.6707620Z File "D:\a\1\s\tests\ptvsd\test_pathutils.py", line 78, in test_path_names_uppercase_enabled 2018-09-18T18:50:20.6708077Z self.assertEqual(result, ACTUAL) 2018-09-18T18:50:20.6708307Z AssertionError: 'D:\\A\\1\\S\\TESTS\\PTVSD\\test_pathutils.py' != 'D:\\a\\1\\s\\tests\\ptvsd\\test_pathutils.py' 2018-09-18T18:50:20.6708746Z - D:\A\1\S\TESTS\PTVSD\test_pathutils.py 2018-09-18T18:50:20.6708945Z + D:\a\1\s\tests\ptvsd\test_pathutils.py ``` </issue> <code> [start of ptvsd/pathutils.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. See LICENSE in the project root 3 # for license information. 4 5 from __future__ import print_function, with_statement, absolute_import 6 7 from glob import glob 8 import os.path 9 import platform 10 11 12 MAX_FILES_TO_CACHE = 1000 13 14 15 class PathUnNormcase(object): 16 """Ensures path names of files are returned as they exist on the fs.""" 17 18 def __init__(self): 19 self._dict = {} 20 self._enabled = False 21 22 def enable(self): 23 self._enabled = platform.system() == 'Windows' 24 25 def un_normcase(self, file_path): 26 if not self._enabled or len(file_path) == 0: 27 return file_path 28 if file_path in self._dict: 29 return self._dict[file_path] 30 file_path_to_return = self._get_actual_filename(file_path) 31 self.track_file_path_case(file_path_to_return) 32 return file_path_to_return 33 34 def track_file_path_case(self, file_path): 35 if not self._enabled: 36 return 37 if len(self._dict) > MAX_FILES_TO_CACHE: 38 self._dict.clear() 39 self._dict[file_path] = file_path 40 41 def _get_actual_filename(self, name): 42 """ 43 Use glob to search for a file by building a regex. 44 Original source from https://stackoverflow.com/a/30374360/4443457 45 (Modified to match file name as well). 46 """ 47 48 sep = os.path.sep 49 parts = os.path.normpath(name).split(sep) 50 dirs = parts[0:-1] 51 filename = '{}[{}]'.format(parts[-1][:-1], parts[-1][-1:]) 52 path_fragment1 = dirs[0].upper() 53 if dirs[0] == os.path.splitdrive(name)[0]: 54 fragments = [path_fragment1] 55 else: 56 dir_names = os.listdir(os.getcwd()) 57 fragments = list(filter( 58 lambda x: x.upper() == path_fragment1, dir_names)) 59 fragments += list(d for d in dirs[1:] if d) 60 path = glob(sep.join(fragments)) 61 if not path: 62 return name 63 res = glob(sep.join((path[0], filename))) 64 if not res: 65 return name 66 return res[0] 67 [end of ptvsd/pathutils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ptvsd/pathutils.py b/ptvsd/pathutils.py --- a/ptvsd/pathutils.py +++ b/ptvsd/pathutils.py @@ -56,7 +56,8 @@ dir_names = os.listdir(os.getcwd()) fragments = list(filter( lambda x: x.upper() == path_fragment1, dir_names)) - fragments += list(d for d in dirs[1:] if d) + fragments += list('{}[{}]'.format(d[:-1], d[-1]) + for d in dirs[1:] if d) path = glob(sep.join(fragments)) if not path: return name
{"golden_diff": "diff --git a/ptvsd/pathutils.py b/ptvsd/pathutils.py\n--- a/ptvsd/pathutils.py\n+++ b/ptvsd/pathutils.py\n@@ -56,7 +56,8 @@\n dir_names = os.listdir(os.getcwd())\n fragments = list(filter(\n lambda x: x.upper() == path_fragment1, dir_names))\n- fragments += list(d for d in dirs[1:] if d)\n+ fragments += list('{}[{}]'.format(d[:-1], d[-1])\n+ for d in dirs[1:] if d)\n path = glob(sep.join(fragments))\n if not path:\n return name\n", "issue": "test_path_names_uppercase_enabled fails\n```\r\n2018-09-18T18:50:20.6706273Z ======================================================================\r\n2018-09-18T18:50:20.6706627Z FAIL [0.001s]: test_path_names_uppercase_enabled (tests.ptvsd.test_pathutils.PathUtilTests)\r\n2018-09-18T18:50:20.6706979Z ----------------------------------------------------------------------\r\n2018-09-18T18:50:20.6707253Z Traceback (most recent call last):\r\n2018-09-18T18:50:20.6707620Z File \"D:\\a\\1\\s\\tests\\ptvsd\\test_pathutils.py\", line 78, in test_path_names_uppercase_enabled\r\n2018-09-18T18:50:20.6708077Z self.assertEqual(result, ACTUAL)\r\n2018-09-18T18:50:20.6708307Z AssertionError: 'D:\\\\A\\\\1\\\\S\\\\TESTS\\\\PTVSD\\\\test_pathutils.py' != 'D:\\\\a\\\\1\\\\s\\\\tests\\\\ptvsd\\\\test_pathutils.py'\r\n2018-09-18T18:50:20.6708746Z - D:\\A\\1\\S\\TESTS\\PTVSD\\test_pathutils.py\r\n2018-09-18T18:50:20.6708945Z + D:\\a\\1\\s\\tests\\ptvsd\\test_pathutils.py\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import print_function, with_statement, absolute_import\n\nfrom glob import glob\nimport os.path\nimport platform\n\n\nMAX_FILES_TO_CACHE = 1000\n\n\nclass PathUnNormcase(object):\n \"\"\"Ensures path names of files are returned as they exist on the fs.\"\"\"\n\n def __init__(self):\n self._dict = {}\n self._enabled = False\n\n def enable(self):\n self._enabled = platform.system() == 'Windows'\n\n def un_normcase(self, file_path):\n if not self._enabled or len(file_path) == 0:\n return file_path\n if file_path in self._dict:\n return self._dict[file_path]\n file_path_to_return = self._get_actual_filename(file_path)\n self.track_file_path_case(file_path_to_return)\n return file_path_to_return\n\n def track_file_path_case(self, file_path):\n if not self._enabled:\n return\n if len(self._dict) > MAX_FILES_TO_CACHE:\n self._dict.clear()\n self._dict[file_path] = file_path\n\n def _get_actual_filename(self, name):\n \"\"\"\n Use glob to search for a file by building a regex.\n Original source from https://stackoverflow.com/a/30374360/4443457\n (Modified to match file name as well).\n \"\"\"\n\n sep = os.path.sep\n parts = os.path.normpath(name).split(sep)\n dirs = parts[0:-1]\n filename = '{}[{}]'.format(parts[-1][:-1], parts[-1][-1:])\n path_fragment1 = dirs[0].upper()\n if dirs[0] == os.path.splitdrive(name)[0]:\n fragments = [path_fragment1]\n else:\n dir_names = os.listdir(os.getcwd())\n fragments = list(filter(\n lambda x: x.upper() == path_fragment1, dir_names))\n fragments += list(d for d in dirs[1:] if d)\n path = glob(sep.join(fragments))\n if not path:\n return name\n res = glob(sep.join((path[0], filename)))\n if not res:\n return name\n return res[0]\n", "path": "ptvsd/pathutils.py"}]}
1,610
148
gh_patches_debug_8485
rasdani/github-patches
git_diff
SigmaHQ__sigma-1026
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error while pushing sigma to misp using sigma2misp script (venv) hydra@Kaushals-MacBook-Air session2/sigma (master %) » tools/sigma2misp @misp.conf --insecure --same-event --info "Test Event" -r rules/windows/sysmon 2 ↵ Traceback (most recent call last): File "tools/sigma2misp", line 57, in <module> eventid = create_new_event() File "tools/sigma2misp", line 14, in create_new_event event = misp.MISPEvent() AttributeError: 'PyMISP' object has no attribute 'MISPEvent' </issue> <code> [start of tools/sigma/sigma2misp.py] 1 #!/usr/bin/env python3 2 # Import given Sigma rules to MISP 3 4 import argparse 5 import pathlib 6 import urllib3 7 urllib3.disable_warnings() 8 from pymisp import PyMISP 9 10 def create_new_event(args, misp): 11 if hasattr(misp, "new_event"): 12 return misp.new_event(info=args.info)["Event"]["id"] 13 14 event = misp.MISPEvent() 15 event.info = args.info 16 return misp.add_event(event)["Event"]["id"] 17 18 19 class MISPImportArgumentParser(argparse.ArgumentParser): 20 def __init__(self, *args, **kwargs): 21 super().__init__( 22 description="Import Sigma rules into MISP events", 23 epilog="Parameters can be read from a file by a @filename parameter. The file should contain one parameter per line. Dashes may be omitted.", 24 fromfile_prefix_chars="@", 25 ) 26 27 def convert_arg_line_to_args(self, line : str): 28 return ("--" + line.lstrip("--")).split() 29 30 def main(): 31 argparser = MISPImportArgumentParser() 32 argparser.add_argument("--url", "-u", default="https://localhost", help="URL of MISP instance") 33 argparser.add_argument("--key", "-k", required=True, help="API key") 34 argparser.add_argument("--insecure", "-I", action="store_false", help="Disable TLS certifcate validation.") 35 argparser.add_argument("--event", "-e", type=int, help="Add Sigma rule to event with this ID. If not set, create new event.") 36 argparser.add_argument("--same-event", "-s", action="store_true", help="Import all Sigma rules to the same event, if no event is set.") 37 argparser.add_argument("--info", "-i", default="Sigma import", help="Event Information field for newly created MISP event.") 38 argparser.add_argument("--recursive", "-r", action="store_true", help="Recursive traversal of directory") 39 argparser.add_argument("sigma", nargs="+", help="Sigma rule file that should be imported") 40 args = argparser.parse_args() 41 42 if args.recursive: 43 paths = [ p for pathname in args.sigma for p in pathlib.Path(pathname).glob("**/*") if p.is_file() ] 44 else: 45 paths = [ pathlib.Path(sigma) for sigma in args.sigma ] 46 47 misp = PyMISP(args.url, args.key, args.insecure) 48 if args.event: 49 if hasattr(misp, "get"): 50 eventid = misp.get(args.event)["Event"]["id"] 51 else: 52 eventid = misp.get_event(args.event)["Event"]["id"] 53 54 first = True 55 56 for sigma in paths: 57 if not args.event and (first or not args.same_event): 58 eventid = create_new_event(args, misp) 59 print("Importing Sigma rule {} into MISP event {}...".format(sigma, eventid, end="")) 60 f = sigma.open("rt") 61 62 if hasattr(misp, "add_named_attribute"): 63 misp.add_named_attribute(eventid, "sigma", f.read()) 64 else: 65 event = misp.get_event(eventid, pythonify=True) 66 event.add_attribute("sigma", f.read()) 67 misp.update_event(event) 68 69 f.close() 70 first = False 71 72 if __name__ == "__main__": 73 main() 74 [end of tools/sigma/sigma2misp.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/sigma/sigma2misp.py b/tools/sigma/sigma2misp.py --- a/tools/sigma/sigma2misp.py +++ b/tools/sigma/sigma2misp.py @@ -5,13 +5,13 @@ import pathlib import urllib3 urllib3.disable_warnings() -from pymisp import PyMISP +from pymisp import PyMISP, MISPEvent def create_new_event(args, misp): if hasattr(misp, "new_event"): return misp.new_event(info=args.info)["Event"]["id"] - event = misp.MISPEvent() + event = MISPEvent() event.info = args.info return misp.add_event(event)["Event"]["id"]
{"golden_diff": "diff --git a/tools/sigma/sigma2misp.py b/tools/sigma/sigma2misp.py\n--- a/tools/sigma/sigma2misp.py\n+++ b/tools/sigma/sigma2misp.py\n@@ -5,13 +5,13 @@\n import pathlib\n import urllib3\n urllib3.disable_warnings()\n-from pymisp import PyMISP\n+from pymisp import PyMISP, MISPEvent\n \n def create_new_event(args, misp):\n if hasattr(misp, \"new_event\"):\n return misp.new_event(info=args.info)[\"Event\"][\"id\"]\n \n- event = misp.MISPEvent()\n+ event = MISPEvent()\n event.info = args.info\n return misp.add_event(event)[\"Event\"][\"id\"]\n", "issue": "Error while pushing sigma to misp using sigma2misp script\n(venv) hydra@Kaushals-MacBook-Air session2/sigma (master %) \u00bb tools/sigma2misp @misp.conf --insecure --same-event --info \"Test Event\" -r rules/windows/sysmon 2 \u21b5\r\nTraceback (most recent call last):\r\n File \"tools/sigma2misp\", line 57, in <module>\r\n eventid = create_new_event()\r\n File \"tools/sigma2misp\", line 14, in create_new_event\r\n event = misp.MISPEvent()\r\nAttributeError: 'PyMISP' object has no attribute 'MISPEvent'\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Import given Sigma rules to MISP\n\nimport argparse\nimport pathlib\nimport urllib3\nurllib3.disable_warnings()\nfrom pymisp import PyMISP\n\ndef create_new_event(args, misp):\n if hasattr(misp, \"new_event\"):\n return misp.new_event(info=args.info)[\"Event\"][\"id\"]\n \n event = misp.MISPEvent()\n event.info = args.info\n return misp.add_event(event)[\"Event\"][\"id\"]\n\n\nclass MISPImportArgumentParser(argparse.ArgumentParser):\n def __init__(self, *args, **kwargs):\n super().__init__(\n description=\"Import Sigma rules into MISP events\",\n epilog=\"Parameters can be read from a file by a @filename parameter. The file should contain one parameter per line. Dashes may be omitted.\",\n fromfile_prefix_chars=\"@\",\n )\n\n def convert_arg_line_to_args(self, line : str):\n return (\"--\" + line.lstrip(\"--\")).split()\n\ndef main():\n argparser = MISPImportArgumentParser()\n argparser.add_argument(\"--url\", \"-u\", default=\"https://localhost\", help=\"URL of MISP instance\")\n argparser.add_argument(\"--key\", \"-k\", required=True, help=\"API key\")\n argparser.add_argument(\"--insecure\", \"-I\", action=\"store_false\", help=\"Disable TLS certifcate validation.\")\n argparser.add_argument(\"--event\", \"-e\", type=int, help=\"Add Sigma rule to event with this ID. If not set, create new event.\")\n argparser.add_argument(\"--same-event\", \"-s\", action=\"store_true\", help=\"Import all Sigma rules to the same event, if no event is set.\")\n argparser.add_argument(\"--info\", \"-i\", default=\"Sigma import\", help=\"Event Information field for newly created MISP event.\")\n argparser.add_argument(\"--recursive\", \"-r\", action=\"store_true\", help=\"Recursive traversal of directory\")\n argparser.add_argument(\"sigma\", nargs=\"+\", help=\"Sigma rule file that should be imported\")\n args = argparser.parse_args()\n\n if args.recursive:\n paths = [ p for pathname in args.sigma for p in pathlib.Path(pathname).glob(\"**/*\") if p.is_file() ]\n else:\n paths = [ pathlib.Path(sigma) for sigma in args.sigma ]\n\n misp = PyMISP(args.url, args.key, args.insecure)\n if args.event:\n if hasattr(misp, \"get\"):\n eventid = misp.get(args.event)[\"Event\"][\"id\"]\n else:\n eventid = misp.get_event(args.event)[\"Event\"][\"id\"]\n\n first = True\n\n for sigma in paths:\n if not args.event and (first or not args.same_event):\n eventid = create_new_event(args, misp)\n print(\"Importing Sigma rule {} into MISP event {}...\".format(sigma, eventid, end=\"\"))\n f = sigma.open(\"rt\")\n\n if hasattr(misp, \"add_named_attribute\"):\n misp.add_named_attribute(eventid, \"sigma\", f.read())\n else:\n event = misp.get_event(eventid, pythonify=True)\n event.add_attribute(\"sigma\", f.read())\n misp.update_event(event)\n\n f.close()\n first = False\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/sigma/sigma2misp.py"}]}
1,550
165
gh_patches_debug_13449
rasdani/github-patches
git_diff
cloudtools__troposphere-178
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cloudwatch Alarm Threshold Type The parameter type 'Threshold' within Cloudwatch Alarms is currently of type 'integer' whereas the AWS documentations notes this should be a String. http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cw-alarm.html#cfn-cloudwatch-alarms-threshold I am hitting an issue when using alarms to check instance health - to monitor StatusCheckFailed I have an implementation which sets Threshold to 0.5 to evaluate a healthcheck of sorts. This works in Cloudformation but fails when I try to use it in the troposphere code. I think the line 'Threshold': (integer, True), should be 'Threshold': (basestring, True), within cloudwatch.py Any thoughts? </issue> <code> [start of troposphere/cloudwatch.py] 1 # Copyright (c) 2013, Mark Peek <[email protected]> 2 # All rights reserved. 3 # 4 # See LICENSE file for full license. 5 6 from . import AWSObject, AWSProperty, Ref 7 from .validators import integer, positive_integer, boolean 8 9 10 class MetricDimension(AWSProperty): 11 props = { 12 'Name': (basestring, True), 13 'Value': (basestring, True), 14 } 15 16 17 class Alarm(AWSObject): 18 resource_type = "AWS::CloudWatch::Alarm" 19 20 props = { 21 'ActionsEnabled': (boolean, False), 22 'AlarmActions': ([basestring, Ref], False), 23 'AlarmDescription': (basestring, False), 24 'AlarmName': (basestring, False), 25 'ComparisonOperator': (basestring, True), 26 'Dimensions': ([MetricDimension], False), 27 'EvaluationPeriods': (positive_integer, True), 28 'InsufficientDataActions': ([basestring, Ref], False), 29 'MetricName': (basestring, True), 30 'Namespace': (basestring, True), 31 'OKActions': ([basestring, Ref], False), 32 'Period': (positive_integer, True), 33 'Statistic': (basestring, True), 34 'Threshold': (integer, True), 35 'Unit': (basestring, False), 36 } 37 [end of troposphere/cloudwatch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/troposphere/cloudwatch.py b/troposphere/cloudwatch.py --- a/troposphere/cloudwatch.py +++ b/troposphere/cloudwatch.py @@ -4,7 +4,7 @@ # See LICENSE file for full license. from . import AWSObject, AWSProperty, Ref -from .validators import integer, positive_integer, boolean +from .validators import positive_integer, boolean class MetricDimension(AWSProperty): @@ -31,6 +31,6 @@ 'OKActions': ([basestring, Ref], False), 'Period': (positive_integer, True), 'Statistic': (basestring, True), - 'Threshold': (integer, True), + 'Threshold': (basestring, True), 'Unit': (basestring, False), }
{"golden_diff": "diff --git a/troposphere/cloudwatch.py b/troposphere/cloudwatch.py\n--- a/troposphere/cloudwatch.py\n+++ b/troposphere/cloudwatch.py\n@@ -4,7 +4,7 @@\n # See LICENSE file for full license.\n \n from . import AWSObject, AWSProperty, Ref\n-from .validators import integer, positive_integer, boolean\n+from .validators import positive_integer, boolean\n \n \n class MetricDimension(AWSProperty):\n@@ -31,6 +31,6 @@\n 'OKActions': ([basestring, Ref], False),\n 'Period': (positive_integer, True),\n 'Statistic': (basestring, True),\n- 'Threshold': (integer, True),\n+ 'Threshold': (basestring, True),\n 'Unit': (basestring, False),\n }\n", "issue": "Cloudwatch Alarm Threshold Type\nThe parameter type 'Threshold' within Cloudwatch Alarms is currently of type 'integer' whereas the AWS documentations notes this should be a String.\n\nhttp://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cw-alarm.html#cfn-cloudwatch-alarms-threshold\n\nI am hitting an issue when using alarms to check instance health - to monitor StatusCheckFailed I have an implementation which sets Threshold to 0.5 to evaluate a healthcheck of sorts. This works in Cloudformation but fails when I try to use it in the troposphere code.\n\nI think the line 'Threshold': (integer, True), should be 'Threshold': (basestring, True), within cloudwatch.py\n\nAny thoughts?\n\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Ref\nfrom .validators import integer, positive_integer, boolean\n\n\nclass MetricDimension(AWSProperty):\n props = {\n 'Name': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass Alarm(AWSObject):\n resource_type = \"AWS::CloudWatch::Alarm\"\n\n props = {\n 'ActionsEnabled': (boolean, False),\n 'AlarmActions': ([basestring, Ref], False),\n 'AlarmDescription': (basestring, False),\n 'AlarmName': (basestring, False),\n 'ComparisonOperator': (basestring, True),\n 'Dimensions': ([MetricDimension], False),\n 'EvaluationPeriods': (positive_integer, True),\n 'InsufficientDataActions': ([basestring, Ref], False),\n 'MetricName': (basestring, True),\n 'Namespace': (basestring, True),\n 'OKActions': ([basestring, Ref], False),\n 'Period': (positive_integer, True),\n 'Statistic': (basestring, True),\n 'Threshold': (integer, True),\n 'Unit': (basestring, False),\n }\n", "path": "troposphere/cloudwatch.py"}]}
1,039
174
gh_patches_debug_14449
rasdani/github-patches
git_diff
scrapy__scrapy-602
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Allow sending HTML emails with scrapy.mail.MailSender I've patched this locally by changing the `send` method: ``` def send(self, to, subject, body, cc=None, attachs=(), mime='text/plain', _callback=None): if attachs: msg = MIMEMultipart() else: msg = MIMENonMultipart(*mime.split('/')) ``` But it seems fragile. Any thoughts? Allow sending HTML emails with scrapy.mail.MailSender I've patched this locally by changing the `send` method: ``` def send(self, to, subject, body, cc=None, attachs=(), mime='text/plain', _callback=None): if attachs: msg = MIMEMultipart() else: msg = MIMENonMultipart(*mime.split('/')) ``` But it seems fragile. Any thoughts? </issue> <code> [start of scrapy/mail.py] 1 """ 2 Mail sending helpers 3 4 See documentation in docs/topics/email.rst 5 """ 6 from cStringIO import StringIO 7 from email.MIMEMultipart import MIMEMultipart 8 from email.MIMENonMultipart import MIMENonMultipart 9 from email.MIMEBase import MIMEBase 10 from email.MIMEText import MIMEText 11 from email.Utils import COMMASPACE, formatdate 12 from email import Encoders 13 14 from twisted.internet import defer, reactor, ssl 15 from twisted.mail.smtp import ESMTPSenderFactory 16 17 from scrapy import log 18 19 class MailSender(object): 20 21 def __init__(self, smtphost='localhost', mailfrom='scrapy@localhost', 22 smtpuser=None, smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False): 23 self.smtphost = smtphost 24 self.smtpport = smtpport 25 self.smtpuser = smtpuser 26 self.smtppass = smtppass 27 self.smtptls = smtptls 28 self.smtpssl = smtpssl 29 self.mailfrom = mailfrom 30 self.debug = debug 31 32 @classmethod 33 def from_settings(cls, settings): 34 return cls(settings['MAIL_HOST'], settings['MAIL_FROM'], settings['MAIL_USER'], 35 settings['MAIL_PASS'], settings.getint('MAIL_PORT'), 36 settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL')) 37 38 def send(self, to, subject, body, cc=None, attachs=(), _callback=None): 39 if attachs: 40 msg = MIMEMultipart() 41 else: 42 msg = MIMENonMultipart('text', 'plain') 43 msg['From'] = self.mailfrom 44 msg['To'] = COMMASPACE.join(to) 45 msg['Date'] = formatdate(localtime=True) 46 msg['Subject'] = subject 47 rcpts = to[:] 48 if cc: 49 rcpts.extend(cc) 50 msg['Cc'] = COMMASPACE.join(cc) 51 52 if attachs: 53 msg.attach(MIMEText(body)) 54 for attach_name, mimetype, f in attachs: 55 part = MIMEBase(*mimetype.split('/')) 56 part.set_payload(f.read()) 57 Encoders.encode_base64(part) 58 part.add_header('Content-Disposition', 'attachment; filename="%s"' \ 59 % attach_name) 60 msg.attach(part) 61 else: 62 msg.set_payload(body) 63 64 if _callback: 65 _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg) 66 67 if self.debug: 68 log.msg(format='Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s Subject="%(mailsubject)s" Attachs=%(mailattachs)d', 69 level=log.DEBUG, mailto=to, mailcc=cc, mailsubject=subject, mailattachs=len(attachs)) 70 return 71 72 dfd = self._sendmail(rcpts, msg.as_string()) 73 dfd.addCallbacks(self._sent_ok, self._sent_failed, 74 callbackArgs=[to, cc, subject, len(attachs)], 75 errbackArgs=[to, cc, subject, len(attachs)]) 76 reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd) 77 return dfd 78 79 def _sent_ok(self, result, to, cc, subject, nattachs): 80 log.msg(format='Mail sent OK: To=%(mailto)s Cc=%(mailcc)s ' 81 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d', 82 mailto=to, mailcc=cc, mailsubject=subject, mailattachs=nattachs) 83 84 def _sent_failed(self, failure, to, cc, subject, nattachs): 85 errstr = str(failure.value) 86 log.msg(format='Unable to send mail: To=%(mailto)s Cc=%(mailcc)s ' 87 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d' 88 '- %(mailerr)s', 89 level=log.ERROR, mailto=to, mailcc=cc, mailsubject=subject, 90 mailattachs=nattachs, mailerr=errstr) 91 92 def _sendmail(self, to_addrs, msg): 93 msg = StringIO(msg) 94 d = defer.Deferred() 95 factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \ 96 to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \ 97 requireTransportSecurity=self.smtptls) 98 factory.noisy = False 99 100 if self.smtpssl: 101 reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory()) 102 else: 103 reactor.connectTCP(self.smtphost, self.smtpport, factory) 104 105 return d 106 [end of scrapy/mail.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/mail.py b/scrapy/mail.py --- a/scrapy/mail.py +++ b/scrapy/mail.py @@ -35,11 +35,11 @@ settings['MAIL_PASS'], settings.getint('MAIL_PORT'), settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL')) - def send(self, to, subject, body, cc=None, attachs=(), _callback=None): + def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', _callback=None): if attachs: msg = MIMEMultipart() else: - msg = MIMENonMultipart('text', 'plain') + msg = MIMENonMultipart(*mimetype.split('/', 1)) msg['From'] = self.mailfrom msg['To'] = COMMASPACE.join(to) msg['Date'] = formatdate(localtime=True)
{"golden_diff": "diff --git a/scrapy/mail.py b/scrapy/mail.py\n--- a/scrapy/mail.py\n+++ b/scrapy/mail.py\n@@ -35,11 +35,11 @@\n settings['MAIL_PASS'], settings.getint('MAIL_PORT'),\n settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))\n \n- def send(self, to, subject, body, cc=None, attachs=(), _callback=None):\n+ def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', _callback=None):\n if attachs:\n msg = MIMEMultipart()\n else:\n- msg = MIMENonMultipart('text', 'plain')\n+ msg = MIMENonMultipart(*mimetype.split('/', 1))\n msg['From'] = self.mailfrom\n msg['To'] = COMMASPACE.join(to)\n msg['Date'] = formatdate(localtime=True)\n", "issue": "Allow sending HTML emails with scrapy.mail.MailSender\nI've patched this locally by changing the `send` method:\n\n```\ndef send(self, to, subject, body, cc=None, attachs=(), mime='text/plain', _callback=None):\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart(*mime.split('/'))\n```\n\nBut it seems fragile. Any thoughts?\n\nAllow sending HTML emails with scrapy.mail.MailSender\nI've patched this locally by changing the `send` method:\n\n```\ndef send(self, to, subject, body, cc=None, attachs=(), mime='text/plain', _callback=None):\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart(*mime.split('/'))\n```\n\nBut it seems fragile. Any thoughts?\n\n", "before_files": [{"content": "\"\"\"\nMail sending helpers\n\nSee documentation in docs/topics/email.rst\n\"\"\"\nfrom cStringIO import StringIO\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMENonMultipart import MIMENonMultipart\nfrom email.MIMEBase import MIMEBase\nfrom email.MIMEText import MIMEText\nfrom email.Utils import COMMASPACE, formatdate\nfrom email import Encoders\n\nfrom twisted.internet import defer, reactor, ssl\nfrom twisted.mail.smtp import ESMTPSenderFactory\n\nfrom scrapy import log\n\nclass MailSender(object):\n\n def __init__(self, smtphost='localhost', mailfrom='scrapy@localhost',\n smtpuser=None, smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False):\n self.smtphost = smtphost\n self.smtpport = smtpport\n self.smtpuser = smtpuser\n self.smtppass = smtppass\n self.smtptls = smtptls\n self.smtpssl = smtpssl\n self.mailfrom = mailfrom\n self.debug = debug\n\n @classmethod\n def from_settings(cls, settings):\n return cls(settings['MAIL_HOST'], settings['MAIL_FROM'], settings['MAIL_USER'],\n settings['MAIL_PASS'], settings.getint('MAIL_PORT'),\n settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))\n\n def send(self, to, subject, body, cc=None, attachs=(), _callback=None):\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart('text', 'plain')\n msg['From'] = self.mailfrom\n msg['To'] = COMMASPACE.join(to)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = subject\n rcpts = to[:]\n if cc:\n rcpts.extend(cc)\n msg['Cc'] = COMMASPACE.join(cc)\n\n if attachs:\n msg.attach(MIMEText(body))\n for attach_name, mimetype, f in attachs:\n part = MIMEBase(*mimetype.split('/'))\n part.set_payload(f.read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' \\\n % attach_name)\n msg.attach(part)\n else:\n msg.set_payload(body)\n\n if _callback:\n _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)\n\n if self.debug:\n log.msg(format='Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n level=log.DEBUG, mailto=to, mailcc=cc, mailsubject=subject, mailattachs=len(attachs))\n return\n\n dfd = self._sendmail(rcpts, msg.as_string())\n dfd.addCallbacks(self._sent_ok, self._sent_failed,\n callbackArgs=[to, cc, subject, len(attachs)],\n errbackArgs=[to, cc, subject, len(attachs)])\n reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)\n return dfd\n\n def _sent_ok(self, result, to, cc, subject, nattachs):\n log.msg(format='Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n mailto=to, mailcc=cc, mailsubject=subject, mailattachs=nattachs)\n\n def _sent_failed(self, failure, to, cc, subject, nattachs):\n errstr = str(failure.value)\n log.msg(format='Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d'\n '- %(mailerr)s',\n level=log.ERROR, mailto=to, mailcc=cc, mailsubject=subject,\n mailattachs=nattachs, mailerr=errstr)\n\n def _sendmail(self, to_addrs, msg):\n msg = StringIO(msg)\n d = defer.Deferred()\n factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \\\n to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \\\n requireTransportSecurity=self.smtptls)\n factory.noisy = False\n\n if self.smtpssl:\n reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())\n else:\n reactor.connectTCP(self.smtphost, self.smtpport, factory)\n\n return d\n", "path": "scrapy/mail.py"}]}
1,964
207
gh_patches_debug_31987
rasdani/github-patches
git_diff
vas3k__vas3k.club-142
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Визуализировать результаты батлов <img width="1113" alt="image" src="https://user-images.githubusercontent.com/19980512/81127819-1f135780-8f48-11ea-83bc-7c56e6e849e4.png"> Было бы круто как-то визуализировать полоску в зависимости от результатов баттла. Чтобы такой раз — и увидел результат, а не подсчитывал, где больше аргументов и плюсов </issue> <code> [start of posts/templatetags/battle.py] 1 from django import template 2 from django.template import loader 3 4 register = template.Library() 5 6 7 battle_stats_template = loader.get_template("posts/widgets/battle_stats.html") 8 9 10 def _is_argument_for_side(comment, side): 11 for_side = comment.metadata and comment.metadata.get("battle", {}).get("side") == side 12 13 return not comment.is_deleted and not comment.reply_to_id and for_side 14 15 16 @register.simple_tag() 17 def battle_stats(post, comments): 18 arguments_for_a = [c for c in comments if _is_argument_for_side(c, "a")] 19 arguments_for_b = [c for c in comments if _is_argument_for_side(c, "b")] 20 21 total_votes_a = sum(c.upvotes for c in arguments_for_a) 22 total_votes_b = sum(c.upvotes for c in arguments_for_b) 23 return battle_stats_template.render({ 24 "total_arguments": { 25 "a": len(arguments_for_a), 26 "b": len(arguments_for_b), 27 }, 28 "total_votes": { 29 "a": total_votes_a, 30 "b": total_votes_b, 31 }, 32 "battle": post, 33 }) 34 35 36 @register.filter() 37 def side_name(battle, side_code): 38 if battle and battle.metadata and battle.metadata.get("battle"): 39 return battle.metadata["battle"]["sides"][side_code]["name"] 40 return "" 41 42 [end of posts/templatetags/battle.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/posts/templatetags/battle.py b/posts/templatetags/battle.py --- a/posts/templatetags/battle.py +++ b/posts/templatetags/battle.py @@ -3,7 +3,6 @@ register = template.Library() - battle_stats_template = loader.get_template("posts/widgets/battle_stats.html") @@ -20,6 +19,7 @@ total_votes_a = sum(c.upvotes for c in arguments_for_a) total_votes_b = sum(c.upvotes for c in arguments_for_b) + return battle_stats_template.render({ "total_arguments": { "a": len(arguments_for_a), @@ -29,6 +29,7 @@ "a": total_votes_a, "b": total_votes_b, }, + "graph": graph_percentages(len(arguments_for_a), len(arguments_for_b), total_votes_a, total_votes_b), "battle": post, }) @@ -39,3 +40,27 @@ return battle.metadata["battle"]["sides"][side_code]["name"] return "" + +def graph_percentages(a_arguments: int, b_arguments: int, a_votes: int, b_votes: int): + """Counts percentages for battle graph + + Percentage for a side is a rounded up arithmetic average of side's argument and upvote percentages + + For each side: (argument % of total arguments amount + vote % of total votes amount ) / 2 + """ + percent_a = 0 + percent_b = 0 + total_arguments = a_arguments + b_arguments + total_upvotes = a_votes + b_votes + if total_arguments > 0: + argument_percent = 100 / total_arguments + percent_a = a_arguments * argument_percent + percent_b = b_arguments * argument_percent + if total_upvotes > 0: + upvote_percent = 100 / total_upvotes + percent_a = (percent_a + a_votes * upvote_percent) / 2 + percent_b = (percent_b + b_votes * upvote_percent) / 2 + return { + "percent_a": round(percent_a), + "percent_b": round(percent_b) + }
{"golden_diff": "diff --git a/posts/templatetags/battle.py b/posts/templatetags/battle.py\n--- a/posts/templatetags/battle.py\n+++ b/posts/templatetags/battle.py\n@@ -3,7 +3,6 @@\n \n register = template.Library()\n \n-\n battle_stats_template = loader.get_template(\"posts/widgets/battle_stats.html\")\n \n \n@@ -20,6 +19,7 @@\n \n total_votes_a = sum(c.upvotes for c in arguments_for_a)\n total_votes_b = sum(c.upvotes for c in arguments_for_b)\n+\n return battle_stats_template.render({\n \"total_arguments\": {\n \"a\": len(arguments_for_a),\n@@ -29,6 +29,7 @@\n \"a\": total_votes_a,\n \"b\": total_votes_b,\n },\n+ \"graph\": graph_percentages(len(arguments_for_a), len(arguments_for_b), total_votes_a, total_votes_b),\n \"battle\": post,\n })\n \n@@ -39,3 +40,27 @@\n return battle.metadata[\"battle\"][\"sides\"][side_code][\"name\"]\n return \"\"\n \n+\n+def graph_percentages(a_arguments: int, b_arguments: int, a_votes: int, b_votes: int):\n+ \"\"\"Counts percentages for battle graph\n+\n+ Percentage for a side is a rounded up arithmetic average of side's argument and upvote percentages\n+\n+ For each side: (argument % of total arguments amount + vote % of total votes amount ) / 2\n+ \"\"\"\n+ percent_a = 0\n+ percent_b = 0\n+ total_arguments = a_arguments + b_arguments\n+ total_upvotes = a_votes + b_votes\n+ if total_arguments > 0:\n+ argument_percent = 100 / total_arguments\n+ percent_a = a_arguments * argument_percent\n+ percent_b = b_arguments * argument_percent\n+ if total_upvotes > 0:\n+ upvote_percent = 100 / total_upvotes\n+ percent_a = (percent_a + a_votes * upvote_percent) / 2\n+ percent_b = (percent_b + b_votes * upvote_percent) / 2\n+ return {\n+ \"percent_a\": round(percent_a),\n+ \"percent_b\": round(percent_b)\n+ }\n", "issue": "\u0412\u0438\u0437\u0443\u0430\u043b\u0438\u0437\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b \u0431\u0430\u0442\u043b\u043e\u0432\n<img width=\"1113\" alt=\"image\" src=\"https://user-images.githubusercontent.com/19980512/81127819-1f135780-8f48-11ea-83bc-7c56e6e849e4.png\">\r\n\r\n\u0411\u044b\u043b\u043e \u0431\u044b \u043a\u0440\u0443\u0442\u043e \u043a\u0430\u043a-\u0442\u043e \u0432\u0438\u0437\u0443\u0430\u043b\u0438\u0437\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u043f\u043e\u043b\u043e\u0441\u043a\u0443 \u0432 \u0437\u0430\u0432\u0438\u0441\u0438\u043c\u043e\u0441\u0442\u0438 \u043e\u0442 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u043e\u0432 \u0431\u0430\u0442\u0442\u043b\u0430. \u0427\u0442\u043e\u0431\u044b \u0442\u0430\u043a\u043e\u0439 \u0440\u0430\u0437 \u2014 \u0438 \u0443\u0432\u0438\u0434\u0435\u043b \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442, \u0430 \u043d\u0435 \u043f\u043e\u0434\u0441\u0447\u0438\u0442\u044b\u0432\u0430\u043b, \u0433\u0434\u0435 \u0431\u043e\u043b\u044c\u0448\u0435 \u0430\u0440\u0433\u0443\u043c\u0435\u043d\u0442\u043e\u0432 \u0438 \u043f\u043b\u044e\u0441\u043e\u0432\n", "before_files": [{"content": "from django import template\nfrom django.template import loader\n\nregister = template.Library()\n\n\nbattle_stats_template = loader.get_template(\"posts/widgets/battle_stats.html\")\n\n\ndef _is_argument_for_side(comment, side):\n for_side = comment.metadata and comment.metadata.get(\"battle\", {}).get(\"side\") == side\n\n return not comment.is_deleted and not comment.reply_to_id and for_side\n\n\[email protected]_tag()\ndef battle_stats(post, comments):\n arguments_for_a = [c for c in comments if _is_argument_for_side(c, \"a\")]\n arguments_for_b = [c for c in comments if _is_argument_for_side(c, \"b\")]\n\n total_votes_a = sum(c.upvotes for c in arguments_for_a)\n total_votes_b = sum(c.upvotes for c in arguments_for_b)\n return battle_stats_template.render({\n \"total_arguments\": {\n \"a\": len(arguments_for_a),\n \"b\": len(arguments_for_b),\n },\n \"total_votes\": {\n \"a\": total_votes_a,\n \"b\": total_votes_b,\n },\n \"battle\": post,\n })\n\n\[email protected]()\ndef side_name(battle, side_code):\n if battle and battle.metadata and battle.metadata.get(\"battle\"):\n return battle.metadata[\"battle\"][\"sides\"][side_code][\"name\"]\n return \"\"\n\n", "path": "posts/templatetags/battle.py"}]}
1,048
509
gh_patches_debug_2773
rasdani/github-patches
git_diff
Netflix__lemur-3166
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DNS Providers list doesn't show type In the DNS Providers list, there is a column for the provider type, but it's always empty. Looking at the code, and the API requests, the issue seems to be with the dns_providers API call, which returns the list of all providers. There should be a providerType value in the JSON, but it's not there. A quick glance at the `DnsProvidersNestedOutputSchema` shows that the value is called `providerType`, but in the database the field is called `provider_type` similar to `api_endpoint` which is called `api_endpoint` in the OutputSchema, so I guess, it's probably just mislabeled in the OutputSchema, and needs to be adjusted there, and maybe in the angular template. </issue> <code> [start of lemur/dns_providers/schemas.py] 1 from marshmallow import fields 2 3 from lemur.common.fields import ArrowDateTime 4 from lemur.common.schema import LemurInputSchema, LemurOutputSchema 5 6 7 class DnsProvidersNestedOutputSchema(LemurOutputSchema): 8 __envelope__ = False 9 id = fields.Integer() 10 name = fields.String() 11 providerType = fields.String() 12 description = fields.String() 13 credentials = fields.String() 14 api_endpoint = fields.String() 15 date_created = ArrowDateTime() 16 17 18 class DnsProvidersNestedInputSchema(LemurInputSchema): 19 __envelope__ = False 20 name = fields.String() 21 description = fields.String() 22 provider_type = fields.Dict() 23 24 25 dns_provider_output_schema = DnsProvidersNestedOutputSchema() 26 27 dns_provider_input_schema = DnsProvidersNestedInputSchema() 28 [end of lemur/dns_providers/schemas.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lemur/dns_providers/schemas.py b/lemur/dns_providers/schemas.py --- a/lemur/dns_providers/schemas.py +++ b/lemur/dns_providers/schemas.py @@ -8,7 +8,7 @@ __envelope__ = False id = fields.Integer() name = fields.String() - providerType = fields.String() + provider_type = fields.String() description = fields.String() credentials = fields.String() api_endpoint = fields.String()
{"golden_diff": "diff --git a/lemur/dns_providers/schemas.py b/lemur/dns_providers/schemas.py\n--- a/lemur/dns_providers/schemas.py\n+++ b/lemur/dns_providers/schemas.py\n@@ -8,7 +8,7 @@\n __envelope__ = False\n id = fields.Integer()\n name = fields.String()\n- providerType = fields.String()\n+ provider_type = fields.String()\n description = fields.String()\n credentials = fields.String()\n api_endpoint = fields.String()\n", "issue": "DNS Providers list doesn't show type\nIn the DNS Providers list, there is a column for the provider type, but it's always empty.\r\n\r\nLooking at the code, and the API requests, the issue seems to be with the dns_providers API call, which returns the list of all providers.\r\n\r\nThere should be a providerType value in the JSON, but it's not there. \r\n\r\nA quick glance at the `DnsProvidersNestedOutputSchema` shows that the value is called `providerType`, but in the database the field is called `provider_type` similar to `api_endpoint` which is called `api_endpoint` in the OutputSchema, so I guess, it's probably just mislabeled in the OutputSchema, and needs to be adjusted there, and maybe in the angular template.\n", "before_files": [{"content": "from marshmallow import fields\n\nfrom lemur.common.fields import ArrowDateTime\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\n\n\nclass DnsProvidersNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n name = fields.String()\n providerType = fields.String()\n description = fields.String()\n credentials = fields.String()\n api_endpoint = fields.String()\n date_created = ArrowDateTime()\n\n\nclass DnsProvidersNestedInputSchema(LemurInputSchema):\n __envelope__ = False\n name = fields.String()\n description = fields.String()\n provider_type = fields.Dict()\n\n\ndns_provider_output_schema = DnsProvidersNestedOutputSchema()\n\ndns_provider_input_schema = DnsProvidersNestedInputSchema()\n", "path": "lemur/dns_providers/schemas.py"}]}
922
118
gh_patches_debug_26623
rasdani/github-patches
git_diff
e-valuation__EvaP-1291
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Django 2.1 upgrade https://docs.djangoproject.com/en/2.1/releases/2.1/ There is a guide for upgrading: https://docs.djangoproject.com/en/2.1/howto/upgrade-version/ Basically * Read the release notes * update dependencies * run tests with `python -Wa` and solve deprecation warnings * put the new django into the requirements * run tests, fix failures if any * run tests with `python -Wa` and solve deprecation warnings again * if there was any new feature in the release notes that might help us, use it also, we need to check the installed python version on production, django 2.1 supports python 3.5 and newer. </issue> <code> [start of evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py] 1 # Generated by Django 1.11.3 on 2017-07-03 18:31 2 3 from django.db import migrations, models 4 import uuid 5 6 7 def fill_textanswer_uuid(apps, schema_editor): 8 db_alias = schema_editor.connection.alias 9 TextAnswer = apps.get_model('evaluation', 'TextAnswer') 10 for obj in TextAnswer.objects.using(db_alias).all(): 11 obj.uuid = uuid.uuid4() 12 obj.save() 13 14 15 class Migration(migrations.Migration): 16 17 dependencies = [ 18 ('evaluation', '0061_editor_review_reminder_template'), 19 ] 20 21 # Based on 22 # https://gist.github.com/smcoll/8bb867dc631433c01fd0 23 24 operations = [ 25 migrations.AddField( 26 model_name='textanswer', 27 name='uuid', 28 field=models.UUIDField(null=True), 29 ), 30 migrations.RunPython(fill_textanswer_uuid, migrations.RunPython.noop), 31 migrations.AlterField( 32 model_name='textanswer', 33 name='uuid', 34 field=models.UUIDField(primary_key=False, default=uuid.uuid4, serialize=False, editable=False), 35 ), 36 # rename the old id field before deleting it at the end of the 37 # migration for compatibility with the sqlite driver 38 migrations.RenameField( 39 model_name='textanswer', 40 old_name='id', 41 new_name='old_id' 42 ), 43 migrations.RenameField( 44 model_name='textanswer', 45 old_name='uuid', 46 new_name='id' 47 ), 48 migrations.AlterField( 49 model_name='textanswer', 50 name='id', 51 field=models.UUIDField(primary_key=True, default=uuid.uuid4, serialize=False, editable=False), 52 ), 53 migrations.AlterModelOptions( 54 name='textanswer', 55 options={'ordering': ['id'], 'verbose_name': 'text answer', 'verbose_name_plural': 'text answers'}, 56 ), 57 migrations.RemoveField(model_name='textanswer', name='old_id'), 58 ] 59 [end of evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py b/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py --- a/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py +++ b/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py @@ -33,13 +33,12 @@ name='uuid', field=models.UUIDField(primary_key=False, default=uuid.uuid4, serialize=False, editable=False), ), - # rename the old id field before deleting it at the end of the - # migration for compatibility with the sqlite driver - migrations.RenameField( - model_name='textanswer', - old_name='id', - new_name='old_id' - ), + # this causes trouble with sqlite. We have two open bug reports with django for this, see + # https://code.djangoproject.com/ticket/29790 and https://code.djangoproject.com/ticket/28541 + # We can not get this to work with sqlite and postgres right now and we want django2.1, we only + # support postgres here. For sqlite, you need to rename the field here and move the RemoveField to + # the end. + migrations.RemoveField(model_name='textanswer', name='id'), migrations.RenameField( model_name='textanswer', old_name='uuid', @@ -54,5 +53,4 @@ name='textanswer', options={'ordering': ['id'], 'verbose_name': 'text answer', 'verbose_name_plural': 'text answers'}, ), - migrations.RemoveField(model_name='textanswer', name='old_id'), ]
{"golden_diff": "diff --git a/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py b/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py\n--- a/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py\n+++ b/evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py\n@@ -33,13 +33,12 @@\n name='uuid',\n field=models.UUIDField(primary_key=False, default=uuid.uuid4, serialize=False, editable=False),\n ),\n- # rename the old id field before deleting it at the end of the\n- # migration for compatibility with the sqlite driver\n- migrations.RenameField(\n- model_name='textanswer',\n- old_name='id',\n- new_name='old_id'\n- ),\n+ # this causes trouble with sqlite. We have two open bug reports with django for this, see\n+ # https://code.djangoproject.com/ticket/29790 and https://code.djangoproject.com/ticket/28541\n+ # We can not get this to work with sqlite and postgres right now and we want django2.1, we only\n+ # support postgres here. For sqlite, you need to rename the field here and move the RemoveField to\n+ # the end.\n+ migrations.RemoveField(model_name='textanswer', name='id'),\n migrations.RenameField(\n model_name='textanswer',\n old_name='uuid',\n@@ -54,5 +53,4 @@\n name='textanswer',\n options={'ordering': ['id'], 'verbose_name': 'text answer', 'verbose_name_plural': 'text answers'},\n ),\n- migrations.RemoveField(model_name='textanswer', name='old_id'),\n ]\n", "issue": "Django 2.1 upgrade\nhttps://docs.djangoproject.com/en/2.1/releases/2.1/\r\n\r\nThere is a guide for upgrading: https://docs.djangoproject.com/en/2.1/howto/upgrade-version/\r\n\r\nBasically\r\n* Read the release notes\r\n* update dependencies\r\n* run tests with `python -Wa` and solve deprecation warnings\r\n* put the new django into the requirements\r\n* run tests, fix failures if any\r\n* run tests with `python -Wa` and solve deprecation warnings again\r\n* if there was any new feature in the release notes that might help us, use it\r\n\r\nalso, we need to check the installed python version on production, django 2.1 supports python 3.5 and newer.\n", "before_files": [{"content": "# Generated by Django 1.11.3 on 2017-07-03 18:31\n\nfrom django.db import migrations, models\nimport uuid\n\n\ndef fill_textanswer_uuid(apps, schema_editor):\n db_alias = schema_editor.connection.alias\n TextAnswer = apps.get_model('evaluation', 'TextAnswer')\n for obj in TextAnswer.objects.using(db_alias).all():\n obj.uuid = uuid.uuid4()\n obj.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('evaluation', '0061_editor_review_reminder_template'),\n ]\n\n # Based on\n # https://gist.github.com/smcoll/8bb867dc631433c01fd0\n\n operations = [\n migrations.AddField(\n model_name='textanswer',\n name='uuid',\n field=models.UUIDField(null=True),\n ),\n migrations.RunPython(fill_textanswer_uuid, migrations.RunPython.noop),\n migrations.AlterField(\n model_name='textanswer',\n name='uuid',\n field=models.UUIDField(primary_key=False, default=uuid.uuid4, serialize=False, editable=False),\n ),\n # rename the old id field before deleting it at the end of the\n # migration for compatibility with the sqlite driver\n migrations.RenameField(\n model_name='textanswer',\n old_name='id',\n new_name='old_id'\n ),\n migrations.RenameField(\n model_name='textanswer',\n old_name='uuid',\n new_name='id'\n ),\n migrations.AlterField(\n model_name='textanswer',\n name='id',\n field=models.UUIDField(primary_key=True, default=uuid.uuid4, serialize=False, editable=False),\n ),\n migrations.AlterModelOptions(\n name='textanswer',\n options={'ordering': ['id'], 'verbose_name': 'text answer', 'verbose_name_plural': 'text answers'},\n ),\n migrations.RemoveField(model_name='textanswer', name='old_id'),\n ]\n", "path": "evap/evaluation/migrations/0062_replace_textanswer_id_with_uuid.py"}]}
1,258
399
gh_patches_debug_2234
rasdani/github-patches
git_diff
redis__redis-py-2674
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Canceling async Redis command leaves connection open, in unsafe state for future commands **Version**: 4.5.3 **Platform**: Python 3.8 on Ubuntu / Generic **Description**: Canceling async Redis command leaves connection open, in unsafe state for future commands This is a reincarnation of #2624, which was closed with an incomplete fix and a possibly unreliable test case. This is the same issue that recently got a lot of attention due to ChatGPT outage, and that remains only partially fixed. The cancellation shielding introduced in #2641 addressed only the cancellation of Redis pipeline operation, but non-pipelined ops are still vulnerable. This time I am attaching a script that reproduces the issue reliably without relying on an external, slow Redis server. This is achieved by inserting a small TCP socket proxy between the Redis client and local Redis server, with the proxy introducing a 0.1 second delay when sending data in either direction. Running this script with a Redis server running locally on port 6379 produces the following output: ``` $ python redis_cancel.py managed to cancel the task, connection is left open with unread response bar: b'foo' ping: False foo: b'PONG' ``` ```python import asyncio from redis.asyncio import Redis async def pipe(reader: asyncio.StreamReader, writer: asyncio.StreamWriter, delay: float, name=''): while data := await reader.read(1000): # print(name, 'received:', data) await asyncio.sleep(delay) writer.write(data) await writer.drain() class DelayProxy: def __init__(self, addr, redis_addr, delay: float): self.addr = addr self.redis_addr = redis_addr self.delay = delay async def start(self): server = await asyncio.start_server(self.handle, *self.addr) asyncio.create_task(server.serve_forever()) async def handle(self, reader, writer): # establish connection to redis redis_reader, redis_writer = await asyncio.open_connection(*self.redis_addr) pipe1 = asyncio.create_task(pipe(reader, redis_writer, self.delay, 'to redis:')) pipe2 = asyncio.create_task(pipe(redis_reader, writer, self.delay, 'from redis:')) await asyncio.gather(pipe1, pipe2) async def main(): # create a tcp socket proxy that relays data to Redis and back, inserting 0.1 seconds of delay dp = DelayProxy(addr=('localhost', 6380), redis_addr=('localhost', 6379), delay=0.1) await dp.start() # note that we connect to proxy, rather than to Redis directly async with Redis(host='localhost', port=6380) as r: await r.set('foo', 'foo') await r.set('bar', 'bar') t = asyncio.create_task(r.get('foo')) await asyncio.sleep(0.050) t.cancel() try: await t print('try again, we did not cancel the task in time') except asyncio.CancelledError: print('managed to cancel the task, connection is left open with unread response') print('bar:', await r.get('bar')) print('ping:', await r.ping()) print('foo:', await r.get('foo')) if __name__ == '__main__': asyncio.run(main()) ``` </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 from setuptools import find_packages, setup 3 4 setup( 5 name="redis", 6 description="Python client for Redis database and key-value store", 7 long_description=open("README.md").read().strip(), 8 long_description_content_type="text/markdown", 9 keywords=["Redis", "key-value store", "database"], 10 license="MIT", 11 version="4.5.3", 12 packages=find_packages( 13 include=[ 14 "redis", 15 "redis.asyncio", 16 "redis.commands", 17 "redis.commands.bf", 18 "redis.commands.json", 19 "redis.commands.search", 20 "redis.commands.timeseries", 21 "redis.commands.graph", 22 ] 23 ), 24 url="https://github.com/redis/redis-py", 25 project_urls={ 26 "Documentation": "https://redis.readthedocs.io/en/latest/", 27 "Changes": "https://github.com/redis/redis-py/releases", 28 "Code": "https://github.com/redis/redis-py", 29 "Issue tracker": "https://github.com/redis/redis-py/issues", 30 }, 31 author="Redis Inc.", 32 author_email="[email protected]", 33 python_requires=">=3.7", 34 install_requires=[ 35 'importlib-metadata >= 1.0; python_version < "3.8"', 36 'typing-extensions; python_version<"3.8"', 37 'async-timeout>=4.0.2; python_version<="3.11.2"', 38 ], 39 classifiers=[ 40 "Development Status :: 5 - Production/Stable", 41 "Environment :: Console", 42 "Intended Audience :: Developers", 43 "License :: OSI Approved :: MIT License", 44 "Operating System :: OS Independent", 45 "Programming Language :: Python", 46 "Programming Language :: Python :: 3", 47 "Programming Language :: Python :: 3 :: Only", 48 "Programming Language :: Python :: 3.7", 49 "Programming Language :: Python :: 3.8", 50 "Programming Language :: Python :: 3.9", 51 "Programming Language :: Python :: 3.10", 52 "Programming Language :: Python :: 3.11", 53 "Programming Language :: Python :: Implementation :: CPython", 54 "Programming Language :: Python :: Implementation :: PyPy", 55 ], 56 extras_require={ 57 "hiredis": ["hiredis>=1.0.0"], 58 "ocsp": ["cryptography>=36.0.1", "pyopenssl==20.0.1", "requests>=2.26.0"], 59 }, 60 ) 61 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ long_description_content_type="text/markdown", keywords=["Redis", "key-value store", "database"], license="MIT", - version="4.5.3", + version="4.5.4", packages=find_packages( include=[ "redis",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,7 +8,7 @@\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n- version=\"4.5.3\",\n+ version=\"4.5.4\",\n packages=find_packages(\n include=[\n \"redis\",\n", "issue": "Canceling async Redis command leaves connection open, in unsafe state for future commands\n\r\n**Version**: 4.5.3\r\n\r\n**Platform**: Python 3.8 on Ubuntu / Generic\r\n\r\n**Description**: Canceling async Redis command leaves connection open, in unsafe state for future commands\r\n\r\nThis is a reincarnation of #2624, which was closed with an incomplete fix and a possibly unreliable test case. This is the same issue that recently got a lot of attention due to ChatGPT outage, and that remains only partially fixed. The cancellation shielding introduced in #2641 addressed only the cancellation of Redis pipeline operation, but non-pipelined ops are still vulnerable.\r\n\r\nThis time I am attaching a script that reproduces the issue reliably without relying on an external, slow Redis server. This is achieved by inserting a small TCP socket proxy between the Redis client and local Redis server, with the proxy introducing a 0.1 second delay when sending data in either direction. \r\n\r\nRunning this script with a Redis server running locally on port 6379 produces the following output:\r\n```\r\n$ python redis_cancel.py \r\nmanaged to cancel the task, connection is left open with unread response\r\nbar: b'foo'\r\nping: False\r\nfoo: b'PONG'\r\n```\r\n\r\n```python\r\nimport asyncio\r\n\r\nfrom redis.asyncio import Redis\r\n\r\n\r\nasync def pipe(reader: asyncio.StreamReader, writer: asyncio.StreamWriter, delay: float, name=''):\r\n while data := await reader.read(1000):\r\n # print(name, 'received:', data)\r\n await asyncio.sleep(delay)\r\n writer.write(data)\r\n await writer.drain()\r\n\r\n\r\nclass DelayProxy:\r\n\r\n def __init__(self, addr, redis_addr, delay: float):\r\n self.addr = addr\r\n self.redis_addr = redis_addr\r\n self.delay = delay\r\n\r\n async def start(self):\r\n server = await asyncio.start_server(self.handle, *self.addr)\r\n asyncio.create_task(server.serve_forever())\r\n\r\n async def handle(self, reader, writer):\r\n # establish connection to redis\r\n redis_reader, redis_writer = await asyncio.open_connection(*self.redis_addr)\r\n pipe1 = asyncio.create_task(pipe(reader, redis_writer, self.delay, 'to redis:'))\r\n pipe2 = asyncio.create_task(pipe(redis_reader, writer, self.delay, 'from redis:'))\r\n await asyncio.gather(pipe1, pipe2)\r\n\r\n\r\nasync def main():\r\n\r\n # create a tcp socket proxy that relays data to Redis and back, inserting 0.1 seconds of delay\r\n dp = DelayProxy(addr=('localhost', 6380), redis_addr=('localhost', 6379), delay=0.1)\r\n await dp.start()\r\n\r\n # note that we connect to proxy, rather than to Redis directly\r\n async with Redis(host='localhost', port=6380) as r:\r\n\r\n await r.set('foo', 'foo')\r\n await r.set('bar', 'bar')\r\n\r\n t = asyncio.create_task(r.get('foo'))\r\n await asyncio.sleep(0.050)\r\n t.cancel()\r\n try:\r\n await t\r\n print('try again, we did not cancel the task in time')\r\n except asyncio.CancelledError:\r\n print('managed to cancel the task, connection is left open with unread response')\r\n\r\n print('bar:', await r.get('bar'))\r\n print('ping:', await r.ping())\r\n print('foo:', await r.get('foo'))\r\n\r\nif __name__ == '__main__':\r\n asyncio.run(main())\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=\"4.5.3\",\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.asyncio\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n project_urls={\n \"Documentation\": \"https://redis.readthedocs.io/en/latest/\",\n \"Changes\": \"https://github.com/redis/redis-py/releases\",\n \"Code\": \"https://github.com/redis/redis-py\",\n \"Issue tracker\": \"https://github.com/redis/redis-py/issues\",\n },\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n install_requires=[\n 'importlib-metadata >= 1.0; python_version < \"3.8\"',\n 'typing-extensions; python_version<\"3.8\"',\n 'async-timeout>=4.0.2; python_version<=\"3.11.2\"',\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n \"ocsp\": [\"cryptography>=36.0.1\", \"pyopenssl==20.0.1\", \"requests>=2.26.0\"],\n },\n)\n", "path": "setup.py"}]}
1,918
91
gh_patches_debug_3335
rasdani/github-patches
git_diff
mkdocs__mkdocs-413
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `mkdocs new` will overwrite an existing index.md file without warning. If you run the command: `mkdocs new /path/to/dir` and `/path/to/dir/docs/index.md` already exists it will be replaced with out placeholder. </issue> <code> [start of mkdocs/new.py] 1 # coding: utf-8 2 from __future__ import print_function 3 import os 4 5 config_text = 'site_name: My Docs\n' 6 index_text = """# Welcome to MkDocs 7 8 For full documentation visit [mkdocs.org](http://mkdocs.org). 9 10 ## Commands 11 12 * `mkdocs new [dir-name]` - Create a new project. 13 * `mkdocs serve` - Start the live-reloading docs server. 14 * `mkdocs build` - Build the documentation site. 15 * `mkdocs help` - Print this help message. 16 17 ## Project layout 18 19 mkdocs.yml # The configuration file. 20 docs/ 21 index.md # The documentation homepage. 22 ... # Other markdown pages, images and other files. 23 """ 24 25 26 def new(args, options): 27 if len(args) != 1: 28 print("Usage 'mkdocs new [directory-name]'") 29 return 30 31 output_dir = args[0] 32 33 docs_dir = os.path.join(output_dir, 'docs') 34 config_path = os.path.join(output_dir, 'mkdocs.yml') 35 index_path = os.path.join(docs_dir, 'index.md') 36 37 if os.path.exists(config_path): 38 print('Project already exists.') 39 return 40 41 if not os.path.exists(output_dir): 42 print('Creating project directory: %s' % output_dir) 43 os.mkdir(output_dir) 44 45 print('Writing config file: %s' % config_path) 46 open(config_path, 'w').write(config_text) 47 48 print('Writing initial docs: %s' % index_path) 49 if not os.path.exists(docs_dir): 50 os.mkdir(docs_dir) 51 open(index_path, 'w').write(index_text) 52 [end of mkdocs/new.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mkdocs/new.py b/mkdocs/new.py --- a/mkdocs/new.py +++ b/mkdocs/new.py @@ -45,6 +45,9 @@ print('Writing config file: %s' % config_path) open(config_path, 'w').write(config_text) + if os.path.exists(index_path): + return + print('Writing initial docs: %s' % index_path) if not os.path.exists(docs_dir): os.mkdir(docs_dir)
{"golden_diff": "diff --git a/mkdocs/new.py b/mkdocs/new.py\n--- a/mkdocs/new.py\n+++ b/mkdocs/new.py\n@@ -45,6 +45,9 @@\n print('Writing config file: %s' % config_path)\n open(config_path, 'w').write(config_text)\n \n+ if os.path.exists(index_path):\n+ return\n+\n print('Writing initial docs: %s' % index_path)\n if not os.path.exists(docs_dir):\n os.mkdir(docs_dir)\n", "issue": "`mkdocs new` will overwrite an existing index.md file without warning.\nIf you run the command: `mkdocs new /path/to/dir` and `/path/to/dir/docs/index.md` already exists it will be replaced with out placeholder.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\nimport os\n\nconfig_text = 'site_name: My Docs\\n'\nindex_text = \"\"\"# Welcome to MkDocs\n\nFor full documentation visit [mkdocs.org](http://mkdocs.org).\n\n## Commands\n\n* `mkdocs new [dir-name]` - Create a new project.\n* `mkdocs serve` - Start the live-reloading docs server.\n* `mkdocs build` - Build the documentation site.\n* `mkdocs help` - Print this help message.\n\n## Project layout\n\n mkdocs.yml # The configuration file.\n docs/\n index.md # The documentation homepage.\n ... # Other markdown pages, images and other files.\n\"\"\"\n\n\ndef new(args, options):\n if len(args) != 1:\n print(\"Usage 'mkdocs new [directory-name]'\")\n return\n\n output_dir = args[0]\n\n docs_dir = os.path.join(output_dir, 'docs')\n config_path = os.path.join(output_dir, 'mkdocs.yml')\n index_path = os.path.join(docs_dir, 'index.md')\n\n if os.path.exists(config_path):\n print('Project already exists.')\n return\n\n if not os.path.exists(output_dir):\n print('Creating project directory: %s' % output_dir)\n os.mkdir(output_dir)\n\n print('Writing config file: %s' % config_path)\n open(config_path, 'w').write(config_text)\n\n print('Writing initial docs: %s' % index_path)\n if not os.path.exists(docs_dir):\n os.mkdir(docs_dir)\n open(index_path, 'w').write(index_text)\n", "path": "mkdocs/new.py"}]}
1,039
115
gh_patches_debug_25234
rasdani/github-patches
git_diff
cupy__cupy-1947
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `cupy.allclose` does not support comparison of complex-number arrays As title. The reason is that in this line of the ufunc helper https://github.com/cupy/cupy/blob/bb99716ffee178368ec71c875ace0553053cadc2/cupy/logic/comparison.py#L6 only `float16` (`e`), `float32` (`f`), and `float64` (`d`) arrays are included. Note that the NumPy counterpart does support comparing complex arrays using the same comparison logic, and I believe this can be easily patched by adding another ufunc helper for `complex64` and `complex128` arrays. PR to follow. </issue> <code> [start of cupy/logic/comparison.py] 1 from cupy import core 2 3 4 _is_close = core.create_ufunc( 5 'cupy_is_close', 6 ('eeee?->?', 'ffff?->?', 'dddd?->?'), 7 ''' 8 bool equal_nan = in4; 9 if (isfinite(in0) && isfinite(in1)) { 10 out0 = fabs(in0 - in1) <= in3 + in2 * fabs(in1); 11 } else if (equal_nan) { 12 out0 = (in0 == in1) || (isnan(in0) && isnan(in1)); 13 } else { 14 out0 = (in0 == in1); 15 } 16 ''' 17 ) 18 19 20 def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): 21 """Returns True if two arrays are element-wise equal within a tolerance. 22 23 Two values in ``a`` and ``b`` are considiered equal when the following 24 equation is satisfied. 25 26 .. math:: 27 28 |a - b| \\le \\mathrm{atol} + \\mathrm{rtol} |b| 29 30 Args: 31 a (cupy.ndarray): Input array to compare. 32 b (cupy.ndarray): Input array to compare. 33 rtol (float): The relative tolerance. 34 atol (float): The absolute tolerance. 35 equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal 36 to NaN's in ``b``. 37 38 Returns: 39 bool: if ``True``, two arrays are element-wise equal within a 40 tolerance. 41 42 .. seealso:: :func:`numpy.allclose` 43 44 """ 45 res = isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan).all() 46 return bool(res) 47 48 49 def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): 50 """Returns a boolean array where two arrays are equal within a tolerance. 51 52 Two values in ``a`` and ``b`` are considiered equal when the following 53 equation is satisfied. 54 55 .. math:: 56 57 |a - b| \\le \\mathrm{atol} + \\mathrm{rtol} |b| 58 59 Args: 60 a (cupy.ndarray): Input array to compare. 61 b (cupy.ndarray): Input array to compare. 62 rtol (float): The relative tolerance. 63 atol (float): The absolute tolerance. 64 equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal 65 to NaN's in ``b``. 66 67 Returns: 68 cupy.ndarray: A boolean array storing where ``a`` and ``b`` are equal. 69 70 .. seealso:: :func:`numpy.isclose` 71 72 """ 73 return _is_close(a, b, rtol, atol, equal_nan) 74 75 76 # TODO(okuta): Implement array_equal 77 78 79 # TODO(okuta): Implement array_equiv 80 81 82 greater = core.greater 83 84 85 greater_equal = core.greater_equal 86 87 88 less = core.less 89 90 91 less_equal = core.less_equal 92 93 94 equal = core.equal 95 96 97 not_equal = core.not_equal 98 [end of cupy/logic/comparison.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/logic/comparison.py b/cupy/logic/comparison.py --- a/cupy/logic/comparison.py +++ b/cupy/logic/comparison.py @@ -1,3 +1,6 @@ +import numpy + +import cupy from cupy import core @@ -16,6 +19,24 @@ ''' ) +# Note that in cupy/core/include/cupy/complex.cuh, we already got isfinite and +# isnan working for complex numbers, so just replace fabs above by abs (from +# thrust) and we are ready to go +_is_close_complex = core.create_ufunc( + 'cupy_is_close_complex', + ('FFff?->?', 'DDdd?->?'), + ''' + bool equal_nan = in4; + if (isfinite(in0) && isfinite(in1)) { + out0 = abs(in0 - in1) <= in3 + in2 * abs(in1); + } else if (equal_nan) { + out0 = (in0 == in1) || (isnan(in0) && isnan(in1)); + } else { + out0 = (in0 == in1); + } + ''' +) + def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): """Returns True if two arrays are element-wise equal within a tolerance. @@ -70,7 +91,13 @@ .. seealso:: :func:`numpy.isclose` """ - return _is_close(a, b, rtol, atol, equal_nan) + a = cupy.asanyarray(a) + b = cupy.asanyarray(b) + if (a.dtype in [numpy.complex64, numpy.complex128]) or \ + (b.dtype in [numpy.complex64, numpy.complex128]): + return _is_close_complex(a, b, rtol, atol, equal_nan) + else: + return _is_close(a, b, rtol, atol, equal_nan) # TODO(okuta): Implement array_equal
{"golden_diff": "diff --git a/cupy/logic/comparison.py b/cupy/logic/comparison.py\n--- a/cupy/logic/comparison.py\n+++ b/cupy/logic/comparison.py\n@@ -1,3 +1,6 @@\n+import numpy\n+\n+import cupy\n from cupy import core\n \n \n@@ -16,6 +19,24 @@\n '''\n )\n \n+# Note that in cupy/core/include/cupy/complex.cuh, we already got isfinite and\n+# isnan working for complex numbers, so just replace fabs above by abs (from\n+# thrust) and we are ready to go\n+_is_close_complex = core.create_ufunc(\n+ 'cupy_is_close_complex',\n+ ('FFff?->?', 'DDdd?->?'),\n+ '''\n+ bool equal_nan = in4;\n+ if (isfinite(in0) && isfinite(in1)) {\n+ out0 = abs(in0 - in1) <= in3 + in2 * abs(in1);\n+ } else if (equal_nan) {\n+ out0 = (in0 == in1) || (isnan(in0) && isnan(in1));\n+ } else {\n+ out0 = (in0 == in1);\n+ }\n+ '''\n+)\n+\n \n def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n \"\"\"Returns True if two arrays are element-wise equal within a tolerance.\n@@ -70,7 +91,13 @@\n .. seealso:: :func:`numpy.isclose`\n \n \"\"\"\n- return _is_close(a, b, rtol, atol, equal_nan)\n+ a = cupy.asanyarray(a)\n+ b = cupy.asanyarray(b)\n+ if (a.dtype in [numpy.complex64, numpy.complex128]) or \\\n+ (b.dtype in [numpy.complex64, numpy.complex128]):\n+ return _is_close_complex(a, b, rtol, atol, equal_nan)\n+ else:\n+ return _is_close(a, b, rtol, atol, equal_nan)\n \n \n # TODO(okuta): Implement array_equal\n", "issue": "`cupy.allclose` does not support comparison of complex-number arrays\nAs title. The reason is that in this line of the ufunc helper \r\nhttps://github.com/cupy/cupy/blob/bb99716ffee178368ec71c875ace0553053cadc2/cupy/logic/comparison.py#L6\r\nonly `float16` (`e`), `float32` (`f`), and `float64` (`d`) arrays are included. Note that the NumPy counterpart does support comparing complex arrays using the same comparison logic, and I believe this can be easily patched by adding another ufunc helper for `complex64` and `complex128` arrays. PR to follow.\n", "before_files": [{"content": "from cupy import core\n\n\n_is_close = core.create_ufunc(\n 'cupy_is_close',\n ('eeee?->?', 'ffff?->?', 'dddd?->?'),\n '''\n bool equal_nan = in4;\n if (isfinite(in0) && isfinite(in1)) {\n out0 = fabs(in0 - in1) <= in3 + in2 * fabs(in1);\n } else if (equal_nan) {\n out0 = (in0 == in1) || (isnan(in0) && isnan(in1));\n } else {\n out0 = (in0 == in1);\n }\n '''\n)\n\n\ndef allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n \"\"\"Returns True if two arrays are element-wise equal within a tolerance.\n\n Two values in ``a`` and ``b`` are considiered equal when the following\n equation is satisfied.\n\n .. math::\n\n |a - b| \\\\le \\\\mathrm{atol} + \\\\mathrm{rtol} |b|\n\n Args:\n a (cupy.ndarray): Input array to compare.\n b (cupy.ndarray): Input array to compare.\n rtol (float): The relative tolerance.\n atol (float): The absolute tolerance.\n equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal\n to NaN's in ``b``.\n\n Returns:\n bool: if ``True``, two arrays are element-wise equal within a\n tolerance.\n\n .. seealso:: :func:`numpy.allclose`\n\n \"\"\"\n res = isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan).all()\n return bool(res)\n\n\ndef isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n \"\"\"Returns a boolean array where two arrays are equal within a tolerance.\n\n Two values in ``a`` and ``b`` are considiered equal when the following\n equation is satisfied.\n\n .. math::\n\n |a - b| \\\\le \\\\mathrm{atol} + \\\\mathrm{rtol} |b|\n\n Args:\n a (cupy.ndarray): Input array to compare.\n b (cupy.ndarray): Input array to compare.\n rtol (float): The relative tolerance.\n atol (float): The absolute tolerance.\n equal_nan (bool): If ``True``, NaN's in ``a`` will be considered equal\n to NaN's in ``b``.\n\n Returns:\n cupy.ndarray: A boolean array storing where ``a`` and ``b`` are equal.\n\n .. seealso:: :func:`numpy.isclose`\n\n \"\"\"\n return _is_close(a, b, rtol, atol, equal_nan)\n\n\n# TODO(okuta): Implement array_equal\n\n\n# TODO(okuta): Implement array_equiv\n\n\ngreater = core.greater\n\n\ngreater_equal = core.greater_equal\n\n\nless = core.less\n\n\nless_equal = core.less_equal\n\n\nequal = core.equal\n\n\nnot_equal = core.not_equal\n", "path": "cupy/logic/comparison.py"}]}
1,588
485
gh_patches_debug_5142
rasdani/github-patches
git_diff
microsoft__qlib-1246
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Misleading Error "Please install necessary libs for CatBoostModel." ## 🐛 Bug Description Qlib does not require the installation of packages like `CatBoostModel` But the output looks a little misleading. ## To Reproduce Run `examples/workflow_by_code.ipynb` in jupyter notebook. ## Expected Behavior Successfully run the script without installing CatBoostModel and warning. ## Screenshot ![image](https://user-images.githubusercontent.com/465606/177905185-57a43596-b180-4c52-8df9-0e88b95640b4.png) <!-- A screenshot of the error message or anything shouldn't appear--> </issue> <code> [start of qlib/contrib/model/__init__.py] 1 # Copyright (c) Microsoft Corporation. 2 # Licensed under the MIT License. 3 try: 4 from .catboost_model import CatBoostModel 5 except ModuleNotFoundError: 6 CatBoostModel = None 7 print("Please install necessary libs for CatBoostModel.") 8 try: 9 from .double_ensemble import DEnsembleModel 10 from .gbdt import LGBModel 11 except ModuleNotFoundError: 12 DEnsembleModel, LGBModel = None, None 13 print( 14 "ModuleNotFoundError. DEnsembleModel and LGBModel are skipped. (optional: maybe installing lightgbm can fix it.)" 15 ) 16 try: 17 from .xgboost import XGBModel 18 except ModuleNotFoundError: 19 XGBModel = None 20 print("ModuleNotFoundError. XGBModel is skipped(optional: maybe installing xgboost can fix it).") 21 try: 22 from .linear import LinearModel 23 except ModuleNotFoundError: 24 LinearModel = None 25 print("ModuleNotFoundError. LinearModel is skipped(optional: maybe installing scipy and sklearn can fix it).") 26 # import pytorch models 27 try: 28 from .pytorch_alstm import ALSTM 29 from .pytorch_gats import GATs 30 from .pytorch_gru import GRU 31 from .pytorch_lstm import LSTM 32 from .pytorch_nn import DNNModelPytorch 33 from .pytorch_tabnet import TabnetModel 34 from .pytorch_sfm import SFM_Model 35 from .pytorch_tcn import TCN 36 from .pytorch_add import ADD 37 38 pytorch_classes = (ALSTM, GATs, GRU, LSTM, DNNModelPytorch, TabnetModel, SFM_Model, TCN, ADD) 39 except ModuleNotFoundError: 40 pytorch_classes = () 41 print("ModuleNotFoundError. PyTorch models are skipped (optional: maybe installing pytorch can fix it).") 42 43 all_model_classes = (CatBoostModel, DEnsembleModel, LGBModel, XGBModel, LinearModel) + pytorch_classes 44 [end of qlib/contrib/model/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qlib/contrib/model/__init__.py b/qlib/contrib/model/__init__.py --- a/qlib/contrib/model/__init__.py +++ b/qlib/contrib/model/__init__.py @@ -4,7 +4,7 @@ from .catboost_model import CatBoostModel except ModuleNotFoundError: CatBoostModel = None - print("Please install necessary libs for CatBoostModel.") + print("ModuleNotFoundError. CatBoostModel are skipped. (optional: maybe installing CatBoostModel can fix it.)") try: from .double_ensemble import DEnsembleModel from .gbdt import LGBModel
{"golden_diff": "diff --git a/qlib/contrib/model/__init__.py b/qlib/contrib/model/__init__.py\n--- a/qlib/contrib/model/__init__.py\n+++ b/qlib/contrib/model/__init__.py\n@@ -4,7 +4,7 @@\n from .catboost_model import CatBoostModel\n except ModuleNotFoundError:\n CatBoostModel = None\n- print(\"Please install necessary libs for CatBoostModel.\")\n+ print(\"ModuleNotFoundError. CatBoostModel are skipped. (optional: maybe installing CatBoostModel can fix it.)\")\n try:\n from .double_ensemble import DEnsembleModel\n from .gbdt import LGBModel\n", "issue": "Misleading Error \"Please install necessary libs for CatBoostModel.\"\n## \ud83d\udc1b Bug Description\r\n\r\nQlib does not require the installation of packages like `CatBoostModel`\r\n\r\nBut the output looks a little misleading.\r\n\r\n\r\n## To Reproduce\r\nRun `examples/workflow_by_code.ipynb` in jupyter notebook.\r\n\r\n## Expected Behavior\r\n\r\nSuccessfully run the script without installing CatBoostModel and warning.\r\n\r\n## Screenshot\r\n![image](https://user-images.githubusercontent.com/465606/177905185-57a43596-b180-4c52-8df9-0e88b95640b4.png)\r\n\r\n<!-- A screenshot of the error message or anything shouldn't appear-->\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\ntry:\n from .catboost_model import CatBoostModel\nexcept ModuleNotFoundError:\n CatBoostModel = None\n print(\"Please install necessary libs for CatBoostModel.\")\ntry:\n from .double_ensemble import DEnsembleModel\n from .gbdt import LGBModel\nexcept ModuleNotFoundError:\n DEnsembleModel, LGBModel = None, None\n print(\n \"ModuleNotFoundError. DEnsembleModel and LGBModel are skipped. (optional: maybe installing lightgbm can fix it.)\"\n )\ntry:\n from .xgboost import XGBModel\nexcept ModuleNotFoundError:\n XGBModel = None\n print(\"ModuleNotFoundError. XGBModel is skipped(optional: maybe installing xgboost can fix it).\")\ntry:\n from .linear import LinearModel\nexcept ModuleNotFoundError:\n LinearModel = None\n print(\"ModuleNotFoundError. LinearModel is skipped(optional: maybe installing scipy and sklearn can fix it).\")\n# import pytorch models\ntry:\n from .pytorch_alstm import ALSTM\n from .pytorch_gats import GATs\n from .pytorch_gru import GRU\n from .pytorch_lstm import LSTM\n from .pytorch_nn import DNNModelPytorch\n from .pytorch_tabnet import TabnetModel\n from .pytorch_sfm import SFM_Model\n from .pytorch_tcn import TCN\n from .pytorch_add import ADD\n\n pytorch_classes = (ALSTM, GATs, GRU, LSTM, DNNModelPytorch, TabnetModel, SFM_Model, TCN, ADD)\nexcept ModuleNotFoundError:\n pytorch_classes = ()\n print(\"ModuleNotFoundError. PyTorch models are skipped (optional: maybe installing pytorch can fix it).\")\n\nall_model_classes = (CatBoostModel, DEnsembleModel, LGBModel, XGBModel, LinearModel) + pytorch_classes\n", "path": "qlib/contrib/model/__init__.py"}]}
1,215
148
gh_patches_debug_41207
rasdani/github-patches
git_diff
svthalia__concrexit-2399
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add sales order API ### Describe the solution you'd like - An api endpoint `api/v2/sales/order/<uuid>/` that returns the order information that is shown on the website when you pay for the url from a qr code. - It should be possible to pay the order through `api/v2/payments/sales/order/<uuid>`. This might already be possible, I haven't checked. ### Motivation This way people will be able to pay from the app, instead of through the website where they may even need to log in. I think this is an obvious case where the ease of use of an app is very welcome. </issue> <code> [start of website/sales/api/v2/views.py] 1 from django.db.models import Q 2 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope 3 from rest_framework.exceptions import PermissionDenied 4 from rest_framework.generics import ( 5 ListAPIView, 6 RetrieveAPIView, 7 CreateAPIView, 8 UpdateAPIView, 9 DestroyAPIView, 10 ) 11 from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly 12 13 from sales.api.v2.admin.serializers.order import OrderSerializer, OrderListSerializer 14 from sales.api.v2.admin.views import ( 15 OrderListView, 16 OrderDetailView, 17 ShiftDetailView, 18 ShiftListView, 19 ) 20 from sales.api.v2.serializers.user_order import UserOrderSerializer 21 from sales.api.v2.serializers.user_shift import UserShiftSerializer 22 from sales.models.shift import SelfOrderPeriod, Shift 23 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod 24 25 26 class UserShiftListView(ShiftListView): 27 serializer_class = UserShiftSerializer 28 # queryset = SelfOrderPeriod.objects.all() 29 permission_classes = [ 30 IsAuthenticatedOrTokenHasScope, 31 DjangoModelPermissionsOrAnonReadOnly, 32 ] 33 required_scopes = ["sales:read"] 34 35 36 class UserShiftDetailView(ShiftDetailView): 37 serializer_class = UserShiftSerializer 38 # queryset = SelfOrderPeriod.objects.all() 39 permission_classes = [ 40 IsAuthenticatedOrTokenHasScope, 41 DjangoModelPermissionsOrAnonReadOnly, 42 ] 43 required_scopes = ["sales:read"] 44 45 46 class UserOrderListView(OrderListView): 47 permission_classes = [ 48 IsAuthenticatedOrTokenHasScopeForMethod, 49 ] 50 required_scopes_per_method = { 51 "GET": ["sales:read"], 52 "POST": ["sales:order"], 53 } 54 method_serializer_classes = { 55 ("GET",): OrderListSerializer, 56 ("POST",): UserOrderSerializer, 57 } 58 59 def create(self, request, *args, **kwargs): 60 shift = Shift.objects.get(pk=kwargs["pk"]) 61 if not shift.user_orders_allowed: 62 raise PermissionDenied 63 return super(UserOrderListView, self).create(request, *args, **kwargs) 64 65 def perform_create(self, serializer): 66 serializer.save( 67 payer_id=self.request.member.pk, created_by_id=self.request.member.pk 68 ) 69 70 def get_queryset(self): 71 queryset = super(UserOrderListView, self).get_queryset() 72 return queryset.filter( 73 Q(payer=self.request.member) | Q(created_by=self.request.member) 74 ) 75 76 77 class UserOrderDetailView(OrderDetailView): 78 serializer_class = UserOrderSerializer 79 permission_classes = [ 80 IsAuthenticatedOrTokenHasScopeForMethod, 81 ] 82 required_scopes_per_method = { 83 "GET": ["sales:read"], 84 "PATCH": ["sales:order"], 85 "PUT": ["sales:order"], 86 "DELETE": ["sales:order"], 87 } 88 89 def get_queryset(self): 90 queryset = super(UserOrderDetailView, self).get_queryset() 91 return queryset.filter( 92 Q(payer=self.request.member) | Q(created_by=self.request.member) 93 ) 94 95 def update(self, request, *args, **kwargs): 96 if not self.get_object().shift.user_orders_allowed: 97 raise PermissionDenied 98 if self.get_object().payment: 99 raise PermissionDenied 100 return super(UserOrderDetailView, self).update(request, *args, **kwargs) 101 102 def partial_update(self, request, *args, **kwargs): 103 if not self.get_object().shift.user_orders_allowed: 104 raise PermissionDenied 105 if self.get_object().payment: 106 raise PermissionDenied 107 return super(UserOrderDetailView, self).partial_update(request, *args, **kwargs) 108 109 def destroy(self, request, *args, **kwargs): 110 if not self.get_object().shift.user_orders_allowed: 111 raise PermissionDenied 112 if self.get_object().payment: 113 raise PermissionDenied 114 return super(UserOrderDetailView, self).destroy(request, *args, **kwargs) 115 [end of website/sales/api/v2/views.py] [start of website/sales/api/v2/urls.py] 1 from django.urls import path 2 3 from sales.api.v2.views import ( 4 UserShiftListView, 5 UserShiftDetailView, 6 UserOrderListView, 7 UserOrderDetailView, 8 ) 9 10 app_name = "sales" 11 12 urlpatterns = [ 13 path("sales/shifts/", UserShiftListView.as_view(), name="user-shift-list"), 14 path( 15 "sales/shifts/<int:pk>/", 16 UserShiftDetailView.as_view(), 17 name="user-shift-detail", 18 ), 19 path( 20 "sales/shifts/<int:pk>/orders/", 21 UserOrderListView.as_view(), 22 name="user-order-list", 23 ), 24 path( 25 "sales/orders/<uuid:pk>/", 26 UserOrderDetailView.as_view(), 27 name="user-order-detail", 28 ), 29 ] 30 [end of website/sales/api/v2/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/sales/api/v2/urls.py b/website/sales/api/v2/urls.py --- a/website/sales/api/v2/urls.py +++ b/website/sales/api/v2/urls.py @@ -1,6 +1,7 @@ from django.urls import path from sales.api.v2.views import ( + OrderClaimView, UserShiftListView, UserShiftDetailView, UserOrderListView, @@ -10,6 +11,7 @@ app_name = "sales" urlpatterns = [ + path("sales/order/<uuid:pk>/claim/", OrderClaimView.as_view(), name="order-claim"), path("sales/shifts/", UserShiftListView.as_view(), name="user-shift-list"), path( "sales/shifts/<int:pk>/", diff --git a/website/sales/api/v2/views.py b/website/sales/api/v2/views.py --- a/website/sales/api/v2/views.py +++ b/website/sales/api/v2/views.py @@ -1,25 +1,23 @@ from django.db.models import Q from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope from rest_framework.exceptions import PermissionDenied -from rest_framework.generics import ( - ListAPIView, - RetrieveAPIView, - CreateAPIView, - UpdateAPIView, - DestroyAPIView, -) +from rest_framework.generics import GenericAPIView from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly +from rest_framework.response import Response +from rest_framework.schemas.openapi import AutoSchema -from sales.api.v2.admin.serializers.order import OrderSerializer, OrderListSerializer +from sales.api.v2.admin.serializers.order import OrderListSerializer from sales.api.v2.admin.views import ( - OrderListView, OrderDetailView, + OrderListView, ShiftDetailView, ShiftListView, ) +from sales import services from sales.api.v2.serializers.user_order import UserOrderSerializer from sales.api.v2.serializers.user_shift import UserShiftSerializer -from sales.models.shift import SelfOrderPeriod, Shift +from sales.models.shift import Shift +from sales.models.order import Order from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod @@ -111,4 +109,40 @@ raise PermissionDenied if self.get_object().payment: raise PermissionDenied - return super(UserOrderDetailView, self).destroy(request, *args, **kwargs) + + +class OrderClaimView(GenericAPIView): + """Claims an order to be paid by the current user.""" + + class OrderClaimViewSchema(AutoSchema): + def get_request_serializer(self, path, method): + # This endpoint does not expect any content in the request body. + return None + + queryset = Order.objects.all() + serializer_class = UserOrderSerializer + schema = OrderClaimViewSchema(operation_id_base="claimOrder") + permission_classes = [IsAuthenticatedOrTokenHasScope] + required_scopes = ["sales:order"] + + def patch(self, request, *args, **kwargs): + if request.member is None: + raise PermissionDenied("You need to be a member to pay for an order.") + + order = self.get_object() + if order.payment: + raise PermissionDenied(detail="This order was already paid for.") + + if order.payer is not None and order.payer != request.member: + raise PermissionDenied(detail="This order is not yours.") + + order.payer = request.member + order.save() + + if order.age_restricted and not services.is_adult(request.member): + raise PermissionDenied( + "The age restrictions on this order do not allow you to pay for this order." + ) + + serializer = self.get_serializer(order) + return Response(serializer.data)
{"golden_diff": "diff --git a/website/sales/api/v2/urls.py b/website/sales/api/v2/urls.py\n--- a/website/sales/api/v2/urls.py\n+++ b/website/sales/api/v2/urls.py\n@@ -1,6 +1,7 @@\n from django.urls import path\n \n from sales.api.v2.views import (\n+ OrderClaimView,\n UserShiftListView,\n UserShiftDetailView,\n UserOrderListView,\n@@ -10,6 +11,7 @@\n app_name = \"sales\"\n \n urlpatterns = [\n+ path(\"sales/order/<uuid:pk>/claim/\", OrderClaimView.as_view(), name=\"order-claim\"),\n path(\"sales/shifts/\", UserShiftListView.as_view(), name=\"user-shift-list\"),\n path(\n \"sales/shifts/<int:pk>/\",\ndiff --git a/website/sales/api/v2/views.py b/website/sales/api/v2/views.py\n--- a/website/sales/api/v2/views.py\n+++ b/website/sales/api/v2/views.py\n@@ -1,25 +1,23 @@\n from django.db.models import Q\n from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\n from rest_framework.exceptions import PermissionDenied\n-from rest_framework.generics import (\n- ListAPIView,\n- RetrieveAPIView,\n- CreateAPIView,\n- UpdateAPIView,\n- DestroyAPIView,\n-)\n+from rest_framework.generics import GenericAPIView\n from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\n+from rest_framework.response import Response\n+from rest_framework.schemas.openapi import AutoSchema\n \n-from sales.api.v2.admin.serializers.order import OrderSerializer, OrderListSerializer\n+from sales.api.v2.admin.serializers.order import OrderListSerializer\n from sales.api.v2.admin.views import (\n- OrderListView,\n OrderDetailView,\n+ OrderListView,\n ShiftDetailView,\n ShiftListView,\n )\n+from sales import services\n from sales.api.v2.serializers.user_order import UserOrderSerializer\n from sales.api.v2.serializers.user_shift import UserShiftSerializer\n-from sales.models.shift import SelfOrderPeriod, Shift\n+from sales.models.shift import Shift\n+from sales.models.order import Order\n from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n \n \n@@ -111,4 +109,40 @@\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n- return super(UserOrderDetailView, self).destroy(request, *args, **kwargs)\n+\n+\n+class OrderClaimView(GenericAPIView):\n+ \"\"\"Claims an order to be paid by the current user.\"\"\"\n+\n+ class OrderClaimViewSchema(AutoSchema):\n+ def get_request_serializer(self, path, method):\n+ # This endpoint does not expect any content in the request body.\n+ return None\n+\n+ queryset = Order.objects.all()\n+ serializer_class = UserOrderSerializer\n+ schema = OrderClaimViewSchema(operation_id_base=\"claimOrder\")\n+ permission_classes = [IsAuthenticatedOrTokenHasScope]\n+ required_scopes = [\"sales:order\"]\n+\n+ def patch(self, request, *args, **kwargs):\n+ if request.member is None:\n+ raise PermissionDenied(\"You need to be a member to pay for an order.\")\n+\n+ order = self.get_object()\n+ if order.payment:\n+ raise PermissionDenied(detail=\"This order was already paid for.\")\n+\n+ if order.payer is not None and order.payer != request.member:\n+ raise PermissionDenied(detail=\"This order is not yours.\")\n+\n+ order.payer = request.member\n+ order.save()\n+\n+ if order.age_restricted and not services.is_adult(request.member):\n+ raise PermissionDenied(\n+ \"The age restrictions on this order do not allow you to pay for this order.\"\n+ )\n+\n+ serializer = self.get_serializer(order)\n+ return Response(serializer.data)\n", "issue": "Add sales order API\n### Describe the solution you'd like\r\n- An api endpoint `api/v2/sales/order/<uuid>/` that returns the order information that is shown on the website when you pay for the url from a qr code.\r\n- It should be possible to pay the order through `api/v2/payments/sales/order/<uuid>`. This might already be possible, I haven't checked.\r\n\r\n### Motivation\r\nThis way people will be able to pay from the app, instead of through the website where they may even need to log in. I think this is an obvious case where the ease of use of an app is very welcome.\r\n\r\n\n", "before_files": [{"content": "from django.db.models import Q\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n CreateAPIView,\n UpdateAPIView,\n DestroyAPIView,\n)\nfrom rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\n\nfrom sales.api.v2.admin.serializers.order import OrderSerializer, OrderListSerializer\nfrom sales.api.v2.admin.views import (\n OrderListView,\n OrderDetailView,\n ShiftDetailView,\n ShiftListView,\n)\nfrom sales.api.v2.serializers.user_order import UserOrderSerializer\nfrom sales.api.v2.serializers.user_shift import UserShiftSerializer\nfrom sales.models.shift import SelfOrderPeriod, Shift\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass UserShiftListView(ShiftListView):\n serializer_class = UserShiftSerializer\n # queryset = SelfOrderPeriod.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"sales:read\"]\n\n\nclass UserShiftDetailView(ShiftDetailView):\n serializer_class = UserShiftSerializer\n # queryset = SelfOrderPeriod.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"sales:read\"]\n\n\nclass UserOrderListView(OrderListView):\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"sales:read\"],\n \"POST\": [\"sales:order\"],\n }\n method_serializer_classes = {\n (\"GET\",): OrderListSerializer,\n (\"POST\",): UserOrderSerializer,\n }\n\n def create(self, request, *args, **kwargs):\n shift = Shift.objects.get(pk=kwargs[\"pk\"])\n if not shift.user_orders_allowed:\n raise PermissionDenied\n return super(UserOrderListView, self).create(request, *args, **kwargs)\n\n def perform_create(self, serializer):\n serializer.save(\n payer_id=self.request.member.pk, created_by_id=self.request.member.pk\n )\n\n def get_queryset(self):\n queryset = super(UserOrderListView, self).get_queryset()\n return queryset.filter(\n Q(payer=self.request.member) | Q(created_by=self.request.member)\n )\n\n\nclass UserOrderDetailView(OrderDetailView):\n serializer_class = UserOrderSerializer\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"sales:read\"],\n \"PATCH\": [\"sales:order\"],\n \"PUT\": [\"sales:order\"],\n \"DELETE\": [\"sales:order\"],\n }\n\n def get_queryset(self):\n queryset = super(UserOrderDetailView, self).get_queryset()\n return queryset.filter(\n Q(payer=self.request.member) | Q(created_by=self.request.member)\n )\n\n def update(self, request, *args, **kwargs):\n if not self.get_object().shift.user_orders_allowed:\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n return super(UserOrderDetailView, self).update(request, *args, **kwargs)\n\n def partial_update(self, request, *args, **kwargs):\n if not self.get_object().shift.user_orders_allowed:\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n return super(UserOrderDetailView, self).partial_update(request, *args, **kwargs)\n\n def destroy(self, request, *args, **kwargs):\n if not self.get_object().shift.user_orders_allowed:\n raise PermissionDenied\n if self.get_object().payment:\n raise PermissionDenied\n return super(UserOrderDetailView, self).destroy(request, *args, **kwargs)\n", "path": "website/sales/api/v2/views.py"}, {"content": "from django.urls import path\n\nfrom sales.api.v2.views import (\n UserShiftListView,\n UserShiftDetailView,\n UserOrderListView,\n UserOrderDetailView,\n)\n\napp_name = \"sales\"\n\nurlpatterns = [\n path(\"sales/shifts/\", UserShiftListView.as_view(), name=\"user-shift-list\"),\n path(\n \"sales/shifts/<int:pk>/\",\n UserShiftDetailView.as_view(),\n name=\"user-shift-detail\",\n ),\n path(\n \"sales/shifts/<int:pk>/orders/\",\n UserOrderListView.as_view(),\n name=\"user-order-list\",\n ),\n path(\n \"sales/orders/<uuid:pk>/\",\n UserOrderDetailView.as_view(),\n name=\"user-order-detail\",\n ),\n]\n", "path": "website/sales/api/v2/urls.py"}]}
1,966
845