problem_id (string, 18–22 chars) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, 13–58 chars) | prompt (string, 1.71k–9.01k chars) | golden_diff (string, 151–4.94k chars) | verification_info (string, 465–11.3k chars) | num_tokens_prompt (int64, 557–2.05k) | num_tokens_diff (int64, 48–1.02k)
---|---|---|---|---|---|---|---|---
gh_patches_debug_24217 | rasdani/github-patches | git_diff | docker__docker-py-288
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Please don't require exact versions in requirements
It's a good idea to pin requirements in applications.
It's a bad idea to pin requirements in libraries. Doing so makes reusing libraries difficult. If you have minimum requirements, that's fine.
</issue>
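A minimal illustration of the distinction the issue draws, written as requirement specifiers. The package names come from the fix shown further down; the exact pinned versions are assumptions for the sake of the example.

```python
# Hypothetical requirement lists; only the package names are taken from this repo.
pinned = [
    "requests==2.2.1",  # exact pin: downstream projects are forced onto one version
    "six==1.3.0",
]
minimums = [
    "requests>=2.2.1",  # minimum bound: any compatible newer release still satisfies it
    "six>=1.3.0",
]
```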
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4 from setuptools import setup
5
6 ROOT_DIR = os.path.dirname(__file__)
7 SOURCE_DIR = os.path.join(ROOT_DIR)
8
9 if sys.version_info[0] == 3:
10 requirements_file = './requirements3.txt'
11 else:
12 requirements_file = './requirements.txt'
13
14 exec(open('docker/version.py').read())
15
16 with open('./test-requirements.txt') as test_reqs_txt:
17 test_requirements = [line for line in test_reqs_txt]
18 with open(requirements_file) as requirements_txt:
19 requirements = [line for line in requirements_txt]
20
21 setup(
22 name="docker-py",
23 version=version,
24 description="Python client for Docker.",
25 packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils',
26 'docker.ssladapter'],
27 install_requires=requirements + test_requirements,
28 zip_safe=False,
29 test_suite='tests',
30 classifiers=[
31 'Development Status :: 4 - Beta',
32 'Environment :: Other Environment',
33 'Intended Audience :: Developers',
34 'Operating System :: OS Independent',
35 'Programming Language :: Python',
36 'Programming Language :: Python :: 2.6',
37 'Programming Language :: Python :: 2.7',
38 'Programming Language :: Python :: 3.2',
39 'Programming Language :: Python :: 3.3',
40 'Programming Language :: Python :: 3.4',
41 'Topic :: Utilities',
42 'License :: OSI Approved :: Apache Software License',
43 ],
44 )
45
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,17 +6,19 @@
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
-if sys.version_info[0] == 3:
- requirements_file = './requirements3.txt'
-else:
- requirements_file = './requirements.txt'
+requirements = [
+ 'requests >= 2.2.1',
+ 'six >= 1.3.0',
+]
+
+if sys.version_info[0] < 3:
+ requirements.append('websocket-client >= 0.11.0')
exec(open('docker/version.py').read())
with open('./test-requirements.txt') as test_reqs_txt:
test_requirements = [line for line in test_reqs_txt]
-with open(requirements_file) as requirements_txt:
- requirements = [line for line in requirements_txt]
+
setup(
name="docker-py",
@@ -24,7 +26,8 @@
description="Python client for Docker.",
packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils',
'docker.ssladapter'],
- install_requires=requirements + test_requirements,
+ install_requires=requirements,
+ tests_require=test_requirements,
zip_safe=False,
test_suite='tests',
classifiers=[
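One design note on this patch: besides loosening the pins, it moves test dependencies out of `install_requires` and into `tests_require`, so end users no longer install test tooling. A hedged sketch of the equivalent extras-based approach for a setup.py; the test dependency name is hypothetical, not read from this repository.

```python
from setuptools import setup

# Extras make test-only dependencies opt-in,
# installed with `pip install "docker-py[test]"`.
setup(
    name="docker-py",
    install_requires=["requests >= 2.2.1", "six >= 1.3.0"],
    tests_require=["mock"],             # the mechanism the patch itself uses
    extras_require={"test": ["mock"]},  # the extras-based equivalent
)
```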
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,17 +6,19 @@\n ROOT_DIR = os.path.dirname(__file__)\n SOURCE_DIR = os.path.join(ROOT_DIR)\n \n-if sys.version_info[0] == 3:\n- requirements_file = './requirements3.txt'\n-else:\n- requirements_file = './requirements.txt'\n+requirements = [\n+ 'requests >= 2.2.1',\n+ 'six >= 1.3.0',\n+]\n+\n+if sys.version_info[0] < 3:\n+ requirements.append('websocket-client >= 0.11.0')\n \n exec(open('docker/version.py').read())\n \n with open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n-with open(requirements_file) as requirements_txt:\n- requirements = [line for line in requirements_txt]\n+\n \n setup(\n name=\"docker-py\",\n@@ -24,7 +26,8 @@\n description=\"Python client for Docker.\",\n packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils',\n 'docker.ssladapter'],\n- install_requires=requirements + test_requirements,\n+ install_requires=requirements,\n+ tests_require=test_requirements,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n", "issue": "Please don't require exact versions in requirements\nIt's a good idea to pin requirements in applications.\n\nIt's a bad idea to pin requirements in libraries. Doing so makes reusing libraries difficult. If you have minimum requirements, that's fine.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nif sys.version_info[0] == 3:\n requirements_file = './requirements3.txt'\nelse:\n requirements_file = './requirements.txt'\n\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\nwith open(requirements_file) as requirements_txt:\n requirements = [line for line in requirements_txt]\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n packages=['docker', 'docker.auth', 'docker.unixconn', 'docker.utils',\n 'docker.ssladapter'],\n install_requires=requirements + test_requirements,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n", "path": "setup.py"}]} | 981 | 299 |
gh_patches_debug_20950 | rasdani/github-patches | git_diff | bridgecrewio__checkov-648
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cloudformation NLB Listener Rules are incorrectly detected as ALB rules and fail the HTTPS expectation
**Describe the bug**
NLB listeners are evaluated as ALB listeners, and fail because they don't use HTTPS
**To Reproduce**
1. Create a Cloudformation template with NLB listeners (e.g. TCP)
2. Run default tests
3. Tests will fail with `"AWS::ElasticLoadBalancingV2::Listener.<listenername>" failed in check "Ensure ALB protocol is HTTPS"`
**Expected behavior**
NLB rules should pass.
**Screenshots**
**Desktop (please complete the following information):**
- OS: Ubuntu 20.04
- Checkov Version: 1.0.423
**Additional context**
This resource:
```
"ConsulDnsListener": {
"Type": "AWS::ElasticLoadBalancingV2::Listener",
"Properties": {
"DefaultActions": [
{
"TargetGroupArn": {
"Ref": "ConsulDnsTargetGroup"
},
"Type": "forward"
}
],
"LoadBalancerArn": {
"Ref": "LoadBalancerArn"
},
"Port": 53,
"Protocol": "TCP_UDP"
}
},
```
Produces this error:
```
{
"type": "failure",
"message": "Resource \"AWS::ElasticLoadBalancingV2::Listener.ConsulDnsListener\" failed in check \"Ensure ALB protocol is HTTPS\""
}```
</issue>
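A minimal sketch of the behaviour the issue asks for: treat the `Protocol` property as the ALB/NLB discriminator and let the NLB protocols pass. It deliberately ignores the redirect default-action branch that the real check also handles.

```python
# NLB listeners use TCP/UDP/TLS/TCP_UDP; only plain-HTTP ALB listeners should fail.
PASSING_PROTOCOLS = {"HTTPS", "TLS", "TCP", "UDP", "TCP_UDP"}

def listener_passes(properties: dict) -> bool:
    return properties.get("Protocol") in PASSING_PROTOCOLS

assert listener_passes({"Protocol": "TCP_UDP", "Port": 53})  # the listener from the issue
assert not listener_passes({"Protocol": "HTTP"})             # plain-HTTP ALB listener still fails
```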
<code>
[start of checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4 class ALBListenerHTTPS(BaseResourceCheck):
5
6 def __init__(self):
7 name = "Ensure ALB protocol is HTTPS"
8 id = "CKV_AWS_2"
9 supported_resources = ['aws_lb_listener']
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 """
15 validates kms rotation
16 https://www.terraform.io/docs/providers/aws/r/lb_listener.html
17 :param conf: aws_kms_key configuration
18 :return: <CheckResult>
19 """
20 key = 'protocol'
21 if key in conf.keys():
22 if (
23 conf[key] == ["HTTPS"]
24 or
25 conf[key] == ["TLS"]
26 ):
27 return CheckResult.PASSED
28 elif conf[key] == ["HTTP"]:
29 if 'default_action' in conf.keys():
30 default_action = conf['default_action'][0]
31 action_type = default_action['type']
32 if action_type == ['redirect']:
33 if default_action['redirect'][0]['protocol'] == ['HTTPS']:
34 return CheckResult.PASSED
35 return CheckResult.FAILED
36
37
38 check = ALBListenerHTTPS()
39
[end of checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py]
[start of checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
3
4 class ALBListenerHTTPS(BaseResourceCheck):
5
6 def __init__(self):
7 name = "Ensure ALB protocol is HTTPS"
8 id = "CKV_AWS_2"
9 supported_resources = ['AWS::ElasticLoadBalancingV2::Listener']
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 """
15 validates ALB protocol is HTTPS
16 https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listener.html
17 :param conf: aws_alb_listener configuration
18 :return: <CheckResult>
19 """
20
21 if 'Properties' in conf.keys():
22 if 'Protocol' in conf['Properties'].keys():
23 if conf['Properties']['Protocol'] in ('HTTPS', 'TLS'):
24 return CheckResult.PASSED
25 else:
26 if (
27 'DefaultActions' in conf['Properties'].keys()
28 and
29 conf['Properties']['DefaultActions'][0]['Type'] == 'redirect'
30 and
31 conf['Properties']['DefaultActions'][0]['RedirectConfig']['Protocol'] == "HTTPS"
32 ):
33 return CheckResult.PASSED
34 return CheckResult.FAILED
35
36 check = ALBListenerHTTPS()
37
[end of checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py b/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py
--- a/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py
+++ b/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py
@@ -20,7 +20,7 @@
if 'Properties' in conf.keys():
if 'Protocol' in conf['Properties'].keys():
- if conf['Properties']['Protocol'] in ('HTTPS', 'TLS'):
+ if conf['Properties']['Protocol'] in ('HTTPS', 'TLS', 'TCP', 'UDP', 'TCP_UDP'):
return CheckResult.PASSED
else:
if (
diff --git a/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py b/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py
--- a/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py
+++ b/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py
@@ -19,11 +19,7 @@
"""
key = 'protocol'
if key in conf.keys():
- if (
- conf[key] == ["HTTPS"]
- or
- conf[key] == ["TLS"]
- ):
+ if conf[key] in (["HTTPS"], ["TLS"], ["TCP"], ["UDP"], ["TCP_UDP"]):
return CheckResult.PASSED
elif conf[key] == ["HTTP"]:
if 'default_action' in conf.keys():
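Why the Terraform side of the patch compares against single-element lists rather than bare strings: checkov's HCL parsing wraps each attribute value in a list (an assumed but commonly observed shape). A small check of that comparison:

```python
conf = {"protocol": ["TCP"]}  # parsed form of `protocol = "TCP"` (assumed shape)
assert conf["protocol"] in (["HTTPS"], ["TLS"], ["TCP"], ["UDP"], ["TCP_UDP"])
```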
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py b/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py\n--- a/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py\n+++ b/checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py\n@@ -20,7 +20,7 @@\n \n if 'Properties' in conf.keys():\n if 'Protocol' in conf['Properties'].keys():\n- if conf['Properties']['Protocol'] in ('HTTPS', 'TLS'):\n+ if conf['Properties']['Protocol'] in ('HTTPS', 'TLS', 'TCP', 'UDP', 'TCP_UDP'):\n return CheckResult.PASSED\n else:\n if (\ndiff --git a/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py b/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py\n--- a/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py\n+++ b/checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py\n@@ -19,11 +19,7 @@\n \"\"\"\n key = 'protocol'\n if key in conf.keys():\n- if (\n- conf[key] == [\"HTTPS\"]\n- or\n- conf[key] == [\"TLS\"]\n- ):\n+ if conf[key] in ([\"HTTPS\"], [\"TLS\"], [\"TCP\"], [\"UDP\"], [\"TCP_UDP\"]):\n return CheckResult.PASSED\n elif conf[key] == [\"HTTP\"]:\n if 'default_action' in conf.keys():\n", "issue": "Cloudformation NLB Listener Rules are incorrectly detected as ALB rules and fail the HTTPS expectation\n**Describe the bug**\r\nNLB listeners are evaluated as ALB listeners, and fail because they don't use HTTPS\r\n\r\n**To Reproduce**\r\n1. Create a Cloudformation template with NLB listeners (e.g. TCP)\r\n2. Run default tests\r\n3. Tests will fail with `\"AWS::ElasticLoadBalancingV2::Listener.<listenername>\\\" failed in check \\\"Ensure ALB protocol is HTTPS\\\"\r\n\r\n**Expected behavior**\r\nNLB rules should pass. \r\n\r\n**Screenshots**\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu 20.04\r\n - Checkov Version: 1.0.423\r\n\r\n**Additional context**\r\nThis resource:\r\n```\r\n \"ConsulDnsListener\": { \r\n \"Type\": \"AWS::ElasticLoadBalancingV2::Listener\", \r\n \"Properties\": { \r\n \"DefaultActions\": [ \r\n { \r\n \"TargetGroupArn\": { \r\n \"Ref\": \"ConsulDnsTargetGroup\" \r\n }, \r\n \"Type\": \"forward\" \r\n } \r\n ], \r\n \"LoadBalancerArn\": { \r\n \"Ref\": \"LoadBalancerArn\" \r\n }, \r\n \"Port\": 53, \r\n \"Protocol\": \"TCP_UDP\" \r\n } \r\n }, \r\n```\r\nProduces this error:\r\n```\r\n{\r\n \"type\": \"failure\",\r\n \"message\": \"Resource \\\"AWS::ElasticLoadBalancingV2::Listener.ConsulDnsListener\\\" failed in check \\\"Ensure ALB protocol is HTTPS\\\"\"\r\n}```\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\nclass ALBListenerHTTPS(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure ALB protocol is HTTPS\"\n id = \"CKV_AWS_2\"\n supported_resources = ['aws_lb_listener']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n validates kms rotation\n https://www.terraform.io/docs/providers/aws/r/lb_listener.html\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'protocol'\n if key in conf.keys():\n if (\n conf[key] == [\"HTTPS\"]\n or\n conf[key] == [\"TLS\"]\n ):\n return CheckResult.PASSED\n elif conf[key] == [\"HTTP\"]:\n if 'default_action' in conf.keys():\n default_action = conf['default_action'][0]\n action_type = 
default_action['type']\n if action_type == ['redirect']:\n if default_action['redirect'][0]['protocol'] == ['HTTPS']:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = ALBListenerHTTPS()\n", "path": "checkov/terraform/checks/resource/aws/ALBListenerHTTPS.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n\nclass ALBListenerHTTPS(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure ALB protocol is HTTPS\"\n id = \"CKV_AWS_2\"\n supported_resources = ['AWS::ElasticLoadBalancingV2::Listener']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n validates ALB protocol is HTTPS\n https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listener.html\n :param conf: aws_alb_listener configuration\n :return: <CheckResult>\n \"\"\"\n\n if 'Properties' in conf.keys():\n if 'Protocol' in conf['Properties'].keys():\n if conf['Properties']['Protocol'] in ('HTTPS', 'TLS'):\n return CheckResult.PASSED\n else:\n if (\n 'DefaultActions' in conf['Properties'].keys()\n and\n conf['Properties']['DefaultActions'][0]['Type'] == 'redirect'\n and\n conf['Properties']['DefaultActions'][0]['RedirectConfig']['Protocol'] == \"HTTPS\"\n ):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\ncheck = ALBListenerHTTPS()\n", "path": "checkov/cloudformation/checks/resource/aws/ALBListenerHTTPS.py"}]} | 1,697 | 336 |
gh_patches_debug_43728 | rasdani/github-patches | git_diff | sopel-irc__sopel-1257
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Anonymous gists are going away. Help module will break.
GitHub [announced](https://blog.github.com/2018-02-18-deprecation-notice-removing-anonymous-gist-creation/) last month that anonymous gist creation will be disabled on March 19, 2018 (UTC).
The `help` module was modified in fa89eb7 to essentially paste its output to a gist and simply return the link, and this functionality will break (as of writing) tomorrow. `.help` (or `.commands`) will return an error until (and unless) the module is updated to paste its contents elsewhere and/or create a static HTML file instead as I originally proposed in #1080.
(As an aside, I've been meaning to implement that HTML mode for ages. Maybe this is the kick I needed to finally get off my arse and write it. We'll see.)
</issue>
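The eventual patch (shown further down) swaps the GitHub Gist call for an anonymous paste service. A trimmed sketch of that replacement; note that ptpb.pw has itself since shut down, so the endpoint is illustrative rather than a recommendation.

```python
import requests

def create_list(msg):
    """Post the command listing to a paste service; return the paste URL."""
    payload = {"content": msg}
    headers = {"Content-type": "application/json", "Accept": "application/json"}
    result = requests.post("https://ptpb.pw/", json=payload, headers=headers)
    return result.json().get("url")
```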
<code>
[start of sopel/modules/help.py]
1 # coding=utf-8
2 """
3 help.py - Sopel Help Module
4 Copyright 2008, Sean B. Palmer, inamidst.com
5 Copyright © 2013, Elad Alfassa, <[email protected]>
6 Licensed under the Eiffel Forum License 2.
7
8 http://sopel.chat
9 """
10 from __future__ import unicode_literals, absolute_import, print_function, division
11
12 import textwrap
13 import collections
14 import json
15
16 import requests
17
18 from sopel.logger import get_logger
19 from sopel.module import commands, rule, example, priority
20
21 logger = get_logger(__name__)
22
23
24 @rule('$nick' '(?i)(help|doc) +([A-Za-z]+)(?:\?+)?$')
25 @example('.help tell')
26 @commands('help', 'commands')
27 @priority('low')
28 def help(bot, trigger):
29 """Shows a command's documentation, and possibly an example."""
30 if trigger.group(2):
31 name = trigger.group(2)
32 name = name.lower()
33
34 # number of lines of help to show
35 threshold = 3
36
37 if name in bot.doc:
38 if len(bot.doc[name][0]) + (1 if bot.doc[name][1] else 0) > threshold:
39 if trigger.nick != trigger.sender: # don't say that if asked in private
40 bot.reply('The documentation for this command is too long; I\'m sending it to you in a private message.')
41 msgfun = lambda l: bot.msg(trigger.nick, l)
42 else:
43 msgfun = bot.reply
44
45 for line in bot.doc[name][0]:
46 msgfun(line)
47 if bot.doc[name][1]:
48 msgfun('e.g. ' + bot.doc[name][1])
49 else:
50 # This'll probably catch most cases, without having to spend the time
51 # actually creating the list first. Maybe worth storing the link and a
52 # heuristic in config, too, so it persists across restarts. Would need a
53 # command to regenerate, too...
54 if 'command-gist' in bot.memory and bot.memory['command-gist'][0] == len(bot.command_groups):
55 url = bot.memory['command-gist'][1]
56 else:
57 bot.say("Hang on, I'm creating a list.")
58 msgs = []
59
60 name_length = max(6, max(len(k) for k in bot.command_groups.keys()))
61 for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items():
62 category = category.upper().ljust(name_length)
63 cmds = set(cmds) # remove duplicates
64 cmds = ' '.join(cmds)
65 msg = category + ' ' + cmds
66 indent = ' ' * (name_length + 2)
67 # Honestly not sure why this is a list here
68 msgs.append('\n'.join(textwrap.wrap(msg, subsequent_indent=indent)))
69
70 url = create_gist(bot, '\n\n'.join(msgs))
71 if not url:
72 return
73 bot.memory['command-gist'] = (len(bot.command_groups), url)
74 bot.say("I've posted a list of my commands at {} - You can see "
75 "more info about any of these commands by doing .help "
76 "<command> (e.g. .help time)".format(url))
77
78
79 def create_gist(bot, msg):
80 payload = {
81 'description': 'Command listing for {}@{}'.format(bot.nick, bot.config.core.host),
82 'public': 'true',
83 'files': {
84 'commands.txt': {
85 "content": msg,
86 },
87 },
88 }
89 try:
90 result = requests.post('https://api.github.com/gists',
91 data=json.dumps(payload))
92 except requests.RequestException:
93 bot.say("Sorry! Something went wrong.")
94 logger.exception("Error posting commands gist")
95 return
96 if not result.status_code != '201':
97 bot.say("Sorry! Something went wrong.")
98 logger.error("Error %s posting commands gist: %s",
99 result.status_code, result.text)
100 return
101 result = result.json()
102 if 'html_url' not in result:
103 bot.say("Sorry! Something went wrong.")
104 logger.error("Invalid result %s", result)
105 return
106 return result['html_url']
107
108
109 @rule('$nick' r'(?i)help(?:[?!]+)?$')
110 @priority('low')
111 def help2(bot, trigger):
112 response = (
113 'Hi, I\'m a bot. Say ".commands" to me in private for a list ' +
114 'of my commands, or see http://sopel.chat for more ' +
115 'general details. My owner is %s.'
116 ) % bot.config.core.owner
117 bot.reply(response)
118
[end of sopel/modules/help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/help.py b/sopel/modules/help.py
--- a/sopel/modules/help.py
+++ b/sopel/modules/help.py
@@ -3,6 +3,7 @@
help.py - Sopel Help Module
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright © 2013, Elad Alfassa, <[email protected]>
+Copyright © 2018, Adam Erdman, pandorah.org
Licensed under the Eiffel Forum License 2.
http://sopel.chat
@@ -11,8 +12,6 @@
import textwrap
import collections
-import json
-
import requests
from sopel.logger import get_logger
@@ -51,8 +50,8 @@
# actually creating the list first. Maybe worth storing the link and a
# heuristic in config, too, so it persists across restarts. Would need a
# command to regenerate, too...
- if 'command-gist' in bot.memory and bot.memory['command-gist'][0] == len(bot.command_groups):
- url = bot.memory['command-gist'][1]
+ if 'command-list' in bot.memory and bot.memory['command-list'][0] == len(bot.command_groups):
+ url = bot.memory['command-list'][1]
else:
bot.say("Hang on, I'm creating a list.")
msgs = []
@@ -60,49 +59,39 @@
name_length = max(6, max(len(k) for k in bot.command_groups.keys()))
for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items():
category = category.upper().ljust(name_length)
+ cmds = set(cmds) # remove duplicates
cmds = ' '.join(cmds)
msg = category + ' ' + cmds
indent = ' ' * (name_length + 2)
# Honestly not sure why this is a list here
msgs.append('\n'.join(textwrap.wrap(msg, subsequent_indent=indent)))
- url = create_gist(bot, '\n\n'.join(msgs))
+ url = create_list(bot, '\n\n'.join(msgs))
if not url:
return
- bot.memory['command-gist'] = (len(bot.command_groups), url)
+ bot.memory['command-list'] = (len(bot.command_groups), url)
bot.say("I've posted a list of my commands at {} - You can see "
"more info about any of these commands by doing .help "
"<command> (e.g. .help time)".format(url))
-def create_gist(bot, msg):
- payload = {
- 'description': 'Command listing for {}@{}'.format(bot.nick, bot.config.core.host),
- 'public': 'true',
- 'files': {
- 'commands.txt': {
- "content": msg,
- },
- },
- }
+def create_list(bot, msg):
+ msg = 'Command listing for {}@{}\n\n'.format(bot.nick, bot.config.core.host) + msg
+ payload = { "content": msg }
+ headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
+
try:
- result = requests.post('https://api.github.com/gists',
- data=json.dumps(payload))
+ result = requests.post('https://ptpb.pw/', json=payload, headers=headers)
except requests.RequestException:
bot.say("Sorry! Something went wrong.")
- logger.exception("Error posting commands gist")
- return
- if not result.status_code != '201':
- bot.say("Sorry! Something went wrong.")
- logger.error("Error %s posting commands gist: %s",
- result.status_code, result.text)
+ logger.exception("Error posting commands")
return
result = result.json()
- if 'html_url' not in result:
+ if 'url' not in result:
bot.say("Sorry! Something went wrong.")
logger.error("Invalid result %s", result)
return
- return result['html_url']
+ return result['url']
@rule('$nick' r'(?i)help(?:[?!]+)?$')
| {"golden_diff": "diff --git a/sopel/modules/help.py b/sopel/modules/help.py\n--- a/sopel/modules/help.py\n+++ b/sopel/modules/help.py\n@@ -3,6 +3,7 @@\n help.py - Sopel Help Module\n Copyright 2008, Sean B. Palmer, inamidst.com\n Copyright \u00a9 2013, Elad Alfassa, <[email protected]>\n+Copyright \u00a9 2018, Adam Erdman, pandorah.org\n Licensed under the Eiffel Forum License 2.\n \n http://sopel.chat\n@@ -11,8 +12,6 @@\n \n import textwrap\n import collections\n-import json\n-\n import requests\n \n from sopel.logger import get_logger\n@@ -51,8 +50,8 @@\n # actually creating the list first. Maybe worth storing the link and a\n # heuristic in config, too, so it persists across restarts. Would need a\n # command to regenerate, too...\n- if 'command-gist' in bot.memory and bot.memory['command-gist'][0] == len(bot.command_groups):\n- url = bot.memory['command-gist'][1]\n+ if 'command-list' in bot.memory and bot.memory['command-list'][0] == len(bot.command_groups):\n+ url = bot.memory['command-list'][1]\n else:\n bot.say(\"Hang on, I'm creating a list.\")\n msgs = []\n@@ -60,49 +59,39 @@\n name_length = max(6, max(len(k) for k in bot.command_groups.keys()))\n for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items():\n category = category.upper().ljust(name_length)\n+ cmds = set(cmds) # remove duplicates\n cmds = ' '.join(cmds)\n msg = category + ' ' + cmds\n indent = ' ' * (name_length + 2)\n # Honestly not sure why this is a list here\n msgs.append('\\n'.join(textwrap.wrap(msg, subsequent_indent=indent)))\n \n- url = create_gist(bot, '\\n\\n'.join(msgs))\n+ url = create_list(bot, '\\n\\n'.join(msgs))\n if not url:\n return\n- bot.memory['command-gist'] = (len(bot.command_groups), url)\n+ bot.memory['command-list'] = (len(bot.command_groups), url)\n bot.say(\"I've posted a list of my commands at {} - You can see \"\n \"more info about any of these commands by doing .help \"\n \"<command> (e.g. .help time)\".format(url))\n \n \n-def create_gist(bot, msg):\n- payload = {\n- 'description': 'Command listing for {}@{}'.format(bot.nick, bot.config.core.host),\n- 'public': 'true',\n- 'files': {\n- 'commands.txt': {\n- \"content\": msg,\n- },\n- },\n- }\n+def create_list(bot, msg):\n+ msg = 'Command listing for {}@{}\\n\\n'.format(bot.nick, bot.config.core.host) + msg\n+ payload = { \"content\": msg }\n+ headers = {'Content-type': 'application/json', 'Accept': 'application/json'}\n+ \n try:\n- result = requests.post('https://api.github.com/gists',\n- data=json.dumps(payload))\n+ result = requests.post('https://ptpb.pw/', json=payload, headers=headers)\n except requests.RequestException:\n bot.say(\"Sorry! Something went wrong.\")\n- logger.exception(\"Error posting commands gist\")\n- return\n- if not result.status_code != '201':\n- bot.say(\"Sorry! Something went wrong.\")\n- logger.error(\"Error %s posting commands gist: %s\",\n- result.status_code, result.text)\n+ logger.exception(\"Error posting commands\")\n return\n result = result.json()\n- if 'html_url' not in result:\n+ if 'url' not in result:\n bot.say(\"Sorry! Something went wrong.\")\n logger.error(\"Invalid result %s\", result)\n return\n- return result['html_url']\n+ return result['url']\n \n \n @rule('$nick' r'(?i)help(?:[?!]+)?$')\n", "issue": "Anonymous gists are going away. 
Help module will break.\nGitHub [announced](https://blog.github.com/2018-02-18-deprecation-notice-removing-anonymous-gist-creation/) last month that anonymous gist creation will be disabled on March 19, 2018 (UTC).\r\n\r\nThe `help` module was modified in fa89eb7 to essentially paste its output to a gist and simply return the link, and this functionality will break (as of writing) tomorrow. `.help` (or `.commands`) will return an error until (and unless) the module is updated to paste its contents elsewhere and/or create a static HTML file instead as I originally proposed in #1080.\r\n\r\n(As an aside, I've been meaning to implement that HTML mode for ages. Maybe this is the kick I needed to finally get off my arse and write it. We'll see.)\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nhelp.py - Sopel Help Module\nCopyright 2008, Sean B. Palmer, inamidst.com\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttp://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport textwrap\nimport collections\nimport json\n\nimport requests\n\nfrom sopel.logger import get_logger\nfrom sopel.module import commands, rule, example, priority\n\nlogger = get_logger(__name__)\n\n\n@rule('$nick' '(?i)(help|doc) +([A-Za-z]+)(?:\\?+)?$')\n@example('.help tell')\n@commands('help', 'commands')\n@priority('low')\ndef help(bot, trigger):\n \"\"\"Shows a command's documentation, and possibly an example.\"\"\"\n if trigger.group(2):\n name = trigger.group(2)\n name = name.lower()\n\n # number of lines of help to show\n threshold = 3\n\n if name in bot.doc:\n if len(bot.doc[name][0]) + (1 if bot.doc[name][1] else 0) > threshold:\n if trigger.nick != trigger.sender: # don't say that if asked in private\n bot.reply('The documentation for this command is too long; I\\'m sending it to you in a private message.')\n msgfun = lambda l: bot.msg(trigger.nick, l)\n else:\n msgfun = bot.reply\n\n for line in bot.doc[name][0]:\n msgfun(line)\n if bot.doc[name][1]:\n msgfun('e.g. ' + bot.doc[name][1])\n else:\n # This'll probably catch most cases, without having to spend the time\n # actually creating the list first. Maybe worth storing the link and a\n # heuristic in config, too, so it persists across restarts. Would need a\n # command to regenerate, too...\n if 'command-gist' in bot.memory and bot.memory['command-gist'][0] == len(bot.command_groups):\n url = bot.memory['command-gist'][1]\n else:\n bot.say(\"Hang on, I'm creating a list.\")\n msgs = []\n\n name_length = max(6, max(len(k) for k in bot.command_groups.keys()))\n for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items():\n category = category.upper().ljust(name_length)\n cmds = set(cmds) # remove duplicates\n cmds = ' '.join(cmds)\n msg = category + ' ' + cmds\n indent = ' ' * (name_length + 2)\n # Honestly not sure why this is a list here\n msgs.append('\\n'.join(textwrap.wrap(msg, subsequent_indent=indent)))\n\n url = create_gist(bot, '\\n\\n'.join(msgs))\n if not url:\n return\n bot.memory['command-gist'] = (len(bot.command_groups), url)\n bot.say(\"I've posted a list of my commands at {} - You can see \"\n \"more info about any of these commands by doing .help \"\n \"<command> (e.g. 
.help time)\".format(url))\n\n\ndef create_gist(bot, msg):\n payload = {\n 'description': 'Command listing for {}@{}'.format(bot.nick, bot.config.core.host),\n 'public': 'true',\n 'files': {\n 'commands.txt': {\n \"content\": msg,\n },\n },\n }\n try:\n result = requests.post('https://api.github.com/gists',\n data=json.dumps(payload))\n except requests.RequestException:\n bot.say(\"Sorry! Something went wrong.\")\n logger.exception(\"Error posting commands gist\")\n return\n if not result.status_code != '201':\n bot.say(\"Sorry! Something went wrong.\")\n logger.error(\"Error %s posting commands gist: %s\",\n result.status_code, result.text)\n return\n result = result.json()\n if 'html_url' not in result:\n bot.say(\"Sorry! Something went wrong.\")\n logger.error(\"Invalid result %s\", result)\n return\n return result['html_url']\n\n\n@rule('$nick' r'(?i)help(?:[?!]+)?$')\n@priority('low')\ndef help2(bot, trigger):\n response = (\n 'Hi, I\\'m a bot. Say \".commands\" to me in private for a list ' +\n 'of my commands, or see http://sopel.chat for more ' +\n 'general details. My owner is %s.'\n ) % bot.config.core.owner\n bot.reply(response)\n", "path": "sopel/modules/help.py"}]} | 2,003 | 942 |
gh_patches_debug_2545 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-5153
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wording: no formal address in the notice when a comment is too long (missing string on Weblate)
**URL:** https://meinberlin-demo.liqd.net/budgeting/2023-00049/
**user:** any
**expected behaviour:** as a user on mein Berlin I want to be addressed in a formal way (Sie)
**behaviour:** When I write a comment that is too long, I get a notice, phrased with an informal address, that the text field should not contain more than 4000 characters.
**important screensize:** -
**device & browser:** -
**Comment/Question:** there is no string on Weblate yet, so I cannot translate it correctly
Screenshot?
<img width="725" alt="Bildschirmfoto 2023-02-13 um 10 27 18" src="https://user-images.githubusercontent.com/113608720/219613075-f384b1ad-4227-4ee8-b4fc-c166d9ba3fe4.png">
</issue>
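The missing string is the length error raised by Django REST framework's `CharField`, and it only reaches Weblate once it is marked for translation. The module's no-op `_()` marker exists exactly for that, so the fix amounts to one list entry (sketch):

```python
def _(s):  # no-op marker so makemessages/Weblate pick the string up
    return s

django_standard_messages_to_override = [
    _("Ensure this field has no more than {max_length} characters."),
]
```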
<code>
[start of meinberlin/apps/contrib/django_standard_messages.py]
1 def _(s):
2 return s
3
4
5 django_standard_messages_to_override = [
6 _("You have signed out."),
7 _("Verify Your E-mail Address"),
8 _("You must type the same password each time."),
9 _("You have confirmed %(email)s."),
10 _("You cannot remove your primary e-mail address (%(email)s)."),
11 _(
12 "We have sent you an e-mail. Please contact us if "
13 "you do not receive it within a few minutes."
14 ),
15 _(
16 "We have sent an e-mail to you for verification. "
17 "Follow the link provided to finalize the signup process. "
18 "If you do not see the verification e-mail in your main inbox, "
19 "check your spam folder. "
20 "Please contact us if you do not receive the verification e-mail "
21 "within a few minutes."
22 ),
23 _(
24 "We have sent you an e-mail. If you have not received it "
25 "please check your spam folder. Otherwise contact us if you "
26 "do not receive it in a few minutes."
27 ),
28 _("You must select a minimum of %(limit_value)d choices."),
29 _("You must select a maximum of %(limit_value)d choices."),
30 _("Enter a valid email address."),
31 ]
32
[end of meinberlin/apps/contrib/django_standard_messages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/contrib/django_standard_messages.py b/meinberlin/apps/contrib/django_standard_messages.py
--- a/meinberlin/apps/contrib/django_standard_messages.py
+++ b/meinberlin/apps/contrib/django_standard_messages.py
@@ -28,4 +28,5 @@
_("You must select a minimum of %(limit_value)d choices."),
_("You must select a maximum of %(limit_value)d choices."),
_("Enter a valid email address."),
+ _("Ensure this field has no more than {max_length} characters."),
]
| {"golden_diff": "diff --git a/meinberlin/apps/contrib/django_standard_messages.py b/meinberlin/apps/contrib/django_standard_messages.py\n--- a/meinberlin/apps/contrib/django_standard_messages.py\n+++ b/meinberlin/apps/contrib/django_standard_messages.py\n@@ -28,4 +28,5 @@\n _(\"You must select a minimum of %(limit_value)d choices.\"),\n _(\"You must select a maximum of %(limit_value)d choices.\"),\n _(\"Enter a valid email address.\"),\n+ _(\"Ensure this field has no more than {max_length} characters.\"),\n ]\n", "issue": "Wording: no formal adress regarding notice when comment is too long (missing String on Weblate)\n**URL:** https://meinberlin-demo.liqd.net/budgeting/2023-00049/\r\n**user:** any\r\n**expected behaviour:** as a user on mein Berlin I want to be adressed in a formal way (Sie) \r\n**behaviour:** When I write a comment which is too long, I get a notice with an informal adress that the textfield should not contain more than 4000 characters.\r\n**important screensize:** - \r\n**device & browser:** - \r\n**Comment/Question:** there is no string on weblate yet, so I cannot translate it correctly\r\n\r\nScreenshot?\r\n<img width=\"725\" alt=\"Bildschirmfoto 2023-02-13 um 10 27 18\" src=\"https://user-images.githubusercontent.com/113608720/219613075-f384b1ad-4227-4ee8-b4fc-c166d9ba3fe4.png\">\n", "before_files": [{"content": "def _(s):\n return s\n\n\ndjango_standard_messages_to_override = [\n _(\"You have signed out.\"),\n _(\"Verify Your E-mail Address\"),\n _(\"You must type the same password each time.\"),\n _(\"You have confirmed %(email)s.\"),\n _(\"You cannot remove your primary e-mail address (%(email)s).\"),\n _(\n \"We have sent you an e-mail. Please contact us if \"\n \"you do not receive it within a few minutes.\"\n ),\n _(\n \"We have sent an e-mail to you for verification. \"\n \"Follow the link provided to finalize the signup process. \"\n \"If you do not see the verification e-mail in your main inbox, \"\n \"check your spam folder. \"\n \"Please contact us if you do not receive the verification e-mail \"\n \"within a few minutes.\"\n ),\n _(\n \"We have sent you an e-mail. If you have not received it \"\n \"please check your spam folder. Otherwise contact us if you \"\n \"do not receive it in a few minutes.\"\n ),\n _(\"You must select a minimum of %(limit_value)d choices.\"),\n _(\"You must select a maximum of %(limit_value)d choices.\"),\n _(\"Enter a valid email address.\"),\n]\n", "path": "meinberlin/apps/contrib/django_standard_messages.py"}]} | 1,117 | 128 |
gh_patches_debug_256 | rasdani/github-patches | git_diff | jazzband__pip-tools-28
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip-review should compare versions, not test equality
```
$ pip-review
pelican==3.0.1 is available (you have 3.1)
```
I'm locally testing this package, and `pip-review` just tests whether the currently installed version is equal to the latest version in `pip`, which causes the problem shown above.
</issue>
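The behaviour the reporter expects is an ordered comparison rather than an equality test. A sketch using the well-known `packaging` library; the patch below reaches for `verlib` instead, which predates it.

```python
from packaging.version import parse

installed, latest = parse("3.1"), parse("3.0.1")
if latest > installed:
    print(f"{latest} is available (you have {installed})")
else:
    print("up to date")  # 3.1 > 3.0.1, so no spurious upgrade notice
```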
<code>
[start of setup.py]
1 """
2 pip-tools keeps your pinned dependencies fresh.
3 """
4 import sys
5 from setuptools import setup
6
7
8 def get_dependencies():
9 deps = []
10 if sys.version_info < (2, 7):
11 deps += ['argparse']
12 return deps
13
14
15 setup(
16 name='pip-tools',
17 version='0.2.1',
18 url='https://github.com/nvie/pip-tools/',
19 license='BSD',
20 author='Vincent Driessen',
21 author_email='[email protected]',
22 description=__doc__.strip('\n'),
23 #packages=[],
24 scripts=['bin/pip-review', 'bin/pip-dump'],
25 #include_package_data=True,
26 zip_safe=False,
27 platforms='any',
28 install_requires=get_dependencies(),
29 classifiers=[
30 # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
31 #'Development Status :: 1 - Planning',
32 #'Development Status :: 2 - Pre-Alpha',
33 #'Development Status :: 3 - Alpha',
34 'Development Status :: 4 - Beta',
35 #'Development Status :: 5 - Production/Stable',
36 #'Development Status :: 6 - Mature',
37 #'Development Status :: 7 - Inactive',
38 'Programming Language :: Python',
39 'Programming Language :: Python :: 2',
40 #'Programming Language :: Python :: 2.3',
41 #'Programming Language :: Python :: 2.4',
42 #'Programming Language :: Python :: 2.5',
43 'Programming Language :: Python :: 2.6',
44 'Programming Language :: Python :: 2.7',
45 #'Programming Language :: Python :: 3',
46 #'Programming Language :: Python :: 3.0',
47 #'Programming Language :: Python :: 3.1',
48 #'Programming Language :: Python :: 3.2',
49 #'Programming Language :: Python :: 3.3',
50 'Intended Audience :: Developers',
51 'Intended Audience :: System Administrators',
52 'License :: OSI Approved :: BSD License',
53 'Operating System :: OS Independent',
54 'Topic :: System :: Systems Administration',
55 ]
56 )
57
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@
def get_dependencies():
- deps = []
+ deps = ['verlib']
if sys.version_info < (2, 7):
deps += ['argparse']
return deps
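The diff only declares the new dependency; the comparison logic itself would live in `bin/pip-review`, which is not shown here. A hedged sketch of what that code presumably does, assuming `verlib`'s `NormalizedVersion` API:

```python
from verlib import NormalizedVersion  # PEP 386 reference implementation (assumed API)

# NormalizedVersion compares by version semantics, not string equality.
assert NormalizedVersion("3.1") > NormalizedVersion("3.0.1")
```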
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,7 +6,7 @@\n \n \n def get_dependencies():\n- deps = []\n+ deps = ['verlib']\n if sys.version_info < (2, 7):\n deps += ['argparse']\n return deps\n", "issue": "pip-review should compare version, not test equality\n```\n$ pip-review\npelican==3.0.1 is available (you have 3.1)\n```\n\nI'm locally testing this package, and `pip-review` will just test if current installed version is the same as the latest version in `pip`. Which causes problem as shown above.\n\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nimport sys\nfrom setuptools import setup\n\n\ndef get_dependencies():\n deps = []\n if sys.version_info < (2, 7):\n deps += ['argparse']\n return deps\n\n\nsetup(\n name='pip-tools',\n version='0.2.1',\n url='https://github.com/nvie/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__.strip('\\n'),\n #packages=[],\n scripts=['bin/pip-review', 'bin/pip-dump'],\n #include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=get_dependencies(),\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n #'Development Status :: 1 - Planning',\n #'Development Status :: 2 - Pre-Alpha',\n #'Development Status :: 3 - Alpha',\n 'Development Status :: 4 - Beta',\n #'Development Status :: 5 - Production/Stable',\n #'Development Status :: 6 - Mature',\n #'Development Status :: 7 - Inactive',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n #'Programming Language :: Python :: 2.3',\n #'Programming Language :: Python :: 2.4',\n #'Programming Language :: Python :: 2.5',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n #'Programming Language :: Python :: 3',\n #'Programming Language :: Python :: 3.0',\n #'Programming Language :: Python :: 3.1',\n #'Programming Language :: Python :: 3.2',\n #'Programming Language :: Python :: 3.3',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}]} | 1,151 | 72 |
gh_patches_debug_4088 | rasdani/github-patches | git_diff | plotly__plotly.py-2015
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when using two image scrapers together
### Introduction to the problem
I am trying to move the current Jupyter Notebook examples of the [poliastro project](https://github.com/poliastro/poliastro) to a [Sphinx-Gallery](https://github.com/sphinx-gallery/sphinx-gallery) setup. Since we are making use of **plotly figures**, we need to **capture them** as output figures and therefore make use of the **plotly image scraper**. We also need to capture `matplotlib` figures, so that image scraper must also be added to the `conf.py` file.
#### How to reproduce this issue :beetle:
If you download the [official example repository](https://github.com/plotly/plotly-sphinx-gallery) from @emmanuelle for this task and add the following [simple Python file](https://gist.github.com/jorgepiloto/db807a7ee3a0bcfbaea38fc9cd7ac95e), which plots a sinusoidal wave with `matplotlib`, to the `examples/` directory, an error is raised:
```bash
generating gallery...
generating gallery for auto_examples... [ 25%] plot_sin.py
Exception occurred:
File "/home/lobo/anaconda3/envs/poliastro/lib/python3.7/site-packages/plotly/io/_sg_scraper.py", line 91, in figure_rst
figure_name = figure_paths[0]
IndexError: list index out of range
```
</issue>
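A minimal reproduction of the crash path: when a gallery example produces only matplotlib output, the scraper collects no plotly figures, `figure_list` is empty, and `figure_paths[0]` raises. The patch below adds exactly this guard.

```python
figure_paths = []  # what figure_rst receives for a matplotlib-only example
# figure_paths[0]  # -> IndexError: list index out of range
images_rst = "" if not figure_paths else figure_paths[0]  # empty-list guard
```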
<code>
[start of packages/python/plotly/plotly/io/_sg_scraper.py]
1 # This module defines an image scraper for sphinx-gallery
2 # https://sphinx-gallery.github.io/
3 # which can be used by projects using plotly in their documentation.
4 import inspect, os
5
6 import plotly
7 from glob import glob
8 import shutil
9
10 plotly.io.renderers.default = "sphinx_gallery"
11
12
13 def plotly_sg_scraper(block, block_vars, gallery_conf, **kwargs):
14 """Scrape Plotly figures for galleries of examples using
15 sphinx-gallery.
16
17 Examples should use ``plotly.io.show()`` to display the figure with
18 the custom sphinx_gallery renderer.
19
20 Since the sphinx_gallery renderer generates both html and static png
21 files, we simply crawl these files and give them the appropriate path.
22
23 Parameters
24 ----------
25 block : tuple
26 A tuple containing the (label, content, line_number) of the block.
27 block_vars : dict
28 Dict of block variables.
29 gallery_conf : dict
30 Contains the configuration of Sphinx-Gallery
31 **kwargs : dict
32 Additional keyword arguments to pass to
33 :meth:`~matplotlib.figure.Figure.savefig`, e.g. ``format='svg'``.
34 The ``format`` kwarg in particular is used to set the file extension
35 of the output file (currently only 'png' and 'svg' are supported).
36
37 Returns
38 -------
39 rst : str
40 The ReSTructuredText that will be rendered to HTML containing
41 the images.
42
43 Notes
44 -----
45 Add this function to the image scrapers
46 """
47 examples_dirs = gallery_conf["examples_dirs"]
48 if isinstance(examples_dirs, (list, tuple)):
49 examples_dirs = examples_dirs[0]
50 pngs = sorted(glob(os.path.join(examples_dirs, "*.png")))
51 htmls = sorted(glob(os.path.join(examples_dirs, "*.html")))
52 image_path_iterator = block_vars["image_path_iterator"]
53 image_names = list()
54 seen = set()
55 for html, png in zip(htmls, pngs):
56 if png not in seen:
57 seen |= set(png)
58 this_image_path_png = next(image_path_iterator)
59 this_image_path_html = os.path.splitext(this_image_path_png)[0] + ".html"
60 image_names.append(this_image_path_html)
61 shutil.move(png, this_image_path_png)
62 shutil.move(html, this_image_path_html)
63 # Use the `figure_rst` helper function to generate rST for image files
64 return figure_rst(image_names, gallery_conf["src_dir"])
65
66
67 def figure_rst(figure_list, sources_dir):
68 """Generate RST for a list of PNG filenames.
69
70 Depending on whether we have one or more figures, we use a
71 single rst call to 'image' or a horizontal list.
72
73 Parameters
74 ----------
75 figure_list : list
76 List of strings of the figures' absolute paths.
77 sources_dir : str
78 absolute path of Sphinx documentation sources
79
80 Returns
81 -------
82 images_rst : str
83 rst code to embed the images in the document
84 """
85
86 figure_paths = [
87 os.path.relpath(figure_path, sources_dir).replace(os.sep, "/").lstrip("/")
88 for figure_path in figure_list
89 ]
90 images_rst = ""
91 figure_name = figure_paths[0]
92 ext = os.path.splitext(figure_name)[1]
93 figure_path = os.path.join("images", os.path.basename(figure_name))
94 images_rst = SINGLE_HTML % figure_path
95 return images_rst
96
97
98 SINGLE_HTML = """
99 .. raw:: html
100 :file: %s
101 """
102
[end of packages/python/plotly/plotly/io/_sg_scraper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/python/plotly/plotly/io/_sg_scraper.py b/packages/python/plotly/plotly/io/_sg_scraper.py
--- a/packages/python/plotly/plotly/io/_sg_scraper.py
+++ b/packages/python/plotly/plotly/io/_sg_scraper.py
@@ -88,6 +88,8 @@
for figure_path in figure_list
]
images_rst = ""
+ if not figure_paths:
+ return images_rst
figure_name = figure_paths[0]
ext = os.path.splitext(figure_name)[1]
figure_path = os.path.join("images", os.path.basename(figure_name))
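For context, a sketch of the `conf.py` wiring the issue describes, with both scrapers registered (directory names assumed from the example repository). With the guard above, a matplotlib-only example simply contributes no plotly images instead of crashing.

```python
from plotly.io._sg_scraper import plotly_sg_scraper

sphinx_gallery_conf = {
    "examples_dirs": "examples",
    "gallery_dirs": "auto_examples",
    "image_scrapers": ("matplotlib", plotly_sg_scraper),
}
```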
| {"golden_diff": "diff --git a/packages/python/plotly/plotly/io/_sg_scraper.py b/packages/python/plotly/plotly/io/_sg_scraper.py\n--- a/packages/python/plotly/plotly/io/_sg_scraper.py\n+++ b/packages/python/plotly/plotly/io/_sg_scraper.py\n@@ -88,6 +88,8 @@\n for figure_path in figure_list\n ]\n images_rst = \"\"\n+ if not figure_paths:\n+ return images_rst\n figure_name = figure_paths[0]\n ext = os.path.splitext(figure_name)[1]\n figure_path = os.path.join(\"images\", os.path.basename(figure_name))\n", "issue": "Error when using two image scrappers together\n### Introduction to the problem \r\nI am trying to move the current Jupyter Notebook examples of the [poliastro project](https://github.com/poliastro/poliastro) to an [Sphinx-Gallery](https://github.com/sphinx-gallery/sphinx-gallery) set. Since we are making use of **plotly figures** we need to **capture them** as output figures and therefore, make use of the **plotly image scrapper**. We also need to capture `matplotlib` figures, so this image scrapper must be also added to the `conf.py` file.\r\n\r\n#### How to reproduce this issue :beetle: \r\n\r\nIf you download the [official example repository](https://github.com/plotly/plotly-sphinx-gallery) from @emmanuelle for achieving this task and you add the following [simple Python file](https://gist.github.com/jorgepiloto/db807a7ee3a0bcfbaea38fc9cd7ac95e) in the `examples/` directory for plotting a sinusoidal wave with `matplotlib` and error is raised:\r\n\r\n```bash\r\ngenerating gallery...\r\ngenerating gallery for auto_examples... [ 25%] plot_sin.py \r\nException occurred:\r\n File \"/home/lobo/anaconda3/envs/poliastro/lib/python3.7/site-packages/plotly/io/_sg_scraper.py\", line 91, in figure_rst\r\n figure_name = figure_paths[0]\r\nIndexError: list index out of range\r\n```\n", "before_files": [{"content": "# This module defines an image scraper for sphinx-gallery\n# https://sphinx-gallery.github.io/\n# which can be used by projects using plotly in their documentation.\nimport inspect, os\n\nimport plotly\nfrom glob import glob\nimport shutil\n\nplotly.io.renderers.default = \"sphinx_gallery\"\n\n\ndef plotly_sg_scraper(block, block_vars, gallery_conf, **kwargs):\n \"\"\"Scrape Plotly figures for galleries of examples using\n sphinx-gallery.\n\n Examples should use ``plotly.io.show()`` to display the figure with\n the custom sphinx_gallery renderer.\n\n Since the sphinx_gallery renderer generates both html and static png\n files, we simply crawl these files and give them the appropriate path.\n\n Parameters\n ----------\n block : tuple\n A tuple containing the (label, content, line_number) of the block.\n block_vars : dict\n Dict of block variables.\n gallery_conf : dict\n Contains the configuration of Sphinx-Gallery\n **kwargs : dict\n Additional keyword arguments to pass to\n :meth:`~matplotlib.figure.Figure.savefig`, e.g. 
``format='svg'``.\n The ``format`` kwarg in particular is used to set the file extension\n of the output file (currently only 'png' and 'svg' are supported).\n\n Returns\n -------\n rst : str\n The ReSTructuredText that will be rendered to HTML containing\n the images.\n\n Notes\n -----\n Add this function to the image scrapers \n \"\"\"\n examples_dirs = gallery_conf[\"examples_dirs\"]\n if isinstance(examples_dirs, (list, tuple)):\n examples_dirs = examples_dirs[0]\n pngs = sorted(glob(os.path.join(examples_dirs, \"*.png\")))\n htmls = sorted(glob(os.path.join(examples_dirs, \"*.html\")))\n image_path_iterator = block_vars[\"image_path_iterator\"]\n image_names = list()\n seen = set()\n for html, png in zip(htmls, pngs):\n if png not in seen:\n seen |= set(png)\n this_image_path_png = next(image_path_iterator)\n this_image_path_html = os.path.splitext(this_image_path_png)[0] + \".html\"\n image_names.append(this_image_path_html)\n shutil.move(png, this_image_path_png)\n shutil.move(html, this_image_path_html)\n # Use the `figure_rst` helper function to generate rST for image files\n return figure_rst(image_names, gallery_conf[\"src_dir\"])\n\n\ndef figure_rst(figure_list, sources_dir):\n \"\"\"Generate RST for a list of PNG filenames.\n\n Depending on whether we have one or more figures, we use a\n single rst call to 'image' or a horizontal list.\n\n Parameters\n ----------\n figure_list : list\n List of strings of the figures' absolute paths.\n sources_dir : str\n absolute path of Sphinx documentation sources\n\n Returns\n -------\n images_rst : str\n rst code to embed the images in the document\n \"\"\"\n\n figure_paths = [\n os.path.relpath(figure_path, sources_dir).replace(os.sep, \"/\").lstrip(\"/\")\n for figure_path in figure_list\n ]\n images_rst = \"\"\n figure_name = figure_paths[0]\n ext = os.path.splitext(figure_name)[1]\n figure_path = os.path.join(\"images\", os.path.basename(figure_name))\n images_rst = SINGLE_HTML % figure_path\n return images_rst\n\n\nSINGLE_HTML = \"\"\"\n.. raw:: html\n :file: %s\n\"\"\"\n", "path": "packages/python/plotly/plotly/io/_sg_scraper.py"}]} | 1,858 | 149 |
gh_patches_debug_3006 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1629 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.10 compatibility issue
#### Environment details
- OS type and version: Windows 10
- Python version: `python --version` 3.10.1
- pip version: `pip --version` 21.2.4
- `google-api-python-client` version: `pip show google-api-python-client` - 2.33.0
The uritemplate package 3.0.0 is not compatible with Python 3.10, so the requirements need to be updated.
Partial Stack Trace
service = build('gmail', 'v1', credentials=creds)
File "C:\JA\Envs\GIC\lib\site-packages\googleapiclient\_helpers.py", line 130, in positional_wrapper
return wrapped(*args, **kwargs)
File "C:\JA\Envs\GIC\lib\site-packages\googleapiclient\discovery.py", line 219, in build
requested_url = uritemplate.expand(discovery_url, params)
File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\api.py", line 33, in expand
return URITemplate(uri).expand(var_dict, **kwargs)
File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\template.py", line 132, in expand
return self._expand(_merge(var_dict, kwargs), False)
File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\template.py", line 97, in _expand
expanded.update(v.expand(expansion))
File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\variable.py", line 338, in expand
expanded = expansion(name, value, opts['explode'], opts['prefix'])
File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\variable.py", line 278, in _string_expansion
if dict_test(value) or tuples:
File "C:\JA\Envs\GIC\lib\site-packages\uritemplate\variable.py", line 363, in dict_test
return isinstance(value, (dict, collections.MutableMapping))
AttributeError: module 'collections' has no attribute 'MutableMapping'
</issue>
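For context, Python 3.10 removed the ABC aliases that had lived directly on the `collections` module (deprecated since Python 3.3); only the `collections.abc` spellings remain. A minimal sketch of the incompatibility, assuming Python 3.10:

```python
import collections
import collections.abc

# Works on every supported version:
print(isinstance({}, collections.abc.MutableMapping))  # True

# Raises AttributeError on Python 3.10+, which is what uritemplate 3.0.0 hits:
print(isinstance({}, collections.MutableMapping))
```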
<code>
[start of setup.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (3, 6):
25 print("google-api-python-client requires python3 version >= 3.6.", file=sys.stderr)
26 sys.exit(1)
27
28 import io
29 import os
30 from setuptools import setup
31
32 packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"]
33
34 install_requires = [
35 "httplib2>=0.15.0,<1dev",
36 # NOTE: Maintainers, please do not require google-auth>=2.x.x
37 # Until this issue is closed
38 # https://github.com/googleapis/google-cloud-python/issues/10566
39 "google-auth>=1.16.0,<3.0.0dev",
40 "google-auth-httplib2>=0.1.0",
41 # NOTE: Maintainers, please do not require google-api-core>=2.x.x
42 # Until this issue is closed
43 # https://github.com/googleapis/google-cloud-python/issues/10566
44 "google-api-core>=1.21.0,<3.0.0dev",
45 "uritemplate>=3.0.0,<5",
46 ]
47
48 package_root = os.path.abspath(os.path.dirname(__file__))
49
50 readme_filename = os.path.join(package_root, "README.md")
51 with io.open(readme_filename, encoding="utf-8") as readme_file:
52 readme = readme_file.read()
53
54 package_root = os.path.abspath(os.path.dirname(__file__))
55
56 version = {}
57 with open(os.path.join(package_root, "googleapiclient/version.py")) as fp:
58 exec(fp.read(), version)
59 version = version["__version__"]
60
61 setup(
62 name="google-api-python-client",
63 version=version,
64 description="Google API Client Library for Python",
65 long_description=readme,
66 long_description_content_type='text/markdown',
67 author="Google LLC",
68 author_email="[email protected]",
69 url="https://github.com/googleapis/google-api-python-client/",
70 install_requires=install_requires,
71 python_requires=">=3.6",
72 packages=packages,
73 package_data={"googleapiclient": ["discovery_cache/documents/*.json"]},
74 license="Apache 2.0",
75 keywords="google api client",
76 classifiers=[
77 "Programming Language :: Python :: 3",
78 "Programming Language :: Python :: 3.6",
79 "Programming Language :: Python :: 3.7",
80 "Programming Language :: Python :: 3.8",
81 "Programming Language :: Python :: 3.9",
82 "Programming Language :: Python :: 3.10",
83 "Development Status :: 5 - Production/Stable",
84 "Intended Audience :: Developers",
85 "License :: OSI Approved :: Apache Software License",
86 "Operating System :: OS Independent",
87 "Topic :: Internet :: WWW/HTTP",
88 ],
89 )
90
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,7 +42,7 @@
# Until this issue is closed
# https://github.com/googleapis/google-cloud-python/issues/10566
"google-api-core>=1.21.0,<3.0.0dev",
- "uritemplate>=3.0.0,<5",
+ "uritemplate>=3.0.1,<5",
]
package_root = os.path.abspath(os.path.dirname(__file__))
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,7 +42,7 @@\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core>=1.21.0,<3.0.0dev\",\n- \"uritemplate>=3.0.0,<5\",\n+ \"uritemplate>=3.0.1,<5\",\n ]\n \n package_root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "Python 3.10 compatibility issue\n\r\n\r\n#### Environment details\r\n\r\n - OS type and version: Windows 10\r\n - Python version: `python --version` 3.10.1\r\n - pip version: `pip --version` 21.2.4\r\n - `google-api-python-client` version: `pip show google-api-python-client` - 2.33.0\r\n\r\nuritemplate package 3.0.0 is not compatible with python 3.10. Need to update the requirements.\r\n\r\nPartial Stack Trace\r\n\r\nservice = build('gmail', 'v1', credentials=creds)\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\googleapiclient\\_helpers.py\", line 130, in positional_wrapper\r\n return wrapped(*args, **kwargs)\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\googleapiclient\\discovery.py\", line 219, in build\r\n requested_url = uritemplate.expand(discovery_url, params)\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\api.py\", line 33, in expand\r\n return URITemplate(uri).expand(var_dict, **kwargs)\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\template.py\", line 132, in expand\r\n return self._expand(_merge(var_dict, kwargs), False)\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\template.py\", line 97, in _expand\r\n expanded.update(v.expand(expansion))\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\variable.py\", line 338, in expand\r\n expanded = expansion(name, value, opts['explode'], opts['prefix'])\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\variable.py\", line 278, in _string_expansion\r\n if dict_test(value) or tuples:\r\n File \"C:\\JA\\Envs\\GIC\\lib\\site-packages\\uritemplate\\variable.py\", line 363, in dict_test\r\n return isinstance(value, (dict, collections.MutableMapping))\r\nAttributeError: module 'collections' has no attribute 'MutableMapping'\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n # NOTE: Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-auth>=1.16.0,<3.0.0dev\",\n \"google-auth-httplib2>=0.1.0\",\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core>=1.21.0,<3.0.0dev\",\n \"uritemplate>=3.0.0,<5\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"googleapiclient/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 2,014 | 124 |
gh_patches_debug_21659 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4721 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adopting a projected as gold user fails
### How to reproduce it
1. sign up as Gold member
1. go to https://readthedocs.org/accounts/gold/subscription/
1. select the project that you want to adopt
### Expected Result
Adopts the project.
### Actual Result
Fails with a 500.
https://sentry.io/read-the-docs/readthedocs-org/issues/587668658/
### The problem
This line
https://github.com/rtfd/readthedocs.org/blob/44e02def230b937e4eca396864de9fc81f4ef33f/readthedocs/gold/views.py#L109
cause the problem since we are receiving a "project name" and using it as "project slug".
</issue>
<code>
[start of readthedocs/gold/forms.py]
1 """Gold subscription forms."""
2
3 from __future__ import absolute_import
4
5 from builtins import object
6 from django import forms
7
8 from readthedocs.payments.forms import StripeModelForm, StripeResourceMixin
9
10 from .models import LEVEL_CHOICES, GoldUser
11
12
13 class GoldSubscriptionForm(StripeResourceMixin, StripeModelForm):
14
15 """
16 Gold subscription payment form.
17
18 This extends the common base form for handling Stripe subscriptions. Credit
19 card fields for card number, expiry, and CVV are extended from
20 :py:class:`StripeModelForm`, with additional methods from
21 :py:class:`StripeResourceMixin` for common operations against the Stripe API.
22 """
23
24 class Meta(object):
25 model = GoldUser
26 fields = ['last_4_card_digits', 'level']
27
28 last_4_card_digits = forms.CharField(
29 required=True,
30 min_length=4,
31 max_length=4,
32 widget=forms.HiddenInput(attrs={
33 'data-bind': 'valueInit: last_4_card_digits, value: last_4_card_digits',
34 })
35 )
36
37 level = forms.ChoiceField(
38 required=True,
39 choices=LEVEL_CHOICES,
40 )
41
42 def clean(self):
43 self.instance.user = self.customer
44 return super(GoldSubscriptionForm, self).clean()
45
46 def validate_stripe(self):
47 subscription = self.get_subscription()
48 self.instance.stripe_id = subscription.customer
49 self.instance.subscribed = True
50
51 def get_customer_kwargs(self):
52 return {
53 'description': self.customer.get_full_name() or self.customer.username,
54 'email': self.customer.email,
55 'id': self.instance.stripe_id or None
56 }
57
58 def get_subscription(self):
59 customer = self.get_customer()
60
61 # TODO get the first subscription more intelligently
62 subscriptions = customer.subscriptions.all(limit=5)
63 if subscriptions.data:
64 # Update an existing subscription - Stripe prorates by default
65 subscription = subscriptions.data[0]
66 subscription.plan = self.cleaned_data['level']
67 if 'stripe_token' in self.cleaned_data and self.cleaned_data['stripe_token']:
68 # Optionally update the card
69 subscription.source = self.cleaned_data['stripe_token']
70 subscription.save()
71 else:
72 # Add a new subscription
73 subscription = customer.subscriptions.create(
74 plan=self.cleaned_data['level'],
75 source=self.cleaned_data['stripe_token']
76 )
77
78 return subscription
79
80
81 class GoldProjectForm(forms.Form):
82 project = forms.CharField(
83 required=True,
84 )
85
86 def __init__(self, *args, **kwargs):
87 self.user = kwargs.pop('user', None)
88 self.projects = kwargs.pop('projects', None)
89 super(GoldProjectForm, self).__init__(*args, **kwargs)
90
91 def clean(self):
92 cleaned_data = super(GoldProjectForm, self).clean()
93 if self.projects.count() < self.user.num_supported_projects:
94 return cleaned_data
95
96 self.add_error(None, 'You already have the max number of supported projects.')
97
[end of readthedocs/gold/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/gold/forms.py b/readthedocs/gold/forms.py
--- a/readthedocs/gold/forms.py
+++ b/readthedocs/gold/forms.py
@@ -5,7 +5,10 @@
from builtins import object
from django import forms
+from django.utils.translation import ugettext_lazy as _
+
from readthedocs.payments.forms import StripeModelForm, StripeResourceMixin
+from readthedocs.projects.models import Project
from .models import LEVEL_CHOICES, GoldUser
@@ -88,6 +91,14 @@
self.projects = kwargs.pop('projects', None)
super(GoldProjectForm, self).__init__(*args, **kwargs)
+ def clean_project(self):
+ project_slug = self.cleaned_data.get('project', '')
+ project_instance = Project.objects.filter(slug=project_slug)
+ if not project_instance.exists():
+ raise forms.ValidationError(_('No project found.'))
+ else:
+ return project_slug
+
def clean(self):
cleaned_data = super(GoldProjectForm, self).clean()
if self.projects.count() < self.user.num_supported_projects:
| {"golden_diff": "diff --git a/readthedocs/gold/forms.py b/readthedocs/gold/forms.py\n--- a/readthedocs/gold/forms.py\n+++ b/readthedocs/gold/forms.py\n@@ -5,7 +5,10 @@\n from builtins import object\n from django import forms\n \n+from django.utils.translation import ugettext_lazy as _\n+\n from readthedocs.payments.forms import StripeModelForm, StripeResourceMixin\n+from readthedocs.projects.models import Project\n \n from .models import LEVEL_CHOICES, GoldUser\n \n@@ -88,6 +91,14 @@\n self.projects = kwargs.pop('projects', None)\n super(GoldProjectForm, self).__init__(*args, **kwargs)\n \n+ def clean_project(self):\n+ project_slug = self.cleaned_data.get('project', '')\n+ project_instance = Project.objects.filter(slug=project_slug)\n+ if not project_instance.exists():\n+ raise forms.ValidationError(_('No project found.'))\n+ else:\n+ return project_slug\n+\n def clean(self):\n cleaned_data = super(GoldProjectForm, self).clean()\n if self.projects.count() < self.user.num_supported_projects:\n", "issue": "Adopting a projected as gold user fails\n### How to reproduce it\r\n\r\n1. sign up as Gold member\r\n1. go to https://readthedocs.org/accounts/gold/subscription/\r\n1. select the project that you want to adopt\r\n\r\n### Expected Result\r\n\r\nAdopts the project.\r\n\r\n### Actual Result\r\n\r\nFails with a 500.\r\n\r\nhttps://sentry.io/read-the-docs/readthedocs-org/issues/587668658/\r\n\r\n### The problem\r\n\r\nThis line\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/44e02def230b937e4eca396864de9fc81f4ef33f/readthedocs/gold/views.py#L109\r\n\r\ncause the problem since we are receiving a \"project name\" and using it as \"project slug\".\n", "before_files": [{"content": "\"\"\"Gold subscription forms.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom builtins import object\nfrom django import forms\n\nfrom readthedocs.payments.forms import StripeModelForm, StripeResourceMixin\n\nfrom .models import LEVEL_CHOICES, GoldUser\n\n\nclass GoldSubscriptionForm(StripeResourceMixin, StripeModelForm):\n\n \"\"\"\n Gold subscription payment form.\n\n This extends the common base form for handling Stripe subscriptions. 
Credit\n card fields for card number, expiry, and CVV are extended from\n :py:class:`StripeModelForm`, with additional methods from\n :py:class:`StripeResourceMixin` for common operations against the Stripe API.\n \"\"\"\n\n class Meta(object):\n model = GoldUser\n fields = ['last_4_card_digits', 'level']\n\n last_4_card_digits = forms.CharField(\n required=True,\n min_length=4,\n max_length=4,\n widget=forms.HiddenInput(attrs={\n 'data-bind': 'valueInit: last_4_card_digits, value: last_4_card_digits',\n })\n )\n\n level = forms.ChoiceField(\n required=True,\n choices=LEVEL_CHOICES,\n )\n\n def clean(self):\n self.instance.user = self.customer\n return super(GoldSubscriptionForm, self).clean()\n\n def validate_stripe(self):\n subscription = self.get_subscription()\n self.instance.stripe_id = subscription.customer\n self.instance.subscribed = True\n\n def get_customer_kwargs(self):\n return {\n 'description': self.customer.get_full_name() or self.customer.username,\n 'email': self.customer.email,\n 'id': self.instance.stripe_id or None\n }\n\n def get_subscription(self):\n customer = self.get_customer()\n\n # TODO get the first subscription more intelligently\n subscriptions = customer.subscriptions.all(limit=5)\n if subscriptions.data:\n # Update an existing subscription - Stripe prorates by default\n subscription = subscriptions.data[0]\n subscription.plan = self.cleaned_data['level']\n if 'stripe_token' in self.cleaned_data and self.cleaned_data['stripe_token']:\n # Optionally update the card\n subscription.source = self.cleaned_data['stripe_token']\n subscription.save()\n else:\n # Add a new subscription\n subscription = customer.subscriptions.create(\n plan=self.cleaned_data['level'],\n source=self.cleaned_data['stripe_token']\n )\n\n return subscription\n\n\nclass GoldProjectForm(forms.Form):\n project = forms.CharField(\n required=True,\n )\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n self.projects = kwargs.pop('projects', None)\n super(GoldProjectForm, self).__init__(*args, **kwargs)\n\n def clean(self):\n cleaned_data = super(GoldProjectForm, self).clean()\n if self.projects.count() < self.user.num_supported_projects:\n return cleaned_data\n\n self.add_error(None, 'You already have the max number of supported projects.')\n", "path": "readthedocs/gold/forms.py"}]} | 1,549 | 247 |
gh_patches_debug_4043 | rasdani/github-patches | git_diff | hylang__hy-139 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Quote doesn't return valid lists
```
=> (car (quote [if 1 2 3]))
u'if'
=> (cdr (quote [if 1 2 3]))
[1, 2, 3]
```
=> OK
```
=> (car (quote (if 1 2 3)))
u'_hy_hoisted_fn_1'
=> (car (car (quote (if 1 2 3))))
u'_'
=> (cdr (quote (if 1 2 3)))
[]
```
=> Not ok
</issue>
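The hoisting mangles rewrite `if` and `fn` forms wherever they appear, including inside a `(quote ...)`, which is why the quoted expression comes back as a hoisted symbol instead of the original list. A sketch of the guard that prevents this, mirroring the check the eventual patch adds (`inside_quote` is an illustrative helper name, assuming the mangle's frame stack):

```python
from hy.models.expression import HyExpression

def inside_quote(stack):
    # Hoisting must be skipped when any enclosing frame is a (quote ...) form,
    # so the quoted list survives untouched.
    return any(
        isinstance(frame, HyExpression) and frame and frame[0] == "quote"
        for frame in stack
    )
```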
<code>
[start of hy/core/mangles.py]
1 # Copyright (c) 2013 Paul Tagliamonte <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the "Software"),
5 # to deal in the Software without restriction, including without limitation
6 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 # and/or sell copies of the Software, and to permit persons to whom the
8 # Software is furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 from hy.models.expression import HyExpression
22 from hy.models.symbol import HySymbol
23 from hy.models.list import HyList
24
25 import hy.mangle
26
27
28 class HoistableMangle(hy.mangle.Mangle):
29 def should_hoist(self):
30 for frame in self.stack:
31 if frame is self.scope:
32 return False
33
34 if isinstance(frame, HyExpression) and frame != []:
35 call = frame[0]
36 if call in self.ignore:
37 continue
38 return True
39 return False
40
41
42 class FunctionMangle(HoistableMangle):
43 hoistable = ["fn"]
44 ignore = ["def", "decorate_with", "setf", "setv", "foreach", "do"]
45
46 def __init__(self):
47 self.series = 0
48
49 def unique_name(self):
50 self.series += 1
51 return "_hy_hoisted_fn_%s" % (self.series)
52
53 def visit(self, tree):
54 if isinstance(tree, HyExpression) and tree != []:
55 call = tree[0]
56 if call == "fn" and self.should_hoist():
57 new_name = HySymbol(self.unique_name())
58 new_name.replace(tree)
59 fn_def = HyExpression([HySymbol("def"),
60 new_name,
61 tree])
62 fn_def.replace(tree)
63 self.hoist(fn_def)
64 return new_name
65
66
67 class IfMangle(HoistableMangle):
68 ignore = ["foreach", "do"]
69
70 def __init__(self):
71 self.series = 0
72
73 def visit(self, tree):
74 if isinstance(tree, HyExpression) and tree != []:
75 call = tree[0]
76 if call == "if" and self.should_hoist():
77 fn = HyExpression([HyExpression([HySymbol("fn"),
78 HyList([]),
79 tree])])
80 fn.replace(tree)
81 return fn
82
83
84 hy.mangle.MANGLES.append(IfMangle)
85 hy.mangle.MANGLES.append(FunctionMangle)
86
[end of hy/core/mangles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/core/mangles.py b/hy/core/mangles.py
--- a/hy/core/mangles.py
+++ b/hy/core/mangles.py
@@ -27,6 +27,11 @@
class HoistableMangle(hy.mangle.Mangle):
def should_hoist(self):
+ for frame in self.stack:
+ if (isinstance(frame, HyExpression) and
+ frame and frame[0] == "quote"):
+ return False
+
for frame in self.stack:
if frame is self.scope:
return False
| {"golden_diff": "diff --git a/hy/core/mangles.py b/hy/core/mangles.py\n--- a/hy/core/mangles.py\n+++ b/hy/core/mangles.py\n@@ -27,6 +27,11 @@\n \n class HoistableMangle(hy.mangle.Mangle):\n def should_hoist(self):\n+ for frame in self.stack:\n+ if (isinstance(frame, HyExpression) and\n+ frame and frame[0] == \"quote\"):\n+ return False\n+\n for frame in self.stack:\n if frame is self.scope:\n return False\n", "issue": "Quote doesn't return valid lists\n```\n=> (car (quote [if 1 2 3]))\nu'if'\n=> (cdr (quote [if 1 2 3]))\n[1, 2, 3]\n```\n\n=> OK\n\n```\n=> (car (quote (if 1 2 3)))\nu'_hy_hoisted_fn_1'\n=> (car (car (quote (if 1 2 3))))\nu'_'\n=> (cdr (quote (if 1 2 3)))\n[]\n```\n\n=> Not ok\n\n", "before_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.symbol import HySymbol\nfrom hy.models.list import HyList\n\nimport hy.mangle\n\n\nclass HoistableMangle(hy.mangle.Mangle):\n def should_hoist(self):\n for frame in self.stack:\n if frame is self.scope:\n return False\n\n if isinstance(frame, HyExpression) and frame != []:\n call = frame[0]\n if call in self.ignore:\n continue\n return True\n return False\n\n\nclass FunctionMangle(HoistableMangle):\n hoistable = [\"fn\"]\n ignore = [\"def\", \"decorate_with\", \"setf\", \"setv\", \"foreach\", \"do\"]\n\n def __init__(self):\n self.series = 0\n\n def unique_name(self):\n self.series += 1\n return \"_hy_hoisted_fn_%s\" % (self.series)\n\n def visit(self, tree):\n if isinstance(tree, HyExpression) and tree != []:\n call = tree[0]\n if call == \"fn\" and self.should_hoist():\n new_name = HySymbol(self.unique_name())\n new_name.replace(tree)\n fn_def = HyExpression([HySymbol(\"def\"),\n new_name,\n tree])\n fn_def.replace(tree)\n self.hoist(fn_def)\n return new_name\n\n\nclass IfMangle(HoistableMangle):\n ignore = [\"foreach\", \"do\"]\n\n def __init__(self):\n self.series = 0\n\n def visit(self, tree):\n if isinstance(tree, HyExpression) and tree != []:\n call = tree[0]\n if call == \"if\" and self.should_hoist():\n fn = HyExpression([HyExpression([HySymbol(\"fn\"),\n HyList([]),\n tree])])\n fn.replace(tree)\n return fn\n\n\nhy.mangle.MANGLES.append(IfMangle)\nhy.mangle.MANGLES.append(FunctionMangle)\n", "path": "hy/core/mangles.py"}]} | 1,491 | 129 |
gh_patches_debug_6693 | rasdani/github-patches | git_diff | sosreport__sos-3342 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[hpssm] controller collection misses Gen10+ controllers and controllers above slot 9.
1. Gen10+ controllers dropped the **Smart Array** naming, at least for some controllers.
* `HPE SR932i-p Gen10+ in Slot 3`
2. Controllers are showing up above slot 9 even when there are only one or two total controllers.
* `HPE Smart Array P816i-a SR Gen10 in Slot 12`
This system had no controller in slot 0, a new naming style in slot 3, and the old naming style in slot 12.
The `ssacli ctrl all show status` command still lists them, and `ssacli ctrl all show config detail` still gets the config details of each. The current pattern fails to identify the slots in both cases, leading to individual slot-based commands either not executing or executing against the wrong slot.
</issue>
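A quick check of both controller strings against the old pattern and a broadened one shows the two failure modes (no `Smart Array` substring in the first string, and only the first digit of a two-digit slot captured in the second):

```python
import re

old = re.compile(r"^HP.*Smart Array (.*) in Slot ([0123456789])")
new = re.compile(r"^HP[E] (.*) in Slot ([0123456789]+)")  # broadened form

print(old.search("HPE SR932i-p Gen10+ in Slot 3"))                          # None
print(old.search("HPE Smart Array P816i-a SR Gen10 in Slot 12").group(2))   # '1' (wrong slot)
print(new.search("HPE SR932i-p Gen10+ in Slot 3").group(2))                 # '3'
print(new.search("HPE Smart Array P816i-a SR Gen10 in Slot 12").group(2))   # '12'
```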
<code>
[start of sos/report/plugins/hpssm.py]
1 # This file is part of the sos project: https://github.com/sosreport/sos
2 #
3 # This copyrighted material is made available to anyone wishing to use,
4 # modify, copy, or redistribute it subject to the terms and conditions of
5 # version 2 of the GNU General Public License.
6 #
7 # See the LICENSE file in the source distribution for further information.
8
9 from sos.report.plugins import Plugin, IndependentPlugin, PluginOpt
10 import re
11
12
13 class Hpssm(Plugin, IndependentPlugin):
14 """
15 This plugin will capture details for each controller from Smart Storage
16 Array Administrator, an Array diagnostic report from Smart Storage
17 Administrator Diagnostics Utility and, when the plugins debug option is
18 enabled will gather the Active Health System log via the RESTful Interface
19 Tool (iLOREST).
20 """
21 short_desc = 'HP Smart Storage Management'
22
23 plugin_name = 'hpssm'
24 profiles = ('system', 'storage', 'hardware',)
25 packages = ('ilorest', 'ssacli', 'ssaducli',)
26
27 option_list = [
28 PluginOpt('debug', default=False, desc='capture debug data')
29 ]
30
31 def setup(self):
32 cmd = 'ssacli'
33 subcmds = [
34 'ctrl all show status'
35 ]
36 slot_subcmds = [
37 'array all show detail',
38 'ld all show',
39 'ld all show detail',
40 'pd all show',
41 'pd all show detail',
42 'show detail'
43 ]
44 self.add_cmd_output(
45 ["%s %s" % (cmd, subcmd) for subcmd in subcmds]
46 )
47
48 pattern = re.compile("^HP.*Smart Array (.*) in Slot ([0123456789])")
49 config_detail_cmd = cmd + ' ctrl all show config detail'
50 config_detail = self.collect_cmd_output(config_detail_cmd)
51 ctrl_slots = []
52 if config_detail['status'] == 0:
53 ctrl_slots = [m.group(2)
54 for line in config_detail['output'].splitlines()
55 for m in [pattern.search(line)] if m]
56 ssacli_ctrl_slot_cmd = cmd + ' ctrl slot='
57 self.add_cmd_output(
58 ["%s%s %s" % (
59 ssacli_ctrl_slot_cmd,
60 slot,
61 slot_subcmd
62 )
63 for slot in ctrl_slots
64 for slot_subcmd in slot_subcmds]
65 )
66
67 logpath = self.get_cmd_output_path()
68
69 self.add_cmd_output(
70 'ssaducli -v -adu -f %s/adu-log.zip' % logpath,
71 suggest_filename='ssaducli_-v_-adu.log'
72 )
73
74 if self.get_option("debug"):
75 self.do_debug(logpath)
76
77 def do_debug(self, logpath):
78 self.add_cmd_output(
79 'ilorest serverlogs --selectlog=AHS --directorypath=%s' % logpath,
80 runat=logpath, suggest_filename='ilorest.log'
81 )
82
83 # vim: set et ts=4 sw=4 :
84
[end of sos/report/plugins/hpssm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sos/report/plugins/hpssm.py b/sos/report/plugins/hpssm.py
--- a/sos/report/plugins/hpssm.py
+++ b/sos/report/plugins/hpssm.py
@@ -45,7 +45,7 @@
["%s %s" % (cmd, subcmd) for subcmd in subcmds]
)
- pattern = re.compile("^HP.*Smart Array (.*) in Slot ([0123456789])")
+ pattern = re.compile("^HP[E] (.*) in Slot ([0123456789]+)")
config_detail_cmd = cmd + ' ctrl all show config detail'
config_detail = self.collect_cmd_output(config_detail_cmd)
ctrl_slots = []
| {"golden_diff": "diff --git a/sos/report/plugins/hpssm.py b/sos/report/plugins/hpssm.py\n--- a/sos/report/plugins/hpssm.py\n+++ b/sos/report/plugins/hpssm.py\n@@ -45,7 +45,7 @@\n [\"%s %s\" % (cmd, subcmd) for subcmd in subcmds]\n )\n \n- pattern = re.compile(\"^HP.*Smart Array (.*) in Slot ([0123456789])\")\n+ pattern = re.compile(\"^HP[E] (.*) in Slot ([0123456789]+)\")\n config_detail_cmd = cmd + ' ctrl all show config detail'\n config_detail = self.collect_cmd_output(config_detail_cmd)\n ctrl_slots = []\n", "issue": "[hpssm] controller collection misses Gen10+ controllers and above slot 9.\n1. Gen10+ controllers changed the naming from **Smart Array** for at least some controllers.\r\n * `HPE SR932i-p Gen10+ in Slot 3`\r\n2. Controllers are showing up above slot 9 even when there is only 1 or two total controllers.\r\n * `HPE Smart Array P816i-a SR Gen10 in Slot 12`\r\n\r\nThis system had no controller in slot 0, a new naming style in slot 3, and the old naming style in slot 12. \r\nThe `ssacli ctrl all show status` still lists them, and `ssacli ctrl all show config detail` still gets the config details of each. The current pattern fails to identify the slots in both cases, leading to not executing individual slot based commands or executing on the wrong slot.\r\n\r\n\n", "before_files": [{"content": "# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import Plugin, IndependentPlugin, PluginOpt\nimport re\n\n\nclass Hpssm(Plugin, IndependentPlugin):\n \"\"\"\n This plugin will capture details for each controller from Smart Storage\n Array Administrator, an Array diagnostic report from Smart Storage\n Administrator Diagnostics Utility and, when the plugins debug option is\n enabled will gather the Active Health System log via the RESTful Interface\n Tool (iLOREST).\n \"\"\"\n short_desc = 'HP Smart Storage Management'\n\n plugin_name = 'hpssm'\n profiles = ('system', 'storage', 'hardware',)\n packages = ('ilorest', 'ssacli', 'ssaducli',)\n\n option_list = [\n PluginOpt('debug', default=False, desc='capture debug data')\n ]\n\n def setup(self):\n cmd = 'ssacli'\n subcmds = [\n 'ctrl all show status'\n ]\n slot_subcmds = [\n 'array all show detail',\n 'ld all show',\n 'ld all show detail',\n 'pd all show',\n 'pd all show detail',\n 'show detail'\n ]\n self.add_cmd_output(\n [\"%s %s\" % (cmd, subcmd) for subcmd in subcmds]\n )\n\n pattern = re.compile(\"^HP.*Smart Array (.*) in Slot ([0123456789])\")\n config_detail_cmd = cmd + ' ctrl all show config detail'\n config_detail = self.collect_cmd_output(config_detail_cmd)\n ctrl_slots = []\n if config_detail['status'] == 0:\n ctrl_slots = [m.group(2)\n for line in config_detail['output'].splitlines()\n for m in [pattern.search(line)] if m]\n ssacli_ctrl_slot_cmd = cmd + ' ctrl slot='\n self.add_cmd_output(\n [\"%s%s %s\" % (\n ssacli_ctrl_slot_cmd,\n slot,\n slot_subcmd\n )\n for slot in ctrl_slots\n for slot_subcmd in slot_subcmds]\n )\n\n logpath = self.get_cmd_output_path()\n\n self.add_cmd_output(\n 'ssaducli -v -adu -f %s/adu-log.zip' % logpath,\n suggest_filename='ssaducli_-v_-adu.log'\n )\n\n if self.get_option(\"debug\"):\n self.do_debug(logpath)\n\n def do_debug(self, logpath):\n self.add_cmd_output(\n 'ilorest 
serverlogs --selectlog=AHS --directorypath=%s' % logpath,\n runat=logpath, suggest_filename='ilorest.log'\n )\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/hpssm.py"}]} | 1,553 | 171 |
gh_patches_debug_56607 | rasdani/github-patches | git_diff | spacetelescope__jwql-662 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch EDB to use MAST token from config.json always
Currently, the EDB feature of the JWQL web app attempts to authenticate users with whichever MAST token is currently cached (https://github.com/spacetelescope/jwql/blob/develop/jwql/utils/credentials.py#L45), and if that doesn't succeed, _then_ it uses the `mast_token` key in the `config.json` file. This seems problematic if users are creating new tokens but then attempting to perform EDB queries in the same browser. We should probably switch this to always use the `mast_token` key in the `config.json` file.
</issue>
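A sketch of the proposed lookup order, assuming the browser-cached-token branch is simply dropped while the astroquery session check stays first (the function name is illustrative; the imports come from the module shown below):

```python
import os

from astroquery.mast import Mast

from jwql.utils.utils import get_config, check_config_for_key

def get_mast_token_config_first():
    # Hedged sketch, not the shipped implementation.
    if Mast.authenticated():
        return None
    try:
        check_config_for_key('mast_token')
        return get_config()['mast_token']
    except (KeyError, ValueError):
        # Fall back to the MAST environment variable, if any.
        return os.environ.get('MAST_API_TOKEN')
```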
<code>
[start of jwql/utils/credentials.py]
1 """Utility functions related to accessing remote services and databases.
2
3 Authors
4 -------
5
6 - Johannes Sahlmann
7 - Lauren Chambers
8
9 Use
10 ---
11
12 This module can be imported as such:
13 ::
14
15 import credentials
16 token = credentials.get_mast_token()
17
18 """
19 import os
20
21 from astroquery.mast import Mast
22
23 from jwql.utils.utils import get_config, check_config_for_key
24
25
26 def get_mast_token(request=None):
27 """Return MAST token from either Astroquery.Mast, webpage cookies, the
28 JWQL configuration file, or an environment variable.
29
30 Parameters
31 ----------
32 request : HttpRequest object
33 Incoming request from the webpage
34
35 Returns
36 -------
37 token : str or None
38 User-specific MAST token string, if available
39 """
40 if Mast.authenticated():
41 print('Authenticated with Astroquery MAST magic')
42 return None
43 else:
44 if request is not None:
45 token = str(request.POST.get('access_token'))
46 if token != 'None':
47 print('Authenticated with cached MAST token.')
48 return token
49 try:
50 # check if token is available via config file
51 check_config_for_key('mast_token')
52 token = get_config()['mast_token']
53 print('Authenticated with config.json MAST token.')
54 return token
55 except (KeyError, ValueError):
56 # check if token is available via environment variable
57 # see https://auth.mast.stsci.edu/info
58 try:
59 token = os.environ['MAST_API_TOKEN']
60 print('Authenticated with MAST token environment variable.')
61 return token
62 except KeyError:
63 return None
64
[end of jwql/utils/credentials.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jwql/utils/credentials.py b/jwql/utils/credentials.py
--- a/jwql/utils/credentials.py
+++ b/jwql/utils/credentials.py
@@ -41,11 +41,6 @@
print('Authenticated with Astroquery MAST magic')
return None
else:
- if request is not None:
- token = str(request.POST.get('access_token'))
- if token != 'None':
- print('Authenticated with cached MAST token.')
- return token
try:
# check if token is available via config file
check_config_for_key('mast_token')
| {"golden_diff": "diff --git a/jwql/utils/credentials.py b/jwql/utils/credentials.py\n--- a/jwql/utils/credentials.py\n+++ b/jwql/utils/credentials.py\n@@ -41,11 +41,6 @@\n print('Authenticated with Astroquery MAST magic')\n return None\n else:\n- if request is not None:\n- token = str(request.POST.get('access_token'))\n- if token != 'None':\n- print('Authenticated with cached MAST token.')\n- return token\n try:\n # check if token is available via config file\n check_config_for_key('mast_token')\n", "issue": "Switch EDB to use MAST token from config.json always\nCurrently the EDB feature of the JWQL web app attempts to authenticate users with whichever MAST token is currently cached (https://github.com/spacetelescope/jwql/blob/develop/jwql/utils/credentials.py#L45), and if that doesn't succeed, _then_ it uses the `mast_token` key in the `config.json` file. This seems problematic if users are creating new tokens but then attempting to perform EDB queries in the same browser. We should probably switch this to just use the `mast_token` key in `config.json` file always. \n", "before_files": [{"content": "\"\"\"Utility functions related to accessing remote services and databases.\n\nAuthors\n-------\n\n - Johannes Sahlmann\n - Lauren Chambers\n\nUse\n---\n\n This module can be imported as such:\n ::\n\n import credentials\n token = credentials.get_mast_token()\n\n \"\"\"\nimport os\n\nfrom astroquery.mast import Mast\n\nfrom jwql.utils.utils import get_config, check_config_for_key\n\n\ndef get_mast_token(request=None):\n \"\"\"Return MAST token from either Astroquery.Mast, webpage cookies, the\n JWQL configuration file, or an environment variable.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n token : str or None\n User-specific MAST token string, if available\n \"\"\"\n if Mast.authenticated():\n print('Authenticated with Astroquery MAST magic')\n return None\n else:\n if request is not None:\n token = str(request.POST.get('access_token'))\n if token != 'None':\n print('Authenticated with cached MAST token.')\n return token\n try:\n # check if token is available via config file\n check_config_for_key('mast_token')\n token = get_config()['mast_token']\n print('Authenticated with config.json MAST token.')\n return token\n except (KeyError, ValueError):\n # check if token is available via environment variable\n # see https://auth.mast.stsci.edu/info\n try:\n token = os.environ['MAST_API_TOKEN']\n print('Authenticated with MAST token environment variable.')\n return token\n except KeyError:\n return None\n", "path": "jwql/utils/credentials.py"}]} | 1,147 | 139 |
gh_patches_debug_19238 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3921 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Duplicate checks CKV_K8S_29 and CKV_K8S_30 ?
It seems that both checks are the same, at least in their descriptions:
```
Check: CKV_K8S_29: "Apply security context to your pods and containers"
FAILED for resource: module.some_module.kubernetes_deployment.app
File: /base/main.tf:12-355
Calling File: /some_module.tf:1-116
Check: CKV_K8S_30: "Apply security context to your pods and containers"
FAILED for resource: module.some_module.kubernetes_deployment.app
File: /base/main.tf:12-355
Calling File: /some_module.tf:1-116
```
</issue>
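Despite the identical `name` strings, the two IDs run in different frameworks: CKV_K8S_30 inspects containers in rendered Kubernetes manifests, while CKV_K8S_29 walks Terraform `kubernetes_*` resource blocks. A hedged illustration against the Terraform check's conf shape:

```python
# Minimal conf dict of the shape scan_resource_conf() receives (illustrative).
conf = {"spec": [{"container": [{"name": "app", "image": "nginx"}]}]}
# -> FAILED: the container block carries no "security_context".

conf["spec"][0]["container"][0]["security_context"] = [{"run_as_non_root": True}]
# -> PASSED: every container now sets a security context.
```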
<code>
[start of checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py]
1 from typing import Any, Dict
2
3 from checkov.common.models.enums import CheckResult
4 from checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck
5
6
7 class ContainerSecurityContext(BaseK8sContainerCheck):
8 def __init__(self) -> None:
9 # CIS-1.5 5.7.3
10 name = "Apply security context to your pods and containers"
11 # Security context can be set at pod or container level.
12 # Location: container .securityContext
13 id = "CKV_K8S_30"
14 super().__init__(name=name, id=id)
15
16 def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult:
17 self.evaluated_container_keys = ["securityContext"]
18 if conf.get("securityContext"):
19 return CheckResult.PASSED
20 return CheckResult.FAILED
21
22
23 check = ContainerSecurityContext()
24
[end of checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py]
[start of checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py]
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class PodSecurityContext(BaseResourceCheck):
6
7 def __init__(self):
8 # CIS-1.5 5.7.3
9 name = "Apply security context to your pods and containers"
10 # Security context can be set at pod or container level.
11 id = "CKV_K8S_29"
12
13 supported_resources = ('kubernetes_pod', 'kubernetes_pod_v1',
14 'kubernetes_deployment', 'kubernetes_deployment_v1',
15 'kubernetes_daemonset', 'kubernetes_daemon_set_v1')
16 categories = (CheckCategories.GENERAL_SECURITY,)
17 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
18
19 def scan_resource_conf(self, conf) -> CheckResult:
20 if "spec" not in conf:
21 self.evaluated_keys = [""]
22 return CheckResult.FAILED
23 spec = conf['spec'][0]
24 if spec.get("container"):
25 containers = spec.get("container")
26
27 for idx, container in enumerate(containers):
28 if type(container) != dict:
29 return CheckResult.UNKNOWN
30
31 if not container.get("security_context"):
32 self.evaluated_keys = [f"spec/[0]/container/{idx}"]
33 return CheckResult.FAILED
34 return CheckResult.PASSED
35
36 if spec.get("template") and isinstance(spec.get("template"), list):
37 template = spec.get("template")[0]
38 if template.get("spec") and isinstance(template.get("spec"), list):
39 temp_spec = template.get("spec")[0]
40 if temp_spec.get("container"):
41 containers = temp_spec.get("container")
42
43 for idx, container in enumerate(containers):
44 if type(container) != dict:
45 return CheckResult.UNKNOWN
46
47 if not container.get("security_context"):
48 self.evaluated_keys = [f"spec/[0]/template/[0]/spec/[0]/container/{idx}"]
49 return CheckResult.FAILED
50 return CheckResult.PASSED
51 return CheckResult.FAILED
52
53
54 check = PodSecurityContext()
55
[end of checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py b/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py
--- a/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py
+++ b/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py
@@ -7,7 +7,7 @@
class ContainerSecurityContext(BaseK8sContainerCheck):
def __init__(self) -> None:
# CIS-1.5 5.7.3
- name = "Apply security context to your pods and containers"
+ name = "Apply security context to your containers"
# Security context can be set at pod or container level.
# Location: container .securityContext
id = "CKV_K8S_30"
diff --git a/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py b/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py
--- a/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py
+++ b/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py
@@ -6,7 +6,7 @@
def __init__(self):
# CIS-1.5 5.7.3
- name = "Apply security context to your pods and containers"
+ name = "Apply security context to your pods, deployments and daemon_sets"
# Security context can be set at pod or container level.
id = "CKV_K8S_29"
| {"golden_diff": "diff --git a/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py b/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py\n--- a/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py\n+++ b/checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py\n@@ -7,7 +7,7 @@\n class ContainerSecurityContext(BaseK8sContainerCheck):\n def __init__(self) -> None:\n # CIS-1.5 5.7.3\n- name = \"Apply security context to your pods and containers\"\n+ name = \"Apply security context to your containers\"\n # Security context can be set at pod or container level.\n # Location: container .securityContext\n id = \"CKV_K8S_30\"\ndiff --git a/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py b/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py\n--- a/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py\n+++ b/checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py\n@@ -6,7 +6,7 @@\n \n def __init__(self):\n # CIS-1.5 5.7.3\n- name = \"Apply security context to your pods and containers\"\n+ name = \"Apply security context to your pods, deployments and daemon_sets\"\n # Security context can be set at pod or container level.\n id = \"CKV_K8S_29\"\n", "issue": "Duplicate checks CKV_K8S_29 and CKV_K8S_30 ?\nIt seem that both checks are the same, at least in the description :\r\n\r\n```\r\nCheck: CKV_K8S_29: \"Apply security context to your pods and containers\"\r\n\tFAILED for resource: module.some_module.kubernetes_deployment.app\r\n\tFile: /base/main.tf:12-355\r\n\tCalling File: /some_module.tf:1-116\r\n\r\nCheck: CKV_K8S_30: \"Apply security context to your pods and containers\"\r\n\tFAILED for resource: module.some_module.kubernetes_deployment.app\r\n\tFile: /base/main.tf:12-355\r\n\tCalling File: /some_module.tf:1-116\r\n```\n", "before_files": [{"content": "from typing import Any, Dict\n\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck\n\n\nclass ContainerSecurityContext(BaseK8sContainerCheck):\n def __init__(self) -> None:\n # CIS-1.5 5.7.3\n name = \"Apply security context to your pods and containers\"\n # Security context can be set at pod or container level.\n # Location: container .securityContext\n id = \"CKV_K8S_30\"\n super().__init__(name=name, id=id)\n\n def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult:\n self.evaluated_container_keys = [\"securityContext\"]\n if conf.get(\"securityContext\"):\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = ContainerSecurityContext()\n", "path": "checkov/kubernetes/checks/resource/k8s/ContainerSecurityContext.py"}, {"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass PodSecurityContext(BaseResourceCheck):\n\n def __init__(self):\n # CIS-1.5 5.7.3\n name = \"Apply security context to your pods and containers\"\n # Security context can be set at pod or container level.\n id = \"CKV_K8S_29\"\n\n supported_resources = ('kubernetes_pod', 'kubernetes_pod_v1',\n 'kubernetes_deployment', 'kubernetes_deployment_v1',\n 'kubernetes_daemonset', 'kubernetes_daemon_set_v1')\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n 
if \"spec\" not in conf:\n self.evaluated_keys = [\"\"]\n return CheckResult.FAILED\n spec = conf['spec'][0]\n if spec.get(\"container\"):\n containers = spec.get(\"container\")\n\n for idx, container in enumerate(containers):\n if type(container) != dict:\n return CheckResult.UNKNOWN\n\n if not container.get(\"security_context\"):\n self.evaluated_keys = [f\"spec/[0]/container/{idx}\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n if spec.get(\"template\") and isinstance(spec.get(\"template\"), list):\n template = spec.get(\"template\")[0]\n if template.get(\"spec\") and isinstance(template.get(\"spec\"), list):\n temp_spec = template.get(\"spec\")[0]\n if temp_spec.get(\"container\"):\n containers = temp_spec.get(\"container\")\n\n for idx, container in enumerate(containers):\n if type(container) != dict:\n return CheckResult.UNKNOWN\n\n if not container.get(\"security_context\"):\n self.evaluated_keys = [f\"spec/[0]/template/[0]/spec/[0]/container/{idx}\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = PodSecurityContext()\n", "path": "checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py"}]} | 1,575 | 328 |
gh_patches_debug_1977 | rasdani/github-patches | git_diff | xorbitsai__inference-1096 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ENH: Add the option to use the CPU for inference even when there is a GPU device
### Is your feature request related to a problem? Please describe
There is a GPU in my server, but when loading some LLM models I need to load them into main memory, because the model size is bigger than the GPU memory.
However, when I launch the model from web page, the N-GPU setting only contains auto, 0, 1 options, if I select 0, system will complain the following error:
> Server error: 400 - [address=0.0.0.0:19270, pid=2063850] The parameter `n_gpu` must be greater than 0 and not greater than the number of GPUs: 1 on the machine.
### Describe the solution you'd like
I think that when the N-GPU setting is set to 0, the CPU should be used as the inference device.
</issue>
<code>
[start of xinference/device_utils.py]
1 # Copyright 2022-2023 XProbe Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 import torch
18 from typing_extensions import Literal, Union
19
20 DeviceType = Literal["cuda", "mps", "xpu", "cpu"]
21
22
23 def is_xpu_available() -> bool:
24 return hasattr(torch, "xpu") and torch.xpu.is_available()
25
26
27 def get_available_device() -> DeviceType:
28 if torch.cuda.is_available():
29 return "cuda"
30 elif torch.backends.mps.is_available():
31 return "mps"
32 elif is_xpu_available():
33 return "xpu"
34 return "cpu"
35
36
37 def is_device_available(device: str) -> bool:
38 if device == "cuda":
39 return torch.cuda.is_available()
40 elif device == "mps":
41 return torch.backends.mps.is_available()
42 elif device == "xpu":
43 return is_xpu_available()
44 elif device == "cpu":
45 return True
46
47 return False
48
49
50 def move_model_to_available_device(model):
51 device = get_available_device()
52
53 if device == "cpu":
54 return model
55
56 return model.to(device)
57
58
59 def get_device_preferred_dtype(device: str) -> Union[torch.dtype, None]:
60 if device == "cpu":
61 return torch.float32
62 elif device == "cuda" or device == "mps":
63 return torch.float16
64 elif device == "xpu":
65 return torch.bfloat16
66
67 return None
68
69
70 def is_hf_accelerate_supported(device: str) -> bool:
71 return device == "cuda" or device == "xpu"
72
73
74 def empty_cache():
75 if torch.cuda.is_available():
76 torch.cuda.empty_cache()
77 if torch.backends.mps.is_available():
78 torch.mps.empty_cache()
79 if is_xpu_available():
80 torch.xpu.empty_cache()
81
82
83 def gpu_count():
84 if torch.cuda.is_available():
85 cuda_visible_devices_env = os.getenv("CUDA_VISIBLE_DEVICES", None)
86
87 if cuda_visible_devices_env is None:
88 return torch.cuda.device_count()
89
90 cuda_visible_devices = (
91 cuda_visible_devices_env.split(",") if cuda_visible_devices_env else []
92 )
93
94 return min(torch.cuda.device_count(), len(cuda_visible_devices))
95 elif torch.backends.mps.is_available():
96 return 1
97 elif is_xpu_available():
98 return torch.xpu.device_count()
99 else:
100 return 0
101
[end of xinference/device_utils.py]
</code>
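For context before the patch: the issue proposes that an explicit `n_gpu == 0` should mean "run on the CPU" instead of being rejected by validation. The sketch below is illustrative only; the function name and error message are assumptions rather than the repository's actual code.

```python
# Hypothetical sketch (not the shipped fix): treat n_gpu == 0 as an explicit
# CPU request instead of rejecting it during validation.
import torch

def resolve_device(n_gpu: int) -> str:
    if n_gpu == 0:
        return "cpu"  # the user opted out of GPU inference
    available = torch.cuda.device_count() if torch.cuda.is_available() else 0
    if n_gpu > available:
        raise ValueError(
            f"n_gpu must not exceed the {available} GPU(s) on this machine"
        )
    return "cuda"

print(resolve_device(0))  # -> "cpu", even on a machine with GPUs
```

Note that the accepted fix below takes a different route: it stops `gpu_count()` from reporting 1 for Apple MPS, so only CUDA and XPU devices are counted.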
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xinference/device_utils.py b/xinference/device_utils.py
--- a/xinference/device_utils.py
+++ b/xinference/device_utils.py
@@ -92,8 +92,6 @@
)
return min(torch.cuda.device_count(), len(cuda_visible_devices))
- elif torch.backends.mps.is_available():
- return 1
elif is_xpu_available():
return torch.xpu.device_count()
else:
| {"golden_diff": "diff --git a/xinference/device_utils.py b/xinference/device_utils.py\n--- a/xinference/device_utils.py\n+++ b/xinference/device_utils.py\n@@ -92,8 +92,6 @@\n )\n \n return min(torch.cuda.device_count(), len(cuda_visible_devices))\n- elif torch.backends.mps.is_available():\n- return 1\n elif is_xpu_available():\n return torch.xpu.device_count()\n else:\n", "issue": "ENH: Add the option to use CPU to inference even there is GPU device\n### Is your feature request related to a problem? Please describe\r\nThere is a GPU in my server, but when load some LLM model, I need load it into my memory because the model size is bigger\r\nthan GPU memory.\r\n\r\nHowever, when I launch the model from web page, the N-GPU setting only contains auto, 0, 1 options, if I select 0, system will complain the following error:\r\n\r\n> Server error: 400 - [address=0.0.0.0:19270, pid=2063850] The parameter `n_gpu` must be greater than 0 and not greater than the number of GPUs: 1 on the machine.\r\n\r\n### Describe the solution you'd like\r\nI think when the N GPU setting is set to 0, it should use CPU as inference device.\r\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport torch\nfrom typing_extensions import Literal, Union\n\nDeviceType = Literal[\"cuda\", \"mps\", \"xpu\", \"cpu\"]\n\n\ndef is_xpu_available() -> bool:\n return hasattr(torch, \"xpu\") and torch.xpu.is_available()\n\n\ndef get_available_device() -> DeviceType:\n if torch.cuda.is_available():\n return \"cuda\"\n elif torch.backends.mps.is_available():\n return \"mps\"\n elif is_xpu_available():\n return \"xpu\"\n return \"cpu\"\n\n\ndef is_device_available(device: str) -> bool:\n if device == \"cuda\":\n return torch.cuda.is_available()\n elif device == \"mps\":\n return torch.backends.mps.is_available()\n elif device == \"xpu\":\n return is_xpu_available()\n elif device == \"cpu\":\n return True\n\n return False\n\n\ndef move_model_to_available_device(model):\n device = get_available_device()\n\n if device == \"cpu\":\n return model\n\n return model.to(device)\n\n\ndef get_device_preferred_dtype(device: str) -> Union[torch.dtype, None]:\n if device == \"cpu\":\n return torch.float32\n elif device == \"cuda\" or device == \"mps\":\n return torch.float16\n elif device == \"xpu\":\n return torch.bfloat16\n\n return None\n\n\ndef is_hf_accelerate_supported(device: str) -> bool:\n return device == \"cuda\" or device == \"xpu\"\n\n\ndef empty_cache():\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n if torch.backends.mps.is_available():\n torch.mps.empty_cache()\n if is_xpu_available():\n torch.xpu.empty_cache()\n\n\ndef gpu_count():\n if torch.cuda.is_available():\n cuda_visible_devices_env = os.getenv(\"CUDA_VISIBLE_DEVICES\", None)\n\n if cuda_visible_devices_env is None:\n return torch.cuda.device_count()\n\n cuda_visible_devices = (\n cuda_visible_devices_env.split(\",\") if cuda_visible_devices_env else []\n )\n\n return 
min(torch.cuda.device_count(), len(cuda_visible_devices))\n elif torch.backends.mps.is_available():\n return 1\n elif is_xpu_available():\n return torch.xpu.device_count()\n else:\n return 0\n", "path": "xinference/device_utils.py"}]} | 1,567 | 99 |
gh_patches_debug_12186 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-528 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
chg: be more precise in the error message for the config file.
We can be more helpful when the main config file raises a parser error.
</issue>
<code>
[start of cookiecutter/config.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.config
6 -------------------
7
8 Global configuration handling
9 """
10
11 from __future__ import unicode_literals
12 import copy
13 import logging
14 import os
15 import io
16
17 import yaml
18
19 from .exceptions import ConfigDoesNotExistException
20 from .exceptions import InvalidConfiguration
21
22
23 logger = logging.getLogger(__name__)
24
25 DEFAULT_CONFIG = {
26 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
27 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),
28 'default_context': {}
29 }
30
31
32 def get_config(config_path):
33 """
34 Retrieve the config from the specified path, returning it as a config dict.
35 """
36
37 if not os.path.exists(config_path):
38 raise ConfigDoesNotExistException
39
40 logger.debug('config_path is {0}'.format(config_path))
41 with io.open(config_path, encoding='utf-8') as file_handle:
42 try:
43 yaml_dict = yaml.safe_load(file_handle)
44 except yaml.scanner.ScannerError:
45 raise InvalidConfiguration(
46 '{0} is no a valid YAML file'.format(config_path))
47
48 config_dict = copy.copy(DEFAULT_CONFIG)
49 config_dict.update(yaml_dict)
50
51 return config_dict
52
53
54 def get_user_config():
55 """
56 Retrieve config from the user's ~/.cookiecutterrc, if it exists.
57 Otherwise, return None.
58 """
59
60 # TODO: test on windows...
61 USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')
62
63 if os.path.exists(USER_CONFIG_PATH):
64 return get_config(USER_CONFIG_PATH)
65 return copy.copy(DEFAULT_CONFIG)
66
[end of cookiecutter/config.py]
</code>
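Before the patch itself, here is a minimal standalone sketch of the improvement the issue asks for, surfacing PyYAML's parse position. It assumes the `problem` and `problem_mark` attributes that PyYAML attaches to `ScannerError`; the helper name is illustrative.

```python
# Minimal sketch: report where parsing failed instead of only naming the file.
import yaml

def load_config_text(text, config_path="config.yml"):
    try:
        return yaml.safe_load(text)
    except yaml.scanner.ScannerError as err:
        mark = err.problem_mark  # zero-based line/column of the failure
        raise ValueError(
            "{0} is not a valid YAML file: line {1}: {2}".format(
                config_path, mark.line, err.problem
            )
        )

print(load_config_text("key: value")["key"])  # -> value
```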
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/config.py b/cookiecutter/config.py
--- a/cookiecutter/config.py
+++ b/cookiecutter/config.py
@@ -41,9 +41,12 @@
with io.open(config_path, encoding='utf-8') as file_handle:
try:
yaml_dict = yaml.safe_load(file_handle)
- except yaml.scanner.ScannerError:
+ except yaml.scanner.ScannerError as e:
raise InvalidConfiguration(
- '{0} is no a valid YAML file'.format(config_path))
+ '{0} is not a valid YAML file: line {1}: {2}'.format(
+ config_path,
+ e.problem_mark.line,
+ e.problem))
config_dict = copy.copy(DEFAULT_CONFIG)
config_dict.update(yaml_dict)
| {"golden_diff": "diff --git a/cookiecutter/config.py b/cookiecutter/config.py\n--- a/cookiecutter/config.py\n+++ b/cookiecutter/config.py\n@@ -41,9 +41,12 @@\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = yaml.safe_load(file_handle)\n- except yaml.scanner.ScannerError:\n+ except yaml.scanner.ScannerError as e:\n raise InvalidConfiguration(\n- '{0} is no a valid YAML file'.format(config_path))\n+ '{0} is not a valid YAML file: line {1}: {2}'.format(\n+ config_path,\n+ e.problem_mark.line,\n+ e.problem))\n \n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n", "issue": "chg: be more precise in error message on config file.\nWe can be more helpful when the main config file throws a parser error.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.config\n-------------------\n\nGlobal configuration handling\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport yaml\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {}\n}\n\n\ndef get_config(config_path):\n \"\"\"\n Retrieve the config from the specified path, returning it as a config dict.\n \"\"\"\n\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is {0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = yaml.safe_load(file_handle)\n except yaml.scanner.ScannerError:\n raise InvalidConfiguration(\n '{0} is no a valid YAML file'.format(config_path))\n\n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n\n return config_dict\n\n\ndef get_user_config():\n \"\"\"\n Retrieve config from the user's ~/.cookiecutterrc, if it exists.\n Otherwise, return None.\n \"\"\"\n\n # TODO: test on windows...\n USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n return copy.copy(DEFAULT_CONFIG)\n", "path": "cookiecutter/config.py"}]} | 1,042 | 177 |
gh_patches_debug_1013 | rasdani/github-patches | git_diff | magenta__magenta-785 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
numpy dependency missing?
magenta/models/sketch_rnn/utils.py has ```import numpy as np```, but magenta/tools/pip/setup.py doesn't list it as a dependency.
</issue>
<code>
[start of magenta/tools/pip/setup.py]
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """A setuptools based setup module for magenta."""
15
16 from setuptools import find_packages
17 from setuptools import setup
18
19 # Bit of a hack to parse the version string stored in version.py without
20 # executing __init__.py, which will end up requiring a bunch of dependencies to
21 # execute (e.g., tensorflow, pretty_midi, etc.).
22 # Makes the __version__ variable available.
23 execfile('magenta/version.py')
24
25
26 REQUIRED_PACKAGES = [
27 'IPython',
28 'Pillow >= 3.4.2',
29 'bokeh >= 0.12.0',
30 'futures',
31 'intervaltree >= 2.1.0',
32 'matplotlib >= 1.5.3',
33 'mido == 1.2.6',
34 'pandas >= 0.18.1',
35 'pretty_midi >= 0.2.6',
36 'python-rtmidi',
37 'scipy >= 0.18.1',
38 'tensorflow >= 1.1.0',
39 'wheel',
40 ]
41
42 CONSOLE_SCRIPTS = [
43 'magenta.interfaces.midi.magenta_midi',
44 'magenta.interfaces.midi.midi_clock',
45 'magenta.models.drums_rnn.drums_rnn_create_dataset',
46 'magenta.models.drums_rnn.drums_rnn_generate',
47 'magenta.models.drums_rnn.drums_rnn_train',
48 'magenta.models.image_stylization.image_stylization_create_dataset',
49 'magenta.models.image_stylization.image_stylization_evaluate',
50 'magenta.models.image_stylization.image_stylization_finetune',
51 'magenta.models.image_stylization.image_stylization_train',
52 'magenta.models.image_stylization.image_stylization_transform',
53 'magenta.models.improv_rnn.improv_rnn_create_dataset',
54 'magenta.models.improv_rnn.improv_rnn_generate',
55 'magenta.models.improv_rnn.improv_rnn_train',
56 'magenta.models.melody_rnn.melody_rnn_create_dataset',
57 'magenta.models.melody_rnn.melody_rnn_generate',
58 'magenta.models.melody_rnn.melody_rnn_train',
59 'magenta.models.nsynth.wavenet.nsynth_generate',
60 'magenta.models.nsynth.wavenet.nsynth_save_embeddings',
61 'magenta.models.performance_rnn.performance_rnn_create_dataset',
62 'magenta.models.performance_rnn.performance_rnn_generate',
63 'magenta.models.performance_rnn.performance_rnn_train',
64 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset',
65 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate',
66 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train',
67 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',
68 'magenta.models.polyphony_rnn.polyphony_rnn_generate',
69 'magenta.models.polyphony_rnn.polyphony_rnn_train',
70 'magenta.models.rl_tuner.rl_tuner_train',
71 'magenta.models.sketch_rnn.sketch_rnn_train',
72 'magenta.scripts.convert_dir_to_note_sequences',
73 ]
74
75 setup(
76 name='magenta',
77 version=__version__, # pylint: disable=undefined-variable
78 description='Use machine learning to create art and music',
79 long_description='',
80 url='https://magenta.tensorflow.org/',
81 author='Google Inc.',
82 author_email='[email protected]',
83 license='Apache 2',
84 # PyPI package information.
85 classifiers=[
86 'Development Status :: 4 - Beta',
87 'Intended Audience :: Developers',
88 'Intended Audience :: Education',
89 'Intended Audience :: Science/Research',
90 'License :: OSI Approved :: Apache Software License',
91 'Programming Language :: Python :: 2.7',
92 'Topic :: Scientific/Engineering :: Mathematics',
93 'Topic :: Software Development :: Libraries :: Python Modules',
94 'Topic :: Software Development :: Libraries',
95 ],
96 keywords='tensorflow machine learning magenta music art',
97
98 packages=find_packages(),
99 install_requires=REQUIRED_PACKAGES,
100 entry_points={
101 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in
102 ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],
103 },
104
105 include_package_data=True,
106 package_data={
107 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],
108 },
109 )
110
[end of magenta/tools/pip/setup.py]
</code>
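For reference, the fix here is a one-line dependency declaration. A minimal sketch of the same idea, with the version pin taken from the accepted diff below:

```python
# Sketch: declare numpy explicitly instead of relying on it arriving
# transitively via tensorflow or scipy.
REQUIRED_PACKAGES = [
    "numpy >= 1.11.0",  # imported directly by magenta.models.sketch_rnn.utils
    "scipy >= 0.18.1",
    "tensorflow >= 1.1.0",
]
print(REQUIRED_PACKAGES[0])
```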
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py
--- a/magenta/tools/pip/setup.py
+++ b/magenta/tools/pip/setup.py
@@ -31,6 +31,7 @@
'intervaltree >= 2.1.0',
'matplotlib >= 1.5.3',
'mido == 1.2.6',
+ 'numpy >= 1.11.0',
'pandas >= 0.18.1',
'pretty_midi >= 0.2.6',
'python-rtmidi',
| {"golden_diff": "diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py\n--- a/magenta/tools/pip/setup.py\n+++ b/magenta/tools/pip/setup.py\n@@ -31,6 +31,7 @@\n 'intervaltree >= 2.1.0',\n 'matplotlib >= 1.5.3',\n 'mido == 1.2.6',\n+ 'numpy >= 1.11.0',\n 'pandas >= 0.18.1',\n 'pretty_midi >= 0.2.6',\n 'python-rtmidi',\n", "issue": "numpy dependency missing?\nmagenta/models/sketch_rnn/utils.py has ```import numpy as np```, but magenta/tools/pip/setup.py doesn't list it as a dependency.\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A setuptools based setup module for magenta.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Bit of a hack to parse the version string stored in version.py without\n# executing __init__.py, which will end up requiring a bunch of dependencies to\n# execute (e.g., tensorflow, pretty_midi, etc.).\n# Makes the __version__ variable available.\nexecfile('magenta/version.py')\n\n\nREQUIRED_PACKAGES = [\n 'IPython',\n 'Pillow >= 3.4.2',\n 'bokeh >= 0.12.0',\n 'futures',\n 'intervaltree >= 2.1.0',\n 'matplotlib >= 1.5.3',\n 'mido == 1.2.6',\n 'pandas >= 0.18.1',\n 'pretty_midi >= 0.2.6',\n 'python-rtmidi',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.1.0',\n 'wheel',\n]\n\nCONSOLE_SCRIPTS = [\n 'magenta.interfaces.midi.magenta_midi',\n 'magenta.interfaces.midi.midi_clock',\n 'magenta.models.drums_rnn.drums_rnn_create_dataset',\n 'magenta.models.drums_rnn.drums_rnn_generate',\n 'magenta.models.drums_rnn.drums_rnn_train',\n 'magenta.models.image_stylization.image_stylization_create_dataset',\n 'magenta.models.image_stylization.image_stylization_evaluate',\n 'magenta.models.image_stylization.image_stylization_finetune',\n 'magenta.models.image_stylization.image_stylization_train',\n 'magenta.models.image_stylization.image_stylization_transform',\n 'magenta.models.improv_rnn.improv_rnn_create_dataset',\n 'magenta.models.improv_rnn.improv_rnn_generate',\n 'magenta.models.improv_rnn.improv_rnn_train',\n 'magenta.models.melody_rnn.melody_rnn_create_dataset',\n 'magenta.models.melody_rnn.melody_rnn_generate',\n 'magenta.models.melody_rnn.melody_rnn_train',\n 'magenta.models.nsynth.wavenet.nsynth_generate',\n 'magenta.models.nsynth.wavenet.nsynth_save_embeddings',\n 'magenta.models.performance_rnn.performance_rnn_create_dataset',\n 'magenta.models.performance_rnn.performance_rnn_generate',\n 'magenta.models.performance_rnn.performance_rnn_train',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train',\n 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',\n 'magenta.models.polyphony_rnn.polyphony_rnn_generate',\n 'magenta.models.polyphony_rnn.polyphony_rnn_train',\n 'magenta.models.rl_tuner.rl_tuner_train',\n 'magenta.models.sketch_rnn.sketch_rnn_train',\n 
'magenta.scripts.convert_dir_to_note_sequences',\n]\n\nsetup(\n name='magenta',\n version=__version__, # pylint: disable=undefined-variable\n description='Use machine learning to create art and music',\n long_description='',\n url='https://magenta.tensorflow.org/',\n author='Google Inc.',\n author_email='[email protected]',\n license='Apache 2',\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='tensorflow machine learning magenta music art',\n\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n entry_points={\n 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in\n ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],\n },\n\n include_package_data=True,\n package_data={\n 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],\n },\n)\n", "path": "magenta/tools/pip/setup.py"}]} | 1,890 | 136 |
gh_patches_debug_1278 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1637 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
botbuilder-testing is missing install requirements
## Version
botbuilder-testing 4.12.0
## Describe the bug
While installing botbuilder-testing for CI I got errors about missing dependencies.
## To Reproduce
1. `python3 -m venv .venv`
2. `. .venv/bin/activate`
3. `pip install -U pip wheel`
4. `pip install botbuilder-testing`
5. `python -c "from botbuilder.testing import DialogTestClient"`
First error is missing `pytest`:
```python
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py", line 6, in <module>
from .storage_base_tests import StorageBaseTests
File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py", line 26, in <module>
import pytest
ModuleNotFoundError: No module named 'pytest'
```
6. `pip install pytest`
7. `python -c 'from botbuilder.testing import DialogTestClient'`
Next error is missing `botbuilder-azure`:
```python
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py", line 6, in <module>
from .storage_base_tests import StorageBaseTests
File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py", line 27, in <module>
from botbuilder.azure import CosmosDbStorage
ModuleNotFoundError: No module named 'botbuilder.azure'
```
8. `pip install botbuilder-azure`
9. `python -c 'from botbuilder.testing import DialogTestClient'`
Command works!
## Expected behavior
No errors after installing botbuilder-testing and importing the module.
I do wonder whether the pytest requirement is really necessary; removing it would leave the library test-suite agnostic, so perhaps it could be refactored out?
</issue>
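On the final question above, whether pytest needs to be a hard requirement, one alternative would be a lazy import inside the test helpers so the package stays importable without test-only dependencies. The function below is purely illustrative and is not botbuilder's actual layout:

```python
# Illustrative only: defer the pytest import until a helper actually needs it,
# with a clearer error than a bare ModuleNotFoundError.
def _require_pytest():
    try:
        import pytest
    except ImportError as err:
        raise ImportError(
            "botbuilder.testing's storage test helpers require pytest; "
            "install it with `pip install pytest`"
        ) from err
    return pytest
```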
<code>
[start of libraries/botbuilder-testing/setup.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "botbuilder-schema==4.13.0",
9 "botbuilder-core==4.13.0",
10 "botbuilder-dialogs==4.13.0",
11 ]
12
13 TESTS_REQUIRES = ["aiounittest==1.3.0"]
14
15 root = os.path.abspath(os.path.dirname(__file__))
16
17 with open(os.path.join(root, "botbuilder", "testing", "about.py")) as f:
18 package_info = {}
19 info = f.read()
20 exec(info, package_info)
21
22 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
23 long_description = f.read()
24
25 setup(
26 name=package_info["__title__"],
27 version=package_info["__version__"],
28 url=package_info["__uri__"],
29 author=package_info["__author__"],
30 description=package_info["__description__"],
31 keywords="botbuilder-testing bots ai testing botframework botbuilder",
32 long_description=long_description,
33 long_description_content_type="text/x-rst",
34 license=package_info["__license__"],
35 packages=["botbuilder.testing"],
36 install_requires=REQUIRES + TESTS_REQUIRES,
37 tests_require=TESTS_REQUIRES,
38 include_package_data=True,
39 classifiers=[
40 "Programming Language :: Python :: 3.7",
41 "Intended Audience :: Developers",
42 "License :: OSI Approved :: MIT License",
43 "Operating System :: OS Independent",
44 "Development Status :: 5 - Production/Stable",
45 "Topic :: Scientific/Engineering :: Artificial Intelligence",
46 ],
47 )
48
[end of libraries/botbuilder-testing/setup.py]
</code>
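For context before writing the patch: the tracebacks point at two undeclared runtime imports, `pytest` and `botbuilder-azure`, so the minimal fix is to declare them. A sketch of the amended requirement list, with versions taken from the accepted diff below:

```python
# Sketch of the dependency fix: declare everything storage_base_tests imports.
REQUIRES = [
    "botbuilder-schema==4.13.0",
    "botbuilder-core==4.13.0",
    "botbuilder-dialogs==4.13.0",
    "botbuilder-azure==4.13.0",  # provides CosmosDbStorage
    "pytest~=6.2.3",             # imported directly by storage_base_tests
]
```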
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-testing/setup.py b/libraries/botbuilder-testing/setup.py
--- a/libraries/botbuilder-testing/setup.py
+++ b/libraries/botbuilder-testing/setup.py
@@ -8,6 +8,8 @@
"botbuilder-schema==4.13.0",
"botbuilder-core==4.13.0",
"botbuilder-dialogs==4.13.0",
+ "botbuilder-azure==4.13.0",
+ "pytest~=6.2.3",
]
TESTS_REQUIRES = ["aiounittest==1.3.0"]
| {"golden_diff": "diff --git a/libraries/botbuilder-testing/setup.py b/libraries/botbuilder-testing/setup.py\n--- a/libraries/botbuilder-testing/setup.py\n+++ b/libraries/botbuilder-testing/setup.py\n@@ -8,6 +8,8 @@\n \"botbuilder-schema==4.13.0\",\n \"botbuilder-core==4.13.0\",\n \"botbuilder-dialogs==4.13.0\",\n+ \"botbuilder-azure==4.13.0\",\n+ \"pytest~=6.2.3\",\n ]\n \n TESTS_REQUIRES = [\"aiounittest==1.3.0\"]\n", "issue": "botbuilder-testing is missing install requirements\n## Version\r\n\r\nbotbuilder-testing 4.12.0\r\n\r\n## Describe the bug\r\nWhile installing botbuilder-testing for CI I got errors about missing dependencies. \r\n\r\n## To Reproduce\r\n\r\n1. `python3 -m venv .venv`\r\n2. `. .venv/bin/activate`\r\n3. `pip install -U pip wheel`\r\n4. `pip install botbuilder-testing`\r\n5. `python -c \"from botbuilder.testing import DialogTestClient\"`\r\n\r\nFirst error is missing `pytest`:\r\n```python\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py\", line 6, in <module>\r\n from .storage_base_tests import StorageBaseTests\r\n File \"/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py\", line 26, in <module>\r\n import pytest\r\nModuleNotFoundError: No module named 'pytest'\r\n```\r\n\r\n6. `pip install pytest`\r\n7. `python -c 'from botbuilder.testing import DialogTestClient'`\r\n\r\nNext error is missing `botbuilder-azure`:\r\n```python\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py\", line 6, in <module>\r\n from .storage_base_tests import StorageBaseTests\r\n File \"/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py\", line 27, in <module>\r\n from botbuilder.azure import CosmosDbStorage\r\nModuleNotFoundError: No module named 'botbuilder.azure'\r\n```\r\n\r\n8. `pip install botbuilder-azure`\r\n9. `python -c 'from botbuilder.testing import DialogTestClient'`\r\n\r\nCommand works!\r\n\r\n## Expected behavior\r\nNo errors after installing botbuilder-testing and importing module\r\n\r\nI do wonder if the requirement for pytest is not necessary, leaving the lib test-suite agnostic and could be refactored out?\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"botbuilder-schema==4.13.0\",\n \"botbuilder-core==4.13.0\",\n \"botbuilder-dialogs==4.13.0\",\n]\n\nTESTS_REQUIRES = [\"aiounittest==1.3.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"testing\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=\"botbuilder-testing bots ai testing botframework botbuilder\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.testing\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-testing/setup.py"}]} | 1,519 | 139 |
gh_patches_debug_61215 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1124 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Top-Level Python API methods don't have docstrings rendered in docs
# Description
The top-level Python API method pages on the docs website don't contain any of the rendered docstrings. For example, the `pyhf.set_backend()` API has examples (and is rather important for new users)
https://github.com/scikit-hep/pyhf/blob/e55eea408d7c28e3109338de96252119ac63f87a/src/pyhf/__init__.py#L42-L52
but the docs website doesn't show any of this

# Expected Behavior
Have the docstrings be rendered in the docs
# Actual Behavior
c.f. above
# Steps to Reproduce
Build the docs
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
</issue>
<code>
[start of src/pyhf/events.py]
1 import weakref
2
3 __events = {}
4 __disabled_events = set([])
5
6
7 def noop(*args, **kwargs):
8 pass
9
10
11 class WeakList(list):
12 def append(self, item):
13 list.append(self, weakref.WeakMethod(item, self.remove))
14
15
16 class Callables(WeakList):
17 def __call__(self, *args, **kwargs):
18 for func in self:
19 # weakref: needs to be de-ref'd first before calling
20 func()(*args, **kwargs)
21
22 def __repr__(self):
23 return "Callables(%s)" % list.__repr__(self)
24
25
26 def subscribe(event):
27 """
28 This is meant to be used as a decorator.
29 """
30 # Example:
31 #
32 # >>> @pyhf.events.subscribe('myevent')
33 # ... def test(a,b):
34 # ... print a+b
35 # ...
36 # >>> pyhf.events.trigger_myevent(1,2)
37 # 3
38 global __events
39
40 def __decorator(func):
41 __events.setdefault(event, Callables()).append(func)
42 return func
43
44 return __decorator
45
46
47 def register(event):
48 """
49 This is meant to be used as a decorator to register a function for triggering events.
50
51 This creates two events: "<event_name>::before" and "<event_name>::after"
52 """
53 # Examples:
54 #
55 # >>> @pyhf.events.register('test_func')
56 # ... def test(a,b):
57 # ... print a+b
58 # ...
59 # >>> @pyhf.events.subscribe('test_func::before')
60 # ... def precall():
61 # ... print 'before call'
62 # ...
63 # >>> @pyhf.events.subscribe('test_func::after')
64 # ... def postcall():
65 # ... print 'after call'
66 # ...
67 # >>> test(1,2)
68 # "before call"
69 # 3
70 # "after call"
71 # >>>
72
73 def _register(func):
74 def register_wrapper(*args, **kwargs):
75 trigger("{0:s}::before".format(event))()
76 result = func(*args, **kwargs)
77 trigger("{0:s}::after".format(event))()
78 return result
79
80 return register_wrapper
81
82 return _register
83
84
85 def trigger(event):
86 """
87 Trigger an event if not disabled.
88 """
89 global __events, __disabled_events, noop
90 is_noop = bool(event in __disabled_events or event not in __events)
91 return noop if is_noop else __events.get(event)
92
93
94 def disable(event):
95 """
96 Disable an event from firing.
97 """
98 global __disabled_events
99 __disabled_events.add(event)
100
101
102 def enable(event):
103 """
104 Enable an event to be fired if disabled.
105 """
106 global __disabled_events
107 __disabled_events.remove(event)
108
[end of src/pyhf/events.py]
</code>
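Before the patch, it helps to see why the docstrings vanish: `register_wrapper` replaces the decorated function, and without `functools.wraps` the replacement carries no `__doc__`, so Sphinx autodoc renders nothing. A self-contained demonstration using only the standard library; `set_backend` here is a stand-in, not pyhf's real implementation:

```python
import functools

def register(func):
    @functools.wraps(func)  # copies __name__, __doc__, __module__, ...
    def register_wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return register_wrapper

@register
def set_backend(name):
    """Set the tensor backend (stand-in docstring)."""
    return name

print(set_backend.__doc__)  # survives for help() and Sphinx autodoc
```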
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/events.py b/src/pyhf/events.py
--- a/src/pyhf/events.py
+++ b/src/pyhf/events.py
@@ -1,4 +1,5 @@
import weakref
+from functools import wraps
__events = {}
__disabled_events = set([])
@@ -71,6 +72,7 @@
# >>>
def _register(func):
+ @wraps(func)
def register_wrapper(*args, **kwargs):
trigger("{0:s}::before".format(event))()
result = func(*args, **kwargs)
| {"golden_diff": "diff --git a/src/pyhf/events.py b/src/pyhf/events.py\n--- a/src/pyhf/events.py\n+++ b/src/pyhf/events.py\n@@ -1,4 +1,5 @@\n import weakref\n+from functools import wraps\n \n __events = {}\n __disabled_events = set([])\n@@ -71,6 +72,7 @@\n # >>>\n \n def _register(func):\n+ @wraps(func)\n def register_wrapper(*args, **kwargs):\n trigger(\"{0:s}::before\".format(event))()\n result = func(*args, **kwargs)\n", "issue": "Top-Level Python API methods don't have docstrings rendered in docs\n# Description\r\n\r\nThe top level Python API methods pages on the docs website doesn't contain any of the rendered docstrings. For example, the `pyhf.set_backend()` API has examples (and it rather important for new users)\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/e55eea408d7c28e3109338de96252119ac63f87a/src/pyhf/__init__.py#L42-L52\r\n\r\nbut the docs website doesn't show any of this\r\n\r\n\r\n\r\n\r\n# Expected Behavior\r\n\r\nHave the docstrings be rendered in the docs \r\n\r\n# Actual Behavior\r\n\r\nc.f. above\r\n\r\n# Steps to Reproduce\r\n\r\nBuild the docs\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "import weakref\n\n__events = {}\n__disabled_events = set([])\n\n\ndef noop(*args, **kwargs):\n pass\n\n\nclass WeakList(list):\n def append(self, item):\n list.append(self, weakref.WeakMethod(item, self.remove))\n\n\nclass Callables(WeakList):\n def __call__(self, *args, **kwargs):\n for func in self:\n # weakref: needs to be de-ref'd first before calling\n func()(*args, **kwargs)\n\n def __repr__(self):\n return \"Callables(%s)\" % list.__repr__(self)\n\n\ndef subscribe(event):\n \"\"\"\n This is meant to be used as a decorator.\n \"\"\"\n # Example:\n #\n # >>> @pyhf.events.subscribe('myevent')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> pyhf.events.trigger_myevent(1,2)\n # 3\n global __events\n\n def __decorator(func):\n __events.setdefault(event, Callables()).append(func)\n return func\n\n return __decorator\n\n\ndef register(event):\n \"\"\"\n This is meant to be used as a decorator to register a function for triggering events.\n\n This creates two events: \"<event_name>::before\" and \"<event_name>::after\"\n \"\"\"\n # Examples:\n #\n # >>> @pyhf.events.register('test_func')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> @pyhf.events.subscribe('test_func::before')\n # ... def precall():\n # ... print 'before call'\n # ...\n # >>> @pyhf.events.subscribe('test_func::after')\n # ... def postcall():\n # ... 
print 'after call'\n # ...\n # >>> test(1,2)\n # \"before call\"\n # 3\n # \"after call\"\n # >>>\n\n def _register(func):\n def register_wrapper(*args, **kwargs):\n trigger(\"{0:s}::before\".format(event))()\n result = func(*args, **kwargs)\n trigger(\"{0:s}::after\".format(event))()\n return result\n\n return register_wrapper\n\n return _register\n\n\ndef trigger(event):\n \"\"\"\n Trigger an event if not disabled.\n \"\"\"\n global __events, __disabled_events, noop\n is_noop = bool(event in __disabled_events or event not in __events)\n return noop if is_noop else __events.get(event)\n\n\ndef disable(event):\n \"\"\"\n Disable an event from firing.\n \"\"\"\n global __disabled_events\n __disabled_events.add(event)\n\n\ndef enable(event):\n \"\"\"\n Enable an event to be fired if disabled.\n \"\"\"\n global __disabled_events\n __disabled_events.remove(event)\n", "path": "src/pyhf/events.py"}]} | 1,678 | 126 |
gh_patches_debug_22018 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-2535 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Italian language with Google TTS
I'm running Mycroft on Manjaro Linux with the language set to Italian.
I tried both the "British male" and "American male" voices, and they do not speak Italian at all, so I decided to try the Google voice.
With that voice, no sound is emitted. Whenever Mycroft tries to speak, I see this error in the logs (file audio.log):
```
2020-04-13 10:45:39.632 | INFO | 195922 | mycroft.audio.speech:mute_and_speak:127 | Speak: Va uno spettacolo
2020-04-13 10:45:40.070 | ERROR | 195922 | mycroft.audio.speech:handle_speak:99 | Error in mute_and_speak
Traceback (most recent call last):
File "/home/luke/git/mycroft-core/mycroft/audio/speech.py", line 95, in handle_speak
mute_and_speak(chunk, ident, listen)
File "/home/luke/git/mycroft-core/mycroft/audio/speech.py", line 129, in mute_and_speak
tts.execute(utterance, ident, listen)
File "/home/luke/git/mycroft-core/mycroft/tts/tts.py", line 337, in execute
wav_file, phonemes = self.get_tts(sentence, wav_file)
File "/home/luke/git/mycroft-core/mycroft/tts/google_tts.py", line 35, in get_tts
tts = gTTS(text=sentence, lang=self.lang)
File "/home/luke/git/mycroft-core/.venv/lib/python3.8/site-packages/gtts/tts.py", line 121, in __init__
raise ValueError("Language not supported: %s" % lang)
ValueError: Language not supported: it-it
```
The problem is that Google TTS can handle "it", but it cannot handle "it-it".
I will release a PR shortly to fix this. (So far, the Google voice seems to be my only option for Italian, so I really need it.)
</issue>
<code>
[start of mycroft/tts/google_tts.py]
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from gtts import gTTS
16
17 from .tts import TTS, TTSValidator
18
19
20 class GoogleTTS(TTS):
21 """Interface to google TTS."""
22 def __init__(self, lang, config):
23 super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(
24 self), 'mp3')
25
26 def get_tts(self, sentence, wav_file):
27 """Fetch tts audio using gTTS.
28
29 Arguments:
30 sentence (str): Sentence to generate audio for
31 wav_file (str): output file path
32 Returns:
33 Tuple ((str) written file, None)
34 """
35 tts = gTTS(text=sentence, lang=self.lang)
36 tts.save(wav_file)
37 return (wav_file, None) # No phonemes
38
39
40 class GoogleTTSValidator(TTSValidator):
41 def __init__(self, tts):
42 super(GoogleTTSValidator, self).__init__(tts)
43
44 def validate_lang(self):
45 # TODO
46 pass
47
48 def validate_connection(self):
49 try:
50 gTTS(text='Hi').save(self.tts.filename)
51 except Exception:
52 raise Exception(
53 'GoogleTTS server could not be verified. Please check your '
54 'internet connection.')
55
56 def get_tts_class(self):
57 return GoogleTTS
58
[end of mycroft/tts/google_tts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mycroft/tts/google_tts.py b/mycroft/tts/google_tts.py
--- a/mycroft/tts/google_tts.py
+++ b/mycroft/tts/google_tts.py
@@ -13,13 +13,19 @@
# limitations under the License.
#
from gtts import gTTS
+from gtts.lang import tts_langs
from .tts import TTS, TTSValidator
+supported_langs = tts_langs()
+
class GoogleTTS(TTS):
"""Interface to google TTS."""
def __init__(self, lang, config):
+ if lang.lower() not in supported_langs and \
+ lang[:2].lower() in supported_langs:
+ lang = lang[:2]
super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(
self), 'mp3')
@@ -42,8 +48,10 @@
super(GoogleTTSValidator, self).__init__(tts)
def validate_lang(self):
- # TODO
- pass
+ lang = self.tts.lang
+ if lang.lower() not in supported_langs:
+ raise ValueError("Language not supported by gTTS: {}"
+ .format(lang))
def validate_connection(self):
try:
| {"golden_diff": "diff --git a/mycroft/tts/google_tts.py b/mycroft/tts/google_tts.py\n--- a/mycroft/tts/google_tts.py\n+++ b/mycroft/tts/google_tts.py\n@@ -13,13 +13,19 @@\n # limitations under the License.\n #\n from gtts import gTTS\n+from gtts.lang import tts_langs\n \n from .tts import TTS, TTSValidator\n \n+supported_langs = tts_langs()\n+\n \n class GoogleTTS(TTS):\n \"\"\"Interface to google TTS.\"\"\"\n def __init__(self, lang, config):\n+ if lang.lower() not in supported_langs and \\\n+ lang[:2].lower() in supported_langs:\n+ lang = lang[:2]\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n \n@@ -42,8 +48,10 @@\n super(GoogleTTSValidator, self).__init__(tts)\n \n def validate_lang(self):\n- # TODO\n- pass\n+ lang = self.tts.lang\n+ if lang.lower() not in supported_langs:\n+ raise ValueError(\"Language not supported by gTTS: {}\"\n+ .format(lang))\n \n def validate_connection(self):\n try:\n", "issue": "Italian language with Google TTS\nI'm running Mycroft on Manjaro Linux, with Italian language.\r\nI tried both the \"British male\" and \"American male\" voices, and they do not speak Italian at all. So I decided to try the Google Voice.\r\nThat way, no sound is emitted. Whenever Mycroft tries to speak, I see this error in logs (file audio.log):\r\n\r\n```\r\n2020-04-13 10:45:39.632 | INFO | 195922 | mycroft.audio.speech:mute_and_speak:127 | Speak: Va uno spettacolo\r\n2020-04-13 10:45:40.070 | ERROR | 195922 | mycroft.audio.speech:handle_speak:99 | Error in mute_and_speak\r\nTraceback (most recent call last):\r\n File \"/home/luke/git/mycroft-core/mycroft/audio/speech.py\", line 95, in handle_speak\r\n mute_and_speak(chunk, ident, listen)\r\n File \"/home/luke/git/mycroft-core/mycroft/audio/speech.py\", line 129, in mute_and_speak\r\n tts.execute(utterance, ident, listen)\r\n File \"/home/luke/git/mycroft-core/mycroft/tts/tts.py\", line 337, in execute\r\n wav_file, phonemes = self.get_tts(sentence, wav_file)\r\n File \"/home/luke/git/mycroft-core/mycroft/tts/google_tts.py\", line 35, in get_tts\r\n tts = gTTS(text=sentence, lang=self.lang)\r\n File \"/home/luke/git/mycroft-core/.venv/lib/python3.8/site-packages/gtts/tts.py\", line 121, in __init__\r\n raise ValueError(\"Language not supported: %s\" % lang)\r\nValueError: Language not supported: it-it\r\n```\r\n\r\nThe problem is that Google TTS can handle \"it\", but it cannot handle \"it-it\".\r\n\r\nI will release a PR shortly for fixing this. 
(So far, Google Voice seems my only option for Italian language, so I really need that).\r\n\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom gtts import gTTS\n\nfrom .tts import TTS, TTSValidator\n\n\nclass GoogleTTS(TTS):\n \"\"\"Interface to google TTS.\"\"\"\n def __init__(self, lang, config):\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n\n def get_tts(self, sentence, wav_file):\n \"\"\"Fetch tts audio using gTTS.\n\n Arguments:\n sentence (str): Sentence to generate audio for\n wav_file (str): output file path\n Returns:\n Tuple ((str) written file, None)\n \"\"\"\n tts = gTTS(text=sentence, lang=self.lang)\n tts.save(wav_file)\n return (wav_file, None) # No phonemes\n\n\nclass GoogleTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(GoogleTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n # TODO\n pass\n\n def validate_connection(self):\n try:\n gTTS(text='Hi').save(self.tts.filename)\n except Exception:\n raise Exception(\n 'GoogleTTS server could not be verified. Please check your '\n 'internet connection.')\n\n def get_tts_class(self):\n return GoogleTTS\n", "path": "mycroft/tts/google_tts.py"}]} | 1,562 | 291 |
gh_patches_debug_27633 | rasdani/github-patches | git_diff | fedora-infra__bodhi-417 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Markdown unordered lists do not work in update notes
Filling the update notes under https://bodhi.fedoraproject.org/updates/new with unordered lists in Markdown syntax (https://help.github.com/articles/markdown-basics/) does not work, neither in the preview nor after submitting; see e.g. https://bodhi.fedoraproject.org/updates/phpMyAdmin-4.4.14-1.fc23 with Firefox 38 ESR from RHEL/CentOS 6. It seems to work properly in a text browser such as w3m (a CSS issue?).
</issue>
<code>
[start of bodhi/ffmarkdown.py]
1 # This program is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU General Public License
3 # as published by the Free Software Foundation; either version 2
4 # of the License, or (at your option) any later version.
5 #
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # GNU General Public License for more details.
10 #
11 # You should have received a copy of the GNU General Public License
12 # along with this program; if not, write to the Free Software
13 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
14 # USA.
15
16 """ Fedora-flavored Markdown
17
18 Author: Ralph Bean <[email protected]>
19 """
20
21 import markdown.inlinepatterns
22 import markdown.util
23 import pyramid.threadlocal
24
25
26 def user_url(name):
27 request = pyramid.threadlocal.get_current_request()
28 return request.route_url('user', name=name)
29
30
31 def bugzilla_url(idx):
32 return "https://bugzilla.redhat.com/show_bug.cgi?id=%s" % idx
33
34
35 def inject():
36 """ Hack out python-markdown to do the autolinking that we want. """
37
38 # First, make it so that bare links get automatically linkified.
39 markdown.inlinepatterns.AUTOLINK_RE = '(%s)' % '|'.join([
40 r'<(?:f|ht)tps?://[^>]*>',
41 r'\b(?:f|ht)tps?://[^)<>\s]+[^.,)<>\s]',
42 r'\bwww\.[^)<>\s]+[^.,)<>\s]',
43 r'[^(<\s]+\.(?:com|net|org)\b',
44 ])
45
46 # Second, build some Pattern objects for @mentions, #bugs, etc...
47 class MentionPattern(markdown.inlinepatterns.Pattern):
48 def handleMatch(self, m):
49 el = markdown.util.etree.Element("a")
50 name = markdown.util.AtomicString(m.group(2))
51 el.set('href', user_url(name[1:]))
52 el.text = name
53 return el
54
55 class BugzillaPattern(markdown.inlinepatterns.Pattern):
56 def handleMatch(self, m):
57 el = markdown.util.etree.Element("a")
58 idx = markdown.util.AtomicString(m.group(2))
59 el.set('href', bugzilla_url(idx[1:]))
60 el.text = idx
61 return el
62
63 MENTION_RE = r'(@\w+)'
64 BUGZILLA_RE = r'(#[0-9]{5,})'
65
66 # Lastly, monkey-patch the build_inlinepatterns func to insert our patterns
67 original_builder = markdown.build_inlinepatterns
68
69 def extended_builder(md_instance, **kwargs):
70 patterns = original_builder(md_instance, **kwargs)
71 patterns['mention'] = MentionPattern(MENTION_RE, md_instance)
72 patterns['bugzillas'] = BugzillaPattern(BUGZILLA_RE, md_instance)
73 return patterns
74
75 markdown.build_inlinepatterns = extended_builder
76
[end of bodhi/ffmarkdown.py]
</code>
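Since the symptom is site CSS stripping list bullets rather than broken Markdown output, the accepted fix below wraps the rendered HTML in a scoped container via a postprocessor. A minimal standalone sketch of that idea; the `Registry.register` call assumes python-markdown 3.x, whereas the patch itself uses the older monkey-patching style of this module:

```python
# Sketch: wrap converted markdown in <div class='markdown'> so CSS can
# restore list styling for user-supplied content only.
import markdown
from markdown.postprocessors import Postprocessor

class SurroundProcessor(Postprocessor):
    def run(self, text):
        return "<div class='markdown'>" + text + "</div>"

md = markdown.Markdown()
md.postprocessors.register(SurroundProcessor(md), "surround", 0)
print(md.convert("* one\n* two"))
```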
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bodhi/ffmarkdown.py b/bodhi/ffmarkdown.py
--- a/bodhi/ffmarkdown.py
+++ b/bodhi/ffmarkdown.py
@@ -19,6 +19,7 @@
"""
import markdown.inlinepatterns
+import markdown.postprocessors
import markdown.util
import pyramid.threadlocal
@@ -63,13 +64,26 @@
MENTION_RE = r'(@\w+)'
BUGZILLA_RE = r'(#[0-9]{5,})'
+ class SurroundProcessor(markdown.postprocessors.Postprocessor):
+ def run(self, text):
+ return "<div class='markdown'>" + text + "</div>"
+
# Lastly, monkey-patch the build_inlinepatterns func to insert our patterns
- original_builder = markdown.build_inlinepatterns
+ original_pattern_builder = markdown.build_inlinepatterns
- def extended_builder(md_instance, **kwargs):
- patterns = original_builder(md_instance, **kwargs)
+ def extended_pattern_builder(md_instance, **kwargs):
+ patterns = original_pattern_builder(md_instance, **kwargs)
patterns['mention'] = MentionPattern(MENTION_RE, md_instance)
patterns['bugzillas'] = BugzillaPattern(BUGZILLA_RE, md_instance)
return patterns
- markdown.build_inlinepatterns = extended_builder
+ markdown.build_inlinepatterns = extended_pattern_builder
+
+ original_postprocessor_builder = markdown.build_postprocessors
+
+ def extended_postprocessor_builder(md_instance, **kwargs):
+ processors = original_postprocessor_builder(md_instance, **kwargs)
+ processors['surround'] = SurroundProcessor(md_instance)
+ return processors
+
+ markdown.build_postprocessors = extended_postprocessor_builder
| {"golden_diff": "diff --git a/bodhi/ffmarkdown.py b/bodhi/ffmarkdown.py\n--- a/bodhi/ffmarkdown.py\n+++ b/bodhi/ffmarkdown.py\n@@ -19,6 +19,7 @@\n \"\"\"\n \n import markdown.inlinepatterns\n+import markdown.postprocessors\n import markdown.util\n import pyramid.threadlocal\n \n@@ -63,13 +64,26 @@\n MENTION_RE = r'(@\\w+)'\n BUGZILLA_RE = r'(#[0-9]{5,})'\n \n+ class SurroundProcessor(markdown.postprocessors.Postprocessor):\n+ def run(self, text):\n+ return \"<div class='markdown'>\" + text + \"</div>\"\n+\n # Lastly, monkey-patch the build_inlinepatterns func to insert our patterns\n- original_builder = markdown.build_inlinepatterns\n+ original_pattern_builder = markdown.build_inlinepatterns\n \n- def extended_builder(md_instance, **kwargs):\n- patterns = original_builder(md_instance, **kwargs)\n+ def extended_pattern_builder(md_instance, **kwargs):\n+ patterns = original_pattern_builder(md_instance, **kwargs)\n patterns['mention'] = MentionPattern(MENTION_RE, md_instance)\n patterns['bugzillas'] = BugzillaPattern(BUGZILLA_RE, md_instance)\n return patterns\n \n- markdown.build_inlinepatterns = extended_builder\n+ markdown.build_inlinepatterns = extended_pattern_builder\n+\n+ original_postprocessor_builder = markdown.build_postprocessors\n+\n+ def extended_postprocessor_builder(md_instance, **kwargs):\n+ processors = original_postprocessor_builder(md_instance, **kwargs)\n+ processors['surround'] = SurroundProcessor(md_instance)\n+ return processors\n+\n+ markdown.build_postprocessors = extended_postprocessor_builder\n", "issue": "Markdown unordered lists do not work in update notes\nFilling the update notes under https://bodhi.fedoraproject.org/updates/new with unordered lists in markdown syntax (https://help.github.com/articles/markdown-basics/) does not work, neither in the preview nor after submitting, visit e.g. https://bodhi.fedoraproject.org/updates/phpMyAdmin-4.4.14-1.fc23 with Firefox 38 ESR from RHEL/CentOS 6. It seems to work properly with a text browser such as w3m (CSS issue?).\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\n\"\"\" Fedora-flavored Markdown\n\nAuthor: Ralph Bean <[email protected]>\n\"\"\"\n\nimport markdown.inlinepatterns\nimport markdown.util\nimport pyramid.threadlocal\n\n\ndef user_url(name):\n request = pyramid.threadlocal.get_current_request()\n return request.route_url('user', name=name)\n\n\ndef bugzilla_url(idx):\n return \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\" % idx\n\n\ndef inject():\n \"\"\" Hack out python-markdown to do the autolinking that we want. 
\"\"\"\n\n # First, make it so that bare links get automatically linkified.\n markdown.inlinepatterns.AUTOLINK_RE = '(%s)' % '|'.join([\n r'<(?:f|ht)tps?://[^>]*>',\n r'\\b(?:f|ht)tps?://[^)<>\\s]+[^.,)<>\\s]',\n r'\\bwww\\.[^)<>\\s]+[^.,)<>\\s]',\n r'[^(<\\s]+\\.(?:com|net|org)\\b',\n ])\n\n # Second, build some Pattern objects for @mentions, #bugs, etc...\n class MentionPattern(markdown.inlinepatterns.Pattern):\n def handleMatch(self, m):\n el = markdown.util.etree.Element(\"a\")\n name = markdown.util.AtomicString(m.group(2))\n el.set('href', user_url(name[1:]))\n el.text = name\n return el\n\n class BugzillaPattern(markdown.inlinepatterns.Pattern):\n def handleMatch(self, m):\n el = markdown.util.etree.Element(\"a\")\n idx = markdown.util.AtomicString(m.group(2))\n el.set('href', bugzilla_url(idx[1:]))\n el.text = idx\n return el\n\n MENTION_RE = r'(@\\w+)'\n BUGZILLA_RE = r'(#[0-9]{5,})'\n\n # Lastly, monkey-patch the build_inlinepatterns func to insert our patterns\n original_builder = markdown.build_inlinepatterns\n\n def extended_builder(md_instance, **kwargs):\n patterns = original_builder(md_instance, **kwargs)\n patterns['mention'] = MentionPattern(MENTION_RE, md_instance)\n patterns['bugzillas'] = BugzillaPattern(BUGZILLA_RE, md_instance)\n return patterns\n\n markdown.build_inlinepatterns = extended_builder\n", "path": "bodhi/ffmarkdown.py"}]} | 1,478 | 373 |
gh_patches_debug_177 | rasdani/github-patches | git_diff | encode__starlette-455 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
py.typed missing in published artifacts
I didn’t check for earlier versions, but at least 0.11.4 on PyPI does not include `py.typed`. I assume this is an oversight, given it is mentioned in `setup.py`?
https://github.com/encode/starlette/blob/77b84a08c1e4de0db64a197b58ac363a26c51d4f/setup.py#L49
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import os
5 import re
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 with open(os.path.join(package, "__init__.py")) as f:
15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", f.read()).group(1)
16
17
18 def get_long_description():
19 """
20 Return the README.
21 """
22 with open("README.md", encoding="utf8") as f:
23 return f.read()
24
25
26 def get_packages(package):
27 """
28 Return root package and all sub-packages.
29 """
30 return [
31 dirpath
32 for dirpath, dirnames, filenames in os.walk(package)
33 if os.path.exists(os.path.join(dirpath, "__init__.py"))
34 ]
35
36
37 setup(
38 name="starlette",
39 python_requires=">=3.6",
40 version=get_version("starlette"),
41 url="https://github.com/encode/starlette",
42 license="BSD",
43 description="The little ASGI library that shines.",
44 long_description=get_long_description(),
45 long_description_content_type="text/markdown",
46 author="Tom Christie",
47 author_email="[email protected]",
48 packages=get_packages("starlette"),
49 package_data={"starlette": ["py.typed"]},
50 data_files=[("", ["LICENSE.md"])],
51 extras_require={
52 "full": [
53 "aiofiles",
54 "asyncpg",
55 "graphene",
56 "itsdangerous",
57 "jinja2",
58 "python-multipart",
59 "pyyaml",
60 "requests",
61 "ujson",
62 ]
63 },
64 classifiers=[
65 "Development Status :: 3 - Alpha",
66 "Environment :: Web Environment",
67 "Intended Audience :: Developers",
68 "License :: OSI Approved :: BSD License",
69 "Operating System :: OS Independent",
70 "Topic :: Internet :: WWW/HTTP",
71 "Programming Language :: Python :: 3",
72 "Programming Language :: Python :: 3.6",
73 "Programming Language :: Python :: 3.7",
74 ],
75 )
76
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -72,4 +72,5 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
+ zip_safe=False,
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,4 +72,5 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n+ zip_safe=False,\n )\n", "issue": "py.typed missing in published artifacts\nI didn\u2019t check for earlier versions, but at least 0.11.4 on PyPI does not include `py.typed`. I assume this is an oversight, given it is mentioned in `setup.py`?\r\n\r\nhttps://github.com/encode/starlette/blob/77b84a08c1e4de0db64a197b58ac363a26c51d4f/setup.py#L49\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n with open(os.path.join(package, \"__init__.py\")) as f:\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", f.read()).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n return f.read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nsetup(\n name=\"starlette\",\n python_requires=\">=3.6\",\n version=get_version(\"starlette\"),\n url=\"https://github.com/encode/starlette\",\n license=\"BSD\",\n description=\"The little ASGI library that shines.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"starlette\"),\n package_data={\"starlette\": [\"py.typed\"]},\n data_files=[(\"\", [\"LICENSE.md\"])],\n extras_require={\n \"full\": [\n \"aiofiles\",\n \"asyncpg\",\n \"graphene\",\n \"itsdangerous\",\n \"jinja2\",\n \"python-multipart\",\n \"pyyaml\",\n \"requests\",\n \"ujson\",\n ]\n },\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n)\n", "path": "setup.py"}]} | 1,247 | 66 |
gh_patches_debug_1159 | rasdani/github-patches | git_diff | nltk__nltk-1274 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tox fails with "ERROR: Failure: ImportError (No module named 'six')"
When I try to run the tests with Tox (on Ubuntu) from within a local clone of the repo, it manages to install the dependencies but blows up when trying to import things from within NLTK.
I imagine I can work around this by figuring out how to manually run just the tests I care about, but it's inconvenient.
I'm not sure whether I'm doing something dumb or whether the Tox setup is broken; if the former, the CONTRIBUTING docs should probably mention what needs to be done besides just running Tox; if the latter, it should probably be fixed.
Here's the full output (had to pastebin it due to GitHub's post length limit):
http://pastebin.com/ENuCLnv6
</issue>
<code>
[start of nltk/tokenize/api.py]
1 # Natural Language Toolkit: Tokenizer Interface
2 #
3 # Copyright (C) 2001-2015 NLTK Project
4 # Author: Edward Loper <[email protected]>
5 # Steven Bird <[email protected]>
6 # URL: <http://nltk.org/>
7 # For license information, see LICENSE.TXT
8
9 """
10 Tokenizer Interface
11 """
12
13 from abc import ABCMeta, abstractmethod
14 from six import add_metaclass
15
16 from nltk.internals import overridden
17 from nltk.tokenize.util import string_span_tokenize
18
19 @add_metaclass(ABCMeta)
20 class TokenizerI(object):
21 """
22 A processing interface for tokenizing a string.
23 Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both).
24 """
25 @abstractmethod
26 def tokenize(self, s):
27 """
28 Return a tokenized copy of *s*.
29
30 :rtype: list of str
31 """
32 if overridden(self.tokenize_sents):
33 return self.tokenize_sents([s])[0]
34
35 def span_tokenize(self, s):
36 """
37 Identify the tokens using integer offsets ``(start_i, end_i)``,
38 where ``s[start_i:end_i]`` is the corresponding token.
39
40 :rtype: iter(tuple(int, int))
41 """
42 raise NotImplementedError()
43
44 def tokenize_sents(self, strings):
45 """
46 Apply ``self.tokenize()`` to each element of ``strings``. I.e.:
47
48 return [self.tokenize(s) for s in strings]
49
50 :rtype: list(list(str))
51 """
52 return [self.tokenize(s) for s in strings]
53
54 def span_tokenize_sents(self, strings):
55 """
56 Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.:
57
58 return [self.span_tokenize(s) for s in strings]
59
60 :rtype: iter(list(tuple(int, int)))
61 """
62 for s in strings:
63 yield list(self.span_tokenize(s))
64
65
66 class StringTokenizer(TokenizerI):
67 """A tokenizer that divides a string into substrings by splitting
68 on the specified string (defined in subclasses).
69 """
70
71 def tokenize(self, s):
72 return s.split(self._string)
73
74 def span_tokenize(self, s):
75 for span in string_span_tokenize(s, self._string):
76 yield span
77
78
79
[end of nltk/tokenize/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nltk/tokenize/api.py b/nltk/tokenize/api.py
--- a/nltk/tokenize/api.py
+++ b/nltk/tokenize/api.py
@@ -11,7 +11,7 @@
"""
from abc import ABCMeta, abstractmethod
-from six import add_metaclass
+from nltk.six import add_metaclass
from nltk.internals import overridden
from nltk.tokenize.util import string_span_tokenize
| {"golden_diff": "diff --git a/nltk/tokenize/api.py b/nltk/tokenize/api.py\n--- a/nltk/tokenize/api.py\n+++ b/nltk/tokenize/api.py\n@@ -11,7 +11,7 @@\n \"\"\"\n \n from abc import ABCMeta, abstractmethod\n-from six import add_metaclass\n+from nltk.six import add_metaclass\n \n from nltk.internals import overridden\n from nltk.tokenize.util import string_span_tokenize\n", "issue": "Tox fails with \"ERROR: Failure: ImportError (No module named 'six')\"\nWhen I try to run the tests with Tox (on Ubuntu) from within a local clone of the repo, it manages to install the dependencies but blows up when trying to import things from within NLTK.\n\nI imagine I can work around this by figuring out how to manually run just the tests I care about, but it's inconvenient.\n\nI'm not sure whether I'm doing something dumb or whether the Tox setup is broken; if the former, the CONTRIBUTING docs should probably mention what needs to be done besides just running Tox; if the latter, it should probably be fixed.\n\nHere's the full output (had to pastebin it due to GitHub's post length limit):\n\nhttp://pastebin.com/ENuCLnv6\n\n", "before_files": [{"content": "# Natural Language Toolkit: Tokenizer Interface\n#\n# Copyright (C) 2001-2015 NLTK Project\n# Author: Edward Loper <[email protected]>\n# Steven Bird <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nTokenizer Interface\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\nfrom six import add_metaclass\n\nfrom nltk.internals import overridden\nfrom nltk.tokenize.util import string_span_tokenize\n\n@add_metaclass(ABCMeta)\nclass TokenizerI(object):\n \"\"\"\n A processing interface for tokenizing a string.\n Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both).\n \"\"\"\n @abstractmethod\n def tokenize(self, s):\n \"\"\"\n Return a tokenized copy of *s*.\n\n :rtype: list of str\n \"\"\"\n if overridden(self.tokenize_sents):\n return self.tokenize_sents([s])[0]\n\n def span_tokenize(self, s):\n \"\"\"\n Identify the tokens using integer offsets ``(start_i, end_i)``,\n where ``s[start_i:end_i]`` is the corresponding token.\n\n :rtype: iter(tuple(int, int))\n \"\"\"\n raise NotImplementedError()\n\n def tokenize_sents(self, strings):\n \"\"\"\n Apply ``self.tokenize()`` to each element of ``strings``. I.e.:\n\n return [self.tokenize(s) for s in strings]\n\n :rtype: list(list(str))\n \"\"\"\n return [self.tokenize(s) for s in strings]\n\n def span_tokenize_sents(self, strings):\n \"\"\"\n Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.:\n\n return [self.span_tokenize(s) for s in strings]\n\n :rtype: iter(list(tuple(int, int)))\n \"\"\"\n for s in strings:\n yield list(self.span_tokenize(s))\n\n\nclass StringTokenizer(TokenizerI):\n \"\"\"A tokenizer that divides a string into substrings by splitting\n on the specified string (defined in subclasses).\n \"\"\"\n\n def tokenize(self, s):\n return s.split(self._string)\n\n def span_tokenize(self, s):\n for span in string_span_tokenize(s, self._string):\n yield span\n\n\n", "path": "nltk/tokenize/api.py"}]} | 1,352 | 93 |
gh_patches_debug_18844 | rasdani/github-patches | git_diff | Gallopsled__pwntools-2345 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pwn constgrep a throws an exception
Ugh, this seems wrong:
```
root@pwndbg:~# pwn constgrep a
Traceback (most recent call last):
File "/usr/local/bin/pwn", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.10/dist-packages/pwnlib/commandline/main.py", line 58, in main
commands[args.command](args)
File "/usr/local/lib/python3.10/dist-packages/pwnlib/commandline/constgrep.py", line 110, in main
for _, k in sorted(out):
TypeError: '<' not supported between instances of 'Constant' and 'type'
```
</issue>
<code>
[start of pwnlib/commandline/constgrep.py]
1 from __future__ import absolute_import
2 from __future__ import division
3
4 import argparse
5 import functools
6 import re
7
8 import pwnlib.args
9 pwnlib.args.free_form = False
10
11 from pwn import *
12 from pwnlib.commandline import common
13
14 p = common.parser_commands.add_parser(
15 'constgrep',
16 help = "Looking up constants from header files.\n\nExample: constgrep -c freebsd -m ^PROT_ '3 + 4'",
17 description = "Looking up constants from header files.\n\nExample: constgrep -c freebsd -m ^PROT_ '3 + 4'",
18 formatter_class = argparse.RawDescriptionHelpFormatter,
19 )
20
21 p.add_argument(
22 '-e', '--exact',
23 action='store_true',
24 help='Do an exact match for a constant instead of searching for a regex',
25 )
26
27 p.add_argument(
28 'regex',
29 help='The regex matching constant you want to find',
30 )
31
32 p.add_argument(
33 'constant',
34 nargs = '?',
35 default = None,
36 type = safeeval.expr,
37 help = 'The constant to find',
38 )
39
40 p.add_argument(
41 '-i', '--case-insensitive',
42 action = 'store_true',
43 help = 'Search case insensitive',
44 )
45
46 p.add_argument(
47 '-m', '--mask-mode',
48 action = 'store_true',
49 help = 'Instead of searching for a specific constant value, search for values not containing strictly less bits that the given value.',
50 )
51
52 p.add_argument(
53 '-c', '--context',
54 metavar = 'arch_or_os',
55 action = 'append',
56 type = common.context_arg,
57 choices = common.choices,
58 help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices,
59 )
60
61 def main(args):
62 if args.exact:
63 # This is the simple case
64 print(cpp(args.regex).strip())
65 else:
66 # New we search in the right module.
67 # But first: We find the right module
68 if context.os == 'freebsd':
69 mod = constants.freebsd
70 else:
71 mod = getattr(getattr(constants, context.os), context.arch)
72
73 # Compile the given regex, for optimized lookup
74 if args.case_insensitive:
75 matcher = re.compile(args.regex, re.IGNORECASE)
76 else:
77 matcher = re.compile(args.regex)
78
79 # The found matching constants and the length of the longest string
80 out = []
81 maxlen = 0
82
83 constant = args.constant
84
85 for k in dir(mod):
86 # No python stuff
87 if k.endswith('__') and k.startswith('__'):
88 continue
89
90 # Run the regex
91 if not matcher.search(k):
92 continue
93
94 # Check the constant
95 if constant is not None:
96 val = getattr(mod, k)
97 if args.mask_mode:
98 if constant & val != val:
99 continue
100 else:
101 if constant != val:
102 continue
103
104 # Append it
105 out.append((getattr(mod, k), k))
106 maxlen = max(len(k), maxlen)
107
108 # Output all matching constants
109 for _, k in sorted(out):
110 print('#define %s %s' % (k.ljust(maxlen), cpp(k).strip()))
111
112 # If we are in match_mode, then try to find a combination of
113 # constants that yield the exact given value
114 # We do not want to find combinations using the value 0.
115 if constant and args.mask_mode:
116 mask = constant
117 good = []
118 out = [(v, k) for v, k in out if v != 0]
119
120 while mask and out:
121 cur = out.pop()
122 mask &= ~cur[0]
123 good.append(cur)
124
125 out = [(v, k) for v, k in out if mask & v == v]
126
127 if functools.reduce(lambda x, cur: x | cur[0], good, 0) == constant:
128 print('')
129 print('(%s) == %s' % (' | '.join(k for v, k in good), args.constant))
130
131 if __name__ == '__main__':
132 pwnlib.commandline.common.main(__file__)
133
[end of pwnlib/commandline/constgrep.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/commandline/constgrep.py b/pwnlib/commandline/constgrep.py
--- a/pwnlib/commandline/constgrep.py
+++ b/pwnlib/commandline/constgrep.py
@@ -91,9 +91,13 @@
if not matcher.search(k):
continue
+ # Check if the value has proper type
+ val = getattr(mod, k)
+ if not isinstance(val, pwnlib.constants.constant.Constant):
+ continue
+
# Check the constant
if constant is not None:
- val = getattr(mod, k)
if args.mask_mode:
if constant & val != val:
continue
@@ -102,7 +106,7 @@
continue
# Append it
- out.append((getattr(mod, k), k))
+ out.append((val, k))
maxlen = max(len(k), maxlen)
# Output all matching constants
| {"golden_diff": "diff --git a/pwnlib/commandline/constgrep.py b/pwnlib/commandline/constgrep.py\n--- a/pwnlib/commandline/constgrep.py\n+++ b/pwnlib/commandline/constgrep.py\n@@ -91,9 +91,13 @@\n if not matcher.search(k):\n continue\n \n+ # Check if the value has proper type\n+ val = getattr(mod, k)\n+ if not isinstance(val, pwnlib.constants.constant.Constant):\n+ continue\n+\n # Check the constant\n if constant is not None:\n- val = getattr(mod, k)\n if args.mask_mode:\n if constant & val != val:\n continue\n@@ -102,7 +106,7 @@\n continue\n \n # Append it\n- out.append((getattr(mod, k), k))\n+ out.append((val, k))\n maxlen = max(len(k), maxlen)\n \n # Output all matching constants\n", "issue": "pwn constgrep a throws an exception\nUgh, this seems wrong:\r\n\r\n```\r\nroot@pwndbg:~# pwn constgrep a\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/pwn\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.10/dist-packages/pwnlib/commandline/main.py\", line 58, in main\r\n commands[args.command](args)\r\n File \"/usr/local/lib/python3.10/dist-packages/pwnlib/commandline/constgrep.py\", line 110, in main\r\n for _, k in sorted(out):\r\nTypeError: '<' not supported between instances of 'Constant' and 'type'\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport argparse\nimport functools\nimport re\n\nimport pwnlib.args\npwnlib.args.free_form = False\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\np = common.parser_commands.add_parser(\n 'constgrep',\n help = \"Looking up constants from header files.\\n\\nExample: constgrep -c freebsd -m ^PROT_ '3 + 4'\",\n description = \"Looking up constants from header files.\\n\\nExample: constgrep -c freebsd -m ^PROT_ '3 + 4'\",\n formatter_class = argparse.RawDescriptionHelpFormatter,\n)\n\np.add_argument(\n '-e', '--exact',\n action='store_true',\n help='Do an exact match for a constant instead of searching for a regex',\n)\n\np.add_argument(\n 'regex',\n help='The regex matching constant you want to find',\n)\n\np.add_argument(\n 'constant',\n nargs = '?',\n default = None,\n type = safeeval.expr,\n help = 'The constant to find',\n)\n\np.add_argument(\n '-i', '--case-insensitive',\n action = 'store_true',\n help = 'Search case insensitive',\n)\n\np.add_argument(\n '-m', '--mask-mode',\n action = 'store_true',\n help = 'Instead of searching for a specific constant value, search for values not containing strictly less bits that the given value.',\n)\n\np.add_argument(\n '-c', '--context',\n metavar = 'arch_or_os',\n action = 'append',\n type = common.context_arg,\n choices = common.choices,\n help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices,\n)\n\ndef main(args):\n if args.exact:\n # This is the simple case\n print(cpp(args.regex).strip())\n else:\n # New we search in the right module.\n # But first: We find the right module\n if context.os == 'freebsd':\n mod = constants.freebsd\n else:\n mod = getattr(getattr(constants, context.os), context.arch)\n\n # Compile the given regex, for optimized lookup\n if args.case_insensitive:\n matcher = re.compile(args.regex, re.IGNORECASE)\n else:\n matcher = re.compile(args.regex)\n\n # The found matching constants and the length of the longest string\n out = []\n maxlen = 0\n\n constant = args.constant\n\n for k in dir(mod):\n # No python stuff\n if k.endswith('__') and k.startswith('__'):\n continue\n\n # Run the 
regex\n if not matcher.search(k):\n continue\n\n # Check the constant\n if constant is not None:\n val = getattr(mod, k)\n if args.mask_mode:\n if constant & val != val:\n continue\n else:\n if constant != val:\n continue\n\n # Append it\n out.append((getattr(mod, k), k))\n maxlen = max(len(k), maxlen)\n\n # Output all matching constants\n for _, k in sorted(out):\n print('#define %s %s' % (k.ljust(maxlen), cpp(k).strip()))\n\n # If we are in match_mode, then try to find a combination of\n # constants that yield the exact given value\n # We do not want to find combinations using the value 0.\n if constant and args.mask_mode:\n mask = constant\n good = []\n out = [(v, k) for v, k in out if v != 0]\n\n while mask and out:\n cur = out.pop()\n mask &= ~cur[0]\n good.append(cur)\n\n out = [(v, k) for v, k in out if mask & v == v]\n\n if functools.reduce(lambda x, cur: x | cur[0], good, 0) == constant:\n print('')\n print('(%s) == %s' % (' | '.join(k for v, k in good), args.constant))\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/constgrep.py"}]} | 1,911 | 210 |
gh_patches_debug_13831 | rasdani/github-patches | git_diff | pypa__setuptools-3705 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] release v65.6.0 breaks packages downstream due to removal of `distutils.log.Log`
### setuptools version
65.6.0
### Python version
Python 3.10
### OS
Ubuntu
### Additional environment information
_No response_
### Description
The `distutils.log.Log` class was removed in https://github.com/pypa/setuptools/commit/74652cabdeaacadc76ccf126563bed8ee2ccf3ef. This causes popular packages downstream, such as `numpy`, to fail: see https://github.com/numpy/numpy/issues/22623
### Expected behavior
The module `distutils.log` module was not officially marked as deprecated even though https://github.com/pypa/setuptools/commit/74652cabdeaacadc76ccf126563bed8ee2ccf3ef added to the docstring that the module is `Retained for compatibility and should not be used.`. It would be great if the removed class could be reinstated and a deprecation pathway be provided.
### How to Reproduce
1. `pip install setuptools==65.6.0`
2. `from numpy.distutils import Log`
### Output
```
In [4]: from numpy.distutils import Log
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-4-f8e71815afcd> in <module>
----> 1 from numpy.distutils import Log
~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/__init__.py in <module>
22 # Must import local ccompiler ASAP in order to get
23 # customized CCompiler.spawn effective.
---> 24 from . import ccompiler
25 from . import unixccompiler
26
~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/ccompiler.py in <module>
18 from distutils.version import LooseVersion
19
---> 20 from numpy.distutils import log
21 from numpy.distutils.exec_command import (
22 filepath_from_subprocess_output, forward_bytes_to_stdout
~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/log.py in <module>
2 import sys
3 from distutils.log import * # noqa: F403
----> 4 from distutils.log import Log as old_Log
5 from distutils.log import _global_log
6
ImportError: cannot import name 'Log' from 'distutils.log' (/home/sph/.virtualenvs/aiida_dev/lib/python3.9/site-packages/setuptools/_distutils/log.py)
```
</issue>
<code>
[start of setuptools/_distutils/log.py]
1 """
2 A simple log mechanism styled after PEP 282.
3
4 Retained for compatibility and should not be used.
5 """
6
7 import logging
8
9 from ._log import log as _global_log
10
11
12 DEBUG = logging.DEBUG
13 INFO = logging.INFO
14 WARN = logging.WARN
15 ERROR = logging.ERROR
16 FATAL = logging.FATAL
17
18 log = _global_log.log
19 debug = _global_log.debug
20 info = _global_log.info
21 warn = _global_log.warning
22 error = _global_log.error
23 fatal = _global_log.fatal
24
25
26 def set_threshold(level):
27 orig = _global_log.level
28 _global_log.setLevel(level)
29 return orig
30
31
32 def set_verbosity(v):
33 if v <= 0:
34 set_threshold(logging.WARN)
35 elif v == 1:
36 set_threshold(logging.INFO)
37 elif v >= 2:
38 set_threshold(logging.DEBUG)
39
[end of setuptools/_distutils/log.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/_distutils/log.py b/setuptools/_distutils/log.py
--- a/setuptools/_distutils/log.py
+++ b/setuptools/_distutils/log.py
@@ -5,6 +5,7 @@
"""
import logging
+import warnings
from ._log import log as _global_log
@@ -36,3 +37,21 @@
set_threshold(logging.INFO)
elif v >= 2:
set_threshold(logging.DEBUG)
+
+
+class Log(logging.Logger):
+ """distutils.log.Log is deprecated, please use an alternative from `logging`."""
+
+ def __init__(self, threshold=WARN):
+ warnings.warn(Log.__doc__) # avoid DeprecationWarning to ensure warn is shown
+ super().__init__(__name__, level=threshold)
+
+ @property
+ def threshold(self):
+ return self.level
+
+ @threshold.setter
+ def threshold(self, level):
+ self.setLevel(level)
+
+ warn = logging.Logger.warning
| {"golden_diff": "diff --git a/setuptools/_distutils/log.py b/setuptools/_distutils/log.py\n--- a/setuptools/_distutils/log.py\n+++ b/setuptools/_distutils/log.py\n@@ -5,6 +5,7 @@\n \"\"\"\n \n import logging\n+import warnings\n \n from ._log import log as _global_log\n \n@@ -36,3 +37,21 @@\n set_threshold(logging.INFO)\n elif v >= 2:\n set_threshold(logging.DEBUG)\n+\n+\n+class Log(logging.Logger):\n+ \"\"\"distutils.log.Log is deprecated, please use an alternative from `logging`.\"\"\"\n+\n+ def __init__(self, threshold=WARN):\n+ warnings.warn(Log.__doc__) # avoid DeprecationWarning to ensure warn is shown\n+ super().__init__(__name__, level=threshold)\n+\n+ @property\n+ def threshold(self):\n+ return self.level\n+\n+ @threshold.setter\n+ def threshold(self, level):\n+ self.setLevel(level)\n+\n+ warn = logging.Logger.warning\n", "issue": "[BUG] release v65.6.0 breaks packages downstream due to removal of `distutils.log.Log`\n### setuptools version\n\n65.6.0\n\n### Python version\n\nPython 3.10\n\n### OS\n\nUbuntu\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nThe `distutils.log.Log` class was removed in https://github.com/pypa/setuptools/commit/74652cabdeaacadc76ccf126563bed8ee2ccf3ef. This causes popular packages downstream, such as `numpy`, to fail: see https://github.com/numpy/numpy/issues/22623\n\n### Expected behavior\n\nThe module `distutils.log` module was not officially marked as deprecated even though https://github.com/pypa/setuptools/commit/74652cabdeaacadc76ccf126563bed8ee2ccf3ef added to the docstring that the module is `Retained for compatibility and should not be used.`. It would be great if the removed class could be reinstated and a deprecation pathway be provided.\n\n### How to Reproduce\n\n1. `pip install setuptools==65.6.0`\r\n2. `from numpy.distutils import Log`\n\n### Output\n\n```\r\nIn [4]: from numpy.distutils import Log\r\n---------------------------------------------------------------------------\r\nImportError Traceback (most recent call last)\r\n<ipython-input-4-f8e71815afcd> in <module>\r\n----> 1 from numpy.distutils import Log\r\n\r\n~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/__init__.py in <module>\r\n 22 # Must import local ccompiler ASAP in order to get\r\n 23 # customized CCompiler.spawn effective.\r\n---> 24 from . import ccompiler\r\n 25 from . 
import unixccompiler\r\n 26 \r\n\r\n~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/ccompiler.py in <module>\r\n 18 from distutils.version import LooseVersion\r\n 19 \r\n---> 20 from numpy.distutils import log\r\n 21 from numpy.distutils.exec_command import (\r\n 22 filepath_from_subprocess_output, forward_bytes_to_stdout\r\n\r\n~/.virtualenvs/aiida_dev/lib/python3.9/site-packages/numpy/distutils/log.py in <module>\r\n 2 import sys\r\n 3 from distutils.log import * # noqa: F403\r\n----> 4 from distutils.log import Log as old_Log\r\n 5 from distutils.log import _global_log\r\n 6 \r\n\r\nImportError: cannot import name 'Log' from 'distutils.log' (/home/sph/.virtualenvs/aiida_dev/lib/python3.9/site-packages/setuptools/_distutils/log.py)\r\n```\n", "before_files": [{"content": "\"\"\"\nA simple log mechanism styled after PEP 282.\n\nRetained for compatibility and should not be used.\n\"\"\"\n\nimport logging\n\nfrom ._log import log as _global_log\n\n\nDEBUG = logging.DEBUG\nINFO = logging.INFO\nWARN = logging.WARN\nERROR = logging.ERROR\nFATAL = logging.FATAL\n\nlog = _global_log.log\ndebug = _global_log.debug\ninfo = _global_log.info\nwarn = _global_log.warning\nerror = _global_log.error\nfatal = _global_log.fatal\n\n\ndef set_threshold(level):\n orig = _global_log.level\n _global_log.setLevel(level)\n return orig\n\n\ndef set_verbosity(v):\n if v <= 0:\n set_threshold(logging.WARN)\n elif v == 1:\n set_threshold(logging.INFO)\n elif v >= 2:\n set_threshold(logging.DEBUG)\n", "path": "setuptools/_distutils/log.py"}]} | 1,421 | 224 |
gh_patches_debug_40283 | rasdani/github-patches | git_diff | enthought__chaco-598 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Serializable mixin should be removed
The class is not used by any current code, appears to be broken, and the problem it was meant to solve (selection of which traits to pickle) is better solved via the use of `transient` traits metadata.
Technically this is a backwards-incompatible change, however.
</issue>
<code>
[start of chaco/serializable.py]
1 """ Defines the Serializable mix-in class.
2 """
3
4
5 class Serializable(object):
6 """
7 Mix-in class to help serialization. Serializes just the attributes in
8 **_pickles**.
9
10 This mix-in works best when all the classes in a hierarchy subclass
11 from it. It solves the problem of allowing each class to specify
12 its own set of attributes to pickle and attributes to ignore, without
13 having to also implement __getstate__ and __setstate__.
14 """
15
16 # The basic list of attributes to save. These get set without firing
17 # any trait events.
18 _pickles = None
19
20 # A list of the parents of this class that will be searched for their
21 # list of _pickles. Only the parents in this list that inherit from
22 # Serialized will be pickled. The process stops at the first item in
23 # __pickle_parents that is not a subclass of Serialized.
24 #
25 # This is a double-underscore variable so that Python's attribute name
26 # will shield base class
27 # __pickle_parents = None
28
29 def _get_pickle_parents(self):
30 """
31 Subclasses can override this method to return the list of base
32 classes they want to have the serializer look at.
33 """
34 bases = []
35 for cls in self.__class__.__mro__:
36 if cls is Serializable:
37 # don't add Serializable to the list of parents
38 continue
39 elif issubclass(cls, Serializable):
40 bases.append(cls)
41 else:
42 break
43 return bases
44
45 def _pre_save(self):
46 """
47 Called before __getstate__ to give the object a chance to tidy up
48 and get ready to be saved. This usually also calls the superclass.
49 """
50
51 def _post_load(self):
52 """
53 Called after __setstate__ finishes restoring the state on the object.
54 This method usually needs to include a call to super(cls, self)._post_load().
55 Avoid explicitly calling a parent class by name, because in general
56 you want post_load() to happen in the same order as MRO, which super()
57 does automatically.
58 """
59 print("Serializable._post_load")
60 pass
61
62 def _do_setstate(self, state):
63 """
64 Called by __setstate__ to allow the subclass to set its state in a
65 special way.
66
67 Subclasses should override this instead of Serializable.__setstate__
68 because we need Serializable's implementation to call _post_load() after
69 all the _do_setstate() have returned.)
70 """
71 # Quietly set all the attributes
72 self.trait_setq(**state)
73
74 # ------------------------------------------------------------------------
75 # Private methods
76 # ------------------------------------------------------------------------
77
78
79 # def __getstate__(self):
80 # #idstring = self.__class__.__name__ + " id=" + str(id(self))
81 # # Give the object a chance to tidy up before saving
82 # self._pre_save()
83 #
84 # # Get the attributes that this class needs to serialize. We do this by
85 # # marching up the list of parent classes in _pickle_parents and getting
86 # # their lists of _pickles.
87 # all_pickles = Set()
88 # pickle_parents = self._get_pickle_parents()
89 # for parent_class in pickle_parents:
90 # all_pickles.update(parent_class._pickles)
91 #
92 # if self._pickles is not None:
93 # all_pickles.update(self._pickles)
94 #
95 # state = {}
96 # for attrib in all_pickles:
97 # state[attrib] = getattr(self, attrib)
98 #
99 # print('<<<<<<<<<<<<<', self)
100 # for key,value in state.items():
101 # print(key, type(value))
102 # print '>>>>>>>>>>>>>'
103 #
104 # return state
105
106 # ~ def __setstate__(self, state):
107 # ~ idstring = self.__class__.__name__ + " id=" + str(id(self))
108 # ~ self._do_setstate(state)
109 # ~ self._post_load()
110 # ~ return
111
[end of chaco/serializable.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chaco/serializable.py b/chaco/serializable.py
deleted file mode 100644
--- a/chaco/serializable.py
+++ /dev/null
@@ -1,110 +0,0 @@
-""" Defines the Serializable mix-in class.
-"""
-
-
-class Serializable(object):
- """
- Mix-in class to help serialization. Serializes just the attributes in
- **_pickles**.
-
- This mix-in works best when all the classes in a hierarchy subclass
- from it. It solves the problem of allowing each class to specify
- its own set of attributes to pickle and attributes to ignore, without
- having to also implement __getstate__ and __setstate__.
- """
-
- # The basic list of attributes to save. These get set without firing
- # any trait events.
- _pickles = None
-
- # A list of the parents of this class that will be searched for their
- # list of _pickles. Only the parents in this list that inherit from
- # Serialized will be pickled. The process stops at the first item in
- # __pickle_parents that is not a subclass of Serialized.
- #
- # This is a double-underscore variable so that Python's attribute name
- # will shield base class
- # __pickle_parents = None
-
- def _get_pickle_parents(self):
- """
- Subclasses can override this method to return the list of base
- classes they want to have the serializer look at.
- """
- bases = []
- for cls in self.__class__.__mro__:
- if cls is Serializable:
- # don't add Serializable to the list of parents
- continue
- elif issubclass(cls, Serializable):
- bases.append(cls)
- else:
- break
- return bases
-
- def _pre_save(self):
- """
- Called before __getstate__ to give the object a chance to tidy up
- and get ready to be saved. This usually also calls the superclass.
- """
-
- def _post_load(self):
- """
- Called after __setstate__ finishes restoring the state on the object.
- This method usually needs to include a call to super(cls, self)._post_load().
- Avoid explicitly calling a parent class by name, because in general
- you want post_load() to happen in the same order as MRO, which super()
- does automatically.
- """
- print("Serializable._post_load")
- pass
-
- def _do_setstate(self, state):
- """
- Called by __setstate__ to allow the subclass to set its state in a
- special way.
-
- Subclasses should override this instead of Serializable.__setstate__
- because we need Serializable's implementation to call _post_load() after
- all the _do_setstate() have returned.)
- """
- # Quietly set all the attributes
- self.trait_setq(**state)
-
- # ------------------------------------------------------------------------
- # Private methods
- # ------------------------------------------------------------------------
-
-
-# def __getstate__(self):
-# #idstring = self.__class__.__name__ + " id=" + str(id(self))
-# # Give the object a chance to tidy up before saving
-# self._pre_save()
-#
-# # Get the attributes that this class needs to serialize. We do this by
-# # marching up the list of parent classes in _pickle_parents and getting
-# # their lists of _pickles.
-# all_pickles = Set()
-# pickle_parents = self._get_pickle_parents()
-# for parent_class in pickle_parents:
-# all_pickles.update(parent_class._pickles)
-#
-# if self._pickles is not None:
-# all_pickles.update(self._pickles)
-#
-# state = {}
-# for attrib in all_pickles:
-# state[attrib] = getattr(self, attrib)
-#
-# print('<<<<<<<<<<<<<', self)
-# for key,value in state.items():
-# print(key, type(value))
-# print '>>>>>>>>>>>>>'
-#
-# return state
-
-# ~ def __setstate__(self, state):
-# ~ idstring = self.__class__.__name__ + " id=" + str(id(self))
-# ~ self._do_setstate(state)
-# ~ self._post_load()
-# ~ return
| {"golden_diff": "diff --git a/chaco/serializable.py b/chaco/serializable.py\ndeleted file mode 100644\n--- a/chaco/serializable.py\n+++ /dev/null\n@@ -1,110 +0,0 @@\n-\"\"\" Defines the Serializable mix-in class.\n-\"\"\"\n-\n-\n-class Serializable(object):\n- \"\"\"\n- Mix-in class to help serialization. Serializes just the attributes in\n- **_pickles**.\n-\n- This mix-in works best when all the classes in a hierarchy subclass\n- from it. It solves the problem of allowing each class to specify\n- its own set of attributes to pickle and attributes to ignore, without\n- having to also implement __getstate__ and __setstate__.\n- \"\"\"\n-\n- # The basic list of attributes to save. These get set without firing\n- # any trait events.\n- _pickles = None\n-\n- # A list of the parents of this class that will be searched for their\n- # list of _pickles. Only the parents in this list that inherit from\n- # Serialized will be pickled. The process stops at the first item in\n- # __pickle_parents that is not a subclass of Serialized.\n- #\n- # This is a double-underscore variable so that Python's attribute name\n- # will shield base class\n- # __pickle_parents = None\n-\n- def _get_pickle_parents(self):\n- \"\"\"\n- Subclasses can override this method to return the list of base\n- classes they want to have the serializer look at.\n- \"\"\"\n- bases = []\n- for cls in self.__class__.__mro__:\n- if cls is Serializable:\n- # don't add Serializable to the list of parents\n- continue\n- elif issubclass(cls, Serializable):\n- bases.append(cls)\n- else:\n- break\n- return bases\n-\n- def _pre_save(self):\n- \"\"\"\n- Called before __getstate__ to give the object a chance to tidy up\n- and get ready to be saved. This usually also calls the superclass.\n- \"\"\"\n-\n- def _post_load(self):\n- \"\"\"\n- Called after __setstate__ finishes restoring the state on the object.\n- This method usually needs to include a call to super(cls, self)._post_load().\n- Avoid explicitly calling a parent class by name, because in general\n- you want post_load() to happen in the same order as MRO, which super()\n- does automatically.\n- \"\"\"\n- print(\"Serializable._post_load\")\n- pass\n-\n- def _do_setstate(self, state):\n- \"\"\"\n- Called by __setstate__ to allow the subclass to set its state in a\n- special way.\n-\n- Subclasses should override this instead of Serializable.__setstate__\n- because we need Serializable's implementation to call _post_load() after\n- all the _do_setstate() have returned.)\n- \"\"\"\n- # Quietly set all the attributes\n- self.trait_setq(**state)\n-\n- # ------------------------------------------------------------------------\n- # Private methods\n- # ------------------------------------------------------------------------\n-\n-\n-# def __getstate__(self):\n-# #idstring = self.__class__.__name__ + \" id=\" + str(id(self))\n-# # Give the object a chance to tidy up before saving\n-# self._pre_save()\n-#\n-# # Get the attributes that this class needs to serialize. 
We do this by\n-# # marching up the list of parent classes in _pickle_parents and getting\n-# # their lists of _pickles.\n-# all_pickles = Set()\n-# pickle_parents = self._get_pickle_parents()\n-# for parent_class in pickle_parents:\n-# all_pickles.update(parent_class._pickles)\n-#\n-# if self._pickles is not None:\n-# all_pickles.update(self._pickles)\n-#\n-# state = {}\n-# for attrib in all_pickles:\n-# state[attrib] = getattr(self, attrib)\n-#\n-# print('<<<<<<<<<<<<<', self)\n-# for key,value in state.items():\n-# print(key, type(value))\n-# print '>>>>>>>>>>>>>'\n-#\n-# return state\n-\n-# ~ def __setstate__(self, state):\n-# ~ idstring = self.__class__.__name__ + \" id=\" + str(id(self))\n-# ~ self._do_setstate(state)\n-# ~ self._post_load()\n-# ~ return\n", "issue": "Serializable mixin should be removed\nThe class is not used by any current code, appears to be broken, and the problem it was meant to solve (selection of which traits to pickle) is better solved via the use of `transient` traits metadata.\n\nTechnically this is a backwards-incompatible change, however.\n\n", "before_files": [{"content": "\"\"\" Defines the Serializable mix-in class.\n\"\"\"\n\n\nclass Serializable(object):\n \"\"\"\n Mix-in class to help serialization. Serializes just the attributes in\n **_pickles**.\n\n This mix-in works best when all the classes in a hierarchy subclass\n from it. It solves the problem of allowing each class to specify\n its own set of attributes to pickle and attributes to ignore, without\n having to also implement __getstate__ and __setstate__.\n \"\"\"\n\n # The basic list of attributes to save. These get set without firing\n # any trait events.\n _pickles = None\n\n # A list of the parents of this class that will be searched for their\n # list of _pickles. Only the parents in this list that inherit from\n # Serialized will be pickled. The process stops at the first item in\n # __pickle_parents that is not a subclass of Serialized.\n #\n # This is a double-underscore variable so that Python's attribute name\n # will shield base class\n # __pickle_parents = None\n\n def _get_pickle_parents(self):\n \"\"\"\n Subclasses can override this method to return the list of base\n classes they want to have the serializer look at.\n \"\"\"\n bases = []\n for cls in self.__class__.__mro__:\n if cls is Serializable:\n # don't add Serializable to the list of parents\n continue\n elif issubclass(cls, Serializable):\n bases.append(cls)\n else:\n break\n return bases\n\n def _pre_save(self):\n \"\"\"\n Called before __getstate__ to give the object a chance to tidy up\n and get ready to be saved. 
This usually also calls the superclass.\n \"\"\"\n\n def _post_load(self):\n \"\"\"\n Called after __setstate__ finishes restoring the state on the object.\n This method usually needs to include a call to super(cls, self)._post_load().\n Avoid explicitly calling a parent class by name, because in general\n you want post_load() to happen in the same order as MRO, which super()\n does automatically.\n \"\"\"\n print(\"Serializable._post_load\")\n pass\n\n def _do_setstate(self, state):\n \"\"\"\n Called by __setstate__ to allow the subclass to set its state in a\n special way.\n\n Subclasses should override this instead of Serializable.__setstate__\n because we need Serializable's implementation to call _post_load() after\n all the _do_setstate() have returned.)\n \"\"\"\n # Quietly set all the attributes\n self.trait_setq(**state)\n\n # ------------------------------------------------------------------------\n # Private methods\n # ------------------------------------------------------------------------\n\n\n# def __getstate__(self):\n# #idstring = self.__class__.__name__ + \" id=\" + str(id(self))\n# # Give the object a chance to tidy up before saving\n# self._pre_save()\n#\n# # Get the attributes that this class needs to serialize. We do this by\n# # marching up the list of parent classes in _pickle_parents and getting\n# # their lists of _pickles.\n# all_pickles = Set()\n# pickle_parents = self._get_pickle_parents()\n# for parent_class in pickle_parents:\n# all_pickles.update(parent_class._pickles)\n#\n# if self._pickles is not None:\n# all_pickles.update(self._pickles)\n#\n# state = {}\n# for attrib in all_pickles:\n# state[attrib] = getattr(self, attrib)\n#\n# print('<<<<<<<<<<<<<', self)\n# for key,value in state.items():\n# print(key, type(value))\n# print '>>>>>>>>>>>>>'\n#\n# return state\n\n# ~ def __setstate__(self, state):\n# ~ idstring = self.__class__.__name__ + \" id=\" + str(id(self))\n# ~ self._do_setstate(state)\n# ~ self._post_load()\n# ~ return\n", "path": "chaco/serializable.py"}]} | 1,689 | 1,014 |
gh_patches_debug_13783 | rasdani/github-patches | git_diff | pyca__cryptography-8260 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove verify_interface
Now that `register_interface` is gone, we have no use for `verify_interface`, but https://github.com/aws/aws-encryption-sdk-python/issues/464 is a blocker for removing it.
</issue>
<code>
[start of src/cryptography/utils.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 import abc
7 import enum
8 import sys
9 import types
10 import typing
11 import warnings
12
13
14 # We use a UserWarning subclass, instead of DeprecationWarning, because CPython
15 # decided deprecation warnings should be invisble by default.
16 class CryptographyDeprecationWarning(UserWarning):
17 pass
18
19
20 # Several APIs were deprecated with no specific end-of-life date because of the
21 # ubiquity of their use. They should not be removed until we agree on when that
22 # cycle ends.
23 DeprecatedIn36 = CryptographyDeprecationWarning
24 DeprecatedIn37 = CryptographyDeprecationWarning
25 DeprecatedIn39 = CryptographyDeprecationWarning
26 DeprecatedIn40 = CryptographyDeprecationWarning
27
28
29 def _check_bytes(name: str, value: bytes) -> None:
30 if not isinstance(value, bytes):
31 raise TypeError(f"{name} must be bytes")
32
33
34 def _check_byteslike(name: str, value: bytes) -> None:
35 try:
36 memoryview(value)
37 except TypeError:
38 raise TypeError(f"{name} must be bytes-like")
39
40
41 def int_to_bytes(integer: int, length: typing.Optional[int] = None) -> bytes:
42 return integer.to_bytes(
43 length or (integer.bit_length() + 7) // 8 or 1, "big"
44 )
45
46
47 class InterfaceNotImplemented(Exception):
48 pass
49
50
51 # DeprecatedIn39 -- Our only known consumer is aws-encryption-sdk, but we've
52 # made this a no-op to avoid breaking old versions.
53 def verify_interface(
54 iface: abc.ABCMeta, klass: object, *, check_annotations: bool = False
55 ):
56 # Exists exclusively for `aws-encryption-sdk` which relies on it existing,
57 # even though it was never a public API.
58 pass
59
60
61 class _DeprecatedValue:
62 def __init__(self, value: object, message: str, warning_class):
63 self.value = value
64 self.message = message
65 self.warning_class = warning_class
66
67
68 class _ModuleWithDeprecations(types.ModuleType):
69 def __init__(self, module: types.ModuleType):
70 super().__init__(module.__name__)
71 self.__dict__["_module"] = module
72
73 def __getattr__(self, attr: str) -> object:
74 obj = getattr(self._module, attr)
75 if isinstance(obj, _DeprecatedValue):
76 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
77 obj = obj.value
78 return obj
79
80 def __setattr__(self, attr: str, value: object) -> None:
81 setattr(self._module, attr, value)
82
83 def __delattr__(self, attr: str) -> None:
84 obj = getattr(self._module, attr)
85 if isinstance(obj, _DeprecatedValue):
86 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
87
88 delattr(self._module, attr)
89
90 def __dir__(self) -> typing.Sequence[str]:
91 return ["_module"] + dir(self._module)
92
93
94 def deprecated(
95 value: object,
96 module_name: str,
97 message: str,
98 warning_class: typing.Type[Warning],
99 name: typing.Optional[str] = None,
100 ) -> _DeprecatedValue:
101 module = sys.modules[module_name]
102 if not isinstance(module, _ModuleWithDeprecations):
103 sys.modules[module_name] = module = _ModuleWithDeprecations(module)
104 dv = _DeprecatedValue(value, message, warning_class)
105 # Maintain backwards compatibility with `name is None` for pyOpenSSL.
106 if name is not None:
107 setattr(module, name, dv)
108 return dv
109
110
111 def cached_property(func: typing.Callable) -> property:
112 cached_name = f"_cached_{func}"
113 sentinel = object()
114
115 def inner(instance: object):
116 cache = getattr(instance, cached_name, sentinel)
117 if cache is not sentinel:
118 return cache
119 result = func(instance)
120 setattr(instance, cached_name, result)
121 return result
122
123 return property(inner)
124
125
126 # Python 3.10 changed representation of enums. We use well-defined object
127 # representation and string representation from Python 3.9.
128 class Enum(enum.Enum):
129 def __repr__(self) -> str:
130 return f"<{self.__class__.__name__}.{self._name_}: {self._value_!r}>"
131
132 def __str__(self) -> str:
133 return f"{self.__class__.__name__}.{self._name_}"
134
[end of src/cryptography/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py
--- a/src/cryptography/utils.py
+++ b/src/cryptography/utils.py
@@ -3,7 +3,6 @@
# for complete details.
-import abc
import enum
import sys
import types
@@ -48,16 +47,6 @@
pass
-# DeprecatedIn39 -- Our only known consumer is aws-encryption-sdk, but we've
-# made this a no-op to avoid breaking old versions.
-def verify_interface(
- iface: abc.ABCMeta, klass: object, *, check_annotations: bool = False
-):
- # Exists exclusively for `aws-encryption-sdk` which relies on it existing,
- # even though it was never a public API.
- pass
-
-
class _DeprecatedValue:
def __init__(self, value: object, message: str, warning_class):
self.value = value
| {"golden_diff": "diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py\n--- a/src/cryptography/utils.py\n+++ b/src/cryptography/utils.py\n@@ -3,7 +3,6 @@\n # for complete details.\n \n \n-import abc\n import enum\n import sys\n import types\n@@ -48,16 +47,6 @@\n pass\n \n \n-# DeprecatedIn39 -- Our only known consumer is aws-encryption-sdk, but we've\n-# made this a no-op to avoid breaking old versions.\n-def verify_interface(\n- iface: abc.ABCMeta, klass: object, *, check_annotations: bool = False\n-):\n- # Exists exclusively for `aws-encryption-sdk` which relies on it existing,\n- # even though it was never a public API.\n- pass\n-\n-\n class _DeprecatedValue:\n def __init__(self, value: object, message: str, warning_class):\n self.value = value\n", "issue": "Remove verify_interface\nNow that `register_interface` is gone we have no use for `verify_interface`, but https://github.com/aws/aws-encryption-sdk-python/issues/464 is a blocker for removing it.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport abc\nimport enum\nimport sys\nimport types\nimport typing\nimport warnings\n\n\n# We use a UserWarning subclass, instead of DeprecationWarning, because CPython\n# decided deprecation warnings should be invisble by default.\nclass CryptographyDeprecationWarning(UserWarning):\n pass\n\n\n# Several APIs were deprecated with no specific end-of-life date because of the\n# ubiquity of their use. They should not be removed until we agree on when that\n# cycle ends.\nDeprecatedIn36 = CryptographyDeprecationWarning\nDeprecatedIn37 = CryptographyDeprecationWarning\nDeprecatedIn39 = CryptographyDeprecationWarning\nDeprecatedIn40 = CryptographyDeprecationWarning\n\n\ndef _check_bytes(name: str, value: bytes) -> None:\n if not isinstance(value, bytes):\n raise TypeError(f\"{name} must be bytes\")\n\n\ndef _check_byteslike(name: str, value: bytes) -> None:\n try:\n memoryview(value)\n except TypeError:\n raise TypeError(f\"{name} must be bytes-like\")\n\n\ndef int_to_bytes(integer: int, length: typing.Optional[int] = None) -> bytes:\n return integer.to_bytes(\n length or (integer.bit_length() + 7) // 8 or 1, \"big\"\n )\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\n# DeprecatedIn39 -- Our only known consumer is aws-encryption-sdk, but we've\n# made this a no-op to avoid breaking old versions.\ndef verify_interface(\n iface: abc.ABCMeta, klass: object, *, check_annotations: bool = False\n):\n # Exists exclusively for `aws-encryption-sdk` which relies on it existing,\n # even though it was never a public API.\n pass\n\n\nclass _DeprecatedValue:\n def __init__(self, value: object, message: str, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(types.ModuleType):\n def __init__(self, module: types.ModuleType):\n super().__init__(module.__name__)\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr: str) -> object:\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr: str, value: object) -> None:\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr: str) -> None:\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n 
warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self) -> typing.Sequence[str]:\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(\n value: object,\n module_name: str,\n message: str,\n warning_class: typing.Type[Warning],\n name: typing.Optional[str] = None,\n) -> _DeprecatedValue:\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n dv = _DeprecatedValue(value, message, warning_class)\n # Maintain backwards compatibility with `name is None` for pyOpenSSL.\n if name is not None:\n setattr(module, name, dv)\n return dv\n\n\ndef cached_property(func: typing.Callable) -> property:\n cached_name = f\"_cached_{func}\"\n sentinel = object()\n\n def inner(instance: object):\n cache = getattr(instance, cached_name, sentinel)\n if cache is not sentinel:\n return cache\n result = func(instance)\n setattr(instance, cached_name, result)\n return result\n\n return property(inner)\n\n\n# Python 3.10 changed representation of enums. We use well-defined object\n# representation and string representation from Python 3.9.\nclass Enum(enum.Enum):\n def __repr__(self) -> str:\n return f\"<{self.__class__.__name__}.{self._name_}: {self._value_!r}>\"\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}.{self._name_}\"\n", "path": "src/cryptography/utils.py"}]} | 1,885 | 204 |
gh_patches_debug_10293 | rasdani/github-patches | git_diff | lutris__lutris-5245 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Accounts" tab in settings is blank

When navigating to the "accounts" tab in the lutris settings (version 0.5.15), it is blank and I get this error. This is on Fedora 39 KDE.
```
2024-01-14 08:52:03,865: Error handling signal 'row-selected': 'PersonalName'
Traceback (most recent call last):
File "/usr/lib/python3.12/site-packages/lutris/exception_backstops.py", line 79, in error_wrapper
return handler(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.12/site-packages/lutris/gui/config/preferences_dialog.py", line 109, in on_sidebar_activated
generator()
File "/usr/lib/python3.12/site-packages/lutris/gui/config/accounts_box.py", line 33, in populate_accounts
account["PersonalName"]
~~~~~~~^^^^^^^^^^^^^^^^
KeyError: 'PersonalName'
```
My only guess is that my steam display name has a " / " in it. But I'm not sure.
I have both Steam RPM and Steam flatpak installed.
</issue>
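The traceback fails on the dictionary lookup itself: Steam's `loginusers.vdf` stores the display name under the key `PersonaName`, not `PersonalName`, so the `/` in the display name is incidental. The bracket lookup in the installed 0.5.15 build raises for every account because the key never exists under that spelling; the repository code shown below already uses `.get()`, so there the misspelling silently falls back to `#steamid64` instead. A minimal, self-contained sketch of the bug and of the tolerant form (the sample dict is hypothetical):

```python
# Hypothetical account record; real data is parsed from Steam's loginusers.vdf,
# where the display name lives under "PersonaName" (note the spelling).
account = {"steamid64": "76561198000000000", "PersonaName": "name/with/slash"}

# account["PersonalName"]  # the buggy bracket lookup: raises KeyError: 'PersonalName'

# The corrected, tolerant form: .get() with a steamid64 fallback for unnamed accounts.
name = account.get("PersonaName") or f"#{account['steamid64']}"
print(name)  # -> name/with/slash
```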
<code>
[start of lutris/gui/config/accounts_box.py]
1 from gettext import gettext as _
2
3 from gi.repository import Gtk
4
5 from lutris import settings
6 from lutris.gui.config.base_config_box import BaseConfigBox
7 from lutris.util.steam.config import STEAM_ACCOUNT_SETTING, get_steam_users
8
9
10 class AccountsBox(BaseConfigBox):
11
12 def __init__(self):
13 super().__init__()
14 self.add(self.get_section_label(_("Steam accounts")))
15 self.add(self.get_description_label(
16 _("Select which Steam account is used for Lutris integration and creating Steam shortcuts.")
17 ))
18 frame = Gtk.Frame(visible=True, shadow_type=Gtk.ShadowType.ETCHED_IN)
19 frame.get_style_context().add_class("info-frame")
20 self.pack_start(frame, False, False, 0)
21
22 self.accounts_box = Gtk.VBox(visible=True)
23 frame.add(self.accounts_box)
24
25 def populate_accounts(self):
26 main_radio_button = None
27 active_steam_account = settings.read_setting(STEAM_ACCOUNT_SETTING)
28
29 steam_users = get_steam_users()
30 for account in steam_users:
31 steamid64 = account["steamid64"]
32 name = account.get("PersonalName") or f"#{steamid64}"
33 radio_button = Gtk.RadioButton.new_with_label_from_widget(main_radio_button, name)
34 radio_button.set_margin_top(16)
35 radio_button.set_margin_start(16)
36 radio_button.set_margin_bottom(16)
37 radio_button.show()
38 radio_button.set_active(active_steam_account == steamid64)
39 radio_button.connect("toggled", self.on_steam_account_toggled, steamid64)
40 self.accounts_box.pack_start(radio_button, True, True, 0)
41 if not main_radio_button:
42 main_radio_button = radio_button
43 if not steam_users:
44 self.accounts_box.pack_start(Gtk.Label(_("No Steam account found"), visible=True), True, True, 0)
45
46 def on_steam_account_toggled(self, radio_button, steamid64):
47 """Handler for switching the active Steam account."""
48 settings.write_setting(STEAM_ACCOUNT_SETTING, steamid64)
49
[end of lutris/gui/config/accounts_box.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/gui/config/accounts_box.py b/lutris/gui/config/accounts_box.py
--- a/lutris/gui/config/accounts_box.py
+++ b/lutris/gui/config/accounts_box.py
@@ -29,7 +29,7 @@
steam_users = get_steam_users()
for account in steam_users:
steamid64 = account["steamid64"]
- name = account.get("PersonalName") or f"#{steamid64}"
+ name = account.get("PersonaName") or f"#{steamid64}"
radio_button = Gtk.RadioButton.new_with_label_from_widget(main_radio_button, name)
radio_button.set_margin_top(16)
radio_button.set_margin_start(16)
| {"golden_diff": "diff --git a/lutris/gui/config/accounts_box.py b/lutris/gui/config/accounts_box.py\n--- a/lutris/gui/config/accounts_box.py\n+++ b/lutris/gui/config/accounts_box.py\n@@ -29,7 +29,7 @@\n steam_users = get_steam_users()\n for account in steam_users:\n steamid64 = account[\"steamid64\"]\n- name = account.get(\"PersonalName\") or f\"#{steamid64}\"\n+ name = account.get(\"PersonaName\") or f\"#{steamid64}\"\n radio_button = Gtk.RadioButton.new_with_label_from_widget(main_radio_button, name)\n radio_button.set_margin_top(16)\n radio_button.set_margin_start(16)\n", "issue": "\"Accounts\" tab in settings is blank\n\r\n\r\nWhen navigating to the \"accounts\" tab in the lutris settings (version 0.5.15), it is blank and I get this error. This is on Fedora 39 KDE.\r\n\r\n```\r\n2024-01-14 08:52:03,865: Error handling signal 'row-selected': 'PersonalName'\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.12/site-packages/lutris/exception_backstops.py\", line 79, in error_wrapper\r\n return handler(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.12/site-packages/lutris/gui/config/preferences_dialog.py\", line 109, in on_sidebar_activated\r\n generator()\r\n File \"/usr/lib/python3.12/site-packages/lutris/gui/config/accounts_box.py\", line 33, in populate_accounts\r\n account[\"PersonalName\"]\r\n ~~~~~~~^^^^^^^^^^^^^^^^\r\nKeyError: 'PersonalName'\r\n```\r\n\r\nMy only guess is that my steam display name has a \" / \" in it. But I'm not sure.\r\n\r\nI have both Steam RPM and Steam flatpak installed.\n", "before_files": [{"content": "from gettext import gettext as _\n\nfrom gi.repository import Gtk\n\nfrom lutris import settings\nfrom lutris.gui.config.base_config_box import BaseConfigBox\nfrom lutris.util.steam.config import STEAM_ACCOUNT_SETTING, get_steam_users\n\n\nclass AccountsBox(BaseConfigBox):\n\n def __init__(self):\n super().__init__()\n self.add(self.get_section_label(_(\"Steam accounts\")))\n self.add(self.get_description_label(\n _(\"Select which Steam account is used for Lutris integration and creating Steam shortcuts.\")\n ))\n frame = Gtk.Frame(visible=True, shadow_type=Gtk.ShadowType.ETCHED_IN)\n frame.get_style_context().add_class(\"info-frame\")\n self.pack_start(frame, False, False, 0)\n\n self.accounts_box = Gtk.VBox(visible=True)\n frame.add(self.accounts_box)\n\n def populate_accounts(self):\n main_radio_button = None\n active_steam_account = settings.read_setting(STEAM_ACCOUNT_SETTING)\n\n steam_users = get_steam_users()\n for account in steam_users:\n steamid64 = account[\"steamid64\"]\n name = account.get(\"PersonalName\") or f\"#{steamid64}\"\n radio_button = Gtk.RadioButton.new_with_label_from_widget(main_radio_button, name)\n radio_button.set_margin_top(16)\n radio_button.set_margin_start(16)\n radio_button.set_margin_bottom(16)\n radio_button.show()\n radio_button.set_active(active_steam_account == steamid64)\n radio_button.connect(\"toggled\", self.on_steam_account_toggled, steamid64)\n self.accounts_box.pack_start(radio_button, True, True, 0)\n if not main_radio_button:\n main_radio_button = radio_button\n if not steam_users:\n self.accounts_box.pack_start(Gtk.Label(_(\"No Steam account found\"), visible=True), True, True, 0)\n\n def on_steam_account_toggled(self, radio_button, steamid64):\n \"\"\"Handler for switching the active Steam account.\"\"\"\n settings.write_setting(STEAM_ACCOUNT_SETTING, steamid64)\n", "path": "lutris/gui/config/accounts_box.py"}]} | 1,398 | 161 |
gh_patches_debug_40728 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider verizon is broken
During the global build at 2021-10-27-14-42-46, spider **verizon** failed with **4573 features** and **1650 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/logs/verizon.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/output/verizon.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/output/verizon.geojson))
</issue>
<code>
[start of locations/spiders/verizon.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5
6 from locations.items import GeojsonPointItem
7 from locations.hours import OpeningHours
8
9
10 class VerizonSpider(scrapy.Spider):
11 name = "verizon"
12 item_attributes = { 'brand': "Verizon" }
13 allowed_domains = ["www.verizonwireless.com"]
14 start_urls = (
15 'https://www.verizonwireless.com/sitemap_storelocator.xml',
16 )
17 custom_settings = {
18 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
19 }
20
21 def parse_hours(self, store_hours):
22 opening_hours = OpeningHours()
23 for store_day in store_hours['dayOfWeek']:
24 if store_day.lower() == 'closed':
25 continue
26 else:
27 day, open_close = store_day.split('-')
28 day = day.strip()[:2]
29 open_time = ' '.join(open_close.strip().split(' ', 2)[0:2])
30 if open_time.split(' ')[0].lower() == 'closed':
31 continue
32 elif open_time.split(' ')[0].lower() == 'null':
33 continue
34 else:
35 if open_close.strip().count(' ') == 1:
36 open_time, close_time = open_time.split(' ')
37 opening_hours.add_range(day=day,
38 open_time=open_time,
39 close_time=close_time,
40 time_format='%I:%M%p'
41 )
42 elif open_close.strip().count(' ') == 2:
43 open_time = open_close.strip().split(' ')[0]
44 close_time = ''.join(open_close.strip().split(' ')[1:3])
45 opening_hours.add_range(day=day,
46 open_time=open_time,
47 close_time=close_time,
48 time_format='%I:%M%p'
49 )
50 else:
51 close_time = open_close.strip().split(' ', 2)[2]
52 opening_hours.add_range(day=day,
53 open_time=open_time,
54 close_time=close_time,
55 time_format='%I:%M %p'
56 )
57
58 return opening_hours.as_opening_hours()
59
60 def parse(self, response):
61 response.selector.remove_namespaces()
62 urls = response.xpath('//url/loc/text()').extract()
63
64 for url in urls:
65 if url.split('/')[-2].split('-')[-1].isdigit():
66 # Store pages have a number at the end of their URL
67 yield scrapy.Request(url, callback=self.parse_store)
68
69 def parse_store(self, response):
70 script = response.xpath('//script[contains(text(), "storeJSON")]/text()').extract_first()
71 if not script:
72 return
73
74 store_data = json.loads(re.search(r'var storeJSON = (.*);', script).group(1))
75
76 properties = {
77 'name': store_data["storeName"],
78 'ref': store_data["storeNumber"],
79 'addr_full': store_data["address"]["streetAddress"],
80 'city': store_data["address"]["addressLocality"],
81 'state': store_data["address"]["addressRegion"],
82 'postcode': store_data["address"]["postalCode"],
83 'country': store_data["address"]["addressCountry"],
84 'phone': store_data.get("telephone"),
85 'website': store_data.get("url") or response.url,
86 'lat': store_data["geo"].get("latitude"),
87 'lon': store_data["geo"].get("longitude"),
88 'extras': {
89 'business_name': store_data.get('posStoreDetail').get('businessName'),
90 'retail_id': store_data.get('retailId'),
91 'store_type': store_data.get('posStoreDetail').get('storeType'),
92 'store_type_note': store_data.get('typeOfStore')
93 }
94 }
95
96 hours = self.parse_hours(store_data.get("openingHoursSpecification"))
97 if hours:
98 properties["opening_hours"] = hours
99
100 yield GeojsonPointItem(**properties)
101
[end of locations/spiders/verizon.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/verizon.py b/locations/spiders/verizon.py
--- a/locations/spiders/verizon.py
+++ b/locations/spiders/verizon.py
@@ -20,40 +20,18 @@
def parse_hours(self, store_hours):
opening_hours = OpeningHours()
- for store_day in store_hours['dayOfWeek']:
- if store_day.lower() == 'closed':
- continue
- else:
- day, open_close = store_day.split('-')
- day = day.strip()[:2]
- open_time = ' '.join(open_close.strip().split(' ', 2)[0:2])
- if open_time.split(' ')[0].lower() == 'closed':
- continue
- elif open_time.split(' ')[0].lower() == 'null':
- continue
- else:
- if open_close.strip().count(' ') == 1:
- open_time, close_time = open_time.split(' ')
- opening_hours.add_range(day=day,
- open_time=open_time,
- close_time=close_time,
- time_format='%I:%M%p'
- )
- elif open_close.strip().count(' ') == 2:
- open_time = open_close.strip().split(' ')[0]
- close_time = ''.join(open_close.strip().split(' ')[1:3])
- opening_hours.add_range(day=day,
- open_time=open_time,
- close_time=close_time,
- time_format='%I:%M%p'
- )
- else:
- close_time = open_close.strip().split(' ', 2)[2]
- opening_hours.add_range(day=day,
- open_time=open_time,
- close_time=close_time,
- time_format='%I:%M %p'
- )
+
+ for store_day in ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']:
+ open_time = store_hours.get(f'{store_day}Open')
+ close_time = store_hours.get(f'{store_day}Close')
+
+ if open_time and close_time and open_time.lower() != 'closed' and close_time.lower() != 'closed':
+ opening_hours.add_range(
+ day=store_day[0:2],
+ open_time=open_time,
+ close_time=close_time,
+ time_format='%I:%M %p'
+ )
return opening_hours.as_opening_hours()
@@ -86,14 +64,15 @@
'lat': store_data["geo"].get("latitude"),
'lon': store_data["geo"].get("longitude"),
'extras': {
- 'business_name': store_data.get('posStoreDetail').get('businessName'),
+ # Sometimes 'postStoreDetail' exists with "None" value, usual get w/ default syntax isn't reliable
+ 'business_name': (store_data.get('posStoreDetail') or {}).get('businessName'),
'retail_id': store_data.get('retailId'),
- 'store_type': store_data.get('posStoreDetail').get('storeType'),
+ 'store_type': (store_data.get('posStoreDetail') or {}).get('storeType'),
'store_type_note': store_data.get('typeOfStore')
}
}
- hours = self.parse_hours(store_data.get("openingHoursSpecification"))
+ hours = self.parse_hours(store_data.get("StoreHours"))
if hours:
properties["opening_hours"] = hours
| {"golden_diff": "diff --git a/locations/spiders/verizon.py b/locations/spiders/verizon.py\n--- a/locations/spiders/verizon.py\n+++ b/locations/spiders/verizon.py\n@@ -20,40 +20,18 @@\n \n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n- for store_day in store_hours['dayOfWeek']:\n- if store_day.lower() == 'closed':\n- continue\n- else:\n- day, open_close = store_day.split('-')\n- day = day.strip()[:2]\n- open_time = ' '.join(open_close.strip().split(' ', 2)[0:2])\n- if open_time.split(' ')[0].lower() == 'closed':\n- continue\n- elif open_time.split(' ')[0].lower() == 'null':\n- continue\n- else:\n- if open_close.strip().count(' ') == 1:\n- open_time, close_time = open_time.split(' ')\n- opening_hours.add_range(day=day,\n- open_time=open_time,\n- close_time=close_time,\n- time_format='%I:%M%p'\n- )\n- elif open_close.strip().count(' ') == 2:\n- open_time = open_close.strip().split(' ')[0]\n- close_time = ''.join(open_close.strip().split(' ')[1:3])\n- opening_hours.add_range(day=day,\n- open_time=open_time,\n- close_time=close_time,\n- time_format='%I:%M%p'\n- )\n- else:\n- close_time = open_close.strip().split(' ', 2)[2]\n- opening_hours.add_range(day=day,\n- open_time=open_time,\n- close_time=close_time,\n- time_format='%I:%M %p'\n- )\n+\n+ for store_day in ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']:\n+ open_time = store_hours.get(f'{store_day}Open')\n+ close_time = store_hours.get(f'{store_day}Close')\n+\n+ if open_time and close_time and open_time.lower() != 'closed' and close_time.lower() != 'closed':\n+ opening_hours.add_range(\n+ day=store_day[0:2],\n+ open_time=open_time,\n+ close_time=close_time,\n+ time_format='%I:%M %p'\n+ )\n \n return opening_hours.as_opening_hours()\n \n@@ -86,14 +64,15 @@\n 'lat': store_data[\"geo\"].get(\"latitude\"),\n 'lon': store_data[\"geo\"].get(\"longitude\"),\n 'extras': {\n- 'business_name': store_data.get('posStoreDetail').get('businessName'),\n+ # Sometimes 'postStoreDetail' exists with \"None\" value, usual get w/ default syntax isn't reliable\n+ 'business_name': (store_data.get('posStoreDetail') or {}).get('businessName'),\n 'retail_id': store_data.get('retailId'),\n- 'store_type': store_data.get('posStoreDetail').get('storeType'),\n+ 'store_type': (store_data.get('posStoreDetail') or {}).get('storeType'),\n 'store_type_note': store_data.get('typeOfStore')\n }\n }\n \n- hours = self.parse_hours(store_data.get(\"openingHoursSpecification\"))\n+ hours = self.parse_hours(store_data.get(\"StoreHours\"))\n if hours:\n properties[\"opening_hours\"] = hours\n", "issue": "Spider verizon is broken\nDuring the global build at 2021-10-27-14-42-46, spider **verizon** failed with **4573 features** and **1650 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/logs/verizon.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/output/verizon.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-27-14-42-46/output/verizon.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass VerizonSpider(scrapy.Spider):\n name = \"verizon\"\n item_attributes = { 'brand': \"Verizon\" }\n allowed_domains = [\"www.verizonwireless.com\"]\n start_urls = (\n 'https://www.verizonwireless.com/sitemap_storelocator.xml',\n )\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n for store_day in store_hours['dayOfWeek']:\n if store_day.lower() == 'closed':\n continue\n else:\n day, open_close = store_day.split('-')\n day = day.strip()[:2]\n open_time = ' '.join(open_close.strip().split(' ', 2)[0:2])\n if open_time.split(' ')[0].lower() == 'closed':\n continue\n elif open_time.split(' ')[0].lower() == 'null':\n continue\n else:\n if open_close.strip().count(' ') == 1:\n open_time, close_time = open_time.split(' ')\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p'\n )\n elif open_close.strip().count(' ') == 2:\n open_time = open_close.strip().split(' ')[0]\n close_time = ''.join(open_close.strip().split(' ')[1:3])\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p'\n )\n else:\n close_time = open_close.strip().split(' ', 2)[2]\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M %p'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n\n for url in urls:\n if url.split('/')[-2].split('-')[-1].isdigit():\n # Store pages have a number at the end of their URL\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n script = response.xpath('//script[contains(text(), \"storeJSON\")]/text()').extract_first()\n if not script:\n return\n\n store_data = json.loads(re.search(r'var storeJSON = (.*);', script).group(1))\n\n properties = {\n 'name': store_data[\"storeName\"],\n 'ref': store_data[\"storeNumber\"],\n 'addr_full': store_data[\"address\"][\"streetAddress\"],\n 'city': store_data[\"address\"][\"addressLocality\"],\n 'state': store_data[\"address\"][\"addressRegion\"],\n 'postcode': store_data[\"address\"][\"postalCode\"],\n 'country': store_data[\"address\"][\"addressCountry\"],\n 'phone': store_data.get(\"telephone\"),\n 'website': store_data.get(\"url\") or response.url,\n 'lat': store_data[\"geo\"].get(\"latitude\"),\n 'lon': store_data[\"geo\"].get(\"longitude\"),\n 'extras': {\n 'business_name': store_data.get('posStoreDetail').get('businessName'),\n 'retail_id': store_data.get('retailId'),\n 'store_type': store_data.get('posStoreDetail').get('storeType'),\n 'store_type_note': store_data.get('typeOfStore')\n }\n }\n\n hours = self.parse_hours(store_data.get(\"openingHoursSpecification\"))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/verizon.py"}]} | 1,799 | 775 |
gh_patches_debug_22389 | rasdani/github-patches | git_diff | ckan__ckan-7309 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
White on Yellow?? Color Contrast
**CKAN version**
https://demo.ckan.org/en/user/edit/mgifford
**Describe the bug**
Title: WCAG 1.4.3: Ensures the contrast between foreground and background colors meets WCAG 2 AA contrast ratio thresholds (.btn-warning)
Tags: Accessibility, WCAG 1.4.3, color-contrast
Issue: Ensures the contrast between foreground and background colors meets WCAG 2 AA contrast ratio thresholds (color-contrast - https://accessibilityinsights.io/info-examples/web/color-contrast)
Target application: Manage - mgifford - Users - CKAN Demo - https://demo.ckan.org/en/user/edit/mgifford
Element path: .btn-warning
Snippet: <a class="btn btn-warning" href="/en/user/generate_key/b8037a86-a216-4c9b-8211-e197fa09143a" data-module="confirm-action" data-module-content="Are you sure you want to regenerate the API key?">Regenerate API Key</a>
How to fix:
Fix any of the following:
Element has insufficient color contrast of 1.94 (foreground color: #ffffff, background color: #f0ad4e, font size: 10.5pt (14px), font weight: bold). Expected contrast ratio of 4.5:1
Environment: Microsoft Edge version 107.0.1418.35
====
This accessibility issue was found using Accessibility Insights for Web 2.35.0 (axe-core 4.4.1), a tool that helps find and fix accessibility issues. Get more information & download this tool at http://aka.ms/AccessibilityInsights.
</issue>
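The 1.94 value in the report follows directly from WCAG 2's contrast-ratio formula, evaluated on relative luminance computed from the linearized sRGB channels — a quick check:

```latex
% WCAG 2 contrast ratio, with L_1 \ge L_2 the relative luminances of the two colors:
\[
  CR = \frac{L_1 + 0.05}{L_2 + 0.05},
  \qquad
  L = 0.2126\,R_{\mathrm{lin}} + 0.7152\,G_{\mathrm{lin}} + 0.0722\,B_{\mathrm{lin}}
\]
% White (#ffffff) has L_1 = 1.0; for #f0ad4e the linearized channels give L_2 \approx 0.49, so
\[
  CR \approx \frac{1.05}{0.54} \approx 1.94 \;<\; 4.5
\]
% i.e. short of the AA 4.5:1 threshold that applies to 14px bold text
% (bold text only earns the relaxed 3:1 "large text" threshold at 14pt and above).
```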
<code>
[start of ckan/cli/sass.py]
1 # encoding: utf-8
2 from __future__ import annotations
3
4 import subprocess
5 import os
6
7 import click
8 import six
9
10 from ckan.common import config
11
12
13 @click.command(
14 name=u'sass',
15 short_help=u'Compile all root sass documents into their CSS counterparts')
16 def sass():
17 command = (u'npm', u'run', u'build')
18
19 public = config.get_value(u'ckan.base_public_folder')
20
21 root = os.path.join(os.path.dirname(__file__), u'..', public, u'base')
22 root = os.path.abspath(root)
23 _compile_sass(root, command, u'main')
24
25
26 def _compile_sass(root: str, command: tuple[str, ...], color: str):
27 click.echo(u'compile {}.css'.format(color))
28 command = command + (u'--', u'--' + color)
29
30 process = subprocess.Popen(
31 command,
32 stdout=subprocess.PIPE,
33 stderr=subprocess.PIPE)
34 output = process.communicate()
35 for block in output:
36 click.echo(six.ensure_text(block))
37
[end of ckan/cli/sass.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckan/cli/sass.py b/ckan/cli/sass.py
--- a/ckan/cli/sass.py
+++ b/ckan/cli/sass.py
@@ -11,21 +11,32 @@
@click.command(
- name=u'sass',
- short_help=u'Compile all root sass documents into their CSS counterparts')
-def sass():
- command = (u'npm', u'run', u'build')
-
- public = config.get_value(u'ckan.base_public_folder')
-
- root = os.path.join(os.path.dirname(__file__), u'..', public, u'base')
+ name='sass',
+ short_help='Compile all root sass documents into their CSS counterparts')
[email protected](
+ '-d',
+ '--debug',
+ is_flag=True,
+ help="Compile css with sourcemaps.")
+def sass(debug: bool):
+ command = ('npm', 'run', 'build')
+
+ public = config.get_value('ckan.base_public_folder')
+
+ root = os.path.join(os.path.dirname(__file__), '..', public, 'base')
root = os.path.abspath(root)
- _compile_sass(root, command, u'main')
-
-
-def _compile_sass(root: str, command: tuple[str, ...], color: str):
- click.echo(u'compile {}.css'.format(color))
- command = command + (u'--', u'--' + color)
+ _compile_sass(root, command, 'main', debug)
+
+
+def _compile_sass(
+ root: str,
+ command: tuple[str, ...],
+ color: str,
+ debug: bool):
+ click.echo('compile {}.css'.format(color))
+ command = command + ('--', '--' + color)
+ if debug:
+ command = command + ('--debug',)
process = subprocess.Popen(
command,
| {"golden_diff": "diff --git a/ckan/cli/sass.py b/ckan/cli/sass.py\n--- a/ckan/cli/sass.py\n+++ b/ckan/cli/sass.py\n@@ -11,21 +11,32 @@\n \n \n @click.command(\n- name=u'sass',\n- short_help=u'Compile all root sass documents into their CSS counterparts')\n-def sass():\n- command = (u'npm', u'run', u'build')\n-\n- public = config.get_value(u'ckan.base_public_folder')\n-\n- root = os.path.join(os.path.dirname(__file__), u'..', public, u'base')\n+ name='sass',\n+ short_help='Compile all root sass documents into their CSS counterparts')\[email protected](\n+ '-d',\n+ '--debug',\n+ is_flag=True,\n+ help=\"Compile css with sourcemaps.\")\n+def sass(debug: bool):\n+ command = ('npm', 'run', 'build')\n+\n+ public = config.get_value('ckan.base_public_folder')\n+\n+ root = os.path.join(os.path.dirname(__file__), '..', public, 'base')\n root = os.path.abspath(root)\n- _compile_sass(root, command, u'main')\n-\n-\n-def _compile_sass(root: str, command: tuple[str, ...], color: str):\n- click.echo(u'compile {}.css'.format(color))\n- command = command + (u'--', u'--' + color)\n+ _compile_sass(root, command, 'main', debug)\n+\n+\n+def _compile_sass(\n+ root: str,\n+ command: tuple[str, ...],\n+ color: str,\n+ debug: bool):\n+ click.echo('compile {}.css'.format(color))\n+ command = command + ('--', '--' + color)\n+ if debug:\n+ command = command + ('--debug',)\n \n process = subprocess.Popen(\n command,\n", "issue": "White on Yellow?? Color Contrast\n**CKAN version**\r\nhttps://demo.ckan.org/en/user/edit/mgifford\r\n\r\n**Describe the bug**\r\nTitle: WCAG 1.4.3: Ensures the contrast between foreground and background colors meets WCAG 2 AA contrast ratio thresholds (.btn-warning)\r\nTags: Accessibility, WCAG 1.4.3, color-contrast\r\n\r\nIssue: Ensures the contrast between foreground and background colors meets WCAG 2 AA contrast ratio thresholds (color-contrast - https://accessibilityinsights.io/info-examples/web/color-contrast)\r\n\r\nTarget application: Manage - mgifford - Users - CKAN Demo - https://demo.ckan.org/en/user/edit/mgifford\r\n\r\nElement path: .btn-warning\r\n\r\nSnippet: <a class=\"btn btn-warning\" href=\"/en/user/generate_key/b8037a86-a216-4c9b-8211-e197fa09143a\" data-module=\"confirm-action\" data-module-content=\"Are you sure you want to regenerate the API key?\">Regenerate API Key</a>\r\n\r\nHow to fix: \r\nFix any of the following:\r\n Element has insufficient color contrast of 1.94 (foreground color: #ffffff, background color: #f0ad4e, font size: 10.5pt (14px), font weight: bold). Expected contrast ratio of 4.5:1\r\n\r\nEnvironment: Microsoft Edge version 107.0.1418.35\r\n\r\n====\r\n\r\nThis accessibility issue was found using Accessibility Insights for Web 2.35.0 (axe-core 4.4.1), a tool that helps find and fix accessibility issues. 
Get more information & download this tool at http://aka.ms/AccessibilityInsights.\n", "before_files": [{"content": "# encoding: utf-8\nfrom __future__ import annotations\n\nimport subprocess\nimport os\n\nimport click\nimport six\n\nfrom ckan.common import config\n\n\[email protected](\n name=u'sass',\n short_help=u'Compile all root sass documents into their CSS counterparts')\ndef sass():\n command = (u'npm', u'run', u'build')\n\n public = config.get_value(u'ckan.base_public_folder')\n\n root = os.path.join(os.path.dirname(__file__), u'..', public, u'base')\n root = os.path.abspath(root)\n _compile_sass(root, command, u'main')\n\n\ndef _compile_sass(root: str, command: tuple[str, ...], color: str):\n click.echo(u'compile {}.css'.format(color))\n command = command + (u'--', u'--' + color)\n\n process = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output = process.communicate()\n for block in output:\n click.echo(six.ensure_text(block))\n", "path": "ckan/cli/sass.py"}]} | 1,222 | 428 |
gh_patches_debug_10692 | rasdani/github-patches | git_diff | freedomofpress__securedrop-4865 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update ansible to 2.6.18 or later due to CVE-2019-10156
## Description
We should update Ansible to version 2.6.18 or later due to [CVE-2019-10156](https://nvd.nist.gov/vuln/detail/CVE-2019-10156). This is a templating vulnerability that would require an attacker to first insert malicious templates into the Admin workstation so the impact is minimal for SecureDrop. Nevertheless, to reduce alert noise and not be using dependencies with known vulnerabilities, we should update.
</issue>
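Note how the callback below pins the version: a bare `str.startswith` test against one exact release, which is why the remedy is a one-line bump of the constant rather than a `>=` range check. A small illustration (the version strings are assumed values for demonstration):

```python
# How the startswith pin in ansible_version_check.py behaves.
required_version = "2.6.19"
for installed in ("2.6.19", "2.6.19.post1", "2.7.0", "2.6.14"):
    verdict = "accepted" if installed.startswith(required_version) else "rejected"
    print(f"{installed}: {verdict}")
# Caveat: startswith is a prefix test, so a hypothetical "2.6.190" would also be accepted.
```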
<code>
[start of install_files/ansible-base/callback_plugins/ansible_version_check.py]
1 # -*- encoding:utf-8 -*-
2 from __future__ import absolute_import, division, print_function, \
3 unicode_literals
4
5 import sys
6
7 import ansible
8
9 try:
10 # Version 2.0+
11 from ansible.plugins.callback import CallbackBase
12 except ImportError:
13 CallbackBase = object
14
15
16 def print_red_bold(text):
17 print('\x1b[31;1m' + text + '\x1b[0m')
18
19
20 class CallbackModule(CallbackBase):
21 def __init__(self):
22 # Can't use `on_X` because this isn't forwards compatible
23 # with Ansible 2.0+
24 required_version = '2.6.14' # Keep synchronized with requirements files
25 if not ansible.__version__.startswith(required_version):
26 print_red_bold(
27 "SecureDrop restriction: only Ansible {version}.*"
28 "is supported."
29 .format(version=required_version)
30 )
31 sys.exit(1)
32
[end of install_files/ansible-base/callback_plugins/ansible_version_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py
--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py
+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py
@@ -21,7 +21,7 @@
def __init__(self):
# Can't use `on_X` because this isn't forwards compatible
# with Ansible 2.0+
- required_version = '2.6.14' # Keep synchronized with requirements files
+ required_version = '2.6.19' # Keep synchronized with requirements files
if not ansible.__version__.startswith(required_version):
print_red_bold(
"SecureDrop restriction: only Ansible {version}.*"
| {"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -21,7 +21,7 @@\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n- required_version = '2.6.14' # Keep synchronized with requirements files\n+ required_version = '2.6.19' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n", "issue": "update ansible to 2.6.18 or later due to CVE-2019-10156\n## Description\r\n\r\nWe should update Ansible to version 2.6.18 or later due to [CVE-2019-10156](https://nvd.nist.gov/vuln/detail/CVE-2019-10156). This is a templating vulnerability that would require an attacker to first insert malicious templates into the Admin workstation so the impact is minimal for SecureDrop. Nevertheless, to reduce alert noise and not be using dependencies with known vulnerabilities, we should update. \n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n required_version = '2.6.14' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n \"is supported.\"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]} | 948 | 178 |
gh_patches_debug_60820 | rasdani/github-patches | git_diff | cltk__cltk-575 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Double-check code and data for new French PR
This issue is for @nat1881 to follow up on her large PR #571 for Old and Middle French.
Natasha, I would like you to do the following steps, to be certain that the code works as you intended:
* Start a brand new clone of (this) cltk repo.
* Make a new virtual env
* Mk source tarball and install (this should install all dependencies, too): `python setup.py sdist install`
* Temporarily rename your `~/cltk_data` dir (eg, `mv ~/cltk_data ~/cltk_data_backup`)
* Import the french corpora and make sure they appear as they should
* Check in ipython all of your commands that you have added to the docs. Copy-paste these exactly as they are in the docs.
* Follow up on any bugs in your own updated branch ([this is what I recommend for updating your branch](https://github.com/cltk/cltk/wiki/Example-Git-and-Python-workflow))
* Bump the version in `setup.py` and make PR for this
* Then @diyclassics or I will push the code to PyPI
You may be tired of this, but you're getting close! :weary:
cc @mlj
</issue>
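Once the sdist reinstall steps above have been followed in the fresh virtualenv, the installed version can be confirmed from Python. The snippet below is a generic sanity check, nothing project-specific assumed; on the Python 3.6 targeted by this setup.py, `pkg_resources` stands in for the newer `importlib.metadata` (3.8+):

```python
# Post-install sanity check for the checklist above.
import pkg_resources  # ships with setuptools; works on Python 3.6

print(pkg_resources.get_distribution("cltk").version)  # expect 0.1.64 after the bump below
```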
<code>
[start of setup.py]
1 """Config for PyPI."""
2
3 from setuptools import find_packages
4 from setuptools import setup
5
6
7 setup(
8 author='Kyle P. Johnson',
9 author_email='[email protected]',
10 classifiers=[
11 'Intended Audience :: Education',
12 'Intended Audience :: Science/Research',
13 'License :: OSI Approved :: MIT License',
14 'Natural Language :: Chinese (Traditional)',
15 'Natural Language :: English',
16 'Natural Language :: Greek',
17 'Natural Language :: Latin',
18 'Operating System :: POSIX',
19 'Programming Language :: Python :: 3.6',
20 'Topic :: Scientific/Engineering :: Artificial Intelligence',
21 'Topic :: Text Processing',
22 'Topic :: Text Processing :: General',
23 'Topic :: Text Processing :: Linguistic',
24 ],
25 description='NLP for the ancient world',
26 install_requires=['gitpython',
27 'nltk',
28 'python-crfsuite',
29 'pyuca',
30 'pyyaml',
31 'regex',
32 'whoosh'],
33 keywords=['nlp', 'nltk', 'greek', 'latin', 'chinese', 'sanskrit', 'pali', 'tibetan'],
34 license='MIT',
35 long_description='The Classical Language Toolkit (CLTK) is a framework for natural language processing for Classical languages.', # pylint: disable=C0301,
36 name='cltk',
37 packages=find_packages(),
38 url='https://github.com/cltk/cltk',
39 version='0.1.63',
40 zip_safe=True,
41 test_suite='cltk.tests.test_cltk',
42 )
43
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@
name='cltk',
packages=find_packages(),
url='https://github.com/cltk/cltk',
- version='0.1.63',
+ version='0.1.64',
zip_safe=True,
test_suite='cltk.tests.test_cltk',
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,7 +36,7 @@\n name='cltk',\n packages=find_packages(),\n url='https://github.com/cltk/cltk',\n- version='0.1.63',\n+ version='0.1.64',\n zip_safe=True,\n test_suite='cltk.tests.test_cltk',\n )\n", "issue": "Double-check code and data for new French PR\nThis issue is for @nat1881 to follow up on her large PR #571 for Old and Middle French.\r\n\r\nNatasha, I would like you to do the following steps, to be certain that the code works as you intended:\r\n\r\n* Start a brand new clone of (this) cltk repo.\r\n* Make a new virtual env\r\n* Mk source tarball and install (this should install all dependencies, too): `python setup.py sdist install`\r\n* Temporarily rename your `~/cltk_data` dir (eg, `mv ~/cltk_data ~/cltk_data_backup`)\r\n* Import the french corpora and make sure they appear as they should\r\n* Check in ipython all of your commands that you have added to the docs. Copy-paste these exactly as they are in the docs.\r\n* Follow up on any bugs in your own updated branch ([this is what I recommend for updating your branch](https://github.com/cltk/cltk/wiki/Example-Git-and-Python-workflow))\r\n* Bump the version in `setup.py` and make PR for this\r\n* Then @diyclassics or I will push the code to PyPI\r\n\r\nYou may be tired of this, but you're getting close! :weary:\r\n\r\ncc @mlj \n", "before_files": [{"content": "\"\"\"Config for PyPI.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n author='Kyle P. Johnson',\n author_email='[email protected]',\n classifiers=[\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: Chinese (Traditional)',\n 'Natural Language :: English',\n 'Natural Language :: Greek',\n 'Natural Language :: Latin',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Text Processing',\n 'Topic :: Text Processing :: General',\n 'Topic :: Text Processing :: Linguistic',\n ],\n description='NLP for the ancient world',\n install_requires=['gitpython',\n 'nltk',\n 'python-crfsuite',\n 'pyuca',\n 'pyyaml',\n 'regex',\n 'whoosh'],\n keywords=['nlp', 'nltk', 'greek', 'latin', 'chinese', 'sanskrit', 'pali', 'tibetan'],\n license='MIT',\n long_description='The Classical Language Toolkit (CLTK) is a framework for natural language processing for Classical languages.', # pylint: disable=C0301,\n name='cltk',\n packages=find_packages(),\n url='https://github.com/cltk/cltk',\n version='0.1.63',\n zip_safe=True,\n test_suite='cltk.tests.test_cltk',\n)\n", "path": "setup.py"}]} | 1,219 | 95 |
gh_patches_debug_6139 | rasdani/github-patches | git_diff | uclapi__uclapi-140 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Documentation] Docs link is Absolute, not Relative
The documentation link always goes to `https://uclapi.com/docs`, even if running in, for example, staging. Just linking to `/docs` would be adequate to fix this.
</issue>
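For illustration, the change the issue asks for amounts to a host-relative versus an absolute href; the markup strings below are hypothetical and not taken from the uclapi templates:

```python
# Hypothetical anchor markup showing why the absolute link misbehaves on staging.
absolute = '<a href="https://uclapi.com/docs">Documentation</a>'  # always lands on production
relative = '<a href="/docs">Documentation</a>'  # resolves against whichever host served the page
print(absolute)
print(relative)
```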
<code>
[start of backend/uclapi/resources/views.py]
1 import os
2 import requests
3
4 from lxml import etree
5
6 from common.decorators import uclapi_protected_endpoint
7 from common.helpers import PrettyJsonResponse as JsonResponse
8
9 from rest_framework.decorators import api_view
10
11
12 @api_view(['GET'])
13 @uclapi_protected_endpoint()
14 def get_pc_availability(request, *args, **kwargs):
15 try:
16 r = requests.get(os.environ["PCA_LINK"])
17 except requests.exceptions.MissingSchema:
18 resp = JsonResponse({
19 "ok": False,
20 "error": ("Could not retrieve availability data."
21 " Please try again later or contact us for support.")
22 }, rate_limiting_data=kwargs)
23 resp.status_code = 400
24 return resp
25
26 try:
27 e = etree.fromstring(r.content)
28 except (ValueError, etree.XMLSyntaxError):
29 resp = JsonResponse({
30 "ok": False,
31 "error": ("Could not parse the desktop availability data."
32 " Please try again later or contact us for support.")
33 }, rate_limiting_data=kwargs)
34 resp.status_code = 400
35 return resp
36
37 data = []
38 for pc in e.findall("room"):
39 _ = pc.get
40 data.append({
41 "location": {
42 "room_name": _("location"),
43 "room_id": _("rid"),
44 "latitude": _("latitude"),
45 "longitude": _("longitude"),
46 "building_name": _("buildingName"),
47 "address": _("buildingAddress"),
48 "postcode": _("buildingPostCode")
49 },
50 "free_seats": _("free"),
51 "total_seats": _("seats"),
52 "room_status": _("info")
53 })
54
55 return JsonResponse({
56 "ok": True,
57 "data": data
58 }, rate_limiting_data=kwargs)
59
[end of backend/uclapi/resources/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/uclapi/resources/views.py b/backend/uclapi/resources/views.py
--- a/backend/uclapi/resources/views.py
+++ b/backend/uclapi/resources/views.py
@@ -45,7 +45,7 @@
"longitude": _("longitude"),
"building_name": _("buildingName"),
"address": _("buildingAddress"),
- "postcode": _("buildingPostCode")
+ "postcode": _("buildingPostcode")
},
"free_seats": _("free"),
"total_seats": _("seats"),
| {"golden_diff": "diff --git a/backend/uclapi/resources/views.py b/backend/uclapi/resources/views.py\n--- a/backend/uclapi/resources/views.py\n+++ b/backend/uclapi/resources/views.py\n@@ -45,7 +45,7 @@\n \"longitude\": _(\"longitude\"),\n \"building_name\": _(\"buildingName\"),\n \"address\": _(\"buildingAddress\"),\n- \"postcode\": _(\"buildingPostCode\")\n+ \"postcode\": _(\"buildingPostcode\")\n },\n \"free_seats\": _(\"free\"),\n \"total_seats\": _(\"seats\"),\n", "issue": "[Documentation] Docs link is Absolute, not Relative\nThe documentation link always goes to `https://uclapi.com/docs`, even if running in, for example, staging. Just linking to `/docs` would be adequate to fix this.\n", "before_files": [{"content": "import os\nimport requests\n\nfrom lxml import etree\n\nfrom common.decorators import uclapi_protected_endpoint\nfrom common.helpers import PrettyJsonResponse as JsonResponse\n\nfrom rest_framework.decorators import api_view\n\n\n@api_view(['GET'])\n@uclapi_protected_endpoint()\ndef get_pc_availability(request, *args, **kwargs):\n try:\n r = requests.get(os.environ[\"PCA_LINK\"])\n except requests.exceptions.MissingSchema:\n resp = JsonResponse({\n \"ok\": False,\n \"error\": (\"Could not retrieve availability data.\"\n \" Please try again later or contact us for support.\")\n }, rate_limiting_data=kwargs)\n resp.status_code = 400\n return resp\n\n try:\n e = etree.fromstring(r.content)\n except (ValueError, etree.XMLSyntaxError):\n resp = JsonResponse({\n \"ok\": False,\n \"error\": (\"Could not parse the desktop availability data.\"\n \" Please try again later or contact us for support.\")\n }, rate_limiting_data=kwargs)\n resp.status_code = 400\n return resp\n\n data = []\n for pc in e.findall(\"room\"):\n _ = pc.get\n data.append({\n \"location\": {\n \"room_name\": _(\"location\"),\n \"room_id\": _(\"rid\"),\n \"latitude\": _(\"latitude\"),\n \"longitude\": _(\"longitude\"),\n \"building_name\": _(\"buildingName\"),\n \"address\": _(\"buildingAddress\"),\n \"postcode\": _(\"buildingPostCode\")\n },\n \"free_seats\": _(\"free\"),\n \"total_seats\": _(\"seats\"),\n \"room_status\": _(\"info\")\n })\n\n return JsonResponse({\n \"ok\": True,\n \"data\": data\n }, rate_limiting_data=kwargs)\n", "path": "backend/uclapi/resources/views.py"}]} | 1,074 | 118 |
gh_patches_debug_24152 | rasdani/github-patches | git_diff | ckan__ckan-5093 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing dependency in requirements.txt (cookiecutter)
https://github.com/ckan/ckan/blob/f2cea089bc0aaeede06d98449c4e9eb65e8c2f14/ckan/cli/generate.py#L7
- the cookiecutter lib is imported on any `ckan` CLI attempt, but as it is missing from requirements.txt it is not installed, which results in an ImportError
- cookiecutter is listed in requirements-dev.txt, but docker builds don't use it
Tested on a personal docker build by:
> docker build -t ckan .
> docker run --rm -it --entrypoint bash --name ckan -p 5000:5000 --link db:db --link redis:redis --link solr:solr ckan
> (activated-env)
> ckan
</issue>
<code>
[start of ckan/cli/generate.py]
1 # encoding: utf-8
2
3 import os
4 import sys
5 import click
6 from ckan.cli import error_shout
7 from cookiecutter.main import cookiecutter
8
9
10 @click.group(name=u'generate',
11 short_help=u"Generate empty extension files to expand CKAN.")
12 def generate():
13 pass
14
15
16 @generate.command(name=u'extension', short_help=u"Create empty extension.")
17 @click.option(u'-o', u'--output-dir', help=u"Location to put the generated "
18 u"template.",
19 default=u'.')
20 def extension(output_dir):
21 cur_loc = os.path.dirname(os.path.abspath(__file__))
22 os.chdir(cur_loc)
23 os.chdir(u'../../contrib/cookiecutter/ckan_extension/')
24 template_loc = os.getcwd()
25
26 # Prompt user for information
27 click.echo(u"\n")
28 name = click.prompt(u"Extenion's name", default=u"must begin 'ckanext-'")
29 author = click.prompt(u"Author's name", default=u"")
30 email = click.prompt(u"Author's email", default=u"")
31 github = click.prompt(u"Your Github user or organization name",
32 default=u"")
33 description = click.prompt(u"Brief description of the project",
34 default=u"")
35 keywords = click.prompt(u"List of keywords (seperated by spaces)",
36 default=u"CKAN")
37
38 # Ensure one instance of 'CKAN' in keywords
39 keywords = keywords.strip().split()
40 keywords = [keyword for keyword in keywords
41 if keyword not in (u'ckan', u'CKAN')]
42 keywords.insert(0, u'CKAN')
43 keywords = u' '.join(keywords)
44
45 # Set short name and plugin class name
46 project_short = name[8:].lower().replace(u'-', u'_')
47 plugin_class_name = project_short.title().replace(u'_', u'') + u'Plugin'
48
49 context = {u"project": name,
50 u"description": description,
51 u"author": author,
52 u"author_email": email,
53 u"keywords": keywords,
54 u"github_user_name": github,
55 u"project_shortname": project_short,
56 u"plugin_class_name": plugin_class_name,
57 u"_source": u"cli"}
58
59 if output_dir == u'.':
60 os.chdir(u'../../../..')
61 output_dir = os.getcwd()
62
63 if not name.startswith(u"ckanext-"):
64 print(u"\nERROR: Project name must start with 'ckanext-' > {}"
65 .format(name))
66 sys.exit(1)
67
68 cookiecutter(template_loc, no_input=True, extra_context=context,
69 output_dir=output_dir)
70
[end of ckan/cli/generate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckan/cli/generate.py b/ckan/cli/generate.py
--- a/ckan/cli/generate.py
+++ b/ckan/cli/generate.py
@@ -4,13 +4,21 @@
import sys
import click
from ckan.cli import error_shout
-from cookiecutter.main import cookiecutter
[email protected](name=u'generate',
- short_help=u"Generate empty extension files to expand CKAN.")
[email protected](
+ name=u'generate',
+ short_help=u"Generate empty extension files to expand CKAN.",
+ invoke_without_command=True,
+)
def generate():
- pass
+ try:
+ from cookiecutter.main import cookiecutter
+ except ImportError:
+ error_shout(u"`cookiecutter` library is missing from import path.")
+ error_shout(u"Make sure you have dev-dependencies installed:")
+ error_shout(u"\tpip install -r dev-requirements.txt")
+ raise click.Abort()
@generate.command(name=u'extension', short_help=u"Create empty extension.")
@@ -18,6 +26,7 @@
u"template.",
default=u'.')
def extension(output_dir):
+ from cookiecutter.main import cookiecutter
cur_loc = os.path.dirname(os.path.abspath(__file__))
os.chdir(cur_loc)
os.chdir(u'../../contrib/cookiecutter/ckan_extension/')
| {"golden_diff": "diff --git a/ckan/cli/generate.py b/ckan/cli/generate.py\n--- a/ckan/cli/generate.py\n+++ b/ckan/cli/generate.py\n@@ -4,13 +4,21 @@\n import sys\n import click\n from ckan.cli import error_shout\n-from cookiecutter.main import cookiecutter\n \n \[email protected](name=u'generate',\n- short_help=u\"Generate empty extension files to expand CKAN.\")\[email protected](\n+ name=u'generate',\n+ short_help=u\"Generate empty extension files to expand CKAN.\",\n+ invoke_without_command=True,\n+)\n def generate():\n- pass\n+ try:\n+ from cookiecutter.main import cookiecutter\n+ except ImportError:\n+ error_shout(u\"`cookiecutter` library is missing from import path.\")\n+ error_shout(u\"Make sure you have dev-dependencies installed:\")\n+ error_shout(u\"\\tpip install -r dev-requirements.txt\")\n+ raise click.Abort()\n \n \n @generate.command(name=u'extension', short_help=u\"Create empty extension.\")\n@@ -18,6 +26,7 @@\n u\"template.\",\n default=u'.')\n def extension(output_dir):\n+ from cookiecutter.main import cookiecutter\n cur_loc = os.path.dirname(os.path.abspath(__file__))\n os.chdir(cur_loc)\n os.chdir(u'../../contrib/cookiecutter/ckan_extension/')\n", "issue": "Missing dependency in requirements.txt (cookiecutter) \nhttps://github.com/ckan/ckan/blob/f2cea089bc0aaeede06d98449c4e9eb65e8c2f14/ckan/cli/generate.py#L7\r\n\r\n- cookiecutter lib will be imported on a `ckan` cli attempt, but as it is missing from requirments.txt, is not present which will result to ImportError \r\n\r\n- cookiecutter is listed in requirments-dev.txt, but docker builds don't use it\r\n\r\nTested on a docker personal build, by : \r\n> docker build -t ckan .\r\n> docker run --rm -it --entrypoint bash --name ckan -p 5000:5000 --link db:db --link redis:redis --link solr:solr ckan\r\n> (activated-env)\r\n> ckan\n", "before_files": [{"content": "# encoding: utf-8\n\nimport os\nimport sys\nimport click\nfrom ckan.cli import error_shout\nfrom cookiecutter.main import cookiecutter\n\n\[email protected](name=u'generate',\n short_help=u\"Generate empty extension files to expand CKAN.\")\ndef generate():\n pass\n\n\[email protected](name=u'extension', short_help=u\"Create empty extension.\")\[email protected](u'-o', u'--output-dir', help=u\"Location to put the generated \"\n u\"template.\",\n default=u'.')\ndef extension(output_dir):\n cur_loc = os.path.dirname(os.path.abspath(__file__))\n os.chdir(cur_loc)\n os.chdir(u'../../contrib/cookiecutter/ckan_extension/')\n template_loc = os.getcwd()\n\n # Prompt user for information\n click.echo(u\"\\n\")\n name = click.prompt(u\"Extenion's name\", default=u\"must begin 'ckanext-'\")\n author = click.prompt(u\"Author's name\", default=u\"\")\n email = click.prompt(u\"Author's email\", default=u\"\")\n github = click.prompt(u\"Your Github user or organization name\",\n default=u\"\")\n description = click.prompt(u\"Brief description of the project\",\n default=u\"\")\n keywords = click.prompt(u\"List of keywords (seperated by spaces)\",\n default=u\"CKAN\")\n\n # Ensure one instance of 'CKAN' in keywords\n keywords = keywords.strip().split()\n keywords = [keyword for keyword in keywords\n if keyword not in (u'ckan', u'CKAN')]\n keywords.insert(0, u'CKAN')\n keywords = u' '.join(keywords)\n\n # Set short name and plugin class name\n project_short = name[8:].lower().replace(u'-', u'_')\n plugin_class_name = project_short.title().replace(u'_', u'') + u'Plugin'\n\n context = {u\"project\": name,\n u\"description\": description,\n u\"author\": author,\n u\"author_email\": 
email,\n u\"keywords\": keywords,\n u\"github_user_name\": github,\n u\"project_shortname\": project_short,\n u\"plugin_class_name\": plugin_class_name,\n u\"_source\": u\"cli\"}\n\n if output_dir == u'.':\n os.chdir(u'../../../..')\n output_dir = os.getcwd()\n\n if not name.startswith(u\"ckanext-\"):\n print(u\"\\nERROR: Project name must start with 'ckanext-' > {}\"\n .format(name))\n sys.exit(1)\n\n cookiecutter(template_loc, no_input=True, extra_context=context,\n output_dir=output_dir)\n", "path": "ckan/cli/generate.py"}]} | 1,447 | 316 |
gh_patches_debug_1208 | rasdani/github-patches | git_diff | OCA__server-tools-464 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
runbot 9.0 red due to letsencrypt?
Hi,
It seems the 9.0 branch is red on runbot due to the letsencrypt module?
```
Call of self.pool.get('letsencrypt').cron(cr, uid, *()) failed in Job 2
Traceback (most recent call last):
File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/addons/base/ir/ir_cron.py", line 129, in _callback
getattr(model, method_name)(cr, uid, *args)
File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/api.py", line 250, in wrapper
return old_api(self, *args, **kwargs)
File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/api.py", line 354, in old_api
result = method(recs, *args, **kwargs)
File "/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/addons/letsencrypt/models/letsencrypt.py", line 151, in cron
account_key, csr, acme_challenge, log=_logger, CA=DEFAULT_CA)
File "/srv/openerp/instances/openerp-oca-runbot/sandbox/local/lib/python2.7/site-packages/acme_tiny.py", line 104, in get_crt
raise ValueError("Error requesting challenges: {0} {1}".format(code, result))
ValueError: Error requesting challenges: 400 {
"type": "urn:acme:error:malformed",
"detail": "Error creating new authz :: Invalid character in DNS name",
"status": 400
}
```
@hbrunn
</issue>
<code>
[start of letsencrypt/__openerp__.py]
1 # -*- coding: utf-8 -*-
2 # © 2016 Therp BV <http://therp.nl>
3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
4 {
5 "name": "Let's encrypt",
6 "version": "9.0.1.0.0",
7 "author": "Therp BV,"
8 "Tecnativa,"
9 "Odoo Community Association (OCA)",
10 "license": "AGPL-3",
11 "category": "Hidden/Dependency",
12 "summary": "Request SSL certificates from letsencrypt.org",
13 "depends": [
14 'base',
15 ],
16 "data": [
17 "data/ir_config_parameter.xml",
18 "data/ir_cron.xml",
19 ],
20 "post_init_hook": 'post_init_hook',
21 "installable": True,
22 "external_dependencies": {
23 'bin': [
24 'openssl',
25 ],
26 'python': [
27 'acme_tiny',
28 'IPy',
29 ],
30 },
31 }
32
[end of letsencrypt/__openerp__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/letsencrypt/__openerp__.py b/letsencrypt/__openerp__.py
--- a/letsencrypt/__openerp__.py
+++ b/letsencrypt/__openerp__.py
@@ -16,6 +16,7 @@
"data": [
"data/ir_config_parameter.xml",
"data/ir_cron.xml",
+ "demo/ir_cron.xml",
],
"post_init_hook": 'post_init_hook',
"installable": True,
| {"golden_diff": "diff --git a/letsencrypt/__openerp__.py b/letsencrypt/__openerp__.py\n--- a/letsencrypt/__openerp__.py\n+++ b/letsencrypt/__openerp__.py\n@@ -16,6 +16,7 @@\n \"data\": [\n \"data/ir_config_parameter.xml\",\n \"data/ir_cron.xml\",\n+ \"demo/ir_cron.xml\",\n ],\n \"post_init_hook\": 'post_init_hook',\n \"installable\": True,\n", "issue": "runbot 9.0 red due to letsencrypt?\nHi,\n\nIt seems the 9.0 branch is red on runbot due to the letsencrypt module?\n\n```\nCall of self.pool.get('letsencrypt').cron(cr, uid, *()) failed in Job 2\nTraceback (most recent call last):\n File \"/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/addons/base/ir/ir_cron.py\", line 129, in _callback\n getattr(model, method_name)(cr, uid, *args)\n File \"/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/api.py\", line 250, in wrapper\n return old_api(self, *args, **kwargs)\n File \"/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/api.py\", line 354, in old_api\n result = method(recs, *args, **kwargs)\n File \"/srv/openerp/instances/openerp-oca-runbot/parts/odoo-extra/runbot/static/build/3148182-9-0-209efa/openerp/addons/letsencrypt/models/letsencrypt.py\", line 151, in cron\n account_key, csr, acme_challenge, log=_logger, CA=DEFAULT_CA)\n File \"/srv/openerp/instances/openerp-oca-runbot/sandbox/local/lib/python2.7/site-packages/acme_tiny.py\", line 104, in get_crt\n raise ValueError(\"Error requesting challenges: {0} {1}\".format(code, result))\nValueError: Error requesting challenges: 400 {\n \"type\": \"urn:acme:error:malformed\",\n \"detail\": \"Error creating new authz :: Invalid character in DNS name\",\n \"status\": 400\n}\n```\n\n@hbrunn \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2016 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n{\n \"name\": \"Let's encrypt\",\n \"version\": \"9.0.1.0.0\",\n \"author\": \"Therp BV,\"\n \"Tecnativa,\"\n \"Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Hidden/Dependency\",\n \"summary\": \"Request SSL certificates from letsencrypt.org\",\n \"depends\": [\n 'base',\n ],\n \"data\": [\n \"data/ir_config_parameter.xml\",\n \"data/ir_cron.xml\",\n ],\n \"post_init_hook\": 'post_init_hook',\n \"installable\": True,\n \"external_dependencies\": {\n 'bin': [\n 'openssl',\n ],\n 'python': [\n 'acme_tiny',\n 'IPy',\n ],\n },\n}\n", "path": "letsencrypt/__openerp__.py"}]} | 1,314 | 114 |
gh_patches_debug_615 | rasdani/github-patches | git_diff | pex-tool__pex-1255 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.32
On the docket:
+ [x] Venv `pex` and bin scripts can run afoul of shebang length limits. #1252
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.31"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.31"
+__version__ = "2.1.32"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.31\"\n+__version__ = \"2.1.32\"\n", "issue": "Release 2.1.32\nOn the docket:\r\n+ [x] Venv `pex` and bin scripts can run afoul of shebang length limits. #1252\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.31\"\n", "path": "pex/version.py"}]} | 626 | 97 |
gh_patches_debug_56669 | rasdani/github-patches | git_diff | magenta__magenta-1079 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error in running Onsets and Frames Colab Notebook
Hi @cghawthorne
I am using your [Colab notebook](https://colab.research.google.com/notebook#fileId=/v2/external/notebooks/magenta/onsets_frames_transcription/onsets_frames_transcription.ipynb) to test your model but it stopped working a week ago.
Error on the inference section:
UnknownError: exceptions.AttributeError: 'module' object has no attribute 'logamplitude'
[[Node: wav_to_spec = PyFunc[Tin=[DT_STRING], Tout=[DT_FLOAT], token="pyfunc_1"](transform_wav_data_op)]]
[[Node: IteratorGetNext = IteratorGetNext[output_shapes=[[?], [?,?,88], [?,?,88], [?], [?], [?,?,88], [?,?,229,1]], output_types=[DT_STRING, DT_FLOAT, DT_FLOAT, DT_INT32, DT_STRING, DT_FLOAT, DT_FLOAT], _device="/job:localhost/replica:0/task:0/device:CPU:0"](Iterator)]]
Thanks,
Bardia
</issue>
<code>
[start of magenta/version.py]
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 r"""Separate file for storing the current version of Magenta.
15
16 Stored in a separate file so that setup.py can reference the version without
17 pulling in all the dependencies in __init__.py.
18 """
19
20 __version__ = '0.3.5'
21
[end of magenta/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/magenta/version.py b/magenta/version.py
--- a/magenta/version.py
+++ b/magenta/version.py
@@ -17,4 +17,4 @@
pulling in all the dependencies in __init__.py.
"""
-__version__ = '0.3.5'
+__version__ = '0.3.6'
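The `logamplitude` AttributeError is characteristic of a librosa API change rather than a Magenta bug per se: to my reading of librosa's changelog, `librosa.logamplitude` was deprecated in 0.5 and removed in 0.6 in favour of `power_to_db`/`amplitude_to_db`, and the 0.3.6 bump above presumably picks up code updated for the new names. A local compatibility shim, assuming librosa is installed, would look like:

```python
import librosa

# Map the removed helper onto its modern replacement so old call sites keep
# working; logamplitude operated on power spectrograms, hence power_to_db.
if not hasattr(librosa, "logamplitude"):
    librosa.logamplitude = librosa.power_to_db
```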
| {"golden_diff": "diff --git a/magenta/version.py b/magenta/version.py\n--- a/magenta/version.py\n+++ b/magenta/version.py\n@@ -17,4 +17,4 @@\n pulling in all the dependencies in __init__.py.\n \"\"\"\n \n-__version__ = '0.3.5'\n+__version__ = '0.3.6'\n", "issue": "Error in running Onsets and Frames Colab Notebook\nHi @cghawthorne\r\nI am using your [Colab notebook](https://colab.research.google.com/notebook#fileId=/v2/external/notebooks/magenta/onsets_frames_transcription/onsets_frames_transcription.ipynb) to test your model but it stopped working a week ago.\r\n\r\nError on the inference section:\r\nUnknownError: exceptions.AttributeError: 'module' object has no attribute 'logamplitude'\r\n\t [[Node: wav_to_spec = PyFunc[Tin=[DT_STRING], Tout=[DT_FLOAT], token=\"pyfunc_1\"](transform_wav_data_op)]]\r\n\t [[Node: IteratorGetNext = IteratorGetNext[output_shapes=[[?], [?,?,88], [?,?,88], [?], [?], [?,?,88], [?,?,229,1]], output_types=[DT_STRING, DT_FLOAT, DT_FLOAT, DT_INT32, DT_STRING, DT_FLOAT, DT_FLOAT], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](Iterator)]]\r\n\r\nThanks,\r\nBardia\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Separate file for storing the current version of Magenta.\n\nStored in a separate file so that setup.py can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n\n__version__ = '0.3.5'\n", "path": "magenta/version.py"}]} | 997 | 78 |
gh_patches_debug_11826 | rasdani/github-patches | git_diff | pypi__warehouse-12408 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Monospaced font for text/plain long_description
Don't you think it would be nice to wrap text/plain project descriptions in a pre tag?
Close if duplicate. I'm really sorry; looking through over 400 issues of a production system is beyond my capabilities.
</issue>
<code>
[start of warehouse/utils/readme.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 """Utils for rendering and updating package descriptions (READMEs)."""
14
15 import cgi
16
17 import pkg_resources
18 import readme_renderer.markdown
19 import readme_renderer.rst
20 import readme_renderer.txt
21
22 _RENDERERS = {
23 None: readme_renderer.rst, # Default if description_content_type is None
24 "": readme_renderer.rst, # Default if description_content_type is None
25 "text/plain": readme_renderer.txt,
26 "text/x-rst": readme_renderer.rst,
27 "text/markdown": readme_renderer.markdown,
28 }
29
30
31 def render(value, content_type=None, use_fallback=True):
32 if value is None:
33 return value
34
35 content_type, parameters = cgi.parse_header(content_type or "")
36
37 # Get the appropriate renderer
38 renderer = _RENDERERS.get(content_type, readme_renderer.txt)
39
40 # Actually render the given value, this will not only render the value, but
41 # also ensure that it's had any disallowed markup removed.
42 rendered = renderer.render(value, **parameters)
43
44 # If the content was not rendered, we'll render as plaintext instead. The
45 # reason it's necessary to do this instead of just accepting plaintext is
46 # that readme_renderer will deal with sanitizing the content.
47 # Skip the fallback option when validating that rendered output is ok.
48 if use_fallback and rendered is None:
49 rendered = readme_renderer.txt.render(value)
50
51 return rendered
52
53
54 def renderer_version():
55 return pkg_resources.get_distribution("readme-renderer").version
56
[end of warehouse/utils/readme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/utils/readme.py b/warehouse/utils/readme.py
--- a/warehouse/utils/readme.py
+++ b/warehouse/utils/readme.py
@@ -41,6 +41,10 @@
# also ensure that it's had any disallowed markup removed.
rendered = renderer.render(value, **parameters)
+ # Wrap plaintext as preformatted to preserve whitespace.
+ if content_type == "text/plain":
+ rendered = f"<pre>{rendered}</pre>"
+
# If the content was not rendered, we'll render as plaintext instead. The
# reason it's necessary to do this instead of just accepting plaintext is
# that readme_renderer will deal with sanitizing the content.
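Expected behaviour after the patch, sketched through the module's public entry point. This requires the warehouse package and readme_renderer on the path, and the output in the comments is what I would expect from the code above, not captured from a live run:

```python
from warehouse.utils.readme import render

html = render("line one\n    indented ascii art", content_type="text/plain")
# Expected: "<pre>line one\n    indented ascii art</pre>", i.e. the sanitized
# plaintext keeps its whitespace because it is wrapped in a preformatted block.
print(html)
```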
| {"golden_diff": "diff --git a/warehouse/utils/readme.py b/warehouse/utils/readme.py\n--- a/warehouse/utils/readme.py\n+++ b/warehouse/utils/readme.py\n@@ -41,6 +41,10 @@\n # also ensure that it's had any disallowed markup removed.\n rendered = renderer.render(value, **parameters)\n \n+ # Wrap plaintext as preformatted to preserve whitespace.\n+ if content_type == \"text/plain\":\n+ rendered = f\"<pre>{rendered}</pre>\"\n+\n # If the content was not rendered, we'll render as plaintext instead. The\n # reason it's necessary to do this instead of just accepting plaintext is\n # that readme_renderer will deal with sanitizing the content.\n", "issue": "Monospaced font for text/plain long_description\nDon't you think that would be nice to wrap project descriptions in text/plain with pre tag?\r\nClose if duplicate. I'm really sorry, looking through over 400 issues of production system is beyond of my capabilities.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utils for rendering and updating package descriptions (READMEs).\"\"\"\n\nimport cgi\n\nimport pkg_resources\nimport readme_renderer.markdown\nimport readme_renderer.rst\nimport readme_renderer.txt\n\n_RENDERERS = {\n None: readme_renderer.rst, # Default if description_content_type is None\n \"\": readme_renderer.rst, # Default if description_content_type is None\n \"text/plain\": readme_renderer.txt,\n \"text/x-rst\": readme_renderer.rst,\n \"text/markdown\": readme_renderer.markdown,\n}\n\n\ndef render(value, content_type=None, use_fallback=True):\n if value is None:\n return value\n\n content_type, parameters = cgi.parse_header(content_type or \"\")\n\n # Get the appropriate renderer\n renderer = _RENDERERS.get(content_type, readme_renderer.txt)\n\n # Actually render the given value, this will not only render the value, but\n # also ensure that it's had any disallowed markup removed.\n rendered = renderer.render(value, **parameters)\n\n # If the content was not rendered, we'll render as plaintext instead. The\n # reason it's necessary to do this instead of just accepting plaintext is\n # that readme_renderer will deal with sanitizing the content.\n # Skip the fallback option when validating that rendered output is ok.\n if use_fallback and rendered is None:\n rendered = readme_renderer.txt.render(value)\n\n return rendered\n\n\ndef renderer_version():\n return pkg_resources.get_distribution(\"readme-renderer\").version\n", "path": "warehouse/utils/readme.py"}]} | 1,139 | 158 |
gh_patches_debug_18559 | rasdani/github-patches | git_diff | pytorch__vision-360 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
utils.make_grid should not return tensors of dimension 2 or 3 without normalizing them
When passing in a single image of dimensionality 2 or 3 to utils.make_grid, the function currently returns this image, without normalizing it (but it should, according to the function's documentation).
This is also problematic as utils.save_image calls utils.make_grid to normalize its images.
</issue>
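A minimal reproduction of the reported behaviour (requires torch plus a torchvision build from before this patch; the affected-version assumption is mine):

```python
import torch
from torchvision.utils import make_grid

img = 10 + 5 * torch.rand(4, 4)       # single H x W image, values in [10, 15]
out = make_grid(img, normalize=True)  # bug: returned early, never normalized
print(out.max().item() > 1)           # True on affected builds; should be False
```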
<code>
[start of torchvision/utils.py]
1 import torch
2 import math
3 irange = range
4
5
6 def make_grid(tensor, nrow=8, padding=2,
7 normalize=False, range=None, scale_each=False, pad_value=0):
8 """Make a grid of images.
9
10 Args:
11 tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
12 or a list of images all of the same size.
13 nrow (int, optional): Number of images displayed in each row of the grid.
14 The Final grid size is (B / nrow, nrow). Default is 8.
15 padding (int, optional): amount of padding. Default is 2.
16 normalize (bool, optional): If True, shift the image to the range (0, 1),
17 by subtracting the minimum and dividing by the maximum pixel value.
18 range (tuple, optional): tuple (min, max) where min and max are numbers,
19 then these numbers are used to normalize the image. By default, min and max
20 are computed from the tensor.
21 scale_each (bool, optional): If True, scale each image in the batch of
22 images separately rather than the (min, max) over all images.
23 pad_value (float, optional): Value for the padded pixels.
24
25 Example:
26 See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
27
28 """
29 if not (torch.is_tensor(tensor) or
30 (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
31 raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
32
33 # if list of tensors, convert to a 4D mini-batch Tensor
34 if isinstance(tensor, list):
35 tensor = torch.stack(tensor, dim=0)
36
37 if tensor.dim() == 2: # single image H x W
38 tensor = tensor.view(1, tensor.size(0), tensor.size(1))
39 if tensor.dim() == 3: # single image
40 if tensor.size(0) == 1: # if single-channel, convert to 3-channel
41 tensor = torch.cat((tensor, tensor, tensor), 0)
42 return tensor
43 if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
44 tensor = torch.cat((tensor, tensor, tensor), 1)
45
46 if normalize is True:
47 tensor = tensor.clone() # avoid modifying tensor in-place
48 if range is not None:
49 assert isinstance(range, tuple), \
50 "range has to be a tuple (min, max) if specified. min and max are numbers"
51
52 def norm_ip(img, min, max):
53 img.clamp_(min=min, max=max)
54 img.add_(-min).div_(max - min)
55
56 def norm_range(t, range):
57 if range is not None:
58 norm_ip(t, range[0], range[1])
59 else:
60 norm_ip(t, t.min(), t.max())
61
62 if scale_each is True:
63 for t in tensor: # loop over mini-batch dimension
64 norm_range(t, range)
65 else:
66 norm_range(tensor, range)
67
68 # make the mini-batch of images into a grid
69 nmaps = tensor.size(0)
70 xmaps = min(nrow, nmaps)
71 ymaps = int(math.ceil(float(nmaps) / xmaps))
72 height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
73 grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)
74 k = 0
75 for y in irange(ymaps):
76 for x in irange(xmaps):
77 if k >= nmaps:
78 break
79 grid.narrow(1, y * height + padding, height - padding)\
80 .narrow(2, x * width + padding, width - padding)\
81 .copy_(tensor[k])
82 k = k + 1
83 return grid
84
85
86 def save_image(tensor, filename, nrow=8, padding=2,
87 normalize=False, range=None, scale_each=False, pad_value=0):
88 """Save a given Tensor into an image file.
89
90 Args:
91 tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
92 saves the tensor as a grid of images by calling ``make_grid``.
93 **kwargs: Other arguments are documented in ``make_grid``.
94 """
95 from PIL import Image
96 grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
97 normalize=normalize, range=range, scale_each=scale_each)
98 ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
99 im = Image.fromarray(ndarr)
100 im.save(filename)
101
[end of torchvision/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/utils.py b/torchvision/utils.py
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -39,7 +39,8 @@
if tensor.dim() == 3: # single image
if tensor.size(0) == 1: # if single-channel, convert to 3-channel
tensor = torch.cat((tensor, tensor, tensor), 0)
- return tensor
+ tensor = tensor.view(1, tensor.size(0), tensor.size(1), tensor.size(2))
+
if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
tensor = torch.cat((tensor, tensor, tensor), 1)
@@ -65,6 +66,9 @@
else:
norm_range(tensor, range)
+ if tensor.size(0) == 1:
+ return tensor.squeeze()
+
# make the mini-batch of images into a grid
nmaps = tensor.size(0)
xmaps = min(nrow, nmaps)
| {"golden_diff": "diff --git a/torchvision/utils.py b/torchvision/utils.py\n--- a/torchvision/utils.py\n+++ b/torchvision/utils.py\n@@ -39,7 +39,8 @@\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n- return tensor\n+ tensor = tensor.view(1, tensor.size(0), tensor.size(1), tensor.size(2))\n+\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n \n@@ -65,6 +66,9 @@\n else:\n norm_range(tensor, range)\n \n+ if tensor.size(0) == 1:\n+ return tensor.squeeze()\n+\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n", "issue": "utils.make_grid should not return tensors of dimension 2 or 3 without normalizing them\nWhen passing in a single image of dimensionality 2 or 3 to utils.make_grid, the function currently returns this image, without normalizing it (but it should, according to the function's documentation).\r\nThis is also problematic as utils.save_image calls utils.make_grid to normalize its images. \n", "before_files": [{"content": "import torch\nimport math\nirange = range\n\n\ndef make_grid(tensor, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The Final grid size is (B / nrow, nrow). Default is 8.\n padding (int, optional): amount of padding. Default is 2.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by subtracting the minimum and dividing by the maximum pixel value.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If True, scale each image in the batch of\n images separately rather than the (min, max) over all images.\n pad_value (float, optional): Value for the padded pixels.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.view(1, tensor.size(0), tensor.size(1))\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n return tensor\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. 
min and max are numbers\"\n\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min)\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, t.min(), t.max())\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(tensor, filename, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "path": "torchvision/utils.py"}]} | 1,911 | 242 |
gh_patches_debug_14559 | rasdani/github-patches | git_diff | Mailu__Mailu-891 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fetchmail sslproto configuration (typo?)
The fetchmail container repeatedly emits the following warning:
`fetchmail: Invalid SSL protocol 'AUTO' specified, using default (SSLv23).`
From the fetchmail manpage, this appears to relate to the `--sslproto` switch.
IMHO the default should be an all lowercase `auto`. Otherwise, an improvement suggestion would be to make this configurable through the admin interface.
</issue>
<code>
[start of services/fetchmail/fetchmail.py]
1 #!/usr/bin/python3
2
3 import time
4 import os
5 import tempfile
6 import shlex
7 import subprocess
8 import re
9 import requests
10
11
12 FETCHMAIL = """
13 fetchmail -N \
14 --sslcertck --sslcertpath /etc/ssl/certs \
15 -f {}
16 """
17
18
19 RC_LINE = """
20 poll "{host}" proto {protocol} port {port}
21 user "{username}" password "{password}"
22 is "{user_email}"
23 smtphost "{smtphost}"
24 {options}
25 sslproto 'AUTO'
26 """
27
28
29 def extract_host_port(host_and_port, default_port):
30 host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()
31 return host, int(port) if port else default_port
32
33
34 def escape_rc_string(arg):
35 return arg.replace("\\", "\\\\").replace('"', '\\"')
36
37
38 def fetchmail(fetchmailrc):
39 with tempfile.NamedTemporaryFile() as handler:
40 handler.write(fetchmailrc.encode("utf8"))
41 handler.flush()
42 command = FETCHMAIL.format(shlex.quote(handler.name))
43 output = subprocess.check_output(command, shell=True)
44 return output
45
46
47 def run(debug):
48 fetches = requests.get("http://admin/internal/fetch").json()
49 smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None)
50 if smtpport is None:
51 smtphostport = smtphost
52 else:
53 smtphostport = "%s/%d" % (smtphost, smtpport)
54 for fetch in fetches:
55 fetchmailrc = ""
56 options = "options antispam 501, 504, 550, 553, 554"
57 options += " ssl" if fetch["tls"] else ""
58 options += " keep" if fetch["keep"] else " fetchall"
59 fetchmailrc += RC_LINE.format(
60 user_email=escape_rc_string(fetch["user_email"]),
61 protocol=fetch["protocol"],
62 host=escape_rc_string(fetch["host"]),
63 port=fetch["port"],
64 smtphost=smtphostport,
65 username=escape_rc_string(fetch["username"]),
66 password=escape_rc_string(fetch["password"]),
67 options=options
68 )
69 if debug:
70 print(fetchmailrc)
71 try:
72 print(fetchmail(fetchmailrc))
73 error_message = ""
74 except subprocess.CalledProcessError as error:
75 error_message = error.output.decode("utf8")
76 # No mail is not an error
77 if not error_message.startswith("fetchmail: No mail"):
78 print(error_message)
79 user_info = "for %s at %s" % (fetch["user_email"], fetch["host"])
80 # Number of messages seen is not a error as well
81 if ("messages" in error_message and
82 "(seen " in error_message and
83 user_info in error_message):
84 print(error_message)
85 finally:
86 requests.post("http://admin/internal/fetch/{}".format(fetch["id"]),
87 json=error_message.split("\n")[0]
88 )
89
90
91 if __name__ == "__main__":
92 while True:
93 time.sleep(int(os.environ.get("FETCHMAIL_DELAY", 60)))
94 run(os.environ.get("DEBUG", None) == "True")
95
96
[end of services/fetchmail/fetchmail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/services/fetchmail/fetchmail.py b/services/fetchmail/fetchmail.py
--- a/services/fetchmail/fetchmail.py
+++ b/services/fetchmail/fetchmail.py
@@ -22,7 +22,6 @@
is "{user_email}"
smtphost "{smtphost}"
{options}
- sslproto 'AUTO'
"""
@@ -54,7 +53,7 @@
for fetch in fetches:
fetchmailrc = ""
options = "options antispam 501, 504, 550, 553, 554"
- options += " ssl" if fetch["tls"] else ""
+ options += " sslmode wrapped" if fetch["tls"] else ""
options += " keep" if fetch["keep"] else " fetchall"
fetchmailrc += RC_LINE.format(
user_email=escape_rc_string(fetch["user_email"]),
| {"golden_diff": "diff --git a/services/fetchmail/fetchmail.py b/services/fetchmail/fetchmail.py\n--- a/services/fetchmail/fetchmail.py\n+++ b/services/fetchmail/fetchmail.py\n@@ -22,7 +22,6 @@\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n- sslproto 'AUTO'\n \"\"\"\n \n \n@@ -54,7 +53,7 @@\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n- options += \" ssl\" if fetch[\"tls\"] else \"\"\n+ options += \" sslmode wrapped\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n", "issue": "Fetchmail sslproto configuration (typo?)\nThe fetchmail container repeatedly fires the following warning\r\n\r\n`fetchmail: Invalid SSL protocol 'AUTO' specified, using default (SSLv23).`\r\n\r\nFrom the fetchmail manpage, this appears to relate to the `--sslproto` switch. \r\n\r\nIMHO the default should be an all lowercase `auto`. Otherwise, an improvement suggestion would be to make this configurable through the admin interface.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n sslproto 'AUTO'\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return arg.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', '\\\\\"')\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n fetches = requests.get(\"http://admin/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n 
requests.post(\"http://admin/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n\n", "path": "services/fetchmail/fetchmail.py"}]} | 1,536 | 212 |
gh_patches_debug_18953 | rasdani/github-patches | git_diff | goauthentik__authentik-4920 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Session duration not working correctly
**Describe the bug**
I changed the session duration of the default-authentication-login to 18 hours. Still, after a login the session is valid for 14 days.
For me, it looks like the session duration value is ignored.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'Admin interface'
2. Click on 'Flows & Stages'
3. Edit 'default-authentication-login'
4. Change 'Session duration' to 'hours=18;minutes=0;seconds=0'
5. Logout & Login
6. Click on the settings-icon (“wheel”)
7. Check the session duration.
**Expected behavior**
I want every user to have to authenticate once a day; hence the session duration of 18 hours.
**Screenshots**


**Logs**
There are no logs.
**Version and Deployment (please complete the following information):**
- authentik 2023.1.2
- Deployment: docker-compose
</issue>
<code>
[start of authentik/stages/user_login/stage.py]
1 """Login stage logic"""
2 from django.contrib import messages
3 from django.contrib.auth import login
4 from django.http import HttpRequest, HttpResponse
5 from django.utils.translation import gettext as _
6
7 from authentik.core.models import AuthenticatedSession, User
8 from authentik.flows.planner import PLAN_CONTEXT_PENDING_USER, PLAN_CONTEXT_SOURCE
9 from authentik.flows.stage import StageView
10 from authentik.lib.utils.time import timedelta_from_string
11 from authentik.stages.password import BACKEND_INBUILT
12 from authentik.stages.password.stage import PLAN_CONTEXT_AUTHENTICATION_BACKEND
13
14
15 class UserLoginStageView(StageView):
16 """Finalise Authentication flow by logging the user in"""
17
18 def post(self, request: HttpRequest) -> HttpResponse:
19 """Wrapper for post requests"""
20 return self.get(request)
21
22 def get(self, request: HttpRequest) -> HttpResponse:
23 """Attach the currently pending user to the current session"""
24 if PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context:
25 message = _("No Pending user to login.")
26 messages.error(request, message)
27 self.logger.debug(message)
28 return self.executor.stage_invalid()
29 backend = self.executor.plan.context.get(
30 PLAN_CONTEXT_AUTHENTICATION_BACKEND, BACKEND_INBUILT
31 )
32 user: User = self.executor.plan.context[PLAN_CONTEXT_PENDING_USER]
33 if not user.is_active:
34 self.logger.warning("User is not active, login will not work.")
35 login(
36 self.request,
37 user,
38 backend=backend,
39 )
40 delta = timedelta_from_string(self.executor.current_stage.session_duration)
41 if delta.total_seconds() == 0:
42 self.request.session.set_expiry(0)
43 else:
44 self.request.session.set_expiry(delta)
45 self.logger.debug(
46 "Logged in",
47 backend=backend,
48 user=user.username,
49 flow_slug=self.executor.flow.slug,
50 session_duration=self.executor.current_stage.session_duration,
51 )
52 # Only show success message if we don't have a source in the flow
53 # as sources show their own success messages
54 if not self.executor.plan.context.get(PLAN_CONTEXT_SOURCE, None):
55 messages.success(self.request, _("Successfully logged in!"))
56 if self.executor.current_stage.terminate_other_sessions:
57 AuthenticatedSession.objects.filter(
58 user=user,
59 ).exclude(session_key=self.request.session.session_key).delete()
60 return self.executor.stage_ok()
61
[end of authentik/stages/user_login/stage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/stages/user_login/stage.py b/authentik/stages/user_login/stage.py
--- a/authentik/stages/user_login/stage.py
+++ b/authentik/stages/user_login/stage.py
@@ -32,16 +32,16 @@
user: User = self.executor.plan.context[PLAN_CONTEXT_PENDING_USER]
if not user.is_active:
self.logger.warning("User is not active, login will not work.")
- login(
- self.request,
- user,
- backend=backend,
- )
delta = timedelta_from_string(self.executor.current_stage.session_duration)
if delta.total_seconds() == 0:
self.request.session.set_expiry(0)
else:
self.request.session.set_expiry(delta)
+ login(
+ self.request,
+ user,
+ backend=backend,
+ )
self.logger.debug(
"Logged in",
backend=backend,
| {"golden_diff": "diff --git a/authentik/stages/user_login/stage.py b/authentik/stages/user_login/stage.py\n--- a/authentik/stages/user_login/stage.py\n+++ b/authentik/stages/user_login/stage.py\n@@ -32,16 +32,16 @@\n user: User = self.executor.plan.context[PLAN_CONTEXT_PENDING_USER]\n if not user.is_active:\n self.logger.warning(\"User is not active, login will not work.\")\n- login(\n- self.request,\n- user,\n- backend=backend,\n- )\n delta = timedelta_from_string(self.executor.current_stage.session_duration)\n if delta.total_seconds() == 0:\n self.request.session.set_expiry(0)\n else:\n self.request.session.set_expiry(delta)\n+ login(\n+ self.request,\n+ user,\n+ backend=backend,\n+ )\n self.logger.debug(\n \"Logged in\",\n backend=backend,\n", "issue": "Session duration not working correctly\n**Describe the bug**\r\nI changed the session duration of the default-authentication-login to 18 hours. Still, after a login the session is valid for 14 days.\r\nFor me, it looks like the session duration value is ignored.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'Admin interface'\r\n2. Click on 'Flows & Stages'\r\n3. Edit 'default-authentication-login'\r\n4. Change 'Session duration' to 'hours=18;minutes=0;seconds=0'\r\n5. Logout & Login\r\n6. Click on the settings-icon (\u201cwheel\u201d)\r\n7. Check the session duration.\r\n\r\n**Expected behavior**\r\nI want to achieve, that every user has to authenticate once a day. Therefore, the session duration of 18 hours. \r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\r\n**Logs**\r\nThere are no logs.\r\n\r\n**Version and Deployment (please complete the following information):**\r\n - authentik 2023.1.2\r\n - Deployment: docker-compose\r\n\n", "before_files": [{"content": "\"\"\"Login stage logic\"\"\"\nfrom django.contrib import messages\nfrom django.contrib.auth import login\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import gettext as _\n\nfrom authentik.core.models import AuthenticatedSession, User\nfrom authentik.flows.planner import PLAN_CONTEXT_PENDING_USER, PLAN_CONTEXT_SOURCE\nfrom authentik.flows.stage import StageView\nfrom authentik.lib.utils.time import timedelta_from_string\nfrom authentik.stages.password import BACKEND_INBUILT\nfrom authentik.stages.password.stage import PLAN_CONTEXT_AUTHENTICATION_BACKEND\n\n\nclass UserLoginStageView(StageView):\n \"\"\"Finalise Authentication flow by logging the user in\"\"\"\n\n def post(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Wrapper for post requests\"\"\"\n return self.get(request)\n\n def get(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Attach the currently pending user to the current session\"\"\"\n if PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context:\n message = _(\"No Pending user to login.\")\n messages.error(request, message)\n self.logger.debug(message)\n return self.executor.stage_invalid()\n backend = self.executor.plan.context.get(\n PLAN_CONTEXT_AUTHENTICATION_BACKEND, BACKEND_INBUILT\n )\n user: User = self.executor.plan.context[PLAN_CONTEXT_PENDING_USER]\n if not user.is_active:\n self.logger.warning(\"User is not active, login will not work.\")\n login(\n self.request,\n user,\n backend=backend,\n )\n delta = timedelta_from_string(self.executor.current_stage.session_duration)\n if delta.total_seconds() == 0:\n self.request.session.set_expiry(0)\n else:\n self.request.session.set_expiry(delta)\n self.logger.debug(\n \"Logged in\",\n backend=backend,\n user=user.username,\n 
flow_slug=self.executor.flow.slug,\n session_duration=self.executor.current_stage.session_duration,\n )\n # Only show success message if we don't have a source in the flow\n # as sources show their own success messages\n if not self.executor.plan.context.get(PLAN_CONTEXT_SOURCE, None):\n messages.success(self.request, _(\"Successfully logged in!\"))\n if self.executor.current_stage.terminate_other_sessions:\n AuthenticatedSession.objects.filter(\n user=user,\n ).exclude(session_key=self.request.session.session_key).delete()\n return self.executor.stage_ok()\n", "path": "authentik/stages/user_login/stage.py"}]} | 1,489 | 206 |
gh_patches_debug_35310 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-57 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cloud SDK credentials should use the 'active' config not the 'default' config
Context: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2588
</issue>
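For context, a hedged sketch of resolving the active configuration. The `active_config` file name and the `configurations/config_<name>` layout are assumptions taken from how the gcloud CLI lays out its config directory; the helper name is illustrative.

```python
import io
import os


def active_config_name(config_path):
    """Sketch: return the gcloud configuration currently marked active,
    falling back to 'default' when no active_config marker file exists."""
    marker = os.path.join(config_path, "active_config")
    if not os.path.isfile(marker):
        return "default"
    with io.open(marker, "r", encoding="utf-8") as file_obj:
        return file_obj.read().strip()
```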
<code>
[start of google/auth/_cloud_sdk.py]
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Helpers for reading the Google Cloud SDK's configuration."""
16
17 import os
18
19 import six
20 from six.moves import configparser
21
22 from google.auth import environment_vars
23 import google.oauth2.credentials
24
25 # The Google OAuth 2.0 token endpoint. Used for authorized user credentials.
26 _GOOGLE_OAUTH2_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'
27
28 # The ~/.config subdirectory containing gcloud credentials.
29 _CONFIG_DIRECTORY = 'gcloud'
30 # Windows systems store config at %APPDATA%\gcloud
31 _WINDOWS_CONFIG_ROOT_ENV_VAR = 'APPDATA'
32 # The name of the file in the Cloud SDK config that contains default
33 # credentials.
34 _CREDENTIALS_FILENAME = 'application_default_credentials.json'
35 # The name of the file in the Cloud SDK config that contains the
36 # active configuration.
37 _ACTIVE_CONFIG_FILENAME = os.path.join(
38 'configurations', 'config_default')
39 # The config section and key for the project ID in the cloud SDK config.
40 _PROJECT_CONFIG_SECTION = 'core'
41 _PROJECT_CONFIG_KEY = 'project'
42
43
44 def get_config_path():
45 """Returns the absolute path the the Cloud SDK's configuration directory.
46
47 Returns:
48 str: The Cloud SDK config path.
49 """
50 # If the path is explicitly set, return that.
51 try:
52 return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]
53 except KeyError:
54 pass
55
56 # Non-windows systems store this at ~/.config/gcloud
57 if os.name != 'nt':
58 return os.path.join(
59 os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)
60 # Windows systems store config at %APPDATA%\gcloud
61 else:
62 try:
63 return os.path.join(
64 os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR],
65 _CONFIG_DIRECTORY)
66 except KeyError:
67 # This should never happen unless someone is really
68 # messing with things, but we'll cover the case anyway.
69 drive = os.environ.get('SystemDrive', 'C:')
70 return os.path.join(
71 drive, '\\', _CONFIG_DIRECTORY)
72
73
74 def get_application_default_credentials_path():
75 """Gets the path to the application default credentials file.
76
77 The path may or may not exist.
78
79 Returns:
80 str: The full path to application default credentials.
81 """
82 config_path = get_config_path()
83 return os.path.join(config_path, _CREDENTIALS_FILENAME)
84
85
86 def get_project_id():
87 """Gets the project ID from the Cloud SDK's configuration.
88
89 Returns:
90 Optional[str]: The project ID.
91 """
92 config_path = get_config_path()
93 config_file = os.path.join(config_path, _ACTIVE_CONFIG_FILENAME)
94
95 if not os.path.isfile(config_file):
96 return None
97
98 config = configparser.RawConfigParser()
99
100 try:
101 config.read(config_file)
102 except configparser.Error:
103 return None
104
105 if config.has_section(_PROJECT_CONFIG_SECTION):
106 return config.get(
107 _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)
108
109
110 def load_authorized_user_credentials(info):
111 """Loads an authorized user credential.
112
113 Args:
114 info (Mapping[str, str]): The loaded file's data.
115
116 Returns:
117 google.oauth2.credentials.Credentials: The constructed credentials.
118
119 Raises:
120 ValueError: if the info is in the wrong format or missing data.
121 """
122 keys_needed = set(('refresh_token', 'client_id', 'client_secret'))
123 missing = keys_needed.difference(six.iterkeys(info))
124
125 if missing:
126 raise ValueError(
127 'Authorized user info was not in the expected format, missing '
128 'fields {}.'.format(', '.join(missing)))
129
130 return google.oauth2.credentials.Credentials(
131 None, # No access token, must be refreshed.
132 refresh_token=info['refresh_token'],
133 token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,
134 client_id=info['client_id'],
135 client_secret=info['client_secret'])
136
[end of google/auth/_cloud_sdk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/google/auth/_cloud_sdk.py b/google/auth/_cloud_sdk.py
--- a/google/auth/_cloud_sdk.py
+++ b/google/auth/_cloud_sdk.py
@@ -14,6 +14,7 @@
"""Helpers for reading the Google Cloud SDK's configuration."""
+import io
import os
import six
@@ -32,10 +33,6 @@
# The name of the file in the Cloud SDK config that contains default
# credentials.
_CREDENTIALS_FILENAME = 'application_default_credentials.json'
-# The name of the file in the Cloud SDK config that contains the
-# active configuration.
-_ACTIVE_CONFIG_FILENAME = os.path.join(
- 'configurations', 'config_default')
# The config section and key for the project ID in the cloud SDK config.
_PROJECT_CONFIG_SECTION = 'core'
_PROJECT_CONFIG_KEY = 'project'
@@ -83,6 +80,40 @@
return os.path.join(config_path, _CREDENTIALS_FILENAME)
+def _get_active_config(config_path):
+ """Gets the active config for the Cloud SDK.
+
+ Args:
+ config_path (str): The Cloud SDK's config path.
+
+ Returns:
+ str: The active configuration name.
+ """
+ active_config_filename = os.path.join(config_path, 'active_config')
+
+ if not os.path.isfile(active_config_filename):
+ return 'default'
+
+ with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:
+ active_config_name = file_obj.read().strip()
+
+ return active_config_name
+
+
+def _get_config_file(config_path, config_name):
+ """Returns the full path to a configuration's config file.
+
+ Args:
+ config_path (str): The Cloud SDK's config path.
+ config_name (str): The configuration name.
+
+ Returns:
+ str: The config file path.
+ """
+ return os.path.join(
+ config_path, 'configurations', 'config_{}'.format(config_name))
+
+
def get_project_id():
"""Gets the project ID from the Cloud SDK's configuration.
@@ -90,7 +121,8 @@
Optional[str]: The project ID.
"""
config_path = get_config_path()
- config_file = os.path.join(config_path, _ACTIVE_CONFIG_FILENAME)
+ active_config = _get_active_config(config_path)
+ config_file = _get_config_file(config_path, active_config)
if not os.path.isfile(config_file):
return None
| {"golden_diff": "diff --git a/google/auth/_cloud_sdk.py b/google/auth/_cloud_sdk.py\n--- a/google/auth/_cloud_sdk.py\n+++ b/google/auth/_cloud_sdk.py\n@@ -14,6 +14,7 @@\n \n \"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n \n+import io\n import os\n \n import six\n@@ -32,10 +33,6 @@\n # The name of the file in the Cloud SDK config that contains default\n # credentials.\n _CREDENTIALS_FILENAME = 'application_default_credentials.json'\n-# The name of the file in the Cloud SDK config that contains the\n-# active configuration.\n-_ACTIVE_CONFIG_FILENAME = os.path.join(\n- 'configurations', 'config_default')\n # The config section and key for the project ID in the cloud SDK config.\n _PROJECT_CONFIG_SECTION = 'core'\n _PROJECT_CONFIG_KEY = 'project'\n@@ -83,6 +80,40 @@\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n \n \n+def _get_active_config(config_path):\n+ \"\"\"Gets the active config for the Cloud SDK.\n+\n+ Args:\n+ config_path (str): The Cloud SDK's config path.\n+\n+ Returns:\n+ str: The active configuration name.\n+ \"\"\"\n+ active_config_filename = os.path.join(config_path, 'active_config')\n+\n+ if not os.path.isfile(active_config_filename):\n+ return 'default'\n+\n+ with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:\n+ active_config_name = file_obj.read().strip()\n+\n+ return active_config_name\n+\n+\n+def _get_config_file(config_path, config_name):\n+ \"\"\"Returns the full path to a configuration's config file.\n+\n+ Args:\n+ config_path (str): The Cloud SDK's config path.\n+ config_name (str): The configuration name.\n+\n+ Returns:\n+ str: The config file path.\n+ \"\"\"\n+ return os.path.join(\n+ config_path, 'configurations', 'config_{}'.format(config_name))\n+\n+\n def get_project_id():\n \"\"\"Gets the project ID from the Cloud SDK's configuration.\n \n@@ -90,7 +121,8 @@\n Optional[str]: The project ID.\n \"\"\"\n config_path = get_config_path()\n- config_file = os.path.join(config_path, _ACTIVE_CONFIG_FILENAME)\n+ active_config = _get_active_config(config_path)\n+ config_file = _get_config_file(config_path, active_config)\n \n if not os.path.isfile(config_file):\n return None\n", "issue": "Cloud SDK credentials should use the 'active' config not the 'default' config\nContext: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2588\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n\nimport os\n\nimport six\nfrom six.moves import configparser\n\nfrom google.auth import environment_vars\nimport google.oauth2.credentials\n\n# The Google OAuth 2.0 token endpoint. 
Used for authorized user credentials.\n_GOOGLE_OAUTH2_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'\n\n# The ~/.config subdirectory containing gcloud credentials.\n_CONFIG_DIRECTORY = 'gcloud'\n# Windows systems store config at %APPDATA%\\gcloud\n_WINDOWS_CONFIG_ROOT_ENV_VAR = 'APPDATA'\n# The name of the file in the Cloud SDK config that contains default\n# credentials.\n_CREDENTIALS_FILENAME = 'application_default_credentials.json'\n# The name of the file in the Cloud SDK config that contains the\n# active configuration.\n_ACTIVE_CONFIG_FILENAME = os.path.join(\n 'configurations', 'config_default')\n# The config section and key for the project ID in the cloud SDK config.\n_PROJECT_CONFIG_SECTION = 'core'\n_PROJECT_CONFIG_KEY = 'project'\n\n\ndef get_config_path():\n \"\"\"Returns the absolute path the the Cloud SDK's configuration directory.\n\n Returns:\n str: The Cloud SDK config path.\n \"\"\"\n # If the path is explicitly set, return that.\n try:\n return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]\n except KeyError:\n pass\n\n # Non-windows systems store this at ~/.config/gcloud\n if os.name != 'nt':\n return os.path.join(\n os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)\n # Windows systems store config at %APPDATA%\\gcloud\n else:\n try:\n return os.path.join(\n os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR],\n _CONFIG_DIRECTORY)\n except KeyError:\n # This should never happen unless someone is really\n # messing with things, but we'll cover the case anyway.\n drive = os.environ.get('SystemDrive', 'C:')\n return os.path.join(\n drive, '\\\\', _CONFIG_DIRECTORY)\n\n\ndef get_application_default_credentials_path():\n \"\"\"Gets the path to the application default credentials file.\n\n The path may or may not exist.\n\n Returns:\n str: The full path to application default credentials.\n \"\"\"\n config_path = get_config_path()\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n\n\ndef get_project_id():\n \"\"\"Gets the project ID from the Cloud SDK's configuration.\n\n Returns:\n Optional[str]: The project ID.\n \"\"\"\n config_path = get_config_path()\n config_file = os.path.join(config_path, _ACTIVE_CONFIG_FILENAME)\n\n if not os.path.isfile(config_file):\n return None\n\n config = configparser.RawConfigParser()\n\n try:\n config.read(config_file)\n except configparser.Error:\n return None\n\n if config.has_section(_PROJECT_CONFIG_SECTION):\n return config.get(\n _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)\n\n\ndef load_authorized_user_credentials(info):\n \"\"\"Loads an authorized user credential.\n\n Args:\n info (Mapping[str, str]): The loaded file's data.\n\n Returns:\n google.oauth2.credentials.Credentials: The constructed credentials.\n\n Raises:\n ValueError: if the info is in the wrong format or missing data.\n \"\"\"\n keys_needed = set(('refresh_token', 'client_id', 'client_secret'))\n missing = keys_needed.difference(six.iterkeys(info))\n\n if missing:\n raise ValueError(\n 'Authorized user info was not in the expected format, missing '\n 'fields {}.'.format(', '.join(missing)))\n\n return google.oauth2.credentials.Credentials(\n None, # No access token, must be refreshed.\n refresh_token=info['refresh_token'],\n token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,\n client_id=info['client_id'],\n client_secret=info['client_secret'])\n", "path": "google/auth/_cloud_sdk.py"}]} | 1,833 | 557 |
gh_patches_debug_28137 | rasdani/github-patches | git_diff | bokeh__bokeh-9234 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOCS] page for selection tools does not tell users how to get the values/indices of the selection
I've been talking with another bokeh user at Scipy 2019 and found we shared very similar frustrations when starting to work with bokeh selections. The problem is the [documentation page for selection tools](https://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#boxselecttool) at bokeh.pydata.org does not include information on how you can get the indices/values after you have selected something.
(It's not that there's no documentation on this; the problem is that it's split up and scattered around, and difficult to surface on Google.)
</issue>
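For reference, the kind of snippet the reporter wanted the tools page to surface, as a hedged sketch for a Bokeh server app; `ColumnDataSource.selected.indices` and `selected.on_change` are the standard selection hooks, and the callback name is illustrative.

```python
from bokeh.models import ColumnDataSource

source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[4, 5, 6]))


def report_selection(attr, old, new):
    # Row indices of the currently selected points in the data source.
    print("selected indices:", source.selected.indices)


source.selected.on_change("indices", report_selection)
```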
<code>
[start of bokeh/models/selections.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 #-----------------------------------------------------------------------------
7
8 #-----------------------------------------------------------------------------
9 # Boilerplate
10 #-----------------------------------------------------------------------------
11 from __future__ import absolute_import, division, print_function, unicode_literals
12
13 import logging
14 log = logging.getLogger(__name__)
15
16 #-----------------------------------------------------------------------------
17 # Imports
18 #-----------------------------------------------------------------------------
19
20 # Standard library imports
21
22 # External imports
23
24 # Bokeh imports
25 from ..core.has_props import abstract
26 from ..core.properties import Dict, Int, Seq, String
27 from ..model import Model
28
29 #-----------------------------------------------------------------------------
30 # Globals and constants
31 #-----------------------------------------------------------------------------
32
33 __all__ = (
34 'IntersectRenderers',
35 'Selection',
36 'SelectionPolicy',
37 'UnionRenderers',
38 )
39
40 #-----------------------------------------------------------------------------
41 # General API
42 #-----------------------------------------------------------------------------
43
44 class Selection(Model):
45 '''
46 A Selection represents a portion of the data in a ``DataSource``, which
47 can be visually manipulated in a plot.
48
49 Selections are typically created by selecting points in a plot with
50 a ``SelectTool``, but can also be programmatically specified.
51
52 '''
53
54 indices = Seq(Int, default=[], help="""
55 The indices included in a selection.
56 """)
57
58 line_indices = Seq(Int, default=[], help="""
59 """)
60
61 multiline_indices = Dict(String, Seq(Int), default={}, help="""
62 """)
63
64 # TODO (bev) image_indicies
65
66 @abstract
67 class SelectionPolicy(Model):
68 '''
69
70 '''
71
72 pass
73
74 class IntersectRenderers(SelectionPolicy):
75 '''
76 When a data source is shared between multiple renderers, a row in the data
77 source will only be selected if that point for each renderer is selected. The
78 selection is made from the intersection of hit test results from all renderers.
79
80 '''
81
82 pass
83
84 class UnionRenderers(SelectionPolicy):
85 '''
86 When a data source is shared between multiple renderers, selecting a point on
87 from any renderer will cause that row in the data source to be selected. The
88 selection is made from the union of hit test results from all renderers.
89
90 '''
91
92 pass
93
94 #-----------------------------------------------------------------------------
95 # Dev API
96 #-----------------------------------------------------------------------------
97
98 #-----------------------------------------------------------------------------
99 # Private API
100 #-----------------------------------------------------------------------------
101
102 #-----------------------------------------------------------------------------
103 # Code
104 #-----------------------------------------------------------------------------
105
[end of bokeh/models/selections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/models/selections.py b/bokeh/models/selections.py
--- a/bokeh/models/selections.py
+++ b/bokeh/models/selections.py
@@ -49,16 +49,37 @@
Selections are typically created by selecting points in a plot with
a ``SelectTool``, but can also be programmatically specified.
+ For most glyphs, the ``indices`` property is the relevant value to use.
+
'''
indices = Seq(Int, default=[], help="""
- The indices included in a selection.
+ The "scatter" level indices included in a selection. For example, for a
+    selection on a ``Circle`` glyph, this list records the indices of which
+ individual circles are selected.
+
+ For "multi" glyphs such as ``Patches``, ``MultiLine``, ``MultiPolygons``,
+ etc, this list records the indices of which entire sub-items are selected.
+    For example, which individual polygons of a ``MultiPolygon`` are selected.
""")
line_indices = Seq(Int, default=[], help="""
+ The point indices included in a selection on a ``Line`` glyph.
+
+ This value records the indices of the individual points on a ``Line`` that
+ were selected by a selection tool.
""")
multiline_indices = Dict(String, Seq(Int), default={}, help="""
+ The detailed point indices included in a selection on a ``MultiLine``.
+
+    This value records which points, on which lines, are part of a selection on
+    a ``MultiLine``. The keys are the top level indices (i.e., which line)
+ which map to lists of indices (i.e. which points on that line).
+
+ If you only need to know which lines are selected, without knowing what
+ individual points on those lines are selected, then you can look at the
+ keys of this dictionary (converted to ints).
""")
# TODO (bev) image_indicies
| {"golden_diff": "diff --git a/bokeh/models/selections.py b/bokeh/models/selections.py\n--- a/bokeh/models/selections.py\n+++ b/bokeh/models/selections.py\n@@ -49,16 +49,37 @@\n Selections are typically created by selecting points in a plot with\n a ``SelectTool``, but can also be programmatically specified.\n \n+ For most glyphs, the ``indices`` property is the relevant value to use.\n+\n '''\n \n indices = Seq(Int, default=[], help=\"\"\"\n- The indices included in a selection.\n+ The \"scatter\" level indices included in a selection. For example, for a\n+ selection on a ``Circle`` glyph, this list records the indices of whicn\n+ individual circles are selected.\n+\n+ For \"multi\" glyphs such as ``Patches``, ``MultiLine``, ``MultiPolygons``,\n+ etc, this list records the indices of which entire sub-items are selected.\n+ For example, which indidual polygons of a ``MultiPolygon`` are selected.\n \"\"\")\n \n line_indices = Seq(Int, default=[], help=\"\"\"\n+ The point indices included in a selection on a ``Line`` glyph.\n+\n+ This value records the indices of the individual points on a ``Line`` that\n+ were selected by a selection tool.\n \"\"\")\n \n multiline_indices = Dict(String, Seq(Int), default={}, help=\"\"\"\n+ The detailed point indices included in a selection on a ``MultiLine``.\n+\n+ This value records which points, on which lines, are part of a seletion on\n+ a ``MulitLine``. The keys are the top level indices (i.e., which line)\n+ which map to lists of indices (i.e. which points on that line).\n+\n+ If you only need to know which lines are selected, without knowing what\n+ individual points on those lines are selected, then you can look at the\n+ keys of this dictionary (converted to ints).\n \"\"\")\n \n # TODO (bev) image_indicies\n", "issue": "[DOCS] page for selection tools does not tell users how to get the values/indices of the selection\nI've been talking with another bokeh user at Scipy 2019 and found we shared very similar frustrations when starting to work with bokeh selections. 
The problem is the [documentation page for selection tools](https://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#boxselecttool\r\n) at bokeh.pydata.org does not include information on how you can get the indices/values after you have selected something.\r\n\r\n(It's not that there's no documentation on this, the problem is that it's split up & scattered around the place, plus is difficult to surface on google.)\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\n\n# External imports\n\n# Bokeh imports\nfrom ..core.has_props import abstract\nfrom ..core.properties import Dict, Int, Seq, String\nfrom ..model import Model\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'IntersectRenderers',\n 'Selection',\n 'SelectionPolicy',\n 'UnionRenderers',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\nclass Selection(Model):\n '''\n A Selection represents a portion of the data in a ``DataSource``, which\n can be visually manipulated in a plot.\n\n Selections are typically created by selecting points in a plot with\n a ``SelectTool``, but can also be programmatically specified.\n\n '''\n\n indices = Seq(Int, default=[], help=\"\"\"\n The indices included in a selection.\n \"\"\")\n\n line_indices = Seq(Int, default=[], help=\"\"\"\n \"\"\")\n\n multiline_indices = Dict(String, Seq(Int), default={}, help=\"\"\"\n \"\"\")\n\n # TODO (bev) image_indicies\n\n@abstract\nclass SelectionPolicy(Model):\n '''\n\n '''\n\n pass\n\nclass IntersectRenderers(SelectionPolicy):\n '''\n When a data source is shared between multiple renderers, a row in the data\n source will only be selected if that point for each renderer is selected. The\n selection is made from the intersection of hit test results from all renderers.\n\n '''\n\n pass\n\nclass UnionRenderers(SelectionPolicy):\n '''\n When a data source is shared between multiple renderers, selecting a point on\n from any renderer will cause that row in the data source to be selected. 
The\n selection is made from the union of hit test results from all renderers.\n\n '''\n\n pass\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/models/selections.py"}]} | 1,413 | 454 |
gh_patches_debug_36723 | rasdani/github-patches | git_diff | mindsdb__lightwood-689 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect `Sktime` forecasting horizon starting point
* Lightwood version: 1.5.0
From a few internal tests, it seems the `sktime` time series mixer is not emitting forecasts from the end of the validation dataset, but from the training dataset instead, leading to predictions that will be incorrectly displaced.
</issue>
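A hedged sketch of the displacement being described, using sktime's public API directly (the series values and horizon length here are made up). A relative `ForecastingHorizon` counts steps from the cutoff of the data the forecaster last saw, so a model fitted only on the training split starts forecasting immediately after the training data rather than after the validation data.

```python
import numpy as np
import pandas as pd
from sktime.forecasting.arima import AutoARIMA
from sktime.forecasting.base import ForecastingHorizon

train = pd.Series(np.arange(100.0))  # observations up to index 99
forecaster = AutoARIMA(suppress_warnings=True)
forecaster.fit(train)

fh = ForecastingHorizon(np.arange(1, 6), is_relative=True)
preds = forecaster.predict(fh)  # indices 100..104: right after *train*, not validation
```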
<code>
[start of lightwood/mixer/sktime.py]
1 import numpy as np
2 import pandas as pd
3 from typing import Dict, Union
4 from sktime.forecasting.arima import AutoARIMA
5
6 from lightwood.api import dtype
7 from lightwood.helpers.log import log
8 from lightwood.mixer.base import BaseMixer
9 from lightwood.api.types import PredictionArguments
10 from lightwood.encoder.time_series.helpers.common import get_group_matches
11 from lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs
12
13
14 class SkTime(BaseMixer):
15 forecaster: str
16 n_ts_predictions: int
17 target: str
18 supports_proba: bool
19
20 def __init__(
21 self, stop_after: int, target: str, dtype_dict: Dict[str, str],
22 n_ts_predictions: int, ts_analysis: Dict):
23 super().__init__(stop_after)
24 self.target = target
25 dtype_dict[target] = dtype.float
26 self.model_class = AutoARIMA
27 self.models = {}
28 self.n_ts_predictions = n_ts_predictions
29 self.ts_analysis = ts_analysis
30 self.forecasting_horizon = np.arange(1, self.n_ts_predictions)
31 self.cutoff_index = {} # marks index at which training data stops and forecasting window starts
32 self.grouped_by = ['__default'] if not ts_analysis['tss'].group_by else ts_analysis['tss'].group_by
33 self.supports_proba = False
34 self.stable = True
35
36 def fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:
37 log.info('Started fitting sktime forecaster for array prediction')
38
39 all_subsets = ConcatedEncodedDs([train_data, dev_data])
40 df = all_subsets.data_frame.sort_values(by=f'__mdb_original_{self.ts_analysis["tss"].order_by[0]}')
41 data = {'data': df[self.target],
42 'group_info': {gcol: df[gcol].tolist()
43 for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}
44
45 for group in self.ts_analysis['group_combinations']:
46 # many warnings might be thrown inside of statsmodels during stepwise procedure
47 self.models[group] = self.model_class(suppress_warnings=True)
48
49 if self.grouped_by == ['__default']:
50 series_idxs = data['data'].index
51 series_data = data['data'].values
52 else:
53 series_idxs, series_data = get_group_matches(data, group)
54
55 if series_data.size > 0:
56 series = pd.Series(series_data.squeeze(), index=series_idxs)
57 series = series.sort_index(ascending=True)
58 series = series.reset_index(drop=True)
59 try:
60 self.models[group].fit(series)
61 except ValueError:
62 self.models[group] = self.model_class(deseasonalize=False)
63 self.models[group].fit(series)
64
65 self.cutoff_index[group] = len(series)
66
67 if self.grouped_by == ['__default']:
68 break
69
70 def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs],
71 args: PredictionArguments = PredictionArguments()) -> pd.DataFrame:
72 if args.predict_proba:
73 log.warning('This model does not output probability estimates')
74
75 length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)
76 ydf = pd.DataFrame(0, # zero-filled
77 index=np.arange(length),
78 columns=['prediction'],
79 dtype=object)
80
81 data = {'data': ds.data_frame[self.target].reset_index(drop=True),
82 'group_info': {gcol: ds.data_frame[gcol].tolist()
83 for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}
84
85         # all_idxs = list(range(length))  # @TODO: subtract, and assign empty predictions to remainder
86
87 for group in self.ts_analysis['group_combinations']:
88
89 if self.grouped_by == ['__default']:
90 series_idxs = data['data'].index
91 series_data = data['data'].values
92 else:
93 series_idxs, series_data = get_group_matches(data, group)
94
95 if series_data.size > 0:
96 forecaster = self.models[group] if self.models[group].is_fitted else self.models['__default']
97
98 series = pd.Series(series_data.squeeze(), index=series_idxs)
99 series = series.sort_index(ascending=True)
100 series = series.reset_index(drop=True)
101
102 for idx, _ in enumerate(series.iteritems()):
103 ydf['prediction'].iloc[series_idxs[idx]] = forecaster.predict(
104 np.arange(idx, # +cutoff
105 idx + self.n_ts_predictions)).tolist() # +cutoff
106
107 if self.grouped_by == ['__default']:
108 break
109
110 return ydf[['prediction']]
111
[end of lightwood/mixer/sktime.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightwood/mixer/sktime.py b/lightwood/mixer/sktime.py
--- a/lightwood/mixer/sktime.py
+++ b/lightwood/mixer/sktime.py
@@ -32,6 +32,7 @@
self.grouped_by = ['__default'] if not ts_analysis['tss'].group_by else ts_analysis['tss'].group_by
self.supports_proba = False
self.stable = True
+ self.prepared = False
def fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:
log.info('Started fitting sktime forecaster for array prediction')
@@ -67,10 +68,26 @@
if self.grouped_by == ['__default']:
break
+ def partial_fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:
+ """
+ Note: sktime asks for "specification of the time points for which forecasts are requested",
+ and this mixer complies by assuming forecasts will start immediately after the last observed
+ value.
+
+ Because of this, `partial_fit` ensures that both `dev` and `test` splits are used to fit the AutoARIMA model.
+
+ Due to how lightwood implements the `update` procedure, expected inputs are (for a train-dev-test split):
+
+ :param dev_data: original `test` split (used to validate and select model if ensemble is `BestOf`)
+ :param train_data: includes original `train` and `dev` split
+ """ # noqa
+ self.fit(dev_data, train_data)
+ self.prepared = True
+
def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs],
args: PredictionArguments = PredictionArguments()) -> pd.DataFrame:
if args.predict_proba:
- log.warning('This model does not output probability estimates')
+ log.warning('This mixer does not output probability estimates')
length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)
ydf = pd.DataFrame(0, # zero-filled
@@ -101,8 +118,7 @@
for idx, _ in enumerate(series.iteritems()):
ydf['prediction'].iloc[series_idxs[idx]] = forecaster.predict(
- np.arange(idx, # +cutoff
- idx + self.n_ts_predictions)).tolist() # +cutoff
+ np.arange(idx, idx + self.n_ts_predictions)).tolist()
if self.grouped_by == ['__default']:
break
| {"golden_diff": "diff --git a/lightwood/mixer/sktime.py b/lightwood/mixer/sktime.py\n--- a/lightwood/mixer/sktime.py\n+++ b/lightwood/mixer/sktime.py\n@@ -32,6 +32,7 @@\n self.grouped_by = ['__default'] if not ts_analysis['tss'].group_by else ts_analysis['tss'].group_by\n self.supports_proba = False\n self.stable = True\n+ self.prepared = False\n \n def fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:\n log.info('Started fitting sktime forecaster for array prediction')\n@@ -67,10 +68,26 @@\n if self.grouped_by == ['__default']:\n break\n \n+ def partial_fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:\n+ \"\"\"\n+ Note: sktime asks for \"specification of the time points for which forecasts are requested\",\n+ and this mixer complies by assuming forecasts will start immediately after the last observed\n+ value.\n+\n+ Because of this, `partial_fit` ensures that both `dev` and `test` splits are used to fit the AutoARIMA model.\n+\n+ Due to how lightwood implements the `update` procedure, expected inputs are (for a train-dev-test split):\n+\n+ :param dev_data: original `test` split (used to validate and select model if ensemble is `BestOf`)\n+ :param train_data: includes original `train` and `dev` split\n+ \"\"\" # noqa\n+ self.fit(dev_data, train_data)\n+ self.prepared = True\n+\n def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs],\n args: PredictionArguments = PredictionArguments()) -> pd.DataFrame:\n if args.predict_proba:\n- log.warning('This model does not output probability estimates')\n+ log.warning('This mixer does not output probability estimates')\n \n length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)\n ydf = pd.DataFrame(0, # zero-filled\n@@ -101,8 +118,7 @@\n \n for idx, _ in enumerate(series.iteritems()):\n ydf['prediction'].iloc[series_idxs[idx]] = forecaster.predict(\n- np.arange(idx, # +cutoff\n- idx + self.n_ts_predictions)).tolist() # +cutoff\n+ np.arange(idx, idx + self.n_ts_predictions)).tolist()\n \n if self.grouped_by == ['__default']:\n break\n", "issue": "Incorrect `Sktime` forecasting horizon starting point\n* Lightwood version: 1.5.0\r\n\r\nFrom a few internal tests, it seems the `sktime` time series mixer is not emitting forecasts from the end of the validation dataset, but from the training dataset instead, leading to predictions that will be incorrectly displaced.\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\nfrom typing import Dict, Union\nfrom sktime.forecasting.arima import AutoARIMA\n\nfrom lightwood.api import dtype\nfrom lightwood.helpers.log import log\nfrom lightwood.mixer.base import BaseMixer\nfrom lightwood.api.types import PredictionArguments\nfrom lightwood.encoder.time_series.helpers.common import get_group_matches\nfrom lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs\n\n\nclass SkTime(BaseMixer):\n forecaster: str\n n_ts_predictions: int\n target: str\n supports_proba: bool\n\n def __init__(\n self, stop_after: int, target: str, dtype_dict: Dict[str, str],\n n_ts_predictions: int, ts_analysis: Dict):\n super().__init__(stop_after)\n self.target = target\n dtype_dict[target] = dtype.float\n self.model_class = AutoARIMA\n self.models = {}\n self.n_ts_predictions = n_ts_predictions\n self.ts_analysis = ts_analysis\n self.forecasting_horizon = np.arange(1, self.n_ts_predictions)\n self.cutoff_index = {} # marks index at which training data stops and forecasting window starts\n self.grouped_by = ['__default'] if not 
ts_analysis['tss'].group_by else ts_analysis['tss'].group_by\n self.supports_proba = False\n self.stable = True\n\n def fit(self, train_data: EncodedDs, dev_data: EncodedDs) -> None:\n log.info('Started fitting sktime forecaster for array prediction')\n\n all_subsets = ConcatedEncodedDs([train_data, dev_data])\n df = all_subsets.data_frame.sort_values(by=f'__mdb_original_{self.ts_analysis[\"tss\"].order_by[0]}')\n data = {'data': df[self.target],\n 'group_info': {gcol: df[gcol].tolist()\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n\n for group in self.ts_analysis['group_combinations']:\n # many warnings might be thrown inside of statsmodels during stepwise procedure\n self.models[group] = self.model_class(suppress_warnings=True)\n\n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n series_data = data['data'].values\n else:\n series_idxs, series_data = get_group_matches(data, group)\n\n if series_data.size > 0:\n series = pd.Series(series_data.squeeze(), index=series_idxs)\n series = series.sort_index(ascending=True)\n series = series.reset_index(drop=True)\n try:\n self.models[group].fit(series)\n except ValueError:\n self.models[group] = self.model_class(deseasonalize=False)\n self.models[group].fit(series)\n\n self.cutoff_index[group] = len(series)\n\n if self.grouped_by == ['__default']:\n break\n\n def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs],\n args: PredictionArguments = PredictionArguments()) -> pd.DataFrame:\n if args.predict_proba:\n log.warning('This model does not output probability estimates')\n\n length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)\n ydf = pd.DataFrame(0, # zero-filled\n index=np.arange(length),\n columns=['prediction'],\n dtype=object)\n\n data = {'data': ds.data_frame[self.target].reset_index(drop=True),\n 'group_info': {gcol: ds.data_frame[gcol].tolist()\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n\n # all_idxs = list(range(length)) # @TODO: substract, and assign empty predictions to remainder\n\n for group in self.ts_analysis['group_combinations']:\n\n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n series_data = data['data'].values\n else:\n series_idxs, series_data = get_group_matches(data, group)\n\n if series_data.size > 0:\n forecaster = self.models[group] if self.models[group].is_fitted else self.models['__default']\n\n series = pd.Series(series_data.squeeze(), index=series_idxs)\n series = series.sort_index(ascending=True)\n series = series.reset_index(drop=True)\n\n for idx, _ in enumerate(series.iteritems()):\n ydf['prediction'].iloc[series_idxs[idx]] = forecaster.predict(\n np.arange(idx, # +cutoff\n idx + self.n_ts_predictions)).tolist() # +cutoff\n\n if self.grouped_by == ['__default']:\n break\n\n return ydf[['prediction']]\n", "path": "lightwood/mixer/sktime.py"}]} | 1,866 | 578 |
gh_patches_debug_662 | rasdani/github-patches | git_diff | pex-tool__pex-1976 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.113
On the docket:
+ [x] Restore AtomicDirectory non-locked good behavior. #1974
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.112"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.112"
+__version__ = "2.1.113"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.112\"\n+__version__ = \"2.1.113\"\n", "issue": "Release 2.1.113\nOn the docket:\r\n+ [x] Restore AtomicDirectory non-locked good behavior. #1974\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.112\"\n", "path": "pex/version.py"}]} | 618 | 99 |
gh_patches_debug_536 | rasdani/github-patches | git_diff | translate__pootle-5863 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Templates language is taken into account in the main view
I'm testing Pootle 2.8.0RC3 and I've found an issue related to #4568.
When I filter the translations for a single project, the progress bar now shows 100% (the templates aren't taken into account now, great):

However, when I go back to the global view, that project's progress bar still includes the templates results:

Thank you!
</issue>
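One plausible way to reconcile the two views, sketched here with hedging (the field lookup `tp__language__code` follows the models shown below, and treating `"templates"` as the pseudo-language code is an assumption), is to exclude templates from the project-set aggregation as well:

```python
def filter_data(self, qs):
    # Sketch: drop the "templates" pseudo-language from the global
    # aggregation so it matches the per-project progress bars.
    qs = super(ProjectSetDataTool, self).filter_data(qs)
    return qs.exclude(tp__language__code="templates")
```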
<code>
[start of pootle/apps/pootle_data/project_data.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from pootle.core.delegate import revision
10
11 from .utils import RelatedStoresDataTool, RelatedTPsDataTool
12
13
14 class ProjectDataTool(RelatedTPsDataTool):
15 """Retrieves aggregate stats for a Project"""
16
17 cache_key_name = "project"
18
19 def filter_data(self, qs):
20 return qs.filter(tp__project=self.context)
21
22 @property
23 def rev_cache_key(self):
24 return revision.get(
25 self.context.__class__)(self.context.directory).get(key="stats")
26
27
28 class ProjectResourceDataTool(RelatedStoresDataTool):
29 group_by = ("store__translation_project__language__code", )
30 cache_key_name = "project_resource"
31
32 @property
33 def project_path(self):
34 return (
35 "/%s%s"
36 % (self.project_code, self.tp_path))
37
38 @property
39 def tp_path(self):
40 return (
41 "/%s%s"
42 % (self.dir_path,
43 self.filename))
44
45 def filter_data(self, qs):
46 return (
47 qs.filter(store__translation_project__project__code=self.project_code)
48 .filter(store__tp_path__startswith=self.tp_path))
49
50 @property
51 def context_name(self):
52 return "/projects%s" % self.project_path
53
54
55 class ProjectSetDataTool(RelatedTPsDataTool):
56 group_by = ("tp__project__code", )
57 cache_key_name = "projects"
58
59 def get_root_child_path(self, child):
60 return child[self.group_by[0]]
61
62 @property
63 def context_name(self):
64 return "ALL"
65
[end of pootle/apps/pootle_data/project_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_data/project_data.py b/pootle/apps/pootle_data/project_data.py
--- a/pootle/apps/pootle_data/project_data.py
+++ b/pootle/apps/pootle_data/project_data.py
@@ -62,3 +62,7 @@
@property
def context_name(self):
return "ALL"
+
+ def filter_data(self, qs):
+ qs = super(ProjectSetDataTool, self).filter_data(qs)
+ return qs.exclude(tp__language__code="templates")
| {"golden_diff": "diff --git a/pootle/apps/pootle_data/project_data.py b/pootle/apps/pootle_data/project_data.py\n--- a/pootle/apps/pootle_data/project_data.py\n+++ b/pootle/apps/pootle_data/project_data.py\n@@ -62,3 +62,7 @@\n @property\n def context_name(self):\n return \"ALL\"\n+\n+ def filter_data(self, qs):\n+ qs = super(ProjectSetDataTool, self).filter_data(qs)\n+ return qs.exclude(tp__language__code=\"templates\")\n", "issue": "Templates language is taken into account in the main view\nI'm testing Pootle 2.8.0RC3 and I've found an issue related to #4568.\r\n\r\nWhen I filter the translations for a single project, the progress bar now shows 100% (the templates aren't taken into account now, great):\r\n\r\n\r\nHowever, when I go back to the global view, that project shows a progress bar including the templates result:\r\n\r\n\r\nThank you!\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom pootle.core.delegate import revision\n\nfrom .utils import RelatedStoresDataTool, RelatedTPsDataTool\n\n\nclass ProjectDataTool(RelatedTPsDataTool):\n \"\"\"Retrieves aggregate stats for a Project\"\"\"\n\n cache_key_name = \"project\"\n\n def filter_data(self, qs):\n return qs.filter(tp__project=self.context)\n\n @property\n def rev_cache_key(self):\n return revision.get(\n self.context.__class__)(self.context.directory).get(key=\"stats\")\n\n\nclass ProjectResourceDataTool(RelatedStoresDataTool):\n group_by = (\"store__translation_project__language__code\", )\n cache_key_name = \"project_resource\"\n\n @property\n def project_path(self):\n return (\n \"/%s%s\"\n % (self.project_code, self.tp_path))\n\n @property\n def tp_path(self):\n return (\n \"/%s%s\"\n % (self.dir_path,\n self.filename))\n\n def filter_data(self, qs):\n return (\n qs.filter(store__translation_project__project__code=self.project_code)\n .filter(store__tp_path__startswith=self.tp_path))\n\n @property\n def context_name(self):\n return \"/projects%s\" % self.project_path\n\n\nclass ProjectSetDataTool(RelatedTPsDataTool):\n group_by = (\"tp__project__code\", )\n cache_key_name = \"projects\"\n\n def get_root_child_path(self, child):\n return child[self.group_by[0]]\n\n @property\n def context_name(self):\n return \"ALL\"\n", "path": "pootle/apps/pootle_data/project_data.py"}]} | 1,303 | 124 |
gh_patches_debug_18446 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2515 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeDecodeError at /rest/v1/recipient_country/
'ascii' codec can't decode byte 0xc3 in position 7: ordinal not in range(128)
</issue>
<code>
[start of akvo/rsr/models/country.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from django.db import models
9 from django.core.validators import MaxValueValidator, MinValueValidator
10 from django.utils.translation import ugettext_lazy as _
11
12 from ..fields import ValidXMLCharField
13 from ..iso3166 import ISO_3166_COUNTRIES, CONTINENTS, COUNTRY_CONTINENTS
14
15 from akvo.codelists import models as codelist_models
16 from akvo.codelists.store.codelists_v202 import COUNTRY
17 from akvo.utils import codelist_choices, codelist_value
18
19
20 class Country(models.Model):
21 name = ValidXMLCharField(_(u'country name'), max_length=50, unique=True, db_index=True)
22 iso_code = ValidXMLCharField(
23 _(u'ISO 3166 code'), max_length=2, unique=True, db_index=True, choices=ISO_3166_COUNTRIES
24 )
25 continent = ValidXMLCharField(_(u'continent name'), max_length=20, db_index=True)
26 continent_code = ValidXMLCharField(
27 _(u'continent code'), max_length=2, db_index=True, choices=CONTINENTS
28 )
29
30 def __unicode__(self):
31 return self.name
32
33 @classmethod
34 def fields_from_iso_code(cls, iso_code):
35 continent_code = COUNTRY_CONTINENTS[iso_code]
36 name = dict(ISO_3166_COUNTRIES)[iso_code]
37 continent = dict(CONTINENTS)[continent_code]
38 return dict(
39 iso_code=iso_code, name=name, continent=continent, continent_code=continent_code
40 )
41
42 class Meta:
43 app_label = 'rsr'
44 verbose_name = _(u'country')
45 verbose_name_plural = _(u'countries')
46 ordering = ['name']
47
48
49 class RecipientCountry(models.Model):
50 project = models.ForeignKey(
51 'Project', verbose_name=_(u'project'), related_name='recipient_countries'
52 )
53 country = ValidXMLCharField(
54 _(u'recipient country'), blank=True, max_length=2,choices=codelist_choices(COUNTRY, show_code=False),
55 help_text=_(u'The country that benefits from the project.')
56 )
57 percentage = models.DecimalField(
58 _(u'recipient country percentage'), blank=True, null=True, max_digits=4, decimal_places=1,
59 validators=[MaxValueValidator(100), MinValueValidator(0)],
60 help_text=_(u'The percentage of total commitments or total activity budget allocated to '
61 u'this country. Content must be a positive decimal number between 0 and 100, '
62 u'with no percentage sign. Percentages for all reported countries and regions '
63 u'MUST add up to 100%. Use a period to denote decimals.')
64 )
65 text = ValidXMLCharField(
66 _(u'recipient country description'), blank=True, max_length=50,
67 help_text=_(u'Enter additional information about the recipient country, if necessary.')
68 )
69
70 def __unicode__(self):
71 if self.country:
72 try:
73 country_unicode = self.iati_country().name
74 except (AttributeError, codelist_models.Country.DoesNotExist):
75 country_unicode = self.country
76 else:
77 country_unicode = u'%s' % _(u'No country specified')
78
79 if self.percentage:
80 country_unicode += u' (%s%%)' % str(self.percentage)
81
82 return country_unicode
83
84 def iati_country(self):
85 return codelist_value(codelist_models.Country, self, 'country')
86
87 def iati_country_unicode(self):
88 return str(self.iati_country())
89
90 class Meta:
91 app_label = 'rsr'
92 verbose_name = _(u'recipient country')
93 verbose_name_plural = _(u'recipient countries')
94 ordering = ('-percentage', 'country')
95
[end of akvo/rsr/models/country.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/models/country.py b/akvo/rsr/models/country.py
--- a/akvo/rsr/models/country.py
+++ b/akvo/rsr/models/country.py
@@ -51,7 +51,7 @@
'Project', verbose_name=_(u'project'), related_name='recipient_countries'
)
country = ValidXMLCharField(
- _(u'recipient country'), blank=True, max_length=2,choices=codelist_choices(COUNTRY, show_code=False),
+ _(u'recipient country'), blank=True, max_length=2, choices=codelist_choices(COUNTRY, show_code=False),
help_text=_(u'The country that benefits from the project.')
)
percentage = models.DecimalField(
@@ -85,7 +85,7 @@
return codelist_value(codelist_models.Country, self, 'country')
def iati_country_unicode(self):
- return str(self.iati_country())
+ return unicode(self.iati_country())
class Meta:
app_label = 'rsr'
| {"golden_diff": "diff --git a/akvo/rsr/models/country.py b/akvo/rsr/models/country.py\n--- a/akvo/rsr/models/country.py\n+++ b/akvo/rsr/models/country.py\n@@ -51,7 +51,7 @@\n 'Project', verbose_name=_(u'project'), related_name='recipient_countries'\n )\n country = ValidXMLCharField(\n- _(u'recipient country'), blank=True, max_length=2,choices=codelist_choices(COUNTRY, show_code=False),\n+ _(u'recipient country'), blank=True, max_length=2, choices=codelist_choices(COUNTRY, show_code=False),\n help_text=_(u'The country that benefits from the project.')\n )\n percentage = models.DecimalField(\n@@ -85,7 +85,7 @@\n return codelist_value(codelist_models.Country, self, 'country')\n \n def iati_country_unicode(self):\n- return str(self.iati_country())\n+ return unicode(self.iati_country())\n \n class Meta:\n app_label = 'rsr'\n", "issue": "UnicodeDecodeError at /rest/v1/recipient_country/\n'ascii' codec can't decode byte 0xc3 in position 7: ordinal not in range(128)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import ValidXMLCharField\nfrom ..iso3166 import ISO_3166_COUNTRIES, CONTINENTS, COUNTRY_CONTINENTS\n\nfrom akvo.codelists import models as codelist_models\nfrom akvo.codelists.store.codelists_v202 import COUNTRY\nfrom akvo.utils import codelist_choices, codelist_value\n\n\nclass Country(models.Model):\n name = ValidXMLCharField(_(u'country name'), max_length=50, unique=True, db_index=True)\n iso_code = ValidXMLCharField(\n _(u'ISO 3166 code'), max_length=2, unique=True, db_index=True, choices=ISO_3166_COUNTRIES\n )\n continent = ValidXMLCharField(_(u'continent name'), max_length=20, db_index=True)\n continent_code = ValidXMLCharField(\n _(u'continent code'), max_length=2, db_index=True, choices=CONTINENTS\n )\n\n def __unicode__(self):\n return self.name\n\n @classmethod\n def fields_from_iso_code(cls, iso_code):\n continent_code = COUNTRY_CONTINENTS[iso_code]\n name = dict(ISO_3166_COUNTRIES)[iso_code]\n continent = dict(CONTINENTS)[continent_code]\n return dict(\n iso_code=iso_code, name=name, continent=continent, continent_code=continent_code\n )\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'country')\n verbose_name_plural = _(u'countries')\n ordering = ['name']\n\n\nclass RecipientCountry(models.Model):\n project = models.ForeignKey(\n 'Project', verbose_name=_(u'project'), related_name='recipient_countries'\n )\n country = ValidXMLCharField(\n _(u'recipient country'), blank=True, max_length=2,choices=codelist_choices(COUNTRY, show_code=False),\n help_text=_(u'The country that benefits from the project.')\n )\n percentage = models.DecimalField(\n _(u'recipient country percentage'), blank=True, null=True, max_digits=4, decimal_places=1,\n validators=[MaxValueValidator(100), MinValueValidator(0)],\n help_text=_(u'The percentage of total commitments or total activity budget allocated to '\n u'this country. Content must be a positive decimal number between 0 and 100, '\n u'with no percentage sign. Percentages for all reported countries and regions '\n u'MUST add up to 100%. 
Use a period to denote decimals.')\n )\n text = ValidXMLCharField(\n _(u'recipient country description'), blank=True, max_length=50,\n help_text=_(u'Enter additional information about the recipient country, if necessary.')\n )\n\n def __unicode__(self):\n if self.country:\n try:\n country_unicode = self.iati_country().name\n except (AttributeError, codelist_models.Country.DoesNotExist):\n country_unicode = self.country\n else:\n country_unicode = u'%s' % _(u'No country specified')\n\n if self.percentage:\n country_unicode += u' (%s%%)' % str(self.percentage)\n\n return country_unicode\n\n def iati_country(self):\n return codelist_value(codelist_models.Country, self, 'country')\n\n def iati_country_unicode(self):\n return str(self.iati_country())\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'recipient country')\n verbose_name_plural = _(u'recipient countries')\n ordering = ('-percentage', 'country')\n", "path": "akvo/rsr/models/country.py"}]} | 1,659 | 235 |
gh_patches_debug_1895 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1452 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DISCOVERY: Notification and change log for domain managers
### Issue description
As a domain manager,
I want an in-app log of all changes made to my domain
So that I can ensure that it is correct, and track any changes that have been made, avoiding and correcting errors.
### Acceptance criteria
TBD
### Additional context
Notifications about changes to domain info:
All users wanted to be notified of changes to their domain information–in particular, updates to name servers. Most users said they’d like email notifications because they rarely visit the registrar. However, an in-app audit trail would be helpful, as well, for future reference or in case an email was missed. Need to do some discovery and design exploration around this.
Source: [User feedback](https://docs.google.com/document/d/1M5foXX34qPc7R_J1uhBACHWUhg8WHwX3bB6nurvNNWE/edit#bookmark=id.pa0k2x54vkx1)
### Links to other issues
_No response_
</issue>
<code>
[start of src/registrar/models/__init__.py]
1 from auditlog.registry import auditlog # type: ignore
2 from .contact import Contact
3 from .domain_application import DomainApplication
4 from .domain_information import DomainInformation
5 from .domain import Domain
6 from .draft_domain import DraftDomain
7 from .host_ip import HostIP
8 from .host import Host
9 from .domain_invitation import DomainInvitation
10 from .nameserver import Nameserver
11 from .user_domain_role import UserDomainRole
12 from .public_contact import PublicContact
13 from .user import User
14 from .user_group import UserGroup
15 from .website import Website
16 from .transition_domain import TransitionDomain
17
18 __all__ = [
19 "Contact",
20 "DomainApplication",
21 "DomainInformation",
22 "Domain",
23 "DraftDomain",
24 "DomainInvitation",
25 "HostIP",
26 "Host",
27 "Nameserver",
28 "UserDomainRole",
29 "PublicContact",
30 "User",
31 "UserGroup",
32 "Website",
33 "TransitionDomain",
34 ]
35
36 auditlog.register(Contact)
37 auditlog.register(DomainApplication)
38 auditlog.register(Domain)
39 auditlog.register(DraftDomain)
40 auditlog.register(DomainInvitation)
41 auditlog.register(HostIP)
42 auditlog.register(Host)
43 auditlog.register(Nameserver)
44 auditlog.register(UserDomainRole)
45 auditlog.register(PublicContact)
46 auditlog.register(User, m2m_fields=["user_permissions", "groups"])
47 auditlog.register(UserGroup, m2m_fields=["permissions"])
48 auditlog.register(Website)
49 auditlog.register(TransitionDomain)
50
[end of src/registrar/models/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/registrar/models/__init__.py b/src/registrar/models/__init__.py
--- a/src/registrar/models/__init__.py
+++ b/src/registrar/models/__init__.py
@@ -38,6 +38,7 @@
auditlog.register(Domain)
auditlog.register(DraftDomain)
auditlog.register(DomainInvitation)
+auditlog.register(DomainInformation)
auditlog.register(HostIP)
auditlog.register(Host)
auditlog.register(Nameserver)
| {"golden_diff": "diff --git a/src/registrar/models/__init__.py b/src/registrar/models/__init__.py\n--- a/src/registrar/models/__init__.py\n+++ b/src/registrar/models/__init__.py\n@@ -38,6 +38,7 @@\n auditlog.register(Domain)\n auditlog.register(DraftDomain)\n auditlog.register(DomainInvitation)\n+auditlog.register(DomainInformation)\n auditlog.register(HostIP)\n auditlog.register(Host)\n auditlog.register(Nameserver)\n", "issue": "DISCOVERY: Notification and change log for domain managers\n### Issue description\n\nAs a domain manager,\nI want an in-app log of all changes made to my domain\nSo that I can ensure that it is correct, and track any changes that have been made, avoiding and correcting errors.\n\n### Acceptance criteria\n\nTBD\n\n### Additional context\n\nNotifications about changes to domain info:\n\nAll users wanted to be notified of changes to their domain information\u2013in particular, updates to name servers. Most users said they\u2019d like an email notifications because they rarely visit the registrar. However, an in-app audit trail would be helpful, as well, for future reference or in case an email was missed. Need to do some discovery and design exploration around this.\n\nSouirce: [User feedback](https://docs.google.com/document/d/1M5foXX34qPc7R_J1uhBACHWUhg8WHwX3bB6nurvNNWE/edit#bookmark=id.pa0k2x54vkx1)\n\n### Links to other issues\n\n_No response_\n", "before_files": [{"content": "from auditlog.registry import auditlog # type: ignore\nfrom .contact import Contact\nfrom .domain_application import DomainApplication\nfrom .domain_information import DomainInformation\nfrom .domain import Domain\nfrom .draft_domain import DraftDomain\nfrom .host_ip import HostIP\nfrom .host import Host\nfrom .domain_invitation import DomainInvitation\nfrom .nameserver import Nameserver\nfrom .user_domain_role import UserDomainRole\nfrom .public_contact import PublicContact\nfrom .user import User\nfrom .user_group import UserGroup\nfrom .website import Website\nfrom .transition_domain import TransitionDomain\n\n__all__ = [\n \"Contact\",\n \"DomainApplication\",\n \"DomainInformation\",\n \"Domain\",\n \"DraftDomain\",\n \"DomainInvitation\",\n \"HostIP\",\n \"Host\",\n \"Nameserver\",\n \"UserDomainRole\",\n \"PublicContact\",\n \"User\",\n \"UserGroup\",\n \"Website\",\n \"TransitionDomain\",\n]\n\nauditlog.register(Contact)\nauditlog.register(DomainApplication)\nauditlog.register(Domain)\nauditlog.register(DraftDomain)\nauditlog.register(DomainInvitation)\nauditlog.register(HostIP)\nauditlog.register(Host)\nauditlog.register(Nameserver)\nauditlog.register(UserDomainRole)\nauditlog.register(PublicContact)\nauditlog.register(User, m2m_fields=[\"user_permissions\", \"groups\"])\nauditlog.register(UserGroup, m2m_fields=[\"permissions\"])\nauditlog.register(Website)\nauditlog.register(TransitionDomain)\n", "path": "src/registrar/models/__init__.py"}]} | 1,171 | 106 |
gh_patches_debug_32255 | rasdani/github-patches | git_diff | ckan__ckan-5639 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No way to disable reloader when starting the development server
**CKAN version**
2.9.0
**Describe the bug**
When trying to start the development server without a reloader I encountered problems with the `--reloader` argument.
The reloader option requires a TEXT argument, so I expected that `--reloader False` would disable the reloader.
**Steps to reproduce**
Start ckan with following command:
`ckan -c [PATH_TO_CONFIG] run --host 0.0.0.0 --reloader False`
**Expected behavior**
Server starts without reloader
**Additional details**
Currently the `reloader` option is passed as a string, and if it's not provided it defaults to the boolean value `True`.
So we have two cases when the `run_simple` method is called:
1. `--reloader` argument is not provided --> reloader=True
2. `--reloader` argument is provided --> some string is passed as the reloader argument to the `run_simple` method, and any non-empty string evaluates to true in the if statement that decides whether the reloader should be used.
So the `--reloader` argument does not affect anything.
_My suggestion:_ rename the argument to `disable-reloader` and turn it into a boolean flag. This enables the user to disable the reloader and the default behaviour (i.e. the dev server starts with a reloader) stays the same.
</issue>
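A short sketch of the truthiness trap described above, together with the suggested boolean flag (the option name comes from the suggestion; the handler body is illustrative):

```python
import click

# Any non-empty string is truthy, so passing "False" could never
# switch the reloader off under the old TEXT-typed option:
assert bool("False") is True

@click.command()
@click.option(u"--disable-reloader", is_flag=True, help=u"Disable reloader")
def run(disable_reloader):
    use_reloader = not disable_reloader
    click.echo(u"use_reloader=%s" % use_reloader)
```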
<code>
[start of ckan/cli/server.py]
1 # encoding: utf-8
2
3 import logging
4
5 import click
6 from werkzeug.serving import run_simple
7
8 from ckan.common import config
9 import ckan.plugins.toolkit as tk
10
11 log = logging.getLogger(__name__)
12
13
14 @click.command(u"run", short_help=u"Start development server")
15 @click.option(u"-H", u"--host", default=u"localhost", help=u"Set host")
16 @click.option(u"-p", u"--port", default=5000, help=u"Set port")
17 @click.option(u"-r", u"--reloader", default=True, help=u"Use reloader")
18 @click.option(
19 u"-t", u"--threaded", is_flag=True,
20 help=u"Handle each request in a separate thread"
21 )
22 @click.option(u"-e", u"--extra-files", multiple=True)
23 @click.option(
24 u"--processes", type=int, default=0,
25 help=u"Maximum number of concurrent processes"
26 )
27 @click.pass_context
28 def run(ctx, host, port, reloader, threaded, extra_files, processes):
29 u"""Runs the Werkzeug development server"""
30 threaded = threaded or tk.asbool(config.get(u"ckan.devserver.threaded"))
31 processes = processes or tk.asint(
32 config.get(u"ckan.devserver.multiprocess", 1)
33 )
34 if threaded and processes > 1:
35 tk.error_shout(u"Cannot have a multithreaded and multi process server")
36 raise click.Abort()
37
38 log.info(u"Running server {0} on port {1}".format(host, port))
39
40 config_extra_files = tk.aslist(
41 config.get(u"ckan.devserver.watch_patterns")
42 )
43 extra_files = list(extra_files) + [
44 config[u"__file__"]
45 ] + config_extra_files
46
47 run_simple(
48 host,
49 port,
50 ctx.obj.app,
51 use_reloader=reloader,
52 use_evalex=True,
53 threaded=threaded,
54 processes=processes,
55 extra_files=extra_files,
56 )
57
[end of ckan/cli/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckan/cli/server.py b/ckan/cli/server.py
--- a/ckan/cli/server.py
+++ b/ckan/cli/server.py
@@ -5,8 +5,8 @@
import click
from werkzeug.serving import run_simple
-from ckan.common import config
import ckan.plugins.toolkit as tk
+from ckan.common import config
log = logging.getLogger(__name__)
@@ -14,7 +14,8 @@
@click.command(u"run", short_help=u"Start development server")
@click.option(u"-H", u"--host", default=u"localhost", help=u"Set host")
@click.option(u"-p", u"--port", default=5000, help=u"Set port")
[email protected](u"-r", u"--reloader", default=True, help=u"Use reloader")
[email protected](u"-r", u"--disable-reloader", is_flag=True,
+ help=u"Disable reloader")
@click.option(
u"-t", u"--threaded", is_flag=True,
help=u"Handle each request in a separate thread"
@@ -25,8 +26,9 @@
help=u"Maximum number of concurrent processes"
)
@click.pass_context
-def run(ctx, host, port, reloader, threaded, extra_files, processes):
+def run(ctx, host, port, disable_reloader, threaded, extra_files, processes):
u"""Runs the Werkzeug development server"""
+ use_reloader = not disable_reloader
threaded = threaded or tk.asbool(config.get(u"ckan.devserver.threaded"))
processes = processes or tk.asint(
config.get(u"ckan.devserver.multiprocess", 1)
@@ -48,7 +50,7 @@
host,
port,
ctx.obj.app,
- use_reloader=reloader,
+ use_reloader=use_reloader,
use_evalex=True,
threaded=threaded,
processes=processes,
| {"golden_diff": "diff --git a/ckan/cli/server.py b/ckan/cli/server.py\n--- a/ckan/cli/server.py\n+++ b/ckan/cli/server.py\n@@ -5,8 +5,8 @@\n import click\n from werkzeug.serving import run_simple\n \n-from ckan.common import config\n import ckan.plugins.toolkit as tk\n+from ckan.common import config\n \n log = logging.getLogger(__name__)\n \n@@ -14,7 +14,8 @@\n @click.command(u\"run\", short_help=u\"Start development server\")\n @click.option(u\"-H\", u\"--host\", default=u\"localhost\", help=u\"Set host\")\n @click.option(u\"-p\", u\"--port\", default=5000, help=u\"Set port\")\[email protected](u\"-r\", u\"--reloader\", default=True, help=u\"Use reloader\")\[email protected](u\"-r\", u\"--disable-reloader\", is_flag=True,\n+ help=u\"Disable reloader\")\n @click.option(\n u\"-t\", u\"--threaded\", is_flag=True,\n help=u\"Handle each request in a separate thread\"\n@@ -25,8 +26,9 @@\n help=u\"Maximum number of concurrent processes\"\n )\n @click.pass_context\n-def run(ctx, host, port, reloader, threaded, extra_files, processes):\n+def run(ctx, host, port, disable_reloader, threaded, extra_files, processes):\n u\"\"\"Runs the Werkzeug development server\"\"\"\n+ use_reloader = not disable_reloader\n threaded = threaded or tk.asbool(config.get(u\"ckan.devserver.threaded\"))\n processes = processes or tk.asint(\n config.get(u\"ckan.devserver.multiprocess\", 1)\n@@ -48,7 +50,7 @@\n host,\n port,\n ctx.obj.app,\n- use_reloader=reloader,\n+ use_reloader=use_reloader,\n use_evalex=True,\n threaded=threaded,\n processes=processes,\n", "issue": "No way to disable reloader when starting the development server\n**CKAN version**\r\n2.9.0\r\n\r\n**Describe the bug**\r\nWhen trying to start the development server without a reloader i encountered problems with the `--reloader` argument.\r\nThe reloader option requires a TEXT argument, therefore i expected that --reloader False disables the reloader.\r\n\r\n\r\n**Steps to reproduce**\r\nStart ckan with following command:\r\n\r\n`ckan -c [PATH_TO_CONFIG] run --host 0.0.0.0 --reloader False`\r\n\r\n**Expected behavior**\r\nServer starts without reloader\r\n\r\n**Additional details**\r\n\r\nCurrently the `reloader` option is passed as string and if it's not provided it defaults to the boolean value `True`\r\n\r\nSo we have two cases when the `run_simple` method is called:\r\n1. `--reloader` argument is not provided --> reloader=True\r\n2. `--reloader` argument is provided --> some string is passed as reloader argument to the `run_simple` method, which evaluates to true in the if statement distinguishing whether the reloader should be used or not.\r\n\r\nSo the `--reloader` argument does not affect anything.\r\n\r\n_My suggestion:_ rename the argument to `disable-reloader` and turn it into a boolean flag. This enables the user to disable the reloader and the default behaviour (i.e. 
the dev server starts with a reloader) stays the same.\r\n\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\n\nimport click\nfrom werkzeug.serving import run_simple\n\nfrom ckan.common import config\nimport ckan.plugins.toolkit as tk\n\nlog = logging.getLogger(__name__)\n\n\[email protected](u\"run\", short_help=u\"Start development server\")\[email protected](u\"-H\", u\"--host\", default=u\"localhost\", help=u\"Set host\")\[email protected](u\"-p\", u\"--port\", default=5000, help=u\"Set port\")\[email protected](u\"-r\", u\"--reloader\", default=True, help=u\"Use reloader\")\[email protected](\n u\"-t\", u\"--threaded\", is_flag=True,\n help=u\"Handle each request in a separate thread\"\n)\[email protected](u\"-e\", u\"--extra-files\", multiple=True)\[email protected](\n u\"--processes\", type=int, default=0,\n help=u\"Maximum number of concurrent processes\"\n)\[email protected]_context\ndef run(ctx, host, port, reloader, threaded, extra_files, processes):\n u\"\"\"Runs the Werkzeug development server\"\"\"\n threaded = threaded or tk.asbool(config.get(u\"ckan.devserver.threaded\"))\n processes = processes or tk.asint(\n config.get(u\"ckan.devserver.multiprocess\", 1)\n )\n if threaded and processes > 1:\n tk.error_shout(u\"Cannot have a multithreaded and multi process server\")\n raise click.Abort()\n\n log.info(u\"Running server {0} on port {1}\".format(host, port))\n\n config_extra_files = tk.aslist(\n config.get(u\"ckan.devserver.watch_patterns\")\n )\n extra_files = list(extra_files) + [\n config[u\"__file__\"]\n ] + config_extra_files\n\n run_simple(\n host,\n port,\n ctx.obj.app,\n use_reloader=reloader,\n use_evalex=True,\n threaded=threaded,\n processes=processes,\n extra_files=extra_files,\n )\n", "path": "ckan/cli/server.py"}]} | 1,380 | 434 |
gh_patches_debug_553 | rasdani/github-patches | git_diff | pex-tool__pex-884 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.2
On the docket:
+ [x] Isolating a pex chroot doesn't work from a zipped pex #882
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.1.1'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.1.1'
+__version__ = '2.1.2'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.1'\n+__version__ = '2.1.2'\n", "issue": "Release 2.1.2\nOn the docket:\r\n+ [x] Isolating a pex chroot doesn't work from a zipped pex #882 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.1'\n", "path": "pex/version.py"}]} | 621 | 95 |
gh_patches_debug_14667 | rasdani/github-patches | git_diff | Parsl__parsl-3151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove BlueWaters from userguide
**Describe the bug**
Bluewaters is a supercomputer at the NCSA (UIUC) that is now retired. Our userguide still contains an example configuration for this defunct machine that should be removed.
Here's a link to the userguide section as rendered: https://parsl.readthedocs.io/en/stable/userguide/configuring.html#blue-waters-ncsa
**Expected behavior**
Bluewaters should be removed from our userguide section.
Here's a quick sketch of the work involved:
1. Remove the section on Bluewaters from `docs/userguide/configuring.rst`
2. Remove the example configuration file here `parsl/configs/bluewaters.py`
3. Rebuild the documentation: `cd docs; make clean html; `
4. Check the newly rebuilt docs with `cd docs/_build/html; python3 -m http.server 8080` and load `http://localhost:8080` in your browser to view the HTML pages rebuilt in step 3.
</issue>
<code>
[start of parsl/configs/bluewaters.py]
1 from parsl.config import Config
2 from parsl.executors import HighThroughputExecutor
3 from parsl.launchers import AprunLauncher
4 from parsl.providers import TorqueProvider
5
6
7 config = Config(
8 executors=[
9 HighThroughputExecutor(
10 label="bw_htex",
11 cores_per_worker=1,
12 worker_debug=False,
13 provider=TorqueProvider(
14 queue='normal',
15 launcher=AprunLauncher(overrides="-b -- bwpy-environ --"),
16 scheduler_options='', # string to prepend to #SBATCH blocks in the submit script to the scheduler
17 worker_init='', # command to run before starting a worker, such as 'source activate env'
18 init_blocks=1,
19 max_blocks=1,
20 min_blocks=1,
21 nodes_per_block=2,
22 walltime='00:10:00'
23 ),
24 )
25
26 ],
27
28 )
29
[end of parsl/configs/bluewaters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/configs/bluewaters.py b/parsl/configs/bluewaters.py
deleted file mode 100644
--- a/parsl/configs/bluewaters.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from parsl.config import Config
-from parsl.executors import HighThroughputExecutor
-from parsl.launchers import AprunLauncher
-from parsl.providers import TorqueProvider
-
-
-config = Config(
- executors=[
- HighThroughputExecutor(
- label="bw_htex",
- cores_per_worker=1,
- worker_debug=False,
- provider=TorqueProvider(
- queue='normal',
- launcher=AprunLauncher(overrides="-b -- bwpy-environ --"),
- scheduler_options='', # string to prepend to #SBATCH blocks in the submit script to the scheduler
- worker_init='', # command to run before starting a worker, such as 'source activate env'
- init_blocks=1,
- max_blocks=1,
- min_blocks=1,
- nodes_per_block=2,
- walltime='00:10:00'
- ),
- )
-
- ],
-
-)
| {"golden_diff": "diff --git a/parsl/configs/bluewaters.py b/parsl/configs/bluewaters.py\ndeleted file mode 100644\n--- a/parsl/configs/bluewaters.py\n+++ /dev/null\n@@ -1,28 +0,0 @@\n-from parsl.config import Config\n-from parsl.executors import HighThroughputExecutor\n-from parsl.launchers import AprunLauncher\n-from parsl.providers import TorqueProvider\n-\n-\n-config = Config(\n- executors=[\n- HighThroughputExecutor(\n- label=\"bw_htex\",\n- cores_per_worker=1,\n- worker_debug=False,\n- provider=TorqueProvider(\n- queue='normal',\n- launcher=AprunLauncher(overrides=\"-b -- bwpy-environ --\"),\n- scheduler_options='', # string to prepend to #SBATCH blocks in the submit script to the scheduler\n- worker_init='', # command to run before starting a worker, such as 'source activate env'\n- init_blocks=1,\n- max_blocks=1,\n- min_blocks=1,\n- nodes_per_block=2,\n- walltime='00:10:00'\n- ),\n- )\n-\n- ],\n-\n-)\n", "issue": "Remove BlueWaters from userguide\n**Describe the bug**\r\n\r\nBluewaters is a supercomputer at the NCSA (UIUC) that is now retired. Our userguide still contains an example configuration for this defunct machine that should be removed.\r\n\r\nHere's a link to the userguide section as rendered: https://parsl.readthedocs.io/en/stable/userguide/configuring.html#blue-waters-ncsa\r\n\r\n\r\n**Expected behavior**\r\n\r\nBluewaters should be removed from our userguide section.\r\n\r\nHere's a quick sketch of the work involved:\r\n1. Remove the section on Bluewaters from `docs/userguide/configuring.rst`\r\n2. Remove the example configuration file here `parsl/configs/bluewaters.py`\r\n3. Rebuild the documentation: `cd docs; make clean html; `\r\n4. Check the newly rebuild docs with `cd docs/_build/html; python3 -m http.server 8080` and load `http://localhost:8080` in your browser to load the html pages that was newly rebuilt in step.3.\r\n\r\n\n", "before_files": [{"content": "from parsl.config import Config\nfrom parsl.executors import HighThroughputExecutor\nfrom parsl.launchers import AprunLauncher\nfrom parsl.providers import TorqueProvider\n\n\nconfig = Config(\n executors=[\n HighThroughputExecutor(\n label=\"bw_htex\",\n cores_per_worker=1,\n worker_debug=False,\n provider=TorqueProvider(\n queue='normal',\n launcher=AprunLauncher(overrides=\"-b -- bwpy-environ --\"),\n scheduler_options='', # string to prepend to #SBATCH blocks in the submit script to the scheduler\n worker_init='', # command to run before starting a worker, such as 'source activate env'\n init_blocks=1,\n max_blocks=1,\n min_blocks=1,\n nodes_per_block=2,\n walltime='00:10:00'\n ),\n )\n\n ],\n\n)\n", "path": "parsl/configs/bluewaters.py"}]} | 1,003 | 270 |
gh_patches_debug_32517 | rasdani/github-patches | git_diff | scrapy__scrapy-4623 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`scrapy genspider` should not overwrite existing file
## Summary
If the file mentioned in `scrapy genspider` already exists, then genspider should refuse to generate the the file.
## Motivation
As it stands, existing code can be blown away if this command runs twice.
## Describe alternatives you've considered
Prompting the user before overwriting the existing spider.
</issue>
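A hedged sketch of the requested guard; the helper name and message are illustrative, not Scrapy's actual API:

```python
from os.path import abspath, exists, join

def spider_file_exists(spiders_dir, module):
    """Return True (and warn) if a spider module already occupies the slot."""
    path = join(spiders_dir, "%s.py" % module)
    if exists(path):
        print("%s already exists" % abspath(path))
        return True
    return False
```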
<code>
[start of scrapy/commands/genspider.py]
1 import os
2 import shutil
3 import string
4
5 from importlib import import_module
6 from os.path import join, dirname, abspath, exists, splitext
7
8 import scrapy
9 from scrapy.commands import ScrapyCommand
10 from scrapy.utils.template import render_templatefile, string_camelcase
11 from scrapy.exceptions import UsageError
12
13
14 def sanitize_module_name(module_name):
15 """Sanitize the given module name, by replacing dashes and points
16 with underscores and prefixing it with a letter if it doesn't start
17 with one
18 """
19 module_name = module_name.replace('-', '_').replace('.', '_')
20 if module_name[0] not in string.ascii_letters:
21 module_name = "a" + module_name
22 return module_name
23
24
25 class Command(ScrapyCommand):
26
27 requires_project = False
28 default_settings = {'LOG_ENABLED': False}
29
30 def syntax(self):
31 return "[options] <name> <domain>"
32
33 def short_desc(self):
34 return "Generate new spider using pre-defined templates"
35
36 def add_options(self, parser):
37 ScrapyCommand.add_options(self, parser)
38 parser.add_option("-l", "--list", dest="list", action="store_true",
39 help="List available templates")
40 parser.add_option("-e", "--edit", dest="edit", action="store_true",
41 help="Edit spider after creating it")
42 parser.add_option("-d", "--dump", dest="dump", metavar="TEMPLATE",
43 help="Dump template to standard output")
44 parser.add_option("-t", "--template", dest="template", default="basic",
45 help="Uses a custom template.")
46 parser.add_option("--force", dest="force", action="store_true",
47 help="If the spider already exists, overwrite it with the template")
48
49 def run(self, args, opts):
50 if opts.list:
51 self._list_templates()
52 return
53 if opts.dump:
54 template_file = self._find_template(opts.dump)
55 if template_file:
56 with open(template_file, "r") as f:
57 print(f.read())
58 return
59 if len(args) != 2:
60 raise UsageError()
61
62 name, domain = args[0:2]
63 module = sanitize_module_name(name)
64
65 if self.settings.get('BOT_NAME') == module:
66 print("Cannot create a spider with the same name as your project")
67 return
68
69 try:
70 spidercls = self.crawler_process.spider_loader.load(name)
71 except KeyError:
72 pass
73 else:
74 # if spider already exists and not --force then halt
75 if not opts.force:
76 print("Spider %r already exists in module:" % name)
77 print(" %s" % spidercls.__module__)
78 return
79 template_file = self._find_template(opts.template)
80 if template_file:
81 self._genspider(module, name, domain, opts.template, template_file)
82 if opts.edit:
83 self.exitcode = os.system('scrapy edit "%s"' % name)
84
85 def _genspider(self, module, name, domain, template_name, template_file):
86 """Generate the spider module, based on the given template"""
87 tvars = {
88 'project_name': self.settings.get('BOT_NAME'),
89 'ProjectName': string_camelcase(self.settings.get('BOT_NAME')),
90 'module': module,
91 'name': name,
92 'domain': domain,
93 'classname': '%sSpider' % ''.join(s.capitalize() for s in module.split('_'))
94 }
95 if self.settings.get('NEWSPIDER_MODULE'):
96 spiders_module = import_module(self.settings['NEWSPIDER_MODULE'])
97 spiders_dir = abspath(dirname(spiders_module.__file__))
98 else:
99 spiders_module = None
100 spiders_dir = "."
101 spider_file = "%s.py" % join(spiders_dir, module)
102 shutil.copyfile(template_file, spider_file)
103 render_templatefile(spider_file, **tvars)
104 print("Created spider %r using template %r "
105 % (name, template_name), end=('' if spiders_module else '\n'))
106 if spiders_module:
107 print("in module:\n %s.%s" % (spiders_module.__name__, module))
108
109 def _find_template(self, template):
110 template_file = join(self.templates_dir, '%s.tmpl' % template)
111 if exists(template_file):
112 return template_file
113 print("Unable to find template: %s\n" % template)
114 print('Use "scrapy genspider --list" to see all available templates.')
115
116 def _list_templates(self):
117 print("Available templates:")
118 for filename in sorted(os.listdir(self.templates_dir)):
119 if filename.endswith('.tmpl'):
120 print(" %s" % splitext(filename)[0])
121
122 @property
123 def templates_dir(self):
124 return join(
125 self.settings['TEMPLATES_DIR'] or join(scrapy.__path__[0], 'templates'),
126 'spiders'
127 )
128
[end of scrapy/commands/genspider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/commands/genspider.py b/scrapy/commands/genspider.py
--- a/scrapy/commands/genspider.py
+++ b/scrapy/commands/genspider.py
@@ -66,16 +66,9 @@
print("Cannot create a spider with the same name as your project")
return
- try:
- spidercls = self.crawler_process.spider_loader.load(name)
- except KeyError:
- pass
- else:
- # if spider already exists and not --force then halt
- if not opts.force:
- print("Spider %r already exists in module:" % name)
- print(" %s" % spidercls.__module__)
- return
+ if not opts.force and self._spider_exists(name):
+ return
+
template_file = self._find_template(opts.template)
if template_file:
self._genspider(module, name, domain, opts.template, template_file)
@@ -119,6 +112,34 @@
if filename.endswith('.tmpl'):
print(" %s" % splitext(filename)[0])
+ def _spider_exists(self, name):
+ if not self.settings.get('NEWSPIDER_MODULE'):
+ # if run as a standalone command and file with same filename already exists
+ if exists(name + ".py"):
+ print("%s already exists" % (abspath(name + ".py")))
+ return True
+ return False
+
+ try:
+ spidercls = self.crawler_process.spider_loader.load(name)
+ except KeyError:
+ pass
+ else:
+ # if spider with same name exists
+ print("Spider %r already exists in module:" % name)
+ print(" %s" % spidercls.__module__)
+ return True
+
+ # a file with the same name exists in the target directory
+ spiders_module = import_module(self.settings['NEWSPIDER_MODULE'])
+ spiders_dir = dirname(spiders_module.__file__)
+ spiders_dir_abs = abspath(spiders_dir)
+ if exists(join(spiders_dir_abs, name + ".py")):
+ print("%s already exists" % (join(spiders_dir_abs, (name + ".py"))))
+ return True
+
+ return False
+
@property
def templates_dir(self):
return join(
| {"golden_diff": "diff --git a/scrapy/commands/genspider.py b/scrapy/commands/genspider.py\n--- a/scrapy/commands/genspider.py\n+++ b/scrapy/commands/genspider.py\n@@ -66,16 +66,9 @@\n print(\"Cannot create a spider with the same name as your project\")\n return\n \n- try:\n- spidercls = self.crawler_process.spider_loader.load(name)\n- except KeyError:\n- pass\n- else:\n- # if spider already exists and not --force then halt\n- if not opts.force:\n- print(\"Spider %r already exists in module:\" % name)\n- print(\" %s\" % spidercls.__module__)\n- return\n+ if not opts.force and self._spider_exists(name):\n+ return\n+\n template_file = self._find_template(opts.template)\n if template_file:\n self._genspider(module, name, domain, opts.template, template_file)\n@@ -119,6 +112,34 @@\n if filename.endswith('.tmpl'):\n print(\" %s\" % splitext(filename)[0])\n \n+ def _spider_exists(self, name):\n+ if not self.settings.get('NEWSPIDER_MODULE'):\n+ # if run as a standalone command and file with same filename already exists\n+ if exists(name + \".py\"):\n+ print(\"%s already exists\" % (abspath(name + \".py\")))\n+ return True\n+ return False\n+\n+ try:\n+ spidercls = self.crawler_process.spider_loader.load(name)\n+ except KeyError:\n+ pass\n+ else:\n+ # if spider with same name exists\n+ print(\"Spider %r already exists in module:\" % name)\n+ print(\" %s\" % spidercls.__module__)\n+ return True\n+\n+ # a file with the same name exists in the target directory\n+ spiders_module = import_module(self.settings['NEWSPIDER_MODULE'])\n+ spiders_dir = dirname(spiders_module.__file__)\n+ spiders_dir_abs = abspath(spiders_dir)\n+ if exists(join(spiders_dir_abs, name + \".py\")):\n+ print(\"%s already exists\" % (join(spiders_dir_abs, (name + \".py\"))))\n+ return True\n+\n+ return False\n+\n @property\n def templates_dir(self):\n return join(\n", "issue": "`scrapy genspider` should not overwrite existing file\n\r\n## Summary\r\n\r\nIf the file mentioned in `scrapy genspider` already exists, then genspider should refuse to generate the the file.\r\n\r\n## Motivation\r\n\r\nAs it stands, existing code can be blown away if this command runs twice.\r\n\r\n## Describe alternatives you've considered\r\n\r\nPrompting the user for overwriting existing spider.\r\n\r\n\n", "before_files": [{"content": "import os\nimport shutil\nimport string\n\nfrom importlib import import_module\nfrom os.path import join, dirname, abspath, exists, splitext\n\nimport scrapy\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.template import render_templatefile, string_camelcase\nfrom scrapy.exceptions import UsageError\n\n\ndef sanitize_module_name(module_name):\n \"\"\"Sanitize the given module name, by replacing dashes and points\n with underscores and prefixing it with a letter if it doesn't start\n with one\n \"\"\"\n module_name = module_name.replace('-', '_').replace('.', '_')\n if module_name[0] not in string.ascii_letters:\n module_name = \"a\" + module_name\n return module_name\n\n\nclass Command(ScrapyCommand):\n\n requires_project = False\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"[options] <name> <domain>\"\n\n def short_desc(self):\n return \"Generate new spider using pre-defined templates\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n parser.add_option(\"-l\", \"--list\", dest=\"list\", action=\"store_true\",\n help=\"List available templates\")\n parser.add_option(\"-e\", \"--edit\", dest=\"edit\", action=\"store_true\",\n help=\"Edit spider 
after creating it\")\n parser.add_option(\"-d\", \"--dump\", dest=\"dump\", metavar=\"TEMPLATE\",\n help=\"Dump template to standard output\")\n parser.add_option(\"-t\", \"--template\", dest=\"template\", default=\"basic\",\n help=\"Uses a custom template.\")\n parser.add_option(\"--force\", dest=\"force\", action=\"store_true\",\n help=\"If the spider already exists, overwrite it with the template\")\n\n def run(self, args, opts):\n if opts.list:\n self._list_templates()\n return\n if opts.dump:\n template_file = self._find_template(opts.dump)\n if template_file:\n with open(template_file, \"r\") as f:\n print(f.read())\n return\n if len(args) != 2:\n raise UsageError()\n\n name, domain = args[0:2]\n module = sanitize_module_name(name)\n\n if self.settings.get('BOT_NAME') == module:\n print(\"Cannot create a spider with the same name as your project\")\n return\n\n try:\n spidercls = self.crawler_process.spider_loader.load(name)\n except KeyError:\n pass\n else:\n # if spider already exists and not --force then halt\n if not opts.force:\n print(\"Spider %r already exists in module:\" % name)\n print(\" %s\" % spidercls.__module__)\n return\n template_file = self._find_template(opts.template)\n if template_file:\n self._genspider(module, name, domain, opts.template, template_file)\n if opts.edit:\n self.exitcode = os.system('scrapy edit \"%s\"' % name)\n\n def _genspider(self, module, name, domain, template_name, template_file):\n \"\"\"Generate the spider module, based on the given template\"\"\"\n tvars = {\n 'project_name': self.settings.get('BOT_NAME'),\n 'ProjectName': string_camelcase(self.settings.get('BOT_NAME')),\n 'module': module,\n 'name': name,\n 'domain': domain,\n 'classname': '%sSpider' % ''.join(s.capitalize() for s in module.split('_'))\n }\n if self.settings.get('NEWSPIDER_MODULE'):\n spiders_module = import_module(self.settings['NEWSPIDER_MODULE'])\n spiders_dir = abspath(dirname(spiders_module.__file__))\n else:\n spiders_module = None\n spiders_dir = \".\"\n spider_file = \"%s.py\" % join(spiders_dir, module)\n shutil.copyfile(template_file, spider_file)\n render_templatefile(spider_file, **tvars)\n print(\"Created spider %r using template %r \"\n % (name, template_name), end=('' if spiders_module else '\\n'))\n if spiders_module:\n print(\"in module:\\n %s.%s\" % (spiders_module.__name__, module))\n\n def _find_template(self, template):\n template_file = join(self.templates_dir, '%s.tmpl' % template)\n if exists(template_file):\n return template_file\n print(\"Unable to find template: %s\\n\" % template)\n print('Use \"scrapy genspider --list\" to see all available templates.')\n\n def _list_templates(self):\n print(\"Available templates:\")\n for filename in sorted(os.listdir(self.templates_dir)):\n if filename.endswith('.tmpl'):\n print(\" %s\" % splitext(filename)[0])\n\n @property\n def templates_dir(self):\n return join(\n self.settings['TEMPLATES_DIR'] or join(scrapy.__path__[0], 'templates'),\n 'spiders'\n )\n", "path": "scrapy/commands/genspider.py"}]} | 1,956 | 533 |
gh_patches_debug_10030 | rasdani/github-patches | git_diff | mozilla__bugbug-3696 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop the `bug` model
The model has been renamed to `defect` in https://github.com/mozilla/bugbug/pull/335/files#diff-74f32ab12cc9be314b1fead3a281590c768699c2e9883b8346066f6c4d6daa90
However, the definition of the `bug` model was not dropped.
We need to drop the following line: https://github.com/mozilla/bugbug/blob/c228c2a686b271f7ca24a0716cda4f9059229d9f/bugbug/models/__init__.py#L16
</issue>
<code>
[start of bugbug/models/__init__.py]
1 # -*- coding: utf-8 -*-
2 import importlib
3 import logging
4 from typing import Type
5
6 from bugbug.model import Model
7
8 LOGGER = logging.getLogger()
9
10
11 MODELS = {
12 "annotateignore": "bugbug.models.annotate_ignore.AnnotateIgnoreModel",
13 "assignee": "bugbug.models.assignee.AssigneeModel",
14 "backout": "bugbug.models.backout.BackoutModel",
15 "browsername": "bugbug.models.browsername.BrowserNameModel",
16 "bug": "bugbug.model.BugModel",
17 "bugtype": "bugbug.models.bugtype.BugTypeModel",
18 "component": "bugbug.models.component.ComponentModel",
19 "component_nn": "bugbug.models.component_nn.ComponentNNModel",
20 "defect": "bugbug.models.defect.DefectModel",
21 "defectenhancementtask": "bugbug.models.defect_enhancement_task.DefectEnhancementTaskModel",
22 "devdocneeded": "bugbug.models.devdocneeded.DevDocNeededModel",
23 "duplicate": "bugbug.models.duplicate.DuplicateModel",
24 "fixtime": "bugbug.models.fixtime.FixTimeModel",
25 "needsdiagnosis": "bugbug.models.needsdiagnosis.NeedsDiagnosisModel",
26 "qaneeded": "bugbug.models.qaneeded.QANeededModel",
27 "rcatype": "bugbug.models.rcatype.RCATypeModel",
28 "regression": "bugbug.models.regression.RegressionModel",
29 "regressionrange": "bugbug.models.regressionrange.RegressionRangeModel",
30 "regressor": "bugbug.models.regressor.RegressorModel",
31 "spambug": "bugbug.models.spambug.SpamBugModel",
32 "stepstoreproduce": "bugbug.models.stepstoreproduce.StepsToReproduceModel",
33 "testlabelselect": "bugbug.models.testselect.TestLabelSelectModel",
34 "testgroupselect": "bugbug.models.testselect.TestGroupSelectModel",
35 "testconfiggroupselect": "bugbug.models.testselect.TestConfigGroupSelectModel",
36 "testfailure": "bugbug.models.testfailure.TestFailureModel",
37 "tracking": "bugbug.models.tracking.TrackingModel",
38 "uplift": "bugbug.models.uplift.UpliftModel",
39 }
40
41
42 def get_model_class(model_name: str) -> Type[Model]:
43 if model_name not in MODELS:
44 err_msg = f"Invalid name {model_name}, not in {list(MODELS.keys())}"
45 raise ValueError(err_msg)
46
47 full_qualified_class_name = MODELS[model_name]
48 module_name, class_name = full_qualified_class_name.rsplit(".", 1)
49
50 module = importlib.import_module(module_name)
51
52 return getattr(module, class_name)
53
[end of bugbug/models/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bugbug/models/__init__.py b/bugbug/models/__init__.py
--- a/bugbug/models/__init__.py
+++ b/bugbug/models/__init__.py
@@ -13,7 +13,6 @@
"assignee": "bugbug.models.assignee.AssigneeModel",
"backout": "bugbug.models.backout.BackoutModel",
"browsername": "bugbug.models.browsername.BrowserNameModel",
- "bug": "bugbug.model.BugModel",
"bugtype": "bugbug.models.bugtype.BugTypeModel",
"component": "bugbug.models.component.ComponentModel",
"component_nn": "bugbug.models.component_nn.ComponentNNModel",
| {"golden_diff": "diff --git a/bugbug/models/__init__.py b/bugbug/models/__init__.py\n--- a/bugbug/models/__init__.py\n+++ b/bugbug/models/__init__.py\n@@ -13,7 +13,6 @@\n \"assignee\": \"bugbug.models.assignee.AssigneeModel\",\n \"backout\": \"bugbug.models.backout.BackoutModel\",\n \"browsername\": \"bugbug.models.browsername.BrowserNameModel\",\n- \"bug\": \"bugbug.model.BugModel\",\n \"bugtype\": \"bugbug.models.bugtype.BugTypeModel\",\n \"component\": \"bugbug.models.component.ComponentModel\",\n \"component_nn\": \"bugbug.models.component_nn.ComponentNNModel\",\n", "issue": "Drop the `bug` model\nThe model has been renamed to `defect` in https://github.com/mozilla/bugbug/pull/335/files#diff-74f32ab12cc9be314b1fead3a281590c768699c2e9883b8346066f6c4d6daa90 \r\n\r\nHowever, the definition of `bug` model was not dropped.\r\n\r\n\r\nWe need to drop the following line: https://github.com/mozilla/bugbug/blob/c228c2a686b271f7ca24a0716cda4f9059229d9f/bugbug/models/__init__.py#L16\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport importlib\nimport logging\nfrom typing import Type\n\nfrom bugbug.model import Model\n\nLOGGER = logging.getLogger()\n\n\nMODELS = {\n \"annotateignore\": \"bugbug.models.annotate_ignore.AnnotateIgnoreModel\",\n \"assignee\": \"bugbug.models.assignee.AssigneeModel\",\n \"backout\": \"bugbug.models.backout.BackoutModel\",\n \"browsername\": \"bugbug.models.browsername.BrowserNameModel\",\n \"bug\": \"bugbug.model.BugModel\",\n \"bugtype\": \"bugbug.models.bugtype.BugTypeModel\",\n \"component\": \"bugbug.models.component.ComponentModel\",\n \"component_nn\": \"bugbug.models.component_nn.ComponentNNModel\",\n \"defect\": \"bugbug.models.defect.DefectModel\",\n \"defectenhancementtask\": \"bugbug.models.defect_enhancement_task.DefectEnhancementTaskModel\",\n \"devdocneeded\": \"bugbug.models.devdocneeded.DevDocNeededModel\",\n \"duplicate\": \"bugbug.models.duplicate.DuplicateModel\",\n \"fixtime\": \"bugbug.models.fixtime.FixTimeModel\",\n \"needsdiagnosis\": \"bugbug.models.needsdiagnosis.NeedsDiagnosisModel\",\n \"qaneeded\": \"bugbug.models.qaneeded.QANeededModel\",\n \"rcatype\": \"bugbug.models.rcatype.RCATypeModel\",\n \"regression\": \"bugbug.models.regression.RegressionModel\",\n \"regressionrange\": \"bugbug.models.regressionrange.RegressionRangeModel\",\n \"regressor\": \"bugbug.models.regressor.RegressorModel\",\n \"spambug\": \"bugbug.models.spambug.SpamBugModel\",\n \"stepstoreproduce\": \"bugbug.models.stepstoreproduce.StepsToReproduceModel\",\n \"testlabelselect\": \"bugbug.models.testselect.TestLabelSelectModel\",\n \"testgroupselect\": \"bugbug.models.testselect.TestGroupSelectModel\",\n \"testconfiggroupselect\": \"bugbug.models.testselect.TestConfigGroupSelectModel\",\n \"testfailure\": \"bugbug.models.testfailure.TestFailureModel\",\n \"tracking\": \"bugbug.models.tracking.TrackingModel\",\n \"uplift\": \"bugbug.models.uplift.UpliftModel\",\n}\n\n\ndef get_model_class(model_name: str) -> Type[Model]:\n if model_name not in MODELS:\n err_msg = f\"Invalid name {model_name}, not in {list(MODELS.keys())}\"\n raise ValueError(err_msg)\n\n full_qualified_class_name = MODELS[model_name]\n module_name, class_name = full_qualified_class_name.rsplit(\".\", 1)\n\n module = importlib.import_module(module_name)\n\n return getattr(module, class_name)\n", "path": "bugbug/models/__init__.py"}]} | 1,408 | 159 |
gh_patches_debug_4882 | rasdani/github-patches | git_diff | chainer__chainer-1501 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ProgressBar iters/sec is too low in CPU mode
The 'iters/sec' speed displayed in the `ProgressBar` extension seems too low when running a model in CPU mode. I suspect this is due to the use of `time.clock()` instead of `time.time()`. `time.clock()` apparently measures the total time spent in all CPU cores, which can cause the measured time to (sometimes greatly) exceed the wall clock time.
</issue>
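For context, `time.time()` reports elapsed wall-clock time while `time.clock()` reports CPU time, so a rate computed against CPU time is skewed whenever the two diverge. The sketch below is illustrative only, not code from the repository; it uses `time.process_time`, the modern replacement for the long-deprecated `time.clock`:

```python
import time

start_wall = time.time()
start_cpu = time.process_time()

time.sleep(1.0)  # no CPU is consumed while sleeping

print(time.time() - start_wall)         # ~1.0 s of wall time
print(time.process_time() - start_cpu)  # ~0.0 s of CPU time
```

In CPU mode the skew runs the other way: a workload keeping several cores busy (e.g. multi-threaded BLAS calls) accumulates CPU time faster than wall time, so dividing iterations by CPU time understates iters/sec — matching the symptom reported above.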
<code>
[start of chainer/training/extensions/progress_bar.py]
1 from __future__ import division
2 import datetime
3 import sys
4 import time
5
6 from chainer.training import extension
7 from chainer.training import trigger
8
9
10 class ProgressBar(extension.Extension):
11
12 """Trainer extension to print a progress bar and recent training status.
13
14 This extension prints a progress bar at every call. It watches the current
15 iteration and epoch to print the bar.
16
17 Args:
18 training_length (tuple): Length of whole training. It consists of an
19 integer and either ``'epoch'`` or ``'iteration'``. If this value is
20 omitted and the stop trigger of the trainer is
21 :class:`IntervalTrigger`, this extension uses its attributes to
22 determine the length of the training.
23 update_interval (int): Number of iterations to skip printing the
24 progress bar.
25 bar_length (int): Length of the progress bar in characters.
26 out: Stream to print the bar. Standard output is used by default.
27
28 """
29 def __init__(self, training_length=None, update_interval=100,
30 bar_length=50, out=sys.stdout):
31 self._training_length = training_length
32 self._status_template = None
33 self._update_interval = update_interval
34 self._bar_length = bar_length
35 self._out = out
36 self._recent_timing = []
37
38 def __call__(self, trainer):
39 training_length = self._training_length
40
41 # initialize some attributes at the first call
42 if training_length is None:
43 t = trainer.stop_trigger
44 if not isinstance(t, trigger.IntervalTrigger):
45 raise TypeError(
46 'cannot retrieve the training length from %s' % type(t))
47 training_length = self._training_length = t.period, t.unit
48
49 stat_template = self._status_template
50 if stat_template is None:
51 stat_template = self._status_template = (
52 '{0.iteration:10} iter, {0.epoch} epoch / %s %ss\n' %
53 training_length)
54
55 length, unit = training_length
56 out = self._out
57
58 iteration = trainer.updater.iteration
59
60 # print the progress bar
61 if iteration % self._update_interval == 0:
62 epoch = trainer.updater.epoch_detail
63 recent_timing = self._recent_timing
64 now = time.clock()
65
66 if len(recent_timing) >= 1:
67 out.write('\033[J')
68
69 if unit == 'iteration':
70 rate = iteration / length
71 else:
72 rate = epoch / length
73
74 bar_length = self._bar_length
75 marks = '#' * int(rate * bar_length)
76 out.write(' total [{}{}] {:6.2%}\n'.format(
77 marks, '.' * (bar_length - len(marks)), rate))
78
79 epoch_rate = epoch - int(epoch)
80 marks = '#' * int(epoch_rate * bar_length)
81 out.write('this epoch [{}{}] {:6.2%}\n'.format(
82 marks, '.' * (bar_length - len(marks)), epoch_rate))
83
84 status = stat_template.format(trainer.updater)
85 out.write(status)
86
87 old_t, old_e, old_sec = recent_timing[0]
88 speed_t = (iteration - old_t) / (now - old_sec)
89 speed_e = (epoch - old_e) / (now - old_sec)
90 if unit == 'iteration':
91 estimated_time = (length - iteration) / speed_t
92 else:
93 estimated_time = (length - epoch) / speed_e
94 out.write('{:10.5g} iters/sec. Estimated time to finish: {}.\n'
95 .format(speed_t,
96 datetime.timedelta(seconds=estimated_time)))
97
98 # move the cursor to the head of the progress bar
99 out.write('\033[4A')
100 out.flush()
101
102 if len(recent_timing) > 100:
103 del recent_timing[0]
104
105 recent_timing.append((iteration, epoch, now))
106
107 def finalize(self):
108 # delete the progress bar
109 out = self._out
110 out.write('\033[J')
111 out.flush()
112
[end of chainer/training/extensions/progress_bar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/training/extensions/progress_bar.py b/chainer/training/extensions/progress_bar.py
--- a/chainer/training/extensions/progress_bar.py
+++ b/chainer/training/extensions/progress_bar.py
@@ -61,7 +61,7 @@
if iteration % self._update_interval == 0:
epoch = trainer.updater.epoch_detail
recent_timing = self._recent_timing
- now = time.clock()
+ now = time.time()
if len(recent_timing) >= 1:
out.write('\033[J')
| {"golden_diff": "diff --git a/chainer/training/extensions/progress_bar.py b/chainer/training/extensions/progress_bar.py\n--- a/chainer/training/extensions/progress_bar.py\n+++ b/chainer/training/extensions/progress_bar.py\n@@ -61,7 +61,7 @@\n if iteration % self._update_interval == 0:\n epoch = trainer.updater.epoch_detail\n recent_timing = self._recent_timing\n- now = time.clock()\n+ now = time.time()\n \n if len(recent_timing) >= 1:\n out.write('\\033[J')\n", "issue": "ProgressBar iters/sec is too low in CPU mode\nThe 'iters/sec' speed displayed in the `ProgressBar` extension seems too low when running a model in CPU mode. I suspect this is due to the use of `time.clock()` instead of `time.time()`. `time.clock()` apparently measures the total time spent in all CPU cores, which can cause the measured time to (sometimes greatly) exceed the wall clock time.\n\n", "before_files": [{"content": "from __future__ import division\nimport datetime\nimport sys\nimport time\n\nfrom chainer.training import extension\nfrom chainer.training import trigger\n\n\nclass ProgressBar(extension.Extension):\n\n \"\"\"Trainer extension to print a progress bar and recent training status.\n\n This extension prints a progress bar at every call. It watches the current\n iteration and epoch to print the bar.\n\n Args:\n training_length (tuple): Length of whole training. It consists of an\n integer and either ``'epoch'`` or ``'iteration'``. If this value is\n omitted and the stop trigger of the trainer is\n :class:`IntervalTrigger`, this extension uses its attributes to\n determine the length of the training.\n update_interval (int): Number of iterations to skip printing the\n progress bar.\n bar_length (int): Length of the progress bar in characters.\n out: Stream to print the bar. Standard output is used by default.\n\n \"\"\"\n def __init__(self, training_length=None, update_interval=100,\n bar_length=50, out=sys.stdout):\n self._training_length = training_length\n self._status_template = None\n self._update_interval = update_interval\n self._bar_length = bar_length\n self._out = out\n self._recent_timing = []\n\n def __call__(self, trainer):\n training_length = self._training_length\n\n # initialize some attributes at the first call\n if training_length is None:\n t = trainer.stop_trigger\n if not isinstance(t, trigger.IntervalTrigger):\n raise TypeError(\n 'cannot retrieve the training length from %s' % type(t))\n training_length = self._training_length = t.period, t.unit\n\n stat_template = self._status_template\n if stat_template is None:\n stat_template = self._status_template = (\n '{0.iteration:10} iter, {0.epoch} epoch / %s %ss\\n' %\n training_length)\n\n length, unit = training_length\n out = self._out\n\n iteration = trainer.updater.iteration\n\n # print the progress bar\n if iteration % self._update_interval == 0:\n epoch = trainer.updater.epoch_detail\n recent_timing = self._recent_timing\n now = time.clock()\n\n if len(recent_timing) >= 1:\n out.write('\\033[J')\n\n if unit == 'iteration':\n rate = iteration / length\n else:\n rate = epoch / length\n\n bar_length = self._bar_length\n marks = '#' * int(rate * bar_length)\n out.write(' total [{}{}] {:6.2%}\\n'.format(\n marks, '.' * (bar_length - len(marks)), rate))\n\n epoch_rate = epoch - int(epoch)\n marks = '#' * int(epoch_rate * bar_length)\n out.write('this epoch [{}{}] {:6.2%}\\n'.format(\n marks, '.' 
* (bar_length - len(marks)), epoch_rate))\n\n status = stat_template.format(trainer.updater)\n out.write(status)\n\n old_t, old_e, old_sec = recent_timing[0]\n speed_t = (iteration - old_t) / (now - old_sec)\n speed_e = (epoch - old_e) / (now - old_sec)\n if unit == 'iteration':\n estimated_time = (length - iteration) / speed_t\n else:\n estimated_time = (length - epoch) / speed_e\n out.write('{:10.5g} iters/sec. Estimated time to finish: {}.\\n'\n .format(speed_t,\n datetime.timedelta(seconds=estimated_time)))\n\n # move the cursor to the head of the progress bar\n out.write('\\033[4A')\n out.flush()\n\n if len(recent_timing) > 100:\n del recent_timing[0]\n\n recent_timing.append((iteration, epoch, now))\n\n def finalize(self):\n # delete the progress bar\n out = self._out\n out.write('\\033[J')\n out.flush()\n", "path": "chainer/training/extensions/progress_bar.py"}]} | 1,750 | 126 |
gh_patches_debug_957 | rasdani/github-patches | git_diff | pymeasure__pymeasure-909 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check all Channel classes for docstrings
#895 added a property docstring test. It works, however, only for the `Instrument` classes which are publicly available.
Channels (and some base instruments), which are not imported in the init files, are not checked.
This issue is about collecting all `Instrument` and `Channel` subclasses in order to check them for docstring consistency.
</issue>
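One way to gather the classes the issue asks for is to walk the `pymeasure.instruments` package, import every submodule, and filter on the base classes. This is a minimal sketch under the assumption that all driver modules import cleanly (some may have side effects or optional dependencies); it is not the test that was eventually committed:

```python
import importlib
import inspect
import pkgutil

import pymeasure.instruments
from pymeasure.instruments import Channel, Instrument


def collect_instrument_classes():
    """Yield every Instrument/Channel subclass, publicly exported or not."""
    for info in pkgutil.walk_packages(pymeasure.instruments.__path__,
                                      prefix="pymeasure.instruments."):
        module = importlib.import_module(info.name)
        for _, cls in inspect.getmembers(module, inspect.isclass):
            if issubclass(cls, (Instrument, Channel)):
                yield cls
```

A docstring-consistency test can then iterate over `set(collect_instrument_classes())` instead of only the names re-exported in the `__init__` files.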
<code>
[start of pymeasure/instruments/__init__.py]
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2023 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 from ..errors import RangeError, RangeException
26 from .channel import Channel
27 from .instrument import Instrument
28 from .resources import list_resources
29 from .validators import discreteTruncate
30
31 from . import activetechnologies
32 from . import advantest
33 from . import agilent
34 from . import aja
35 from . import ametek
36 from . import ami
37 from . import anaheimautomation
38 from . import anapico
39 from . import andeenhagerling
40 from . import anritsu
41 from . import attocube
42 from . import bkprecision
43 from . import danfysik
44 from . import deltaelektronika
45 from . import edwards
46 from . import eurotest
47 from . import fluke
48 from . import fwbell
49 from . import hcp
50 from . import heidenhain
51 from . import hp
52 from . import ipgphotonics
53 from . import keithley
54 from . import keysight
55 from . import lakeshore
56 from . import lecroy
57 from . import mksinst
58 from . import newport
59 from . import ni
60 from . import oxfordinstruments
61 from . import parker
62 from . import pendulum
63 from . import razorbill
64 from . import rohdeschwarz
65 from . import siglenttechnologies
66 from . import signalrecovery
67 from . import srs
68 from . import tcpowerconversion
69 from . import tektronix
70 from . import temptronic
71 from . import texio
72 from . import thermotron
73 from . import thorlabs
74 from . import toptica
75 from . import velleman
76 from . import yokogawa
77
[end of pymeasure/instruments/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pymeasure/instruments/__init__.py b/pymeasure/instruments/__init__.py
--- a/pymeasure/instruments/__init__.py
+++ b/pymeasure/instruments/__init__.py
@@ -67,6 +67,7 @@
from . import srs
from . import tcpowerconversion
from . import tektronix
+from . import teledyne
from . import temptronic
from . import texio
from . import thermotron
| {"golden_diff": "diff --git a/pymeasure/instruments/__init__.py b/pymeasure/instruments/__init__.py\n--- a/pymeasure/instruments/__init__.py\n+++ b/pymeasure/instruments/__init__.py\n@@ -67,6 +67,7 @@\n from . import srs\n from . import tcpowerconversion\n from . import tektronix\n+from . import teledyne\n from . import temptronic\n from . import texio\n from . import thermotron\n", "issue": "Check all Channel classes for docstrings\n#895 added a property docstring test. It works, however, only for the `Instrument` classes which are publicly available.\r\nChannels (and some base instruments), which are not imported in the init files, are not checked.\r\n\r\nThis issue is about collecting all `Instrument` and `Channel` subclasses in order to check them for docstring consistencies.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nfrom ..errors import RangeError, RangeException\nfrom .channel import Channel\nfrom .instrument import Instrument\nfrom .resources import list_resources\nfrom .validators import discreteTruncate\n\nfrom . import activetechnologies\nfrom . import advantest\nfrom . import agilent\nfrom . import aja\nfrom . import ametek\nfrom . import ami\nfrom . import anaheimautomation\nfrom . import anapico\nfrom . import andeenhagerling\nfrom . import anritsu\nfrom . import attocube\nfrom . import bkprecision\nfrom . import danfysik\nfrom . import deltaelektronika\nfrom . import edwards\nfrom . import eurotest\nfrom . import fluke\nfrom . import fwbell\nfrom . import hcp\nfrom . import heidenhain\nfrom . import hp\nfrom . import ipgphotonics\nfrom . import keithley\nfrom . import keysight\nfrom . import lakeshore\nfrom . import lecroy\nfrom . import mksinst\nfrom . import newport\nfrom . import ni\nfrom . import oxfordinstruments\nfrom . import parker\nfrom . import pendulum\nfrom . import razorbill\nfrom . import rohdeschwarz\nfrom . import siglenttechnologies\nfrom . import signalrecovery\nfrom . import srs\nfrom . import tcpowerconversion\nfrom . import tektronix\nfrom . import temptronic\nfrom . import texio\nfrom . import thermotron\nfrom . import thorlabs\nfrom . import toptica\nfrom . import velleman\nfrom . import yokogawa\n", "path": "pymeasure/instruments/__init__.py"}]} | 1,373 | 108 |
gh_patches_debug_3832 | rasdani/github-patches | git_diff | jazzband__pip-tools-1035 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No handlers could be found for logger "pip.vcs.git"
Looks like logging is not totally set up. I get this when running `pip-compile` with VCS links:
```
No handlers could be found for logger "pip.vcs.git"
```
</issue>
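The message is Python 2's stdlib `logging` complaint, emitted when a record reaches a logger before any handler has been configured anywhere in the hierarchy. A minimal reproduction and the corresponding fix (illustrative, not repository code):

```python
import logging

logging.getLogger("pip.vcs.git").warning("cloning repository")
# Python 2 prints: No handlers could be found for logger "pip.vcs.git"

logging.basicConfig()  # installs a default stderr handler on the root logger
logging.getLogger("pip.vcs.git").warning("cloning repository")  # now rendered
```

Calling `logging.basicConfig()` once at import time is enough for messages from vendored pip components to surface.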
<code>
[start of piptools/logging.py]
1 # coding: utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from . import click
5
6
7 class LogContext(object):
8 def __init__(self, verbosity=0):
9 self.verbosity = verbosity
10
11 def log(self, *args, **kwargs):
12 kwargs.setdefault("err", True)
13 click.secho(*args, **kwargs)
14
15 def debug(self, *args, **kwargs):
16 if self.verbosity >= 1:
17 self.log(*args, **kwargs)
18
19 def info(self, *args, **kwargs):
20 if self.verbosity >= 0:
21 self.log(*args, **kwargs)
22
23 def warning(self, *args, **kwargs):
24 kwargs.setdefault("fg", "yellow")
25 self.log(*args, **kwargs)
26
27 def error(self, *args, **kwargs):
28 kwargs.setdefault("fg", "red")
29 self.log(*args, **kwargs)
30
31
32 log = LogContext()
33
[end of piptools/logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/piptools/logging.py b/piptools/logging.py
--- a/piptools/logging.py
+++ b/piptools/logging.py
@@ -1,8 +1,14 @@
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
+import logging
+
from . import click
+# Initialise the builtin logging module for other component using it.
+# Ex: pip
+logging.basicConfig()
+
class LogContext(object):
def __init__(self, verbosity=0):
| {"golden_diff": "diff --git a/piptools/logging.py b/piptools/logging.py\n--- a/piptools/logging.py\n+++ b/piptools/logging.py\n@@ -1,8 +1,14 @@\n # coding: utf-8\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+import logging\n+\n from . import click\n \n+# Initialise the builtin logging module for other component using it.\n+# Ex: pip\n+logging.basicConfig()\n+\n \n class LogContext(object):\n def __init__(self, verbosity=0):\n", "issue": "No handlers could be found for logger \"pip.vcs.git\"\nLooks like logging is not totally set up. I get this when `pip-compile` wit VCS links:\n\n```\nNo handlers could be found for logger \"pip.vcs.git\"\n```\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom . import click\n\n\nclass LogContext(object):\n def __init__(self, verbosity=0):\n self.verbosity = verbosity\n\n def log(self, *args, **kwargs):\n kwargs.setdefault(\"err\", True)\n click.secho(*args, **kwargs)\n\n def debug(self, *args, **kwargs):\n if self.verbosity >= 1:\n self.log(*args, **kwargs)\n\n def info(self, *args, **kwargs):\n if self.verbosity >= 0:\n self.log(*args, **kwargs)\n\n def warning(self, *args, **kwargs):\n kwargs.setdefault(\"fg\", \"yellow\")\n self.log(*args, **kwargs)\n\n def error(self, *args, **kwargs):\n kwargs.setdefault(\"fg\", \"red\")\n self.log(*args, **kwargs)\n\n\nlog = LogContext()\n", "path": "piptools/logging.py"}]} | 857 | 116 |
gh_patches_debug_202 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-232 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test coverage
Some initial extra tests have been written to get the test coverage up a bit.
</issue>
<code>
[start of aimmo-game-creator/setup.py]
1 # -*- coding: utf-8 -*-
2 from setuptools import find_packages, setup
3
4
5 setup(
6 name='aimmo-game-creator',
7 packages=find_packages(),
8 include_package_data=True,
9 install_requires=[
10 'eventlet',
11 'pykube',
12 ],
13 tests_require=[
14 'httmock',
15 ],
16 test_suite='tests',
17 zip_safe=False,
18 )
19
[end of aimmo-game-creator/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aimmo-game-creator/setup.py b/aimmo-game-creator/setup.py
--- a/aimmo-game-creator/setup.py
+++ b/aimmo-game-creator/setup.py
@@ -12,6 +12,7 @@
],
tests_require=[
'httmock',
+ 'mock',
],
test_suite='tests',
zip_safe=False,
| {"golden_diff": "diff --git a/aimmo-game-creator/setup.py b/aimmo-game-creator/setup.py\n--- a/aimmo-game-creator/setup.py\n+++ b/aimmo-game-creator/setup.py\n@@ -12,6 +12,7 @@\n ],\n tests_require=[\n 'httmock',\n+ 'mock',\n ],\n test_suite='tests',\n zip_safe=False,\n", "issue": "Test coverage\nSome first extra tests written to get the test coverage up a bit.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\n\n\nsetup(\n name='aimmo-game-creator',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'eventlet',\n 'pykube',\n ],\n tests_require=[\n 'httmock',\n ],\n test_suite='tests',\n zip_safe=False,\n)\n", "path": "aimmo-game-creator/setup.py"}]} | 666 | 87 |
gh_patches_debug_52863 | rasdani/github-patches | git_diff | getsentry__sentry-70706 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue Details: Use new assignee selector component
Update issue details to use the new component in https://github.com/getsentry/sentry/issues/69054
This should be released under a short-lived feature flag.
</issue>
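Because the replacement component renders its entries as standard ARIA listbox options rather than elements carrying a bespoke `data-test-id`, the acceptance tests have to locate them by role. A sketch of the adjusted lookup (an illustrative helper, not the page-object code itself):

```python
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait


def click_first_assignee_option(container, timeout=10):
    # Wait for the new selector's ARIA options to render, then pick one.
    wait = WebDriverWait(container, timeout)
    wait.until(expected_conditions.presence_of_element_located(
        (By.CSS_SELECTOR, '[role="option"]')))
    container.find_elements(By.CSS_SELECTOR, '[role="option"]')[0].click()
```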
<code>
[start of fixtures/page_objects/issue_details.py]
1 from selenium.webdriver.common.by import By
2 from selenium.webdriver.support import expected_conditions
3 from selenium.webdriver.support.wait import WebDriverWait
4
5 from .base import BasePage
6 from .global_selection import GlobalSelectionPage
7
8
9 class IssueDetailsPage(BasePage):
10 def __init__(self, browser, client):
11 super().__init__(browser)
12 self.client = client
13 self.global_selection = GlobalSelectionPage(browser)
14
15 def visit_issue(self, org, groupid):
16 self.browser.get(f"/organizations/{org}/issues/{groupid}/")
17 self.wait_until_loaded()
18
19 def visit_issue_activity(self, org, groupid):
20 self.browser.get(f"/organizations/{org}/issues/{groupid}/activity/")
21 self.browser.wait_until_not('[data-test-id="loading-indicator"]')
22
23 def visit_issue_in_environment(self, org, groupid, environment):
24 self.browser.get(f"/organizations/{org}/issues/{groupid}/?environment={environment}")
25 self.browser.wait_until(".group-detail")
26
27 def visit_tag_values(self, org, groupid, tag):
28 self.browser.get(f"/organizations/{org}/issues/{groupid}/tags/{tag}/")
29 self.browser.wait_until('[data-test-id="group-tag-value"]')
30
31 def get_environment(self):
32 return self.browser.find_element(
33 by=By.CSS_SELECTOR, value='[data-test-id="env-label"'
34 ).text.lower()
35
36 def go_back_to_issues(self):
37 self.global_selection.go_back_to_issues()
38
39 def api_issue_get(self, groupid):
40 return self.client.get(f"/api/0/issues/{groupid}/")
41
42 def go_to_subtab(self, key):
43 tabs = self.browser.find_element(by=By.CSS_SELECTOR, value='[role="tablist"]')
44 tabs.find_element(by=By.CSS_SELECTOR, value=f'[role="tab"][data-key="{key}"]').click()
45 self.browser.wait_until_not('[data-test-id="loading-indicator"]')
46
47 def open_issue_errors(self):
48 self.browser.click(".errors-toggle")
49 self.browser.wait_until(".entries > .errors ul")
50
51 def open_curl(self):
52 self.browser.find_element(by=By.XPATH, value="//a//code[contains(text(), 'curl')]").click()
53
54 def resolve_issue(self):
55 self.browser.click('[aria-label="Resolve"]')
56 # Resolve should become unresolve
57 self.browser.wait_until('[aria-label="Resolved"]')
58
59 def archive_issue(self):
60 self.browser.click('[aria-label="Archive"]')
61 # Ignore should become unresolve
62 self.browser.wait_until('[aria-label="Archived"]')
63
64 def bookmark_issue(self):
65 self.browser.click('button[aria-label="More Actions"]')
66 self.browser.wait_until('[data-test-id="bookmark"]')
67 button = self.browser.element('[data-test-id="bookmark"]')
68 button.click()
69 self.browser.click('button[aria-label="More Actions"]')
70 self.browser.wait_until('[data-test-id="unbookmark"]')
71
72 def assign_to(self, user):
73 assignee = self.browser.find_element(
74 by=By.CSS_SELECTOR, value='[data-test-id="assigned-to"]'
75 )
76
77 # Open the assignee picker
78 assignee.find_element(
79 by=By.CSS_SELECTOR, value='[data-test-id="assignee-selector"]'
80 ).click()
81
82 # Wait for the input to be loaded
83 wait = WebDriverWait(assignee, 10)
84 wait.until(expected_conditions.presence_of_element_located((By.TAG_NAME, "input")))
85
86 assignee.find_element(by=By.TAG_NAME, value="input").send_keys(user)
87
88 # Click the member/team
89 options = assignee.find_elements(
90 by=By.CSS_SELECTOR, value='[data-test-id="assignee-option"]'
91 )
92 assert len(options) > 0, "No assignees could be found."
93 options[0].click()
94
95 self.browser.wait_until_not('[data-test-id="loading-indicator"]')
96
97 def find_comment_form(self):
98 self.browser.wait_until_test_id("note-input-form")
99 return self.browser.find_element(
100 by=By.CSS_SELECTOR, value='[data-test-id="note-input-form"]'
101 )
102
103 def has_comment(self, text):
104 element = self.browser.element('[data-test-id="activity-note-body"]')
105 return text in element.text
106
107 def wait_until_loaded(self):
108 self.browser.wait_until_not('[data-test-id="loading-indicator"]')
109 self.browser.wait_until_not('[data-test-id="event-errors-loading"]')
110 self.browser.wait_until_test_id("linked-issues")
111 self.browser.wait_until_test_id("loaded-device-name")
112 if self.browser.element_exists("#grouping-info"):
113 self.browser.wait_until_test_id("loaded-grouping-info")
114 self.browser.wait_until_not('[data-test-id="loading-placeholder"]')
115
116 def mark_reviewed(self):
117 self.browser.click('[aria-label="More Actions"]')
118 self.browser.wait_until('[data-test-id="mark-review"]')
119 self.browser.click('[data-test-id="mark-review"]')
120 self.browser.click('[aria-label="More Actions"]')
121 self.browser.wait_until('[data-test-id="mark-review"][aria-disabled="true"]')
122
[end of fixtures/page_objects/issue_details.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fixtures/page_objects/issue_details.py b/fixtures/page_objects/issue_details.py
--- a/fixtures/page_objects/issue_details.py
+++ b/fixtures/page_objects/issue_details.py
@@ -86,9 +86,7 @@
assignee.find_element(by=By.TAG_NAME, value="input").send_keys(user)
# Click the member/team
- options = assignee.find_elements(
- by=By.CSS_SELECTOR, value='[data-test-id="assignee-option"]'
- )
+ options = assignee.find_elements(by=By.CSS_SELECTOR, value='[role="option"]')
assert len(options) > 0, "No assignees could be found."
options[0].click()
| {"golden_diff": "diff --git a/fixtures/page_objects/issue_details.py b/fixtures/page_objects/issue_details.py\n--- a/fixtures/page_objects/issue_details.py\n+++ b/fixtures/page_objects/issue_details.py\n@@ -86,9 +86,7 @@\n assignee.find_element(by=By.TAG_NAME, value=\"input\").send_keys(user)\n \n # Click the member/team\n- options = assignee.find_elements(\n- by=By.CSS_SELECTOR, value='[data-test-id=\"assignee-option\"]'\n- )\n+ options = assignee.find_elements(by=By.CSS_SELECTOR, value='[role=\"option\"]')\n assert len(options) > 0, \"No assignees could be found.\"\n options[0].click()\n", "issue": "Issue Details: Use new assignee selector component\nUpdate issue details to use the new component in https://github.com/getsentry/sentry/issues/69054\r\n\r\nThis should be released under a short-lived feature flag.\n", "before_files": [{"content": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom .base import BasePage\nfrom .global_selection import GlobalSelectionPage\n\n\nclass IssueDetailsPage(BasePage):\n def __init__(self, browser, client):\n super().__init__(browser)\n self.client = client\n self.global_selection = GlobalSelectionPage(browser)\n\n def visit_issue(self, org, groupid):\n self.browser.get(f\"/organizations/{org}/issues/{groupid}/\")\n self.wait_until_loaded()\n\n def visit_issue_activity(self, org, groupid):\n self.browser.get(f\"/organizations/{org}/issues/{groupid}/activity/\")\n self.browser.wait_until_not('[data-test-id=\"loading-indicator\"]')\n\n def visit_issue_in_environment(self, org, groupid, environment):\n self.browser.get(f\"/organizations/{org}/issues/{groupid}/?environment={environment}\")\n self.browser.wait_until(\".group-detail\")\n\n def visit_tag_values(self, org, groupid, tag):\n self.browser.get(f\"/organizations/{org}/issues/{groupid}/tags/{tag}/\")\n self.browser.wait_until('[data-test-id=\"group-tag-value\"]')\n\n def get_environment(self):\n return self.browser.find_element(\n by=By.CSS_SELECTOR, value='[data-test-id=\"env-label\"'\n ).text.lower()\n\n def go_back_to_issues(self):\n self.global_selection.go_back_to_issues()\n\n def api_issue_get(self, groupid):\n return self.client.get(f\"/api/0/issues/{groupid}/\")\n\n def go_to_subtab(self, key):\n tabs = self.browser.find_element(by=By.CSS_SELECTOR, value='[role=\"tablist\"]')\n tabs.find_element(by=By.CSS_SELECTOR, value=f'[role=\"tab\"][data-key=\"{key}\"]').click()\n self.browser.wait_until_not('[data-test-id=\"loading-indicator\"]')\n\n def open_issue_errors(self):\n self.browser.click(\".errors-toggle\")\n self.browser.wait_until(\".entries > .errors ul\")\n\n def open_curl(self):\n self.browser.find_element(by=By.XPATH, value=\"//a//code[contains(text(), 'curl')]\").click()\n\n def resolve_issue(self):\n self.browser.click('[aria-label=\"Resolve\"]')\n # Resolve should become unresolve\n self.browser.wait_until('[aria-label=\"Resolved\"]')\n\n def archive_issue(self):\n self.browser.click('[aria-label=\"Archive\"]')\n # Ignore should become unresolve\n self.browser.wait_until('[aria-label=\"Archived\"]')\n\n def bookmark_issue(self):\n self.browser.click('button[aria-label=\"More Actions\"]')\n self.browser.wait_until('[data-test-id=\"bookmark\"]')\n button = self.browser.element('[data-test-id=\"bookmark\"]')\n button.click()\n self.browser.click('button[aria-label=\"More Actions\"]')\n self.browser.wait_until('[data-test-id=\"unbookmark\"]')\n\n def assign_to(self, 
user):\n assignee = self.browser.find_element(\n by=By.CSS_SELECTOR, value='[data-test-id=\"assigned-to\"]'\n )\n\n # Open the assignee picker\n assignee.find_element(\n by=By.CSS_SELECTOR, value='[data-test-id=\"assignee-selector\"]'\n ).click()\n\n # Wait for the input to be loaded\n wait = WebDriverWait(assignee, 10)\n wait.until(expected_conditions.presence_of_element_located((By.TAG_NAME, \"input\")))\n\n assignee.find_element(by=By.TAG_NAME, value=\"input\").send_keys(user)\n\n # Click the member/team\n options = assignee.find_elements(\n by=By.CSS_SELECTOR, value='[data-test-id=\"assignee-option\"]'\n )\n assert len(options) > 0, \"No assignees could be found.\"\n options[0].click()\n\n self.browser.wait_until_not('[data-test-id=\"loading-indicator\"]')\n\n def find_comment_form(self):\n self.browser.wait_until_test_id(\"note-input-form\")\n return self.browser.find_element(\n by=By.CSS_SELECTOR, value='[data-test-id=\"note-input-form\"]'\n )\n\n def has_comment(self, text):\n element = self.browser.element('[data-test-id=\"activity-note-body\"]')\n return text in element.text\n\n def wait_until_loaded(self):\n self.browser.wait_until_not('[data-test-id=\"loading-indicator\"]')\n self.browser.wait_until_not('[data-test-id=\"event-errors-loading\"]')\n self.browser.wait_until_test_id(\"linked-issues\")\n self.browser.wait_until_test_id(\"loaded-device-name\")\n if self.browser.element_exists(\"#grouping-info\"):\n self.browser.wait_until_test_id(\"loaded-grouping-info\")\n self.browser.wait_until_not('[data-test-id=\"loading-placeholder\"]')\n\n def mark_reviewed(self):\n self.browser.click('[aria-label=\"More Actions\"]')\n self.browser.wait_until('[data-test-id=\"mark-review\"]')\n self.browser.click('[data-test-id=\"mark-review\"]')\n self.browser.click('[aria-label=\"More Actions\"]')\n self.browser.wait_until('[data-test-id=\"mark-review\"][aria-disabled=\"true\"]')\n", "path": "fixtures/page_objects/issue_details.py"}]} | 1,947 | 158 |
gh_patches_debug_1264 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1932 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bump MSAL to the latest version
**Is your feature request related to a problem? Please describe.**
Old version of MSAL is used in [botframework-connector](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botframework-connector/requirements.txt#L6) (v1.6.0)
**Describe the solution you'd like**
Upgrade to the [latest version](https://github.com/AzureAD/microsoft-authentication-library-for-python/releases) (v1.13.0 is the latest at this moment).
**Describe alternatives you've considered**
No alternatives.
**Additional context**
Please also consider not pinning this dependency (#1467).
</issue>
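The "don't pin" request usually translates into a bounded version range rather than an exact `==` pin, so compatible patch and minor releases of MSAL can be picked up without a new SDK release. A hypothetical variant of the requirement (the committed change below keeps an exact pin):

```python
REQUIRES = [
    # ...
    "msal>=1.6.0,<2.0.0",  # accept any compatible 1.x release
]
```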
<code>
[start of libraries/botframework-connector/setup.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 NAME = "botframework-connector"
8 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0"
9 REQUIRES = [
10 "msrest==0.6.19",
11 "requests>=2.23.0,<2.26",
12 "PyJWT>=1.5.3,<2.0.0",
13 "botbuilder-schema==4.15.0",
14 "msal==1.6.0",
15 ]
16
17 root = os.path.abspath(os.path.dirname(__file__))
18
19 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
20 long_description = f.read()
21
22 setup(
23 name=NAME,
24 version=VERSION,
25 description="Microsoft Bot Framework Bot Builder SDK for Python.",
26 author="Microsoft",
27 url="https://www.github.com/Microsoft/botbuilder-python",
28 keywords=["BotFrameworkConnector", "bots", "ai", "botframework", "botbuilder"],
29 install_requires=REQUIRES,
30 packages=[
31 "botframework.connector",
32 "botframework.connector.auth",
33 "botframework.connector.async_mixin",
34 "botframework.connector.operations",
35 "botframework.connector.models",
36 "botframework.connector.aio",
37 "botframework.connector.aio.operations_async",
38 "botframework.connector.skills",
39 "botframework.connector.teams",
40 "botframework.connector.teams.operations",
41 "botframework.connector.token_api",
42 "botframework.connector.token_api.aio",
43 "botframework.connector.token_api.aio.operations_async",
44 "botframework.connector.token_api.models",
45 "botframework.connector.token_api.operations",
46 ],
47 include_package_data=True,
48 long_description=long_description,
49 long_description_content_type="text/x-rst",
50 license="MIT",
51 classifiers=[
52 "Programming Language :: Python :: 3.7",
53 "Intended Audience :: Developers",
54 "License :: OSI Approved :: MIT License",
55 "Operating System :: OS Independent",
56 "Development Status :: 5 - Production/Stable",
57 "Topic :: Scientific/Engineering :: Artificial Intelligence",
58 ],
59 )
60
[end of libraries/botframework-connector/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py
--- a/libraries/botframework-connector/setup.py
+++ b/libraries/botframework-connector/setup.py
@@ -11,7 +11,7 @@
"requests>=2.23.0,<2.26",
"PyJWT>=1.5.3,<2.0.0",
"botbuilder-schema==4.15.0",
- "msal==1.6.0",
+ "msal==1.17.0",
]
root = os.path.abspath(os.path.dirname(__file__))
| {"golden_diff": "diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py\n--- a/libraries/botframework-connector/setup.py\n+++ b/libraries/botframework-connector/setup.py\n@@ -11,7 +11,7 @@\n \"requests>=2.23.0,<2.26\",\n \"PyJWT>=1.5.3,<2.0.0\",\n \"botbuilder-schema==4.15.0\",\n- \"msal==1.6.0\",\n+ \"msal==1.17.0\",\n ]\n \n root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "Bump MSAL to the latest version\n**Is your feature request related to a problem? Please describe.**\r\nOld version of MSAL is used in [botframework-connector](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botframework-connector/requirements.txt#L6) (v1.6.0)\r\n\r\n**Describe the solution you'd like**\r\nUpgrade to the [latest version](https://github.com/AzureAD/microsoft-authentication-library-for-python/releases) (v1.13.0 is the latest at this moment).\r\n\r\n**Describe alternatives you've considered**\r\nNo alternatives.\r\n\r\n**Additional context**\r\nPlease also consider to not pin this dependency (#1467).\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\nREQUIRES = [\n \"msrest==0.6.19\",\n \"requests>=2.23.0,<2.26\",\n \"PyJWT>=1.5.3,<2.0.0\",\n \"botbuilder-schema==4.15.0\",\n \"msal==1.6.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.skills\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.aio.operations_async\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-connector/setup.py"}]} | 1,278 | 145 |
gh_patches_debug_7019 | rasdani/github-patches | git_diff | python__peps-2541 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change "Contents" to "Table of Contents" ? (Or delete it?)
Currently PEPs have a "mysterious" triangle pointing to the word "Contents". I don't know why, but somehow every time I see this I do a double take before I realize "oh, that's the ToC". Maybe spell "Table of Contents" in full? There should be plenty of horizontal space for that. (Not in the side bar though -- there mightn't be room for it, and there it's always expanded which provides enough context for the single word to be understood.)
Alternatively, why have this in the main body of the PEP at all when it's already in the sidebar?
(If there was a "nit" label I'd check it. :-)
</issue>
<code>
[start of pep_sphinx_extensions/pep_processor/html/pep_html_translator.py]
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from docutils import nodes
6 import sphinx.writers.html5 as html5
7
8 if TYPE_CHECKING:
9 from sphinx.builders import html
10
11
12 class PEPTranslator(html5.HTML5Translator):
13 """Custom RST -> HTML translation rules for PEPs."""
14
15 def __init__(self, document: nodes.document, builder: html.StandaloneHTMLBuilder):
16 super().__init__(document, builder)
17 self.compact_simple: bool = False
18
19 @staticmethod
20 def should_be_compact_paragraph(node: nodes.paragraph) -> bool:
21 """Check if paragraph should be compact.
22
23 Omitting <p/> tags around paragraph nodes gives visually compact lists.
24
25 """
26 # Never compact paragraphs that are children of document or compound.
27 if isinstance(node.parent, (nodes.document, nodes.compound)):
28 return False
29
30 # Check for custom attributes in paragraph.
31 for key, value in node.non_default_attributes().items():
32 # if key equals "classes", carry on
33 # if value is empty, or contains only "first", only "last", or both
34 # "first" and "last", carry on
35 # else return False
36 if any((key != "classes", not set(value) <= {"first", "last"})):
37 return False
38
39 # Only first paragraph can be compact (ignoring initial label & invisible nodes)
40 first = isinstance(node.parent[0], nodes.label)
41 visible_siblings = [child for child in node.parent.children[first:] if not isinstance(child, nodes.Invisible)]
42 if visible_siblings[0] is not node:
43 return False
44
45 # otherwise, the paragraph should be compact
46 return True
47
48 def visit_paragraph(self, node: nodes.paragraph) -> None:
49 """Remove <p> tags if possible."""
50 if self.should_be_compact_paragraph(node):
51 self.context.append("")
52 else:
53 self.body.append(self.starttag(node, "p", ""))
54 self.context.append("</p>\n")
55
56 def depart_paragraph(self, _: nodes.paragraph) -> None:
57 """Add corresponding end tag from `visit_paragraph`."""
58 self.body.append(self.context.pop())
59
60 def visit_footnote_reference(self, node):
61 self.body.append(self.starttag(node, "a", suffix="[",
62 CLASS=f"footnote-reference {self.settings.footnote_references}",
63 href=f"#{node['refid']}"
64 ))
65
66 def depart_footnote_reference(self, node):
67 self.body.append(']</a>')
68
69 def visit_label(self, node):
70 # pass parent node to get id into starttag:
71 self.body.append(self.starttag(node.parent, "dt", suffix="[", CLASS="label"))
72
73 # footnote/citation backrefs:
74 back_refs = node.parent["backrefs"]
75 if self.settings.footnote_backlinks and len(back_refs) == 1:
76 self.body.append(f'<a href="#{back_refs[0]}">')
77 self.context.append(f"</a>]")
78 else:
79 self.context.append("]")
80
81 def depart_label(self, node) -> None:
82 """PEP link/citation block cleanup with italicised backlinks."""
83 self.body.append(self.context.pop())
84 back_refs = node.parent["backrefs"]
85 if self.settings.footnote_backlinks and len(back_refs) > 1:
86 back_links = ", ".join(f"<a href='#{ref}'>{i}</a>" for i, ref in enumerate(back_refs, start=1))
87 self.body.append(f"<em> ({back_links}) </em>")
88
89 # Close the def tags
90 self.body.append("</dt>\n<dd>")
91
92 def visit_bullet_list(self, node):
93 if isinstance(node.parent, nodes.section) and "contents" in node.parent["names"]:
94 self.body.append("<details><summary>Contents</summary>")
95 self.context.append("</details>")
96 super().visit_bullet_list(node)
97
98 def depart_bullet_list(self, node):
99 super().depart_bullet_list(node)
100 if isinstance(node.parent, nodes.section) and "contents" in node.parent["names"]:
101 self.body.append(self.context.pop())
102
103 def unknown_visit(self, node: nodes.Node) -> None:
104 """No processing for unknown node types."""
105 pass
106
[end of pep_sphinx_extensions/pep_processor/html/pep_html_translator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pep_sphinx_extensions/pep_processor/html/pep_html_translator.py b/pep_sphinx_extensions/pep_processor/html/pep_html_translator.py
--- a/pep_sphinx_extensions/pep_processor/html/pep_html_translator.py
+++ b/pep_sphinx_extensions/pep_processor/html/pep_html_translator.py
@@ -91,7 +91,7 @@
def visit_bullet_list(self, node):
if isinstance(node.parent, nodes.section) and "contents" in node.parent["names"]:
- self.body.append("<details><summary>Contents</summary>")
+ self.body.append("<details><summary>Table of Contents</summary>")
self.context.append("</details>")
super().visit_bullet_list(node)
| {"golden_diff": "diff --git a/pep_sphinx_extensions/pep_processor/html/pep_html_translator.py b/pep_sphinx_extensions/pep_processor/html/pep_html_translator.py\n--- a/pep_sphinx_extensions/pep_processor/html/pep_html_translator.py\n+++ b/pep_sphinx_extensions/pep_processor/html/pep_html_translator.py\n@@ -91,7 +91,7 @@\n \n def visit_bullet_list(self, node):\n if isinstance(node.parent, nodes.section) and \"contents\" in node.parent[\"names\"]:\n- self.body.append(\"<details><summary>Contents</summary>\")\n+ self.body.append(\"<details><summary>Table of Contents</summary>\")\n self.context.append(\"</details>\")\n super().visit_bullet_list(node)\n", "issue": "Change \"Contents\" to \"Table of Contents\" ? (Or delete it?)\nCurrently PEPs have a \"mysterious\" triangle pointing to the word \"Contents\". I don't know why, but somehow every time I see this I do a double take before I realize \"oh, that's the ToC\". Maybe spell \"Table of Contents\" in full? There should be plenty of horizontal space for that. (Not in the side bar though -- there mightn't be room for it, and there it's always expanded which provides enough context for the single word to be understood.)\r\n\r\nAlternatively, why have this in the main body of the PEP at all when it's already in the sidebar?\r\n\r\n(If there was a \"nit\" label I'd check it. :-)\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom docutils import nodes\nimport sphinx.writers.html5 as html5\n\nif TYPE_CHECKING:\n from sphinx.builders import html\n\n\nclass PEPTranslator(html5.HTML5Translator):\n \"\"\"Custom RST -> HTML translation rules for PEPs.\"\"\"\n\n def __init__(self, document: nodes.document, builder: html.StandaloneHTMLBuilder):\n super().__init__(document, builder)\n self.compact_simple: bool = False\n\n @staticmethod\n def should_be_compact_paragraph(node: nodes.paragraph) -> bool:\n \"\"\"Check if paragraph should be compact.\n\n Omitting <p/> tags around paragraph nodes gives visually compact lists.\n\n \"\"\"\n # Never compact paragraphs that are children of document or compound.\n if isinstance(node.parent, (nodes.document, nodes.compound)):\n return False\n\n # Check for custom attributes in paragraph.\n for key, value in node.non_default_attributes().items():\n # if key equals \"classes\", carry on\n # if value is empty, or contains only \"first\", only \"last\", or both\n # \"first\" and \"last\", carry on\n # else return False\n if any((key != \"classes\", not set(value) <= {\"first\", \"last\"})):\n return False\n\n # Only first paragraph can be compact (ignoring initial label & invisible nodes)\n first = isinstance(node.parent[0], nodes.label)\n visible_siblings = [child for child in node.parent.children[first:] if not isinstance(child, nodes.Invisible)]\n if visible_siblings[0] is not node:\n return False\n\n # otherwise, the paragraph should be compact\n return True\n\n def visit_paragraph(self, node: nodes.paragraph) -> None:\n \"\"\"Remove <p> tags if possible.\"\"\"\n if self.should_be_compact_paragraph(node):\n self.context.append(\"\")\n else:\n self.body.append(self.starttag(node, \"p\", \"\"))\n self.context.append(\"</p>\\n\")\n\n def depart_paragraph(self, _: nodes.paragraph) -> None:\n \"\"\"Add corresponding end tag from `visit_paragraph`.\"\"\"\n self.body.append(self.context.pop())\n\n def visit_footnote_reference(self, node):\n self.body.append(self.starttag(node, \"a\", suffix=\"[\",\n CLASS=f\"footnote-reference 
{self.settings.footnote_references}\",\n href=f\"#{node['refid']}\"\n ))\n\n def depart_footnote_reference(self, node):\n self.body.append(']</a>')\n\n def visit_label(self, node):\n # pass parent node to get id into starttag:\n self.body.append(self.starttag(node.parent, \"dt\", suffix=\"[\", CLASS=\"label\"))\n\n # footnote/citation backrefs:\n back_refs = node.parent[\"backrefs\"]\n if self.settings.footnote_backlinks and len(back_refs) == 1:\n self.body.append(f'<a href=\"#{back_refs[0]}\">')\n self.context.append(f\"</a>]\")\n else:\n self.context.append(\"]\")\n\n def depart_label(self, node) -> None:\n \"\"\"PEP link/citation block cleanup with italicised backlinks.\"\"\"\n self.body.append(self.context.pop())\n back_refs = node.parent[\"backrefs\"]\n if self.settings.footnote_backlinks and len(back_refs) > 1:\n back_links = \", \".join(f\"<a href='#{ref}'>{i}</a>\" for i, ref in enumerate(back_refs, start=1))\n self.body.append(f\"<em> ({back_links}) </em>\")\n\n # Close the def tags\n self.body.append(\"</dt>\\n<dd>\")\n\n def visit_bullet_list(self, node):\n if isinstance(node.parent, nodes.section) and \"contents\" in node.parent[\"names\"]:\n self.body.append(\"<details><summary>Contents</summary>\")\n self.context.append(\"</details>\")\n super().visit_bullet_list(node)\n\n def depart_bullet_list(self, node):\n super().depart_bullet_list(node)\n if isinstance(node.parent, nodes.section) and \"contents\" in node.parent[\"names\"]:\n self.body.append(self.context.pop())\n\n def unknown_visit(self, node: nodes.Node) -> None:\n \"\"\"No processing for unknown node types.\"\"\"\n pass\n", "path": "pep_sphinx_extensions/pep_processor/html/pep_html_translator.py"}]} | 1,849 | 171 |
gh_patches_debug_26485 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1405 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AWS::Events::Rule ScheduleExpression: "cron(* 1 * * * *)"
*cfn-lint version: (cfn-lint 0.27.5)*
*Description of issue.*
```
EventRule:
Type: "AWS::Events::Rule"
Properties:
ScheduleExpression: "cron(* 1 * * * *)"
State: "ENABLED"
Targets:
- Arn: !Ref Foo
Id: "Foo"
RoleArn: !GetAtt FooArn.Arn
```
The check should probably live in:
[src/cfnlint/rules/resources/events/RuleScheduleExpression.py](https://github.com/aws-cloudformation/cfn-python-lint/blob/master/src/cfnlint/rules/resources/events/RuleScheduleExpression.py)
The above `ScheduleExpression` is invalid: AWS requires `?` in either the Day-of-month or Day-of-week field (and here a concrete minute value was intended rather than `*`). A valid example is `cron(0 1 * * ? *)`.
---
[Schedule Expressions for Rules documentation](https://docs.aws.amazon.com/eventbridge/latest/userguide/scheduled-events.html)
</issue>
<code>
[start of src/cfnlint/rules/resources/events/RuleScheduleExpression.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.rules import CloudFormationLintRule
6 from cfnlint.rules import RuleMatch
7
8
9 class RuleScheduleExpression(CloudFormationLintRule):
10 """Validate AWS Events Schedule expression format"""
11 id = 'E3027'
12 shortdesc = 'Validate AWS Event ScheduleExpression format'
13 description = 'Validate the formation of the AWS::Event ScheduleExpression'
14 source_url = 'https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html'
15 tags = ['resources', 'events']
16
17 def initialize(self, cfn):
18 """Initialize the rule"""
19 self.resource_property_types = ['AWS::Events::Rule']
20
21 def check_rate(self, value, path):
22 """Check Rate configuration"""
23 matches = []
24 # Extract the expression from rate(XXX)
25 rate_expression = value[value.find('(')+1:value.find(')')]
26
27 if not rate_expression:
28 matches.append(RuleMatch(path, 'Rate value of ScheduleExpression cannot be empty'))
29 else:
30 # Rate format: rate(Value Unit)
31 items = rate_expression.split(' ')
32
33 if len(items) != 2:
34 message = 'Rate expression must contain 2 elements (Value Unit), rate contains {} elements'
35 matches.append(RuleMatch(path, message.format(len(items))))
36 else:
37 # Check the Value
38 if not items[0].isdigit():
39 message = 'Rate Value ({}) should be of type Integer.'
40 extra_args = {'actual_type': type(items[0]).__name__, 'expected_type': int.__name__}
41 matches.append(RuleMatch(path, message.format(items[0]), **extra_args))
42
43 return matches
44
45 def check_cron(self, value, path):
46 """Check Cron configuration"""
47 matches = []
48 # Extract the expression from cron(XXX)
49 cron_expression = value[value.find('(')+1:value.find(')')]
50
51 if not cron_expression:
52 matches.append(RuleMatch(path, 'Cron value of ScheduleExpression cannot be empty'))
53 else:
54             # Cron format: cron(Minutes Hours Day-of-month Month Day-of-week Year)
55 items = cron_expression.split(' ')
56
57 if len(items) != 6:
58 message = 'Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements'
59 matches.append(RuleMatch(path, message.format(len(items))))
60
61 return matches
62
63 def check_value(self, value, path):
64 """Count ScheduledExpression value"""
65 matches = []
66
67 # Value is either "cron()" or "rate()"
68 if value.startswith('rate(') and value.endswith(')'):
69 matches.extend(self.check_rate(value, path))
70 elif value.startswith('cron(') and value.endswith(')'):
71 matches.extend(self.check_cron(value, path))
72 else:
73 message = 'Invalid ScheduledExpression specified ({}). Value has to be either cron() or rate()'
74 matches.append(RuleMatch(path, message.format(value)))
75
76 return matches
77
78 def match_resource_properties(self, properties, _, path, cfn):
79 """Check CloudFormation Properties"""
80 matches = []
81
82 matches.extend(
83 cfn.check_value(
84 obj=properties, key='ScheduleExpression',
85 path=path[:],
86 check_value=self.check_value
87 ))
88
89 return matches
90
[end of src/cfnlint/rules/resources/events/RuleScheduleExpression.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
--- a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
+++ b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
@@ -37,7 +37,8 @@
# Check the Value
if not items[0].isdigit():
message = 'Rate Value ({}) should be of type Integer.'
- extra_args = {'actual_type': type(items[0]).__name__, 'expected_type': int.__name__}
+ extra_args = {'actual_type': type(
+ items[0]).__name__, 'expected_type': int.__name__}
matches.append(RuleMatch(path, message.format(items[0]), **extra_args))
return matches
@@ -57,6 +58,12 @@
if len(items) != 6:
message = 'Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements'
matches.append(RuleMatch(path, message.format(len(items))))
+ return matches
+
+ _, _, day_of_month, _, day_of_week, _ = cron_expression.split(' ')
+ if day_of_month != '?' and day_of_week != '?':
+ matches.append(RuleMatch(
+ path, 'Don\'t specify the Day-of-month and Day-of-week fields in the same cron expression'))
return matches
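
To illustrate the added rule outside cfn-lint, here is a minimal, self-contained sketch of the same check; the function name and message format are illustrative, not cfn-lint's actual API:

```python
def validate_cron_fields(expression):
    """Sketch of the Day-of-month / Day-of-week rule added in the patch."""
    inner = expression[expression.find('(') + 1:expression.find(')')]
    fields = inner.split(' ')
    if len(fields) != 6:
        return ['cron expression must contain 6 fields, got {}'.format(len(fields))]
    _, _, day_of_month, _, day_of_week, _ = fields
    if day_of_month != '?' and day_of_week != '?':
        return ["Don't specify the Day-of-month and Day-of-week fields "
                'in the same cron expression']
    return []

print(validate_cron_fields('cron(* 1 * * * *)'))  # flagged: both fields are '*'
print(validate_cron_fields('cron(0 1 * * ? *)'))  # []: accepted
```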
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n--- a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n+++ b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n@@ -37,7 +37,8 @@\n # Check the Value\n if not items[0].isdigit():\n message = 'Rate Value ({}) should be of type Integer.'\n- extra_args = {'actual_type': type(items[0]).__name__, 'expected_type': int.__name__}\n+ extra_args = {'actual_type': type(\n+ items[0]).__name__, 'expected_type': int.__name__}\n matches.append(RuleMatch(path, message.format(items[0]), **extra_args))\n \n return matches\n@@ -57,6 +58,12 @@\n if len(items) != 6:\n message = 'Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements'\n matches.append(RuleMatch(path, message.format(len(items))))\n+ return matches\n+\n+ _, _, day_of_month, _, day_of_week, _ = cron_expression.split(' ')\n+ if day_of_month != '?' and day_of_week != '?':\n+ matches.append(RuleMatch(\n+ path, 'Don\\'t specify the Day-of-month and Day-of-week fields in the same cron expression'))\n \n return matches\n", "issue": "AWS::Events::Rule ScheduleExpression: \"cron(* 1 * * * *)\" \n*cfn-lint version: (cfn-lint 0.27.5)*\r\n\r\n*Description of issue.*\r\n\r\n```\r\n EventRule:\r\n Type: \"AWS::Events::Rule\"\r\n Properties:\r\n ScheduleExpression: \"cron(* 1 * * * *)\" \r\n State: \"ENABLED\"\r\n Targets:\r\n - Arn: !Ref Foo\r\n Id: \"Foo\"\r\n RoleArn: !GetAtt FooArn.Arn\r\n```\r\n\r\nCheck should be probably in:\r\n\r\n[src/cfnlint/rules/resources/events/RuleScheduleExpression.py](https://github.com/aws-cloudformation/cfn-python-lint/blob/master/src/cfnlint/rules/resources/events/RuleScheduleExpression.py)\r\n\r\nThe above `ScheduleExpression` is invalid (need a value for minute if hour is set). For example `cron(0 1 * * ? *)`\r\n\r\n---\r\n\r\n[Schedule Expressions for Rules documentation](https://docs.aws.amazon.com/eventbridge/latest/userguide/scheduled-events.html)\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass RuleScheduleExpression(CloudFormationLintRule):\n \"\"\"Validate AWS Events Schedule expression format\"\"\"\n id = 'E3027'\n shortdesc = 'Validate AWS Event ScheduleExpression format'\n description = 'Validate the formation of the AWS::Event ScheduleExpression'\n source_url = 'https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html'\n tags = ['resources', 'events']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n self.resource_property_types = ['AWS::Events::Rule']\n\n def check_rate(self, value, path):\n \"\"\"Check Rate configuration\"\"\"\n matches = []\n # Extract the expression from rate(XXX)\n rate_expression = value[value.find('(')+1:value.find(')')]\n\n if not rate_expression:\n matches.append(RuleMatch(path, 'Rate value of ScheduleExpression cannot be empty'))\n else:\n # Rate format: rate(Value Unit)\n items = rate_expression.split(' ')\n\n if len(items) != 2:\n message = 'Rate expression must contain 2 elements (Value Unit), rate contains {} elements'\n matches.append(RuleMatch(path, message.format(len(items))))\n else:\n # Check the Value\n if not items[0].isdigit():\n message = 'Rate Value ({}) should be of type Integer.'\n extra_args = {'actual_type': type(items[0]).__name__, 'expected_type': int.__name__}\n matches.append(RuleMatch(path, message.format(items[0]), **extra_args))\n\n return matches\n\n def check_cron(self, value, path):\n \"\"\"Check Cron configuration\"\"\"\n matches = []\n # Extract the expression from cron(XXX)\n cron_expression = value[value.find('(')+1:value.find(')')]\n\n if not cron_expression:\n matches.append(RuleMatch(path, 'Cron value of ScheduleExpression cannot be empty'))\n else:\n # Rate format: cron(Minutes Hours Day-of-month Month Day-of-week Year)\n items = cron_expression.split(' ')\n\n if len(items) != 6:\n message = 'Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements'\n matches.append(RuleMatch(path, message.format(len(items))))\n\n return matches\n\n def check_value(self, value, path):\n \"\"\"Count ScheduledExpression value\"\"\"\n matches = []\n\n # Value is either \"cron()\" or \"rate()\"\n if value.startswith('rate(') and value.endswith(')'):\n matches.extend(self.check_rate(value, path))\n elif value.startswith('cron(') and value.endswith(')'):\n matches.extend(self.check_cron(value, path))\n else:\n message = 'Invalid ScheduledExpression specified ({}). Value has to be either cron() or rate()'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties, key='ScheduleExpression',\n path=path[:],\n check_value=self.check_value\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/events/RuleScheduleExpression.py"}]} | 1,674 | 322 |
gh_patches_debug_20115 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-1473 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
policies where Resource=user-pool results in error=Missing required parameter in input: "MaxResults"
A policy based on the `user-pool` resource results in a missing required parameter error for `MaxResults`:
```
policies:
- name: RequiredTagsAbsentCognitoUserPool
resource: user-pool
description: |
Notify if Required tags Absent
filters:
- "tag:OwnerEmail": absent
```
2017-08-03 22:49:43,321: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/c7n/policy.py", line 306, in run
resources = self.policy.resource_manager.resources()
File "/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/c7n/query.py", line 292, in resources
resources = self.augment(self.source.resources(query))
File "/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/c7n/query.py", line 154, in resources
resources = self.query.filter(self.manager.resource_type, **query)
File "/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/c7n/query.py", line 67, in filter
data = op(**params)
File "/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/botocore/client.py", line 310, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/botocore/client.py", line 573, in _make_api_call
api_params, operation_model, context=request_context)
File "/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/botocore/client.py", line 628, in _convert_to_request_dict
api_params, operation_model)
File "/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/botocore/validate.py", line 291, in serialize_to_request
raise ParamValidationError(report=report.generate_report())
ParamValidationError: Parameter validation failed:
Missing required parameter in input: "MaxResults"
</issue>
<code>
[start of c7n/resources/cognito.py]
1 # Copyright 2016 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 from c7n.manager import resources
17 from c7n.query import QueryResourceManager
18
19
20 @resources.register('identity-pool')
21 class CognitoIdentityPool(QueryResourceManager):
22
23 class resource_type(object):
24 service = 'cognito-identity'
25 enum_spec = ('list_identity_pools', 'IdentityPools', None)
26 detail_spec = (
27 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId')
28 id = 'IdentityPoolId'
29 name = 'IdentityPoolName'
30 filter_name = None
31 dimension = None
32
33
34 @resources.register('user-pool')
35 class CognitoUserPool(QueryResourceManager):
36
37 class resource_type(object):
38 service = "cognito-idp"
39 enum_spec = ('list_user_pools', 'UserPools', None)
40 detail_spec = (
41 'describe_user_pool', 'UserPoolId', 'Id', 'UserPool')
42 id = 'Id'
43 name = 'Name'
44 filter_name = None
45 dimension = None
46
[end of c7n/resources/cognito.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/c7n/resources/cognito.py b/c7n/resources/cognito.py
--- a/c7n/resources/cognito.py
+++ b/c7n/resources/cognito.py
@@ -22,9 +22,9 @@
class resource_type(object):
service = 'cognito-identity'
- enum_spec = ('list_identity_pools', 'IdentityPools', None)
+ enum_spec = ('list_identity_pools', 'IdentityPools', {'MaxResults': 60})
detail_spec = (
- 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId')
+ 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId', None)
id = 'IdentityPoolId'
name = 'IdentityPoolName'
filter_name = None
@@ -36,7 +36,7 @@
class resource_type(object):
service = "cognito-idp"
- enum_spec = ('list_user_pools', 'UserPools', None)
+ enum_spec = ('list_user_pools', 'UserPools', {'MaxResults': 60})
detail_spec = (
'describe_user_pool', 'UserPoolId', 'Id', 'UserPool')
id = 'Id'
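
The underlying cause is that Cognito's `ListUserPools` and `ListIdentityPools` API operations require `MaxResults`, so the enumeration call has to supply it. Assuming configured AWS credentials and region, the failure and the fix can be reproduced directly with boto3:

```python
import boto3

client = boto3.client('cognito-idp')

# Reproduces the traceback: MaxResults is required by the API.
# client.list_user_pools()

# What the patched enum_spec effectively sends; 60 mirrors the value in the fix.
pools = client.list_user_pools(MaxResults=60)['UserPools']
print([pool['Name'] for pool in pools])
```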
| {"golden_diff": "diff --git a/c7n/resources/cognito.py b/c7n/resources/cognito.py\n--- a/c7n/resources/cognito.py\n+++ b/c7n/resources/cognito.py\n@@ -22,9 +22,9 @@\n \n class resource_type(object):\n service = 'cognito-identity'\n- enum_spec = ('list_identity_pools', 'IdentityPools', None)\n+ enum_spec = ('list_identity_pools', 'IdentityPools', {'MaxResults': 60})\n detail_spec = (\n- 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId')\n+ 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId', None)\n id = 'IdentityPoolId'\n name = 'IdentityPoolName'\n filter_name = None\n@@ -36,7 +36,7 @@\n \n class resource_type(object):\n service = \"cognito-idp\"\n- enum_spec = ('list_user_pools', 'UserPools', None)\n+ enum_spec = ('list_user_pools', 'UserPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_user_pool', 'UserPoolId', 'Id', 'UserPool')\n id = 'Id'\n", "issue": "policies where Resource=user-pool results in error=Missing required parameter in input: \"MaxResults\" \nA user-pool based resource policy results in an error for missing required parameter=MaxResults\r\n```\r\npolicies:\r\n - name: RequiredTagsAbsentCognitoUserPool\r\n resource: user-pool\r\n description: |\r\n Notify if Required tags Absent\r\n filters:\r\n - \"tag:OwnerEmail\": absent\r\n```\r\n2017-08-03 22:49:43,321: custodian.output:ERROR Error while executing policy\r\nTraceback (most recent call last):\r\n File \"/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/c7n/policy.py\", line 306, in run\r\n resources = self.policy.resource_manager.resources()\r\n File \"/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/c7n/query.py\", line 292, in resources\r\n resources = self.augment(self.source.resources(query))\r\n File \"/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/c7n/query.py\", line 154, in resources\r\n resources = self.query.filter(self.manager.resource_type, **query)\r\n File \"/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/c7n/query.py\", line 67, in filter\r\n data = op(**params)\r\n File \"/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/botocore/client.py\", line 310, in _api_call\r\n return self._make_api_call(operation_name, kwargs)\r\n File \"/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/botocore/client.py\", line 573, in _make_api_call\r\n api_params, operation_model, context=request_context)\r\n File \"/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/botocore/client.py\", line 628, in _convert_to_request_dict\r\n api_params, operation_model)\r\n File \"/mnt/home2/opensrc/cloud-custodian-public-release/.tox/py27/local/lib/python2.7/site-packages/botocore/validate.py\", line 291, in serialize_to_request\r\n raise ParamValidationError(report=report.generate_report())\r\nParamValidationError: Parameter validation failed:\r\nMissing required parameter in input: \"MaxResults\"\n", "before_files": [{"content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the 
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\n\n\[email protected]('identity-pool')\nclass CognitoIdentityPool(QueryResourceManager):\n\n class resource_type(object):\n service = 'cognito-identity'\n enum_spec = ('list_identity_pools', 'IdentityPools', None)\n detail_spec = (\n 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId')\n id = 'IdentityPoolId'\n name = 'IdentityPoolName'\n filter_name = None\n dimension = None\n\n\[email protected]('user-pool')\nclass CognitoUserPool(QueryResourceManager):\n\n class resource_type(object):\n service = \"cognito-idp\"\n enum_spec = ('list_user_pools', 'UserPools', None)\n detail_spec = (\n 'describe_user_pool', 'UserPoolId', 'Id', 'UserPool')\n id = 'Id'\n name = 'Name'\n filter_name = None\n dimension = None\n", "path": "c7n/resources/cognito.py"}]} | 1,586 | 273 |
gh_patches_debug_16900 | rasdani/github-patches | git_diff | beeware__toga-867 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
On Linux, window contents are rendered behind a menubar
This happens only if the menubar wasn't defined in the application code. Here's a slightly modified [example](https://toga.readthedocs.io/en/latest/tutorial/tutorial-0.html) that I'm running:
```py
import toga
def build(app):
box = toga.Box()
button = toga.Button('Hello world', on_press=lambda _: print("hello"))
box.add(button)
return box
if __name__ == '__main__':
app = toga.App('First App', 'org.pybee.sample', startup=build)
app.main_loop()
```
Here's how it looks to me:

Note that the button is behind the menubar (although no menubar was requested in the code). When I click on the menubar (including the Application or Help menu items), the button is being pressed instead.
I've tried this with a few GTK themes, including Arc-OSX-dark, Mint-X, Redmond and Adwaita, and in every case it behaves this way.
</issue>
<code>
[start of src/gtk/toga_gtk/widgets/box.py]
1 from ..libs import Gtk, Gdk
2 from .base import Widget
3
4
5 class TogaBox(Gtk.Fixed):
6 def __init__(self, impl):
7 super().__init__()
8 self._impl = impl
9 self.interface = self._impl.interface
10
11 def do_get_preferred_width(self):
12 # Calculate the minimum and natural width of the container.
13 # print("GET PREFERRED WIDTH", self._impl.native)
14 width = self._impl.interface.layout.width
15 min_width = self._impl.min_width
16 if min_width is None:
17 min_width = 0
18 elif min_width > width:
19 width = min_width
20
21 # print(min_width, width)
22 return min_width, width
23
24 def do_get_preferred_height(self):
25 # Calculate the minimum and natural height of the container.
26 # height = self._impl.layout.height
27 # print("GET PREFERRED HEIGHT", self._impl.native)
28 height = self._impl.interface.layout.height
29 min_height = self._impl.min_height
30 if min_height is None:
31 min_height = 0
32 elif min_height > height:
33 height = min_height
34 # print(min_height, height)
35 return min_height, height
36
37 def do_size_allocate(self, allocation):
38 # print(self._impl, "Container layout", allocation.width, 'x', allocation.height, ' @ ', allocation.x, 'x', allocation.y)
39 if self._impl.viewport is not None:
40 self.set_allocation(allocation)
41 self.interface.refresh()
42
43 # WARNING! This list of children is *not* the same
44 # as the list provided by the interface!
45 # For GTK's layout purposes, all widgets in the tree
46 # are children of the *container* - that is, the impl
47 # object of the root object in the tree of widgets.
48 for widget in self.get_children():
49 if not widget.get_visible():
50 # print("CHILD NOT VISIBLE", widget.interface)
51 pass
52 else:
53 # print("update ", widget.interface, widget.interface.layout)
54 widget.interface._impl.rehint()
55 widget_allocation = Gdk.Rectangle()
56 widget_allocation.x = widget.interface.layout.absolute_content_left
57 widget_allocation.y = widget.interface.layout.absolute_content_top
58 widget_allocation.width = widget.interface.layout.content_width
59 widget_allocation.height = widget.interface.layout.content_height
60
61 widget.size_allocate(widget_allocation)
62
63
64 class Box(Widget):
65 def create(self):
66 self.min_width = None
67 self.min_height = None
68 self.native = TogaBox(self)
69
[end of src/gtk/toga_gtk/widgets/box.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/gtk/toga_gtk/widgets/box.py b/src/gtk/toga_gtk/widgets/box.py
--- a/src/gtk/toga_gtk/widgets/box.py
+++ b/src/gtk/toga_gtk/widgets/box.py
@@ -53,8 +53,8 @@
# print("update ", widget.interface, widget.interface.layout)
widget.interface._impl.rehint()
widget_allocation = Gdk.Rectangle()
- widget_allocation.x = widget.interface.layout.absolute_content_left
- widget_allocation.y = widget.interface.layout.absolute_content_top
+ widget_allocation.x = widget.interface.layout.absolute_content_left + allocation.x
+ widget_allocation.y = widget.interface.layout.absolute_content_top + allocation.y
widget_allocation.width = widget.interface.layout.content_width
widget_allocation.height = widget.interface.layout.content_height
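
The coordinate bug behind the symptom: children of the `Gtk.Fixed` container are positioned with Toga's container-relative layout values, but they must be allocated in the same frame as the container's own allocation. When the container sits below a menubar its origin is non-zero, so that origin has to be added back. A minimal sketch of the corrected translation (names follow the code above; `gi` version setup is elided):

```python
from gi.repository import Gdk

def child_rect(layout, parent_allocation):
    """Translate container-relative layout values into the parent's frame."""
    rect = Gdk.Rectangle()
    rect.x = layout.absolute_content_left + parent_allocation.x  # usually 0
    rect.y = layout.absolute_content_top + parent_allocation.y   # e.g. menubar height
    rect.width = layout.content_width
    rect.height = layout.content_height
    return rect
```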
| {"golden_diff": "diff --git a/src/gtk/toga_gtk/widgets/box.py b/src/gtk/toga_gtk/widgets/box.py\n--- a/src/gtk/toga_gtk/widgets/box.py\n+++ b/src/gtk/toga_gtk/widgets/box.py\n@@ -53,8 +53,8 @@\n # print(\"update \", widget.interface, widget.interface.layout)\n widget.interface._impl.rehint()\n widget_allocation = Gdk.Rectangle()\n- widget_allocation.x = widget.interface.layout.absolute_content_left\n- widget_allocation.y = widget.interface.layout.absolute_content_top\n+ widget_allocation.x = widget.interface.layout.absolute_content_left + allocation.x\n+ widget_allocation.y = widget.interface.layout.absolute_content_top + allocation.y\n widget_allocation.width = widget.interface.layout.content_width\n widget_allocation.height = widget.interface.layout.content_height\n", "issue": "On Linux, window contents are rendered behind a menubar\nBut only if the menubar wasn't defined in the application code. Here's a slightly modified [example code](https://toga.readthedocs.io/en/latest/tutorial/tutorial-0.html) I'm running:\r\n\r\n```py\r\nimport toga\r\n\r\ndef build(app):\r\n box = toga.Box()\r\n button = toga.Button('Hello world', on_press=lambda _: print(\"hello\"))\r\n box.add(button)\r\n return box\r\n\r\nif __name__ == '__main__':\r\n app = toga.App('First App', 'org.pybee.sample', startup=build)\r\n app.main_loop()\r\n```\r\n\r\nHere's how it looks like to me:\r\n\r\n\r\n\r\nNote that the button is behind the menubar (although no menubar was requested in the code). When I click on the menubar (including the Application or Help menu items), the button is being pressed instead.\r\n\r\nI've tried this with a few GTK themes, including Arc-OSX-dark, Mint-X, Redmond and Adwaita, and in every case it behaves this way.\r\n\n", "before_files": [{"content": "from ..libs import Gtk, Gdk\nfrom .base import Widget\n\n\nclass TogaBox(Gtk.Fixed):\n def __init__(self, impl):\n super().__init__()\n self._impl = impl\n self.interface = self._impl.interface\n\n def do_get_preferred_width(self):\n # Calculate the minimum and natural width of the container.\n # print(\"GET PREFERRED WIDTH\", self._impl.native)\n width = self._impl.interface.layout.width\n min_width = self._impl.min_width\n if min_width is None:\n min_width = 0\n elif min_width > width:\n width = min_width\n\n # print(min_width, width)\n return min_width, width\n\n def do_get_preferred_height(self):\n # Calculate the minimum and natural height of the container.\n # height = self._impl.layout.height\n # print(\"GET PREFERRED HEIGHT\", self._impl.native)\n height = self._impl.interface.layout.height\n min_height = self._impl.min_height\n if min_height is None:\n min_height = 0\n elif min_height > height:\n height = min_height\n # print(min_height, height)\n return min_height, height\n\n def do_size_allocate(self, allocation):\n # print(self._impl, \"Container layout\", allocation.width, 'x', allocation.height, ' @ ', allocation.x, 'x', allocation.y)\n if self._impl.viewport is not None:\n self.set_allocation(allocation)\n self.interface.refresh()\n\n # WARNING! 
This list of children is *not* the same\n # as the list provided by the interface!\n # For GTK's layout purposes, all widgets in the tree\n # are children of the *container* - that is, the impl\n # object of the root object in the tree of widgets.\n for widget in self.get_children():\n if not widget.get_visible():\n # print(\"CHILD NOT VISIBLE\", widget.interface)\n pass\n else:\n # print(\"update \", widget.interface, widget.interface.layout)\n widget.interface._impl.rehint()\n widget_allocation = Gdk.Rectangle()\n widget_allocation.x = widget.interface.layout.absolute_content_left\n widget_allocation.y = widget.interface.layout.absolute_content_top\n widget_allocation.width = widget.interface.layout.content_width\n widget_allocation.height = widget.interface.layout.content_height\n\n widget.size_allocate(widget_allocation)\n\n\nclass Box(Widget):\n def create(self):\n self.min_width = None\n self.min_height = None\n self.native = TogaBox(self)\n", "path": "src/gtk/toga_gtk/widgets/box.py"}]} | 1,506 | 177 |
gh_patches_debug_6488 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-1048 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Send an email on feedback record creation
- Related: https://github.com/makinacorpus/Geotrek-rando/issues/132
- Send to managers
- Import the send-email test from Geotrek-rando
</issue>
<code>
[start of geotrek/feedback/models.py]
1 import logging
2
3 from django.conf import settings
4 from django.db import models
5 from django.contrib.gis.db import models as gis_models
6 from django.contrib.contenttypes.generic import GenericForeignKey
7 from django.contrib.contenttypes.models import ContentType
8 from django.utils.translation import ugettext_lazy as _
9 from django.db.models.signals import post_save
10 from django.dispatch import receiver
11 from mapentity.models import MapEntityMixin
12
13 from geotrek.common.models import TimeStampedModel
14
15 from .helpers import send_report_managers
16
17
18 logger = logging.getLogger(__name__)
19
20
21 class Report(MapEntityMixin, TimeStampedModel):
22 """ User reports, mainly submitted via *Geotrek-rando*.
23 """
24 name = models.CharField(verbose_name=_(u"Name"), max_length=256)
25 email = models.EmailField(verbose_name=_(u"Email"))
26 comment = models.TextField(blank=True,
27 default="",
28 verbose_name=_(u"Comment"))
29 category = models.ForeignKey('ReportCategory',
30 null=True,
31 blank=True,
32 default=None,
33 verbose_name=_(u"Category"))
34 geom = gis_models.PointField(null=True,
35 blank=True,
36 default=None,
37 verbose_name=_(u"Location"),
38 srid=settings.SRID)
39 context_content_type = models.ForeignKey(ContentType,
40 null=True,
41 blank=True,
42 editable=False)
43 context_object_id = models.PositiveIntegerField(null=True,
44 blank=True,
45 editable=False)
46 context_object = GenericForeignKey('context_content_type',
47 'context_object_id')
48
49 objects = gis_models.GeoManager()
50
51 class Meta:
52 db_table = 'f_t_signalement'
53 verbose_name = _(u"Report")
54 verbose_name_plural = _(u"Reports")
55 ordering = ['-date_insert']
56
57 def __unicode__(self):
58 return self.name
59
60 @property
61 def name_display(self):
62 return u'<a data-pk="%s" href="%s" title="%s" >%s</a>' % (self.pk,
63 self.get_detail_url(),
64 self,
65 self)
66
67
68 @receiver(post_save, sender=Report, dispatch_uid="on_report_created")
69 def on_report_created(sender, instance, created, **kwargs):
70 """ Send an email to managers when a report is created.
71 """
72 try:
73 send_report_managers(instance)
74 except Exception as e:
75 logger.error('Email could not be sent to managers.')
76 logger.exception(e) # This sends an email to admins :)
77
78
79 class ReportCategory(models.Model):
80 category = models.CharField(verbose_name=_(u"Category"),
81 max_length=128)
82
83 class Meta:
84 db_table = 'f_b_categorie'
85 verbose_name = _(u"Category")
86 verbose_name_plural = _(u"Categories")
87
88 def __unicode__(self):
89 return self.category
90
[end of geotrek/feedback/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geotrek/feedback/models.py b/geotrek/feedback/models.py
--- a/geotrek/feedback/models.py
+++ b/geotrek/feedback/models.py
@@ -66,9 +66,11 @@
@receiver(post_save, sender=Report, dispatch_uid="on_report_created")
-def on_report_created(sender, instance, created, **kwargs):
+def on_report_saved(sender, instance, created, **kwargs):
""" Send an email to managers when a report is created.
"""
+ if not created:
+ return
try:
send_report_managers(instance)
except Exception as e:
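
Django fires `post_save` on every save, inserts and updates alike, and distinguishes the two via the `created` flag; without the early return, managers would be re-notified on every edit. A sketch of the resulting behavior with the `Report` model above (field values are illustrative):

```python
report = Report.objects.create(name='Jane Doe', email='[email protected]')
# post_save runs with created=True -> send_report_managers(report) is called

report.comment = 'Broken signpost at the trailhead'
report.save()
# post_save runs again with created=False -> the new guard returns early
```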
| {"golden_diff": "diff --git a/geotrek/feedback/models.py b/geotrek/feedback/models.py\n--- a/geotrek/feedback/models.py\n+++ b/geotrek/feedback/models.py\n@@ -66,9 +66,11 @@\n \n \n @receiver(post_save, sender=Report, dispatch_uid=\"on_report_created\")\n-def on_report_created(sender, instance, created, **kwargs):\n+def on_report_saved(sender, instance, created, **kwargs):\n \"\"\" Send an email to managers when a report is created.\n \"\"\"\n+ if not created:\n+ return\n try:\n send_report_managers(instance)\n except Exception as e:\n", "issue": "Send an email on feeback record creation\n- rel https://github.com/makinacorpus/Geotrek-rando/issues/132\n- Send to managers\n- Import test send email from Geotrek-rando\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.contrib.gis.db import models as gis_models\nfrom django.contrib.contenttypes.generic import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom mapentity.models import MapEntityMixin\n\nfrom geotrek.common.models import TimeStampedModel\n\nfrom .helpers import send_report_managers\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Report(MapEntityMixin, TimeStampedModel):\n \"\"\" User reports, mainly submitted via *Geotrek-rando*.\n \"\"\"\n name = models.CharField(verbose_name=_(u\"Name\"), max_length=256)\n email = models.EmailField(verbose_name=_(u\"Email\"))\n comment = models.TextField(blank=True,\n default=\"\",\n verbose_name=_(u\"Comment\"))\n category = models.ForeignKey('ReportCategory',\n null=True,\n blank=True,\n default=None,\n verbose_name=_(u\"Category\"))\n geom = gis_models.PointField(null=True,\n blank=True,\n default=None,\n verbose_name=_(u\"Location\"),\n srid=settings.SRID)\n context_content_type = models.ForeignKey(ContentType,\n null=True,\n blank=True,\n editable=False)\n context_object_id = models.PositiveIntegerField(null=True,\n blank=True,\n editable=False)\n context_object = GenericForeignKey('context_content_type',\n 'context_object_id')\n\n objects = gis_models.GeoManager()\n\n class Meta:\n db_table = 'f_t_signalement'\n verbose_name = _(u\"Report\")\n verbose_name_plural = _(u\"Reports\")\n ordering = ['-date_insert']\n\n def __unicode__(self):\n return self.name\n\n @property\n def name_display(self):\n return u'<a data-pk=\"%s\" href=\"%s\" title=\"%s\" >%s</a>' % (self.pk,\n self.get_detail_url(),\n self,\n self)\n\n\n@receiver(post_save, sender=Report, dispatch_uid=\"on_report_created\")\ndef on_report_created(sender, instance, created, **kwargs):\n \"\"\" Send an email to managers when a report is created.\n \"\"\"\n try:\n send_report_managers(instance)\n except Exception as e:\n logger.error('Email could not be sent to managers.')\n logger.exception(e) # This sends an email to admins :)\n\n\nclass ReportCategory(models.Model):\n category = models.CharField(verbose_name=_(u\"Category\"),\n max_length=128)\n\n class Meta:\n db_table = 'f_b_categorie'\n verbose_name = _(u\"Category\")\n verbose_name_plural = _(u\"Categories\")\n\n def __unicode__(self):\n return self.category\n", "path": "geotrek/feedback/models.py"}]} | 1,363 | 139 |
gh_patches_debug_10515 | rasdani/github-patches | git_diff | bokeh__bokeh-4686 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Draw Legend After WebGL glyphs
<img width="906" alt="screen shot 2016-06-24 at 6 50 29 pm" src="https://cloud.githubusercontent.com/assets/433221/16357521/c6f00a1c-3abe-11e6-8835-0e4bb17550d4.png">
</issue>
<code>
[start of examples/webgl/iris_blend.py]
1 """ The iris dataset, drawn twice with semi-transparent markers. This is
2 an interesting use-case to test blending, because several samples
3 overlap, and by drawing the set twice with different colors, we produce
4 even more interesting blending. Also note how this makes use of
5 different ways to specify (css) colors. This example is a good reference
6 to test WebGL blending.
7
8 """
9
10 from bokeh.plotting import figure, show, output_file
11 from bokeh.sampledata.iris import flowers
12
13 colormap1 = {'setosa': 'rgb(255, 0, 0)',
14 'versicolor': 'rgb(0, 255, 0)',
15 'virginica': 'rgb(0, 0, 255)'}
16 colors1 = [colormap1[x] for x in flowers['species']]
17
18 colormap2 = {'setosa': '#0f0', 'versicolor': '#0f0', 'virginica': '#f00'}
19 colors2 = [colormap2[x] for x in flowers['species']]
20
21 p = figure(title = "Iris Morphology", webgl=True)
22 p.xaxis.axis_label = 'Petal Length'
23 p.yaxis.axis_label = 'Petal Width'
24
25 p.diamond(flowers["petal_length"], flowers["petal_width"],
26 color=colors1, line_alpha=0.5, fill_alpha=0.2, size=25)
27
28 p.circle(flowers["petal_length"], flowers["petal_width"],
29 color=colors2, line_alpha=0.5, fill_alpha=0.2, size=10)
30
31 output_file("iris_blend.html", title="iris_blend.py example")
32
33 show(p)
34
[end of examples/webgl/iris_blend.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/webgl/iris_blend.py b/examples/webgl/iris_blend.py
--- a/examples/webgl/iris_blend.py
+++ b/examples/webgl/iris_blend.py
@@ -23,10 +23,10 @@
p.yaxis.axis_label = 'Petal Width'
p.diamond(flowers["petal_length"], flowers["petal_width"],
- color=colors1, line_alpha=0.5, fill_alpha=0.2, size=25)
+ color=colors1, line_alpha=0.5, fill_alpha=0.2, size=25, legend='diamonds')
p.circle(flowers["petal_length"], flowers["petal_width"],
- color=colors2, line_alpha=0.5, fill_alpha=0.2, size=10)
+ color=colors2, line_alpha=0.5, fill_alpha=0.2, size=10, legend='circles')
output_file("iris_blend.html", title="iris_blend.py example")
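
Adding `legend=` gives the example a legend annotation, the element that, per the issue title, must be drawn after (on top of) the WebGL glyphs. A minimal sketch with the Bokeh API of that era; note that current Bokeh spells the keyword `legend_label` and selects WebGL via `output_backend='webgl'`:

```python
from bokeh.plotting import figure, output_file, show

p = figure(title='WebGL glyphs under a legend', webgl=True)
p.circle([1, 2, 3], [4, 5, 6], size=10, legend='circles')

output_file('webgl_legend.html')
show(p)
```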
| {"golden_diff": "diff --git a/examples/webgl/iris_blend.py b/examples/webgl/iris_blend.py\n--- a/examples/webgl/iris_blend.py\n+++ b/examples/webgl/iris_blend.py\n@@ -23,10 +23,10 @@\n p.yaxis.axis_label = 'Petal Width'\n \n p.diamond(flowers[\"petal_length\"], flowers[\"petal_width\"],\n- color=colors1, line_alpha=0.5, fill_alpha=0.2, size=25)\n+ color=colors1, line_alpha=0.5, fill_alpha=0.2, size=25, legend='diamonds')\n \n p.circle(flowers[\"petal_length\"], flowers[\"petal_width\"],\n- color=colors2, line_alpha=0.5, fill_alpha=0.2, size=10)\n+ color=colors2, line_alpha=0.5, fill_alpha=0.2, size=10, legend='circles')\n \n output_file(\"iris_blend.html\", title=\"iris_blend.py example\")\n", "issue": "Draw Legend After WebGL glyphs\n<img width=\"906\" alt=\"screen shot 2016-06-24 at 6 50 29 pm\" src=\"https://cloud.githubusercontent.com/assets/433221/16357521/c6f00a1c-3abe-11e6-8835-0e4bb17550d4.png\">\n\n", "before_files": [{"content": "\"\"\" The iris dataset, drawn twice with semi-transparent markers. This is\nan interesting use-case to test blending, because several samples itself\noverlap, and by drawing the set twice with different colors, we realize\neven more interesting blending. Also note how this makes use of\ndifferent ways to specify (css) colors. This example is a good reference\nto test WebGL blending.\n\n\"\"\"\n\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.sampledata.iris import flowers\n\ncolormap1 = {'setosa': 'rgb(255, 0, 0)',\n 'versicolor': 'rgb(0, 255, 0)',\n 'virginica': 'rgb(0, 0, 255)'}\ncolors1 = [colormap1[x] for x in flowers['species']]\n\ncolormap2 = {'setosa': '#0f0', 'versicolor': '#0f0', 'virginica': '#f00'}\ncolors2 = [colormap2[x] for x in flowers['species']]\n\np = figure(title = \"Iris Morphology\", webgl=True)\np.xaxis.axis_label = 'Petal Length'\np.yaxis.axis_label = 'Petal Width'\n\np.diamond(flowers[\"petal_length\"], flowers[\"petal_width\"],\n color=colors1, line_alpha=0.5, fill_alpha=0.2, size=25)\n\np.circle(flowers[\"petal_length\"], flowers[\"petal_width\"],\n color=colors2, line_alpha=0.5, fill_alpha=0.2, size=10)\n\noutput_file(\"iris_blend.html\", title=\"iris_blend.py example\")\n\nshow(p)\n", "path": "examples/webgl/iris_blend.py"}]} | 1,062 | 228 |
gh_patches_debug_15391 | rasdani/github-patches | git_diff | mlflow__mlflow-5627 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `MlflowException.invalid_parameter_value`
## Motivation
We frequently construct an `MlflowException` instance with `error_code=INVALID_PARAMETER_VALUE`:
```python
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
raise MlflowException(
"error message",
error_code=INVALID_PARAMETER_VALUE,
)
```
If we had a class method `invalid_parameter_value`:
```python
class MlflowException(...):
@classmethod
def invalid_parameter_value(cls, message, **kwargs):
return cls(message, error_code=INVALID_PARAMETER_VALUE, **kwargs)
```
we could simplify the code above to:
```python
from mlflow.exceptions import MlflowException
raise MlflowException.invalid_parameter_value("error message")
```
which is shorter and has fewer import statements.
## Notes
- We don't need to replace existing `MlflowException("error message", error_code=INVALID_PARAMETER_VALUE)` calls for now (we may in the future).
</issue>
<code>
[start of mlflow/exceptions.py]
1 import json
2
3 from mlflow.protos.databricks_pb2 import (
4 INTERNAL_ERROR,
5 TEMPORARILY_UNAVAILABLE,
6 ENDPOINT_NOT_FOUND,
7 PERMISSION_DENIED,
8 REQUEST_LIMIT_EXCEEDED,
9 BAD_REQUEST,
10 INVALID_PARAMETER_VALUE,
11 RESOURCE_DOES_NOT_EXIST,
12 INVALID_STATE,
13 RESOURCE_ALREADY_EXISTS,
14 ErrorCode,
15 )
16
17 ERROR_CODE_TO_HTTP_STATUS = {
18 ErrorCode.Name(INTERNAL_ERROR): 500,
19 ErrorCode.Name(INVALID_STATE): 500,
20 ErrorCode.Name(TEMPORARILY_UNAVAILABLE): 503,
21 ErrorCode.Name(REQUEST_LIMIT_EXCEEDED): 429,
22 ErrorCode.Name(ENDPOINT_NOT_FOUND): 404,
23 ErrorCode.Name(RESOURCE_DOES_NOT_EXIST): 404,
24 ErrorCode.Name(PERMISSION_DENIED): 403,
25 ErrorCode.Name(BAD_REQUEST): 400,
26 ErrorCode.Name(RESOURCE_ALREADY_EXISTS): 400,
27 ErrorCode.Name(INVALID_PARAMETER_VALUE): 400,
28 }
29
30
31 class MlflowException(Exception):
32 """
33 Generic exception thrown to surface failure information about external-facing operations.
34 The error message associated with this exception may be exposed to clients in HTTP responses
35 for debugging purposes. If the error text is sensitive, raise a generic `Exception` object
36 instead.
37 """
38
39 def __init__(self, message, error_code=INTERNAL_ERROR, **kwargs):
40 """
41         :param message: The message describing the error that occurred. This will be included in the
42 exception's serialized JSON representation.
43         :param error_code: An appropriate error code for the error that occurred; it will be included
44 in the exception's serialized JSON representation. This should be one of
45 the codes listed in the `mlflow.protos.databricks_pb2` proto.
46 :param kwargs: Additional key-value pairs to include in the serialized JSON representation
47 of the MlflowException.
48 """
49 try:
50 self.error_code = ErrorCode.Name(error_code)
51 except (ValueError, TypeError):
52 self.error_code = ErrorCode.Name(INTERNAL_ERROR)
53 self.message = message
54 self.json_kwargs = kwargs
55 super().__init__(message)
56
57 def serialize_as_json(self):
58 exception_dict = {"error_code": self.error_code, "message": self.message}
59 exception_dict.update(self.json_kwargs)
60 return json.dumps(exception_dict)
61
62 def get_http_status_code(self):
63 return ERROR_CODE_TO_HTTP_STATUS.get(self.error_code, 500)
64
65
66 class RestException(MlflowException):
67 """Exception thrown on non 200-level responses from the REST API"""
68
69 def __init__(self, json):
70 error_code = json.get("error_code", ErrorCode.Name(INTERNAL_ERROR))
71 message = "%s: %s" % (
72 error_code,
73 json["message"] if "message" in json else "Response: " + str(json),
74 )
75 super().__init__(message, error_code=ErrorCode.Value(error_code))
76 self.json = json
77
78
79 class ExecutionException(MlflowException):
80 """Exception thrown when executing a project fails"""
81
82 pass
83
84
85 class MissingConfigException(MlflowException):
86 """Exception thrown when expected configuration file/directory not found"""
87
88 pass
89
[end of mlflow/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlflow/exceptions.py b/mlflow/exceptions.py
--- a/mlflow/exceptions.py
+++ b/mlflow/exceptions.py
@@ -62,6 +62,18 @@
def get_http_status_code(self):
return ERROR_CODE_TO_HTTP_STATUS.get(self.error_code, 500)
+ @classmethod
+ def invalid_parameter_value(cls, message, **kwargs):
+ """
+ Constructs an `MlflowException` object with the `INVALID_PARAMETER_VALUE` error code.
+
+        :param message: The message describing the error that occurred. This will be included in the
+ exception's serialized JSON representation.
+ :param kwargs: Additional key-value pairs to include in the serialized JSON representation
+ of the MlflowException.
+ """
+ return cls(message, error_code=INVALID_PARAMETER_VALUE, **kwargs)
+
class RestException(MlflowException):
"""Exception thrown on non 200-level responses from the REST API"""
| {"golden_diff": "diff --git a/mlflow/exceptions.py b/mlflow/exceptions.py\n--- a/mlflow/exceptions.py\n+++ b/mlflow/exceptions.py\n@@ -62,6 +62,18 @@\n def get_http_status_code(self):\n return ERROR_CODE_TO_HTTP_STATUS.get(self.error_code, 500)\n \n+ @classmethod\n+ def invalid_parameter_value(cls, message, **kwargs):\n+ \"\"\"\n+ Constructs an `MlflowException` object with the `INVALID_PARAMETER_VALUE` error code.\n+\n+ :param message: The message describing the error that occured. This will be included in the\n+ exception's serialized JSON representation.\n+ :param kwargs: Additional key-value pairs to include in the serialized JSON representation\n+ of the MlflowException.\n+ \"\"\"\n+ return cls(message, error_code=INVALID_PARAMETER_VALUE, **kwargs)\n+\n \n class RestException(MlflowException):\n \"\"\"Exception thrown on non 200-level responses from the REST API\"\"\"\n", "issue": "Add `MlflowException.invalid_parameter_value`\n## Motivation\r\n\r\nWe frequently construct an `MlflowException` instance with `error_code=INVALID_PARAMETER_VALUE`:\r\n\r\n\r\n```python\r\nimport mlflow.exceptions from MlflowException\r\nfrom mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE\r\n\r\nraise MlflowException(\r\n \"error message\",\r\n error_code=INVALID_PARAMETER_VALUE,\r\n)\r\n```\r\n\r\nIf we had a class method `invalid_parameter_value`:\r\n\r\n```python\r\nclass MlflowException(...):\r\n @classmethod\r\n def invalid_parameter_value(cls, message, **kwargs):\r\n return cls(message, error_code=INVALID_PARAMETER_VALUE, **kwargs)\r\n```\r\n\r\nwe could simplify the code above to:\r\n\r\n```python\r\nimport mlflow.exceptions from MlflowException\r\n\r\nraise MlflowException.invalid_parameter_value(\"error message\")\r\n```\r\n\r\nwhich is shorter and has fewer import statements.\r\n\r\n## Notes\r\n\r\n- We don't need to replace existing `MlflowException(\"error message\",error_code=INVALID_PARAMETER_VALUE)` for now (we may in the future).\n", "before_files": [{"content": "import json\n\nfrom mlflow.protos.databricks_pb2 import (\n INTERNAL_ERROR,\n TEMPORARILY_UNAVAILABLE,\n ENDPOINT_NOT_FOUND,\n PERMISSION_DENIED,\n REQUEST_LIMIT_EXCEEDED,\n BAD_REQUEST,\n INVALID_PARAMETER_VALUE,\n RESOURCE_DOES_NOT_EXIST,\n INVALID_STATE,\n RESOURCE_ALREADY_EXISTS,\n ErrorCode,\n)\n\nERROR_CODE_TO_HTTP_STATUS = {\n ErrorCode.Name(INTERNAL_ERROR): 500,\n ErrorCode.Name(INVALID_STATE): 500,\n ErrorCode.Name(TEMPORARILY_UNAVAILABLE): 503,\n ErrorCode.Name(REQUEST_LIMIT_EXCEEDED): 429,\n ErrorCode.Name(ENDPOINT_NOT_FOUND): 404,\n ErrorCode.Name(RESOURCE_DOES_NOT_EXIST): 404,\n ErrorCode.Name(PERMISSION_DENIED): 403,\n ErrorCode.Name(BAD_REQUEST): 400,\n ErrorCode.Name(RESOURCE_ALREADY_EXISTS): 400,\n ErrorCode.Name(INVALID_PARAMETER_VALUE): 400,\n}\n\n\nclass MlflowException(Exception):\n \"\"\"\n Generic exception thrown to surface failure information about external-facing operations.\n The error message associated with this exception may be exposed to clients in HTTP responses\n for debugging purposes. If the error text is sensitive, raise a generic `Exception` object\n instead.\n \"\"\"\n\n def __init__(self, message, error_code=INTERNAL_ERROR, **kwargs):\n \"\"\"\n :param message: The message describing the error that occured. This will be included in the\n exception's serialized JSON representation.\n :param error_code: An appropriate error code for the error that occured; it will be included\n in the exception's serialized JSON representation. 
This should be one of\n the codes listed in the `mlflow.protos.databricks_pb2` proto.\n :param kwargs: Additional key-value pairs to include in the serialized JSON representation\n of the MlflowException.\n \"\"\"\n try:\n self.error_code = ErrorCode.Name(error_code)\n except (ValueError, TypeError):\n self.error_code = ErrorCode.Name(INTERNAL_ERROR)\n self.message = message\n self.json_kwargs = kwargs\n super().__init__(message)\n\n def serialize_as_json(self):\n exception_dict = {\"error_code\": self.error_code, \"message\": self.message}\n exception_dict.update(self.json_kwargs)\n return json.dumps(exception_dict)\n\n def get_http_status_code(self):\n return ERROR_CODE_TO_HTTP_STATUS.get(self.error_code, 500)\n\n\nclass RestException(MlflowException):\n \"\"\"Exception thrown on non 200-level responses from the REST API\"\"\"\n\n def __init__(self, json):\n error_code = json.get(\"error_code\", ErrorCode.Name(INTERNAL_ERROR))\n message = \"%s: %s\" % (\n error_code,\n json[\"message\"] if \"message\" in json else \"Response: \" + str(json),\n )\n super().__init__(message, error_code=ErrorCode.Value(error_code))\n self.json = json\n\n\nclass ExecutionException(MlflowException):\n \"\"\"Exception thrown when executing a project fails\"\"\"\n\n pass\n\n\nclass MissingConfigException(MlflowException):\n \"\"\"Exception thrown when expected configuration file/directory not found\"\"\"\n\n pass\n", "path": "mlflow/exceptions.py"}]} | 1,617 | 217 |
gh_patches_debug_11292 | rasdani/github-patches | git_diff | optuna__optuna-889 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use OrderedDict to keep the order of intermediate values.
This PR addresses #886.
I think the current behavior has two issues.
1. `dict` does not keep the order of keys
The current implementation of `FrozenTrial.intermediate_values` employs `dict`. The order of keys is not guaranteed if users run Python 3.6 or older. This PR uses `OrderedDict` for `FrozenTrial.intermediate_values`.
2. RDBs do not ensure the order of results if a query has no `order by` clause.
This PR sorts the intermediate values by step when they are retrieved from the database.
Please note that the results are not necessarily ordered by primary keys when queries do not contain `order by`. (c.f., ['When no 'Order by' is specified, what order does a query choose for your record set?'](https://stackoverflow.com/questions/20050341/when-no-order-by-is-specified-what-order-does-a-query-choose-for-your-record))
</issue>
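For context, here is a minimal standalone sketch of the ordering problem and of the sort-based fix described above. The dictionary literal and variable names are illustrative, not taken from the Optuna codebase; the point is that sorting the (step, value) pairs by step makes the plotted order deterministic regardless of `dict` or database ordering.

```python
# Hypothetical step -> value mapping, arriving in arbitrary order.
intermediate_values = {3: 0.5, 1: 0.9, 2: 0.7}

# sorted() on items() orders the pairs by key, i.e. by step number.
sorted_items = sorted(intermediate_values.items())
steps = tuple(step for step, _ in sorted_items)
values = tuple(value for _, value in sorted_items)

assert steps == (1, 2, 3)
assert values == (0.9, 0.7, 0.5)
```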
<code>
[start of optuna/visualization/intermediate_values.py]
1 from optuna.logging import get_logger
2 from optuna.structs import TrialState
3 from optuna import type_checking
4 from optuna.visualization.utils import _check_plotly_availability
5 from optuna.visualization.utils import is_available
6
7 if type_checking.TYPE_CHECKING:
8 from optuna.study import Study # NOQA
9
10 if is_available():
11 from optuna.visualization.plotly_imports import go
12
13 logger = get_logger(__name__)
14
15
16 def plot_intermediate_values(study):
17 # type: (Study) -> go.Figure
18 """Plot intermediate values of all trials in a study.
19
20 Example:
21
22 The following code snippet shows how to plot intermediate values.
23
24 .. code::
25
26 import optuna
27
28 def objective(trial):
29 # Intermediate values are supposed to be reported inside the objective function.
30 ...
31
32 study = optuna.create_study()
33 study.optimize(objective, n_trials=100)
34
35 optuna.visualization.plot_intermediate_values(study)
36
37 Args:
38 study:
39 A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate
40 values.
41
42 Returns:
43 A :class:`plotly.graph_objs.Figure` object.
44 """
45
46 _check_plotly_availability()
47 return _get_intermediate_plot(study)
48
49
50 def _get_intermediate_plot(study):
51 # type: (Study) -> go.Figure
52
53 layout = go.Layout(
54 title='Intermediate Values Plot',
55 xaxis={'title': 'Step'},
56 yaxis={'title': 'Intermediate Value'},
57 showlegend=False
58 )
59
60 target_state = [TrialState.PRUNED, TrialState.COMPLETE, TrialState.RUNNING]
61 trials = [trial for trial in study.trials if trial.state in target_state]
62
63 if len(trials) == 0:
64 logger.warning('Study instance does not contain trials.')
65 return go.Figure(data=[], layout=layout)
66
67 traces = []
68 for trial in trials:
69 if trial.intermediate_values:
70 trace = go.Scatter(
71 x=tuple(trial.intermediate_values.keys()),
72 y=tuple(trial.intermediate_values.values()),
73 mode='lines+markers',
74 marker={
75 'maxdisplayed': 10
76 },
77 name='Trial{}'.format(trial.number)
78 )
79 traces.append(trace)
80
81 if not traces:
82 logger.warning(
83 'You need to set up the pruning feature to utilize `plot_intermediate_values()`')
84 return go.Figure(data=[], layout=layout)
85
86 figure = go.Figure(data=traces, layout=layout)
87
88 return figure
89
[end of optuna/visualization/intermediate_values.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optuna/visualization/intermediate_values.py b/optuna/visualization/intermediate_values.py
--- a/optuna/visualization/intermediate_values.py
+++ b/optuna/visualization/intermediate_values.py
@@ -67,9 +67,10 @@
traces = []
for trial in trials:
if trial.intermediate_values:
+ sorted_intermediate_values = sorted(trial.intermediate_values.items())
trace = go.Scatter(
- x=tuple(trial.intermediate_values.keys()),
- y=tuple(trial.intermediate_values.values()),
+ x=tuple((x for x, _ in sorted_intermediate_values)),
+ y=tuple((y for _, y in sorted_intermediate_values)),
mode='lines+markers',
marker={
'maxdisplayed': 10
| {"golden_diff": "diff --git a/optuna/visualization/intermediate_values.py b/optuna/visualization/intermediate_values.py\n--- a/optuna/visualization/intermediate_values.py\n+++ b/optuna/visualization/intermediate_values.py\n@@ -67,9 +67,10 @@\n traces = []\n for trial in trials:\n if trial.intermediate_values:\n+ sorted_intermediate_values = sorted(trial.intermediate_values.items())\n trace = go.Scatter(\n- x=tuple(trial.intermediate_values.keys()),\n- y=tuple(trial.intermediate_values.values()),\n+ x=tuple((x for x, _ in sorted_intermediate_values)),\n+ y=tuple((y for _, y in sorted_intermediate_values)),\n mode='lines+markers',\n marker={\n 'maxdisplayed': 10\n", "issue": "Use OrderedDict to keep the order of intermediate values.\nThis PR addresses #886. \r\n\r\nI think it has two issues.\r\n\r\n1. `dict` does not keep the order of keys\r\n\r\nThe current implementation of `FrozenTrial.intermediate_values` employs `dict`. The order of keys is not ensured if the users use Python 3.6 or older. This PR uses `OrderedDict` for `FrozenTrial.intermediate_values`.\r\n\r\n2. RDBs do not ensure the order of results if a query has no `order by` clause.\r\n\r\nThis PR arranges the intermediate values when they are retrieved from databases.\r\nPlease note that the results are not necessarily ordered by primary keys when queries do not contain `order by`. (c.f., ['When no 'Order by' is specified, what order does a query choose for your record set?'](https://stackoverflow.com/questions/20050341/when-no-order-by-is-specified-what-order-does-a-query-choose-for-your-record))\n", "before_files": [{"content": "from optuna.logging import get_logger\nfrom optuna.structs import TrialState\nfrom optuna import type_checking\nfrom optuna.visualization.utils import _check_plotly_availability\nfrom optuna.visualization.utils import is_available\n\nif type_checking.TYPE_CHECKING:\n from optuna.study import Study # NOQA\n\nif is_available():\n from optuna.visualization.plotly_imports import go\n\nlogger = get_logger(__name__)\n\n\ndef plot_intermediate_values(study):\n # type: (Study) -> go.Figure\n \"\"\"Plot intermediate values of all trials in a study.\n\n Example:\n\n The following code snippet shows how to plot intermediate values.\n\n .. 
code::\n\n import optuna\n\n def objective(trial):\n # Intermediate values are supposed to be reported inside the objective function.\n ...\n\n study = optuna.create_study()\n study.optimize(objective, n_trials=100)\n\n optuna.visualization.plot_intermediate_values(study)\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate\n values.\n\n Returns:\n A :class:`plotly.graph_objs.Figure` object.\n \"\"\"\n\n _check_plotly_availability()\n return _get_intermediate_plot(study)\n\n\ndef _get_intermediate_plot(study):\n # type: (Study) -> go.Figure\n\n layout = go.Layout(\n title='Intermediate Values Plot',\n xaxis={'title': 'Step'},\n yaxis={'title': 'Intermediate Value'},\n showlegend=False\n )\n\n target_state = [TrialState.PRUNED, TrialState.COMPLETE, TrialState.RUNNING]\n trials = [trial for trial in study.trials if trial.state in target_state]\n\n if len(trials) == 0:\n logger.warning('Study instance does not contain trials.')\n return go.Figure(data=[], layout=layout)\n\n traces = []\n for trial in trials:\n if trial.intermediate_values:\n trace = go.Scatter(\n x=tuple(trial.intermediate_values.keys()),\n y=tuple(trial.intermediate_values.values()),\n mode='lines+markers',\n marker={\n 'maxdisplayed': 10\n },\n name='Trial{}'.format(trial.number)\n )\n traces.append(trace)\n\n if not traces:\n logger.warning(\n 'You need to set up the pruning feature to utilize `plot_intermediate_values()`')\n return go.Figure(data=[], layout=layout)\n\n figure = go.Figure(data=traces, layout=layout)\n\n return figure\n", "path": "optuna/visualization/intermediate_values.py"}]} | 1,485 | 178 |
gh_patches_debug_21961 | rasdani/github-patches | git_diff | pantsbuild__pants-6405 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The python_requirements macro retains no handle to the requirements file.
This means changes in the requirements file do not result in changes in a live target graph (pantsd).
</issue>
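For context, a sketch of the general fix pattern, simplified from the change shown further below (the function name here is illustrative, and `parse_context` mimics the macro API used in this file): the requirements file is materialized as a `files` target, and every generated library depends on it, so editing the file invalidates the target graph under pantsd.

```python
def python_requirements_sketch(parse_context, requirements_relpath='requirements.txt'):
    # Own the requirements file with a target so the engine watches it.
    parse_context.create_object('files',
                                name=requirements_relpath,
                                sources=[requirements_relpath])
    # Each generated python_requirement_library then declares this dependency,
    # tying its fingerprint to the file's contents.
    return ':{}'.format(requirements_relpath)
```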
<code>
[start of src/python/pants/backend/python/python_requirements.py]
1 # coding=utf-8
2 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 import os
8 from builtins import object, open
9
10
11 class PythonRequirements(object):
12 """Translates a pip requirements file into an equivalent set of python_requirements
13
14 If the ``requirements.txt`` file has lines ``foo>=3.14`` and ``bar>=2.7``,
15 then this is roughly like::
16
17 python_requirement_library(name="foo", requirements=[
18 python_requirement("foo>=3.14"),
19 ])
20 python_requirement_library(name="bar", requirements=[
21 python_requirement("bar>=2.7"),
22 ])
23
24 NB some requirements files can't be unambiguously translated; ie: multiple
25 find links. For these files a ValueError will be raised that points out the issue.
26
27 See the requirements file spec here:
28 https://pip.pypa.io/en/latest/reference/pip_install.html#requirements-file-format
29 """
30
31 def __init__(self, parse_context):
32 self._parse_context = parse_context
33
34 def __call__(self, requirements_relpath='requirements.txt'):
35 """
36 :param string requirements_relpath: The relpath from this BUILD file to the requirements file.
37 Defaults to a `requirements.txt` file sibling to the BUILD file.
38 """
39
40 requirements = []
41 repository = None
42
43 requirements_path = os.path.join(self._parse_context.rel_path, requirements_relpath)
44 with open(requirements_path, 'r') as fp:
45 for line in fp:
46 line = line.strip()
47 if line and not line.startswith('#'):
48 if not line.startswith('-'):
49 requirements.append(line)
50 else:
51 # handle flags we know about
52 flag_value = line.split(' ', 1)
53 if len(flag_value) == 2:
54 flag = flag_value[0].strip()
55 value = flag_value[1].strip()
56 if flag in ('-f', '--find-links'):
57 if repository is not None:
58 raise ValueError('Only 1 --find-links url is supported per requirements file')
59 repository = value
60
61 for requirement in requirements:
62 req = self._parse_context.create_object('python_requirement', requirement,
63 repository=repository)
64 self._parse_context.create_object('python_requirement_library',
65 name=req.project_name,
66 requirements=[req])
67
[end of src/python/pants/backend/python/python_requirements.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/backend/python/python_requirements.py b/src/python/pants/backend/python/python_requirements.py
--- a/src/python/pants/backend/python/python_requirements.py
+++ b/src/python/pants/backend/python/python_requirements.py
@@ -58,9 +58,16 @@
raise ValueError('Only 1 --find-links url is supported per requirements file')
repository = value
+ requirements_file_target_name = requirements_relpath
+ self._parse_context.create_object('files',
+ name=requirements_file_target_name,
+ sources=[requirements_relpath])
+ requirements_dep = ':{}'.format(requirements_file_target_name)
+
for requirement in requirements:
req = self._parse_context.create_object('python_requirement', requirement,
repository=repository)
self._parse_context.create_object('python_requirement_library',
name=req.project_name,
- requirements=[req])
+ requirements=[req],
+ dependencies=[requirements_dep])
| {"golden_diff": "diff --git a/src/python/pants/backend/python/python_requirements.py b/src/python/pants/backend/python/python_requirements.py\n--- a/src/python/pants/backend/python/python_requirements.py\n+++ b/src/python/pants/backend/python/python_requirements.py\n@@ -58,9 +58,16 @@\n raise ValueError('Only 1 --find-links url is supported per requirements file')\n repository = value\n \n+ requirements_file_target_name = requirements_relpath\n+ self._parse_context.create_object('files',\n+ name=requirements_file_target_name,\n+ sources=[requirements_relpath])\n+ requirements_dep = ':{}'.format(requirements_file_target_name)\n+\n for requirement in requirements:\n req = self._parse_context.create_object('python_requirement', requirement,\n repository=repository)\n self._parse_context.create_object('python_requirement_library',\n name=req.project_name,\n- requirements=[req])\n+ requirements=[req],\n+ dependencies=[requirements_dep])\n", "issue": "The python_requirements macro retains no handle to the requirements file.\nThis means changes in the requirements file do not result in changes in a live target graph (pantsd).\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom builtins import object, open\n\n\nclass PythonRequirements(object):\n \"\"\"Translates a pip requirements file into an equivalent set of python_requirements\n\n If the ``requirements.txt`` file has lines ``foo>=3.14`` and ``bar>=2.7``,\n then this is roughly like::\n\n python_requirement_library(name=\"foo\", requirements=[\n python_requirement(\"foo>=3.14\"),\n ])\n python_requirement_library(name=\"bar\", requirements=[\n python_requirement(\"bar>=2.7\"),\n ])\n\n NB some requirements files can't be unambiguously translated; ie: multiple\n find links. For these files a ValueError will be raised that points out the issue.\n\n See the requirements file spec here:\n https://pip.pypa.io/en/latest/reference/pip_install.html#requirements-file-format\n \"\"\"\n\n def __init__(self, parse_context):\n self._parse_context = parse_context\n\n def __call__(self, requirements_relpath='requirements.txt'):\n \"\"\"\n :param string requirements_relpath: The relpath from this BUILD file to the requirements file.\n Defaults to a `requirements.txt` file sibling to the BUILD file.\n \"\"\"\n\n requirements = []\n repository = None\n\n requirements_path = os.path.join(self._parse_context.rel_path, requirements_relpath)\n with open(requirements_path, 'r') as fp:\n for line in fp:\n line = line.strip()\n if line and not line.startswith('#'):\n if not line.startswith('-'):\n requirements.append(line)\n else:\n # handle flags we know about\n flag_value = line.split(' ', 1)\n if len(flag_value) == 2:\n flag = flag_value[0].strip()\n value = flag_value[1].strip()\n if flag in ('-f', '--find-links'):\n if repository is not None:\n raise ValueError('Only 1 --find-links url is supported per requirements file')\n repository = value\n\n for requirement in requirements:\n req = self._parse_context.create_object('python_requirement', requirement,\n repository=repository)\n self._parse_context.create_object('python_requirement_library',\n name=req.project_name,\n requirements=[req])\n", "path": "src/python/pants/backend/python/python_requirements.py"}]} | 1,239 | 203 |
gh_patches_debug_780 | rasdani/github-patches | git_diff | graspologic-org__graspologic-428 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update requirements to scipy>=1.4
Scipy 1.4
- has a much faster linear assignment problem solver, making FAQ way faster
- has MGC, which we eventually want for new nonpar, signal subgraph
</issue>
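For reference, a standalone illustration of the first motivation (this is not graspologic code): SciPy 1.4 reimplemented `linear_sum_assignment`, the solver behind FAQ-style graph matching, making it much faster.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([[4, 1, 3],
                 [2, 0, 5],
                 [3, 2, 2]])
row_ind, col_ind = linear_sum_assignment(cost)
print(cost[row_ind, col_ind].sum())  # minimal total cost of the assignment: 5
```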
<code>
[start of setup.py]
1 import os
2 import sys
3 from setuptools import setup, find_packages
4 from sys import platform
5
6 PACKAGE_NAME = "graspy"
7 DESCRIPTION = "A set of python modules for graph statistics"
8 with open("README.md", "r") as f:
9 LONG_DESCRIPTION = f.read()
10 AUTHOR = ("Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand",)
11 AUTHOR_EMAIL = "[email protected]"
12 URL = "https://github.com/neurodata/graspy"
13 MINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5
14 REQUIRED_PACKAGES = [
15 "networkx>=2.1",
16 "numpy>=1.8.1",
17 "scikit-learn>=0.19.1",
18 "scipy>=1.1.0",
19 "seaborn>=0.9.0",
20 "matplotlib>=3.0.0",
21 "hyppo>=0.1.3",
22 ]
23
24
25 # Find GraSPy version.
26 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
27 for line in open(os.path.join(PROJECT_PATH, "graspy", "__init__.py")):
28 if line.startswith("__version__ = "):
29 VERSION = line.strip().split()[2][1:-1]
30
31
32 def check_python_version():
33 """Exit when the Python version is too low."""
34 if sys.version_info < MINIMUM_PYTHON_VERSION:
35 sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))
36
37
38 check_python_version()
39
40 setup(
41 name=PACKAGE_NAME,
42 version=VERSION,
43 description=DESCRIPTION,
44 long_description=LONG_DESCRIPTION,
45 long_description_content_type="text/markdown",
46 author=AUTHOR,
47 author_email=AUTHOR_EMAIL,
48 install_requires=REQUIRED_PACKAGES,
49 url=URL,
50 license="Apache License 2.0",
51 classifiers=[
52 "Development Status :: 3 - Alpha",
53 "Intended Audience :: Science/Research",
54 "Topic :: Scientific/Engineering :: Mathematics",
55 "License :: OSI Approved :: Apache Software License",
56 "Programming Language :: Python :: 3",
57 "Programming Language :: Python :: 3.6",
58 "Programming Language :: Python :: 3.7",
59 ],
60 packages=find_packages(),
61 include_package_data=True,
62 )
63
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
"networkx>=2.1",
"numpy>=1.8.1",
"scikit-learn>=0.19.1",
- "scipy>=1.1.0",
+ "scipy>=1.4.0",
"seaborn>=0.9.0",
"matplotlib>=3.0.0",
"hyppo>=0.1.3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"scikit-learn>=0.19.1\",\n- \"scipy>=1.1.0\",\n+ \"scipy>=1.4.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n \"hyppo>=0.1.3\",\n", "issue": "update requirements to scipy>=1.4\nScipy 1.4\r\n- has much faster linear assignment problem, making FAQ way faster\r\n- has MGC, which we eventually want for new nonpar, signal subgraph\n", "before_files": [{"content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",)\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/neurodata/graspy\"\nMINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5\nREQUIRED_PACKAGES = [\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n \"hyppo>=0.1.3\",\n]\n\n\n# Find GraSPy version.\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nfor line in open(os.path.join(PROJECT_PATH, \"graspy\", \"__init__.py\")):\n if line.startswith(\"__version__ = \"):\n VERSION = line.strip().split()[2][1:-1]\n\n\ndef check_python_version():\n \"\"\"Exit when the Python version is too low.\"\"\"\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\n\ncheck_python_version()\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n install_requires=REQUIRED_PACKAGES,\n url=URL,\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n)\n", "path": "setup.py"}]} | 1,193 | 123 |
gh_patches_debug_1137 | rasdani/github-patches | git_diff | hylang__hy-1201 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fix setup.py
At least `hy.extra` is missing from the package data.
</issue>
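For context, a minimal hypothetical `setup.py` fragment (not the real one below) showing the setuptools behavior at issue: only packages listed in `package_data` get their non-Python files installed, so omitting `hy.extra` drops its `*.hy` sources from the installed distribution.

```python
from setuptools import setup, find_packages

setup(
    name='example',
    packages=find_packages(),
    package_data={
        'hy.contrib': ['*.hy'],
        'hy.core': ['*.hy'],
        'hy.extra': ['*.hy'],  # the entry this issue reports as missing
    },
)
```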
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a
5 # copy of this software and associated documentation files (the "Software"),
6 # to deal in the Software without restriction, including without limitation
7 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 # and/or sell copies of the Software, and to permit persons to whom the
9 # Software is furnished to do so, subject to the following conditions:
10 #
11 # The above copyright notice and this permission notice shall be included in
12 # all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 # DEALINGS IN THE SOFTWARE.
21
22 import os
23 import re
24 import sys
25 import subprocess
26
27 from setuptools import find_packages, setup
28
29 os.chdir(os.path.split(os.path.abspath(__file__))[0])
30
31 PKG = "hy"
32 VERSIONFILE = os.path.join(PKG, "version.py")
33 try:
34 __version__ = (subprocess.check_output
35 (["git", "describe", "--tags", "--dirty"])
36 .decode('ASCII').strip()
37 .replace('-', '+', 1).replace('-', '.'))
38 with open(VERSIONFILE, "wt") as o:
39 o.write("__version__ = {!r}\n".format(__version__))
40 except subprocess.CalledProcessError:
41 __version__ = "unknown"
42
43 long_description = """Hy is a Python <--> Lisp layer. It helps
44 make things work nicer, and lets Python and the Hy lisp variant play
45 nice together. """
46
47 install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']
48 if sys.version_info[:2] < (2, 7):
49 install_requires.append('argparse>=1.2.1')
50 install_requires.append('importlib>=1.0.2')
51 if os.name == 'nt':
52 install_requires.append('pyreadline>=2.1')
53
54 ver = sys.version_info[0]
55
56 setup(
57 name=PKG,
58 version=__version__,
59 install_requires=install_requires,
60 entry_points={
61 'console_scripts': [
62 'hy = hy.cmdline:hy_main',
63 'hy%d = hy.cmdline:hy_main' % ver,
64 'hyc = hy.cmdline:hyc_main',
65 'hyc%d = hy.cmdline:hyc_main' % ver,
66 'hy2py = hy.cmdline:hy2py_main',
67 'hy2py%d = hy.cmdline:hy2py_main' % ver,
68 ]
69 },
70 packages=find_packages(exclude=['tests*']),
71 package_data={
72 'hy.contrib': ['*.hy'],
73 'hy.core': ['*.hy'],
74 },
75 author="Paul Tagliamonte",
76 author_email="[email protected]",
77 long_description=long_description,
78 description='Lisp and Python love each other.',
79 license="Expat",
80 url="http://hylang.org/",
81 platforms=['any'],
82 classifiers=[
83 "Development Status :: 4 - Beta",
84 "Intended Audience :: Developers",
85 "License :: DFSG approved",
86 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
87 "Operating System :: OS Independent",
88 "Programming Language :: Lisp",
89 "Programming Language :: Python",
90 "Programming Language :: Python :: 2",
91 "Programming Language :: Python :: 2.6",
92 "Programming Language :: Python :: 2.7",
93 "Programming Language :: Python :: 3",
94 "Programming Language :: Python :: 3.3",
95 "Programming Language :: Python :: 3.4",
96 "Programming Language :: Python :: 3.5",
97 "Topic :: Software Development :: Code Generators",
98 "Topic :: Software Development :: Compilers",
99 "Topic :: Software Development :: Libraries",
100 ]
101 )
102
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -71,6 +71,7 @@
package_data={
'hy.contrib': ['*.hy'],
'hy.core': ['*.hy'],
+ 'hy.extra': ['*.hy'],
},
author="Paul Tagliamonte",
author_email="[email protected]",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -71,6 +71,7 @@\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n+ 'hy.extra': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n", "issue": "fix setup.py\nat least hy.extra is missing from package data\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport re\nimport sys\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\nVERSIONFILE = os.path.join(PKG, \"version.py\")\ntry:\n __version__ = (subprocess.check_output\n ([\"git\", \"describe\", \"--tags\", \"--dirty\"])\n .decode('ASCII').strip()\n .replace('-', '+', 1).replace('-', '.'))\n with open(VERSIONFILE, \"wt\") as o:\n o.write(\"__version__ = {!r}\\n\".format(__version__))\nexcept subprocess.CalledProcessError:\n __version__ = \"unknown\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\ninstall_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse>=1.2.1')\n install_requires.append('importlib>=1.0.2')\nif os.name == 'nt':\n install_requires.append('pyreadline>=2.1')\n\nver = sys.version_info[0]\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". 
Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}]} | 1,683 | 86 |
gh_patches_debug_1145 | rasdani/github-patches | git_diff | cocotb__cocotb-1298 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change setup.py to list the version as 1.x-dev for versions installed from github
As suggested by @themperek, it would be neat if cocotb behaved like this:
```
> pip install git+https://github.com/cocotb/cocotb
> python -c "import cocotb; print(cocotb.__version__)"
1.4.0-dev
```
</issue>
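One note on the version string: PEP 440 spells development releases as `X.Y.Z.devN`, so the value pip would actually report is `1.4.0.dev0` rather than `1.4.0-dev`. A quick standalone check (not cocotb code):

```python
from pkg_resources import parse_version

# A .dev0 pre-release sorts before the corresponding final release.
assert parse_version('1.4.0.dev0') < parse_version('1.4.0')
```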
<code>
[start of cocotb/_version.py]
1 # Package versioning solution originally found here:
2 # http://stackoverflow.com/q/458550
3
4 # Store the version here so:
5 # 1) we don't load dependencies by storing it in __init__.py
6 # 2) we can import it in setup.py for the same reason
7 # 3) we can import it into your module
8 __version__ = '1.3.0'
9
[end of cocotb/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cocotb/_version.py b/cocotb/_version.py
--- a/cocotb/_version.py
+++ b/cocotb/_version.py
@@ -5,4 +5,4 @@
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module
-__version__ = '1.3.0'
+__version__ = '1.4.0.dev0'
| {"golden_diff": "diff --git a/cocotb/_version.py b/cocotb/_version.py\n--- a/cocotb/_version.py\n+++ b/cocotb/_version.py\n@@ -5,4 +5,4 @@\n # 1) we don't load dependencies by storing it in __init__.py\n # 2) we can import it in setup.py for the same reason\n # 3) we can import it into your module\n-__version__ = '1.3.0'\n+__version__ = '1.4.0.dev0'\n", "issue": "Change setup.py to list the version as 1.x-dev for versions installed from github\nAs suggested by @themperek, it would be neat if cocotb behaved like this:\r\n```\r\n> pip install git+https://github.com/cocotb/cocotb\r\n> python -c \"import cocotb; print(cocotb.__version__)\"\r\n1.4.0-dev\r\n```\n", "before_files": [{"content": "# Package versioning solution originally found here:\n# http://stackoverflow.com/q/458550\n\n# Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module\n__version__ = '1.3.0'\n", "path": "cocotb/_version.py"}]} | 721 | 123 |
gh_patches_debug_13157 | rasdani/github-patches | git_diff | doccano__doccano-2201 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include Example number in the Project Comments view
Feature description
---------
On the backend API and in the dataset annotation interface, the Comments are associated with specific Examples. But in the Project Comments view, the Example association is unclear--all the comments are grouped together.
Can the Project Comments view tab be improved to detail Examples, maybe even sort or group by Example?
Thanks!
</issue>
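For context, a sketch of one way such sorting is typically exposed in Django REST Framework (the field names are illustrative, and the queryset/serializer wiring is omitted): adding an `OrderingFilter` lets clients request `?ordering=example` to group comments by Example.

```python
from rest_framework import filters, generics

class CommentListSketch(generics.ListCreateAPIView):
    # queryset / serializer_class omitted for brevity.
    filter_backends = (filters.OrderingFilter,)
    ordering_fields = ('created_at', 'example')  # e.g. GET /comments?ordering=example
```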
<code>
[start of backend/examples/views/comment.py]
1 from django_filters.rest_framework import DjangoFilterBackend
2 from rest_framework import filters, generics, status
3 from rest_framework.permissions import IsAuthenticated
4 from rest_framework.response import Response
5
6 from examples.models import Comment
7 from examples.permissions import IsOwnComment
8 from examples.serializers import CommentSerializer
9 from projects.permissions import IsProjectMember
10
11
12 class CommentList(generics.ListCreateAPIView):
13 permission_classes = [IsAuthenticated & IsProjectMember]
14 serializer_class = CommentSerializer
15 filter_backends = (DjangoFilterBackend, filters.SearchFilter)
16 filterset_fields = ["example"]
17 search_fields = ("text",)
18
19 def get_queryset(self):
20 queryset = Comment.objects.filter(example__project_id=self.kwargs["project_id"])
21 return queryset
22
23 def perform_create(self, serializer):
24 serializer.save(example_id=self.request.query_params.get("example"), user=self.request.user)
25
26 def delete(self, request, *args, **kwargs):
27 delete_ids = request.data["ids"]
28 Comment.objects.filter(user=request.user, pk__in=delete_ids).delete()
29 return Response(status=status.HTTP_204_NO_CONTENT)
30
31
32 class CommentDetail(generics.RetrieveUpdateDestroyAPIView):
33 queryset = Comment.objects.all()
34 serializer_class = CommentSerializer
35 lookup_url_kwarg = "comment_id"
36 permission_classes = [IsAuthenticated & IsProjectMember & IsOwnComment]
37
[end of backend/examples/views/comment.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/examples/views/comment.py b/backend/examples/views/comment.py
--- a/backend/examples/views/comment.py
+++ b/backend/examples/views/comment.py
@@ -12,9 +12,10 @@
class CommentList(generics.ListCreateAPIView):
permission_classes = [IsAuthenticated & IsProjectMember]
serializer_class = CommentSerializer
- filter_backends = (DjangoFilterBackend, filters.SearchFilter)
+ filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
filterset_fields = ["example"]
search_fields = ("text",)
+ ordering_fields = ("created_at", "example")
def get_queryset(self):
queryset = Comment.objects.filter(example__project_id=self.kwargs["project_id"])
| {"golden_diff": "diff --git a/backend/examples/views/comment.py b/backend/examples/views/comment.py\n--- a/backend/examples/views/comment.py\n+++ b/backend/examples/views/comment.py\n@@ -12,9 +12,10 @@\n class CommentList(generics.ListCreateAPIView):\n permission_classes = [IsAuthenticated & IsProjectMember]\n serializer_class = CommentSerializer\n- filter_backends = (DjangoFilterBackend, filters.SearchFilter)\n+ filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n filterset_fields = [\"example\"]\n search_fields = (\"text\",)\n+ ordering_fields = (\"created_at\", \"example\")\n \n def get_queryset(self):\n queryset = Comment.objects.filter(example__project_id=self.kwargs[\"project_id\"])\n", "issue": "Include Example number in the Project Comments view\nFeature description\r\n---------\r\nOn the backend API and in the dataset annotation interface, the Comments are associated with specific Examples. But in the Project Comments view, the Example association is unclear--all the comments are grouped together.\r\n\r\nCan the Project Comments view tab be improved to detail Examples, maybe even sort or group by Example?\r\n\r\nThanks!\n", "before_files": [{"content": "from django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters, generics, status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom examples.models import Comment\nfrom examples.permissions import IsOwnComment\nfrom examples.serializers import CommentSerializer\nfrom projects.permissions import IsProjectMember\n\n\nclass CommentList(generics.ListCreateAPIView):\n permission_classes = [IsAuthenticated & IsProjectMember]\n serializer_class = CommentSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter)\n filterset_fields = [\"example\"]\n search_fields = (\"text\",)\n\n def get_queryset(self):\n queryset = Comment.objects.filter(example__project_id=self.kwargs[\"project_id\"])\n return queryset\n\n def perform_create(self, serializer):\n serializer.save(example_id=self.request.query_params.get(\"example\"), user=self.request.user)\n\n def delete(self, request, *args, **kwargs):\n delete_ids = request.data[\"ids\"]\n Comment.objects.filter(user=request.user, pk__in=delete_ids).delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass CommentDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Comment.objects.all()\n serializer_class = CommentSerializer\n lookup_url_kwarg = \"comment_id\"\n permission_classes = [IsAuthenticated & IsProjectMember & IsOwnComment]\n", "path": "backend/examples/views/comment.py"}]} | 966 | 163 |
gh_patches_debug_17054 | rasdani/github-patches | git_diff | elastic__elasticsearch-py-206 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
max_retries is ignored when using thrift
connection/thrift.py does not catch socket.error in perform_request, which causes max_retries to be ignored in the transport.
One can argue that this socket.error exception should be translated into a TException in the official Thrift library, but I think it's better to handle it here as well.
</issue>
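For reference, why catching `socket.error` also keeps the timeout behavior intact (Python 3 semantics; standalone illustration, not library code): `socket.error` is an alias of `OSError`, and `socket.timeout` is one of its subclasses, so a `socket.error` handler placed after a `socket.timeout` handler still catches every other socket failure.

```python
import socket

assert socket.error is OSError
assert issubclass(socket.timeout, socket.error)
```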
<code>
[start of elasticsearch/connection/thrift.py]
1 from __future__ import absolute_import
2 from socket import timeout as SocketTimeout
3 import time
4 import logging
5
6 try:
7 from .esthrift import Rest
8 from .esthrift.ttypes import Method, RestRequest
9
10 from thrift.transport import TTransport, TSocket, TSSLSocket
11 from thrift.protocol import TBinaryProtocol
12 from thrift.Thrift import TException
13 THRIFT_AVAILABLE = True
14 except ImportError:
15 THRIFT_AVAILABLE = False
16
17 from ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout
18 from .pooling import PoolingConnection
19
20 logger = logging.getLogger('elasticsearch')
21
22 class ThriftConnection(PoolingConnection):
23 """
24 Connection using the `thrift` protocol to communicate with elasticsearch.
25
26 See https://github.com/elasticsearch/elasticsearch-transport-thrift for additional info.
27 """
28 transport_schema = 'thrift'
29
30 def __init__(self, host='localhost', port=9500, framed_transport=False, use_ssl=False, **kwargs):
31 """
32 :arg framed_transport: use `TTransport.TFramedTransport` instead of
33 `TTransport.TBufferedTransport`
34 """
35 if not THRIFT_AVAILABLE:
36 raise ImproperlyConfigured("Thrift is not available.")
37
38 super(ThriftConnection, self).__init__(host=host, port=port, **kwargs)
39 self._framed_transport = framed_transport
40 self._tsocket_class = TSocket.TSocket
41 if use_ssl:
42 self._tsocket_class = TSSLSocket.TSSLSocket
43 self._tsocket_args = (host, port)
44
45 def _make_connection(self):
46 socket = self._tsocket_class(*self._tsocket_args)
47 socket.setTimeout(self.timeout * 1000.0)
48 if self._framed_transport:
49 transport = TTransport.TFramedTransport(socket)
50 else:
51 transport = TTransport.TBufferedTransport(socket)
52
53 protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
54 client = Rest.Client(protocol)
55 client.transport = transport
56 transport.open()
57 return client
58
59 def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
60 request = RestRequest(method=Method._NAMES_TO_VALUES[method.upper()], uri=url,
61 parameters=params, body=body)
62
63 start = time.time()
64 tclient = None
65 try:
66 tclient = self._get_connection()
67 response = tclient.execute(request)
68 duration = time.time() - start
69 except SocketTimeout as e:
70 self.log_request_fail(method, url, body, time.time() - start, exception=e)
71 raise ConnectionTimeout('TIMEOUT', str(e), e)
72 except (TException, SocketTimeout) as e:
73 self.log_request_fail(method, url, body, time.time() - start, exception=e)
74 if tclient:
75 try:
76 # try closing transport socket
77 tclient.transport.close()
78 except Exception as e:
79 logger.warning(
80 'Exception %s occured when closing a failed thrift connection.',
81 e, exc_info=True
82 )
83 raise ConnectionError('N/A', str(e), e)
84
85 self._release_connection(tclient)
86
87 if not (200 <= response.status < 300) and response.status not in ignore:
88 self.log_request_fail(method, url, body, duration, response.status)
89 self._raise_error(response.status, response.body)
90
91 self.log_request_success(method, url, url, body, response.status,
92 response.body, duration)
93
94 headers = {}
95 if response.headers:
96 headers = dict((k.lower(), v) for k, v in response.headers.items())
97 return response.status, headers, response.body or ''
98
99
[end of elasticsearch/connection/thrift.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticsearch/connection/thrift.py b/elasticsearch/connection/thrift.py
--- a/elasticsearch/connection/thrift.py
+++ b/elasticsearch/connection/thrift.py
@@ -1,5 +1,6 @@
from __future__ import absolute_import
from socket import timeout as SocketTimeout
+from socket import error as SocketError
import time
import logging
@@ -69,7 +70,7 @@
except SocketTimeout as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
raise ConnectionTimeout('TIMEOUT', str(e), e)
- except (TException, SocketTimeout) as e:
+ except (TException, SocketError) as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
if tclient:
try:
| {"golden_diff": "diff --git a/elasticsearch/connection/thrift.py b/elasticsearch/connection/thrift.py\n--- a/elasticsearch/connection/thrift.py\n+++ b/elasticsearch/connection/thrift.py\n@@ -1,5 +1,6 @@\n from __future__ import absolute_import\n from socket import timeout as SocketTimeout\n+from socket import error as SocketError\n import time\n import logging\n \n@@ -69,7 +70,7 @@\n except SocketTimeout as e:\n self.log_request_fail(method, url, body, time.time() - start, exception=e)\n raise ConnectionTimeout('TIMEOUT', str(e), e)\n- except (TException, SocketTimeout) as e:\n+ except (TException, SocketError) as e:\n self.log_request_fail(method, url, body, time.time() - start, exception=e)\n if tclient:\n try:\n", "issue": "max_retries is ignored when using thrift\nconnection/thrift.py does not catch socket.error in perform_request which cause max_retries to be ignored in the transport.\n\nOne can argue that this socket.error exception should be translated into a TException in the offical thrift-library but I think it's better to have it included here aswell.\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom socket import timeout as SocketTimeout\nimport time\nimport logging\n\ntry:\n from .esthrift import Rest\n from .esthrift.ttypes import Method, RestRequest\n\n from thrift.transport import TTransport, TSocket, TSSLSocket\n from thrift.protocol import TBinaryProtocol\n from thrift.Thrift import TException\n THRIFT_AVAILABLE = True\nexcept ImportError:\n THRIFT_AVAILABLE = False\n\nfrom ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout\nfrom .pooling import PoolingConnection\n\nlogger = logging.getLogger('elasticsearch')\n\nclass ThriftConnection(PoolingConnection):\n \"\"\"\n Connection using the `thrift` protocol to communicate with elasticsearch.\n\n See https://github.com/elasticsearch/elasticsearch-transport-thrift for additional info.\n \"\"\"\n transport_schema = 'thrift'\n\n def __init__(self, host='localhost', port=9500, framed_transport=False, use_ssl=False, **kwargs):\n \"\"\"\n :arg framed_transport: use `TTransport.TFramedTransport` instead of\n `TTransport.TBufferedTransport`\n \"\"\"\n if not THRIFT_AVAILABLE:\n raise ImproperlyConfigured(\"Thrift is not available.\")\n\n super(ThriftConnection, self).__init__(host=host, port=port, **kwargs)\n self._framed_transport = framed_transport\n self._tsocket_class = TSocket.TSocket\n if use_ssl:\n self._tsocket_class = TSSLSocket.TSSLSocket \n self._tsocket_args = (host, port)\n\n def _make_connection(self):\n socket = self._tsocket_class(*self._tsocket_args)\n socket.setTimeout(self.timeout * 1000.0)\n if self._framed_transport:\n transport = TTransport.TFramedTransport(socket)\n else:\n transport = TTransport.TBufferedTransport(socket)\n\n protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)\n client = Rest.Client(protocol)\n client.transport = transport\n transport.open()\n return client\n\n def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):\n request = RestRequest(method=Method._NAMES_TO_VALUES[method.upper()], uri=url,\n parameters=params, body=body)\n\n start = time.time()\n tclient = None\n try:\n tclient = self._get_connection()\n response = tclient.execute(request)\n duration = time.time() - start\n except SocketTimeout as e:\n self.log_request_fail(method, url, body, time.time() - start, exception=e)\n raise ConnectionTimeout('TIMEOUT', str(e), e)\n except (TException, SocketTimeout) as e:\n 
self.log_request_fail(method, url, body, time.time() - start, exception=e)\n if tclient:\n try:\n # try closing transport socket\n tclient.transport.close()\n except Exception as e:\n logger.warning(\n 'Exception %s occured when closing a failed thrift connection.',\n e, exc_info=True\n )\n raise ConnectionError('N/A', str(e), e)\n\n self._release_connection(tclient)\n\n if not (200 <= response.status < 300) and response.status not in ignore:\n self.log_request_fail(method, url, body, duration, response.status)\n self._raise_error(response.status, response.body)\n\n self.log_request_success(method, url, url, body, response.status,\n response.body, duration)\n\n headers = {}\n if response.headers:\n headers = dict((k.lower(), v) for k, v in response.headers.items())\n return response.status, headers, response.body or ''\n\n", "path": "elasticsearch/connection/thrift.py"}]} | 1,609 | 184 |
gh_patches_debug_35486 | rasdani/github-patches | git_diff | archlinux__archinstall-1021 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
invalid package names png++
Hi,
I try to install some packages with the "package" list in the config file, but if I use the package name png++ (I put the link under the message), it is reported as an invalid package name.
https://archlinux.org/packages/community/any/png++/
</issue>
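A standalone illustration of the likely root cause (the fix shown further below switches to `urlencode`): in an unescaped query string, `+` is decoded as a space on the server side, so `?name=png++` effectively searches for `png  `. Percent-escaping the name avoids that.

```python
from urllib.parse import urlencode

print(urlencode({'name': 'png++'}))  # -> name=png%2B%2B
```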
<code>
[start of archinstall/lib/packages/packages.py]
1 import json
2 import ssl
3 import urllib.request
4 from typing import Dict, Any, Tuple, List
5
6 from ..exceptions import PackageError, SysCallError
7 from ..models.dataclasses import PackageSearch, PackageSearchResult, LocalPackage
8 from ..pacman import run_pacman
9
10 BASE_URL_PKG_SEARCH = 'https://archlinux.org/packages/search/json/?name={package}'
11 # BASE_URL_PKG_CONTENT = 'https://archlinux.org/packages/search/json/'
12 BASE_GROUP_URL = 'https://archlinux.org/groups/search/json/?name={group}'
13
14
15 def group_search(name :str) -> List[PackageSearchResult]:
16 # TODO UPSTREAM: Implement /json/ for the groups search
17 ssl_context = ssl.create_default_context()
18 ssl_context.check_hostname = False
19 ssl_context.verify_mode = ssl.CERT_NONE
20 try:
21 response = urllib.request.urlopen(BASE_GROUP_URL.format(group=name), context=ssl_context)
22 except urllib.error.HTTPError as err:
23 if err.code == 404:
24 return []
25 else:
26 raise err
27
28 # Just to be sure some code didn't slip through the exception
29 data = response.read().decode('UTF-8')
30
31 return [PackageSearchResult(**package) for package in json.loads(data)['results']]
32
33
34 def package_search(package :str) -> PackageSearch:
35 """
36 Finds a specific package via the package database.
37 It makes a simple web-request, which might be a bit slow.
38 """
39 # TODO UPSTREAM: Implement bulk search, either support name=X&name=Y or split on space (%20 or ' ')
40 # TODO: utilize pacman cache first, upstream second.
41 ssl_context = ssl.create_default_context()
42 ssl_context.check_hostname = False
43 ssl_context.verify_mode = ssl.CERT_NONE
44 response = urllib.request.urlopen(BASE_URL_PKG_SEARCH.format(package=package), context=ssl_context)
45
46 if response.code != 200:
47 raise PackageError(f"Could not locate package: [{response.code}] {response}")
48
49 data = response.read().decode('UTF-8')
50
51 return PackageSearch(**json.loads(data))
52
53
54 def find_package(package :str) -> List[PackageSearchResult]:
55 data = package_search(package)
56 results = []
57
58 for result in data.results:
59 if result.pkgname == package:
60 results.append(result)
61
62 # If we didn't find the package in the search results,
63 # odds are it's a group package
64 if not results:
65 # Check if the package is actually a group
66 for result in group_search(package):
67 results.append(result)
68
69 return results
70
71
72 def find_packages(*names :str) -> Dict[str, Any]:
73 """
74 This function returns the search results for many packages.
75 The function itself is rather slow, so consider not sending to
76 many packages to the search query.
77 """
78 result = {}
79 for package in names:
80 for found_package in find_package(package):
81 result[package] = found_package
82
83 return result
84
85
86 def validate_package_list(packages :list) -> Tuple[list, list]:
87 """
88 Validates a list of given packages.
89 return: Tuple of lists containing valid packavges in the first and invalid
90 packages in the second entry
91 """
92 valid_packages = {package for package in packages if find_package(package)}
93 invalid_packages = set(packages) - valid_packages
94
95 return list(valid_packages), list(invalid_packages)
96
97
98 def installed_package(package :str) -> LocalPackage:
99 package_info = {}
100 try:
101 for line in run_pacman(f"-Q --info {package}"):
102 if b':' in line:
103 key, value = line.decode().split(':', 1)
104 package_info[key.strip().lower().replace(' ', '_')] = value.strip()
105 except SysCallError:
106 pass
107
108 return LocalPackage(**package_info)
109
[end of archinstall/lib/packages/packages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/archinstall/lib/packages/packages.py b/archinstall/lib/packages/packages.py
--- a/archinstall/lib/packages/packages.py
+++ b/archinstall/lib/packages/packages.py
@@ -1,25 +1,35 @@
import json
import ssl
-import urllib.request
from typing import Dict, Any, Tuple, List
+from urllib.error import HTTPError
+from urllib.parse import urlencode
+from urllib.request import urlopen
from ..exceptions import PackageError, SysCallError
from ..models.dataclasses import PackageSearch, PackageSearchResult, LocalPackage
from ..pacman import run_pacman
-BASE_URL_PKG_SEARCH = 'https://archlinux.org/packages/search/json/?name={package}'
+BASE_URL_PKG_SEARCH = 'https://archlinux.org/packages/search/json/'
# BASE_URL_PKG_CONTENT = 'https://archlinux.org/packages/search/json/'
-BASE_GROUP_URL = 'https://archlinux.org/groups/search/json/?name={group}'
+BASE_GROUP_URL = 'https://archlinux.org/groups/search/json/'
-def group_search(name :str) -> List[PackageSearchResult]:
- # TODO UPSTREAM: Implement /json/ for the groups search
+def _make_request(url: str, params: Dict) -> Any:
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
+
+ encoded = urlencode(params)
+ full_url = f'{url}?{encoded}'
+
+ return urlopen(full_url, context=ssl_context)
+
+
+def group_search(name :str) -> List[PackageSearchResult]:
+ # TODO UPSTREAM: Implement /json/ for the groups search
try:
- response = urllib.request.urlopen(BASE_GROUP_URL.format(group=name), context=ssl_context)
- except urllib.error.HTTPError as err:
+ response = _make_request(BASE_GROUP_URL, {'name': name})
+ except HTTPError as err:
if err.code == 404:
return []
else:
@@ -38,10 +48,7 @@
"""
# TODO UPSTREAM: Implement bulk search, either support name=X&name=Y or split on space (%20 or ' ')
# TODO: utilize pacman cache first, upstream second.
- ssl_context = ssl.create_default_context()
- ssl_context.check_hostname = False
- ssl_context.verify_mode = ssl.CERT_NONE
- response = urllib.request.urlopen(BASE_URL_PKG_SEARCH.format(package=package), context=ssl_context)
+ response = _make_request(BASE_URL_PKG_SEARCH, {'name': package})
if response.code != 200:
raise PackageError(f"Could not locate package: [{response.code}] {response}")
| {"golden_diff": "diff --git a/archinstall/lib/packages/packages.py b/archinstall/lib/packages/packages.py\n--- a/archinstall/lib/packages/packages.py\n+++ b/archinstall/lib/packages/packages.py\n@@ -1,25 +1,35 @@\n import json\n import ssl\n-import urllib.request\n from typing import Dict, Any, Tuple, List\n+from urllib.error import HTTPError\n+from urllib.parse import urlencode\n+from urllib.request import urlopen\n \n from ..exceptions import PackageError, SysCallError\n from ..models.dataclasses import PackageSearch, PackageSearchResult, LocalPackage\n from ..pacman import run_pacman\n \n-BASE_URL_PKG_SEARCH = 'https://archlinux.org/packages/search/json/?name={package}'\n+BASE_URL_PKG_SEARCH = 'https://archlinux.org/packages/search/json/'\n # BASE_URL_PKG_CONTENT = 'https://archlinux.org/packages/search/json/'\n-BASE_GROUP_URL = 'https://archlinux.org/groups/search/json/?name={group}'\n+BASE_GROUP_URL = 'https://archlinux.org/groups/search/json/'\n \n \n-def group_search(name :str) -> List[PackageSearchResult]:\n-\t# TODO UPSTREAM: Implement /json/ for the groups search\n+def _make_request(url: str, params: Dict) -> Any:\n \tssl_context = ssl.create_default_context()\n \tssl_context.check_hostname = False\n \tssl_context.verify_mode = ssl.CERT_NONE\n+\n+\tencoded = urlencode(params)\n+\tfull_url = f'{url}?{encoded}'\n+\n+\treturn urlopen(full_url, context=ssl_context)\n+\n+\n+def group_search(name :str) -> List[PackageSearchResult]:\n+\t# TODO UPSTREAM: Implement /json/ for the groups search\n \ttry:\n-\t\tresponse = urllib.request.urlopen(BASE_GROUP_URL.format(group=name), context=ssl_context)\n-\texcept urllib.error.HTTPError as err:\n+\t\tresponse = _make_request(BASE_GROUP_URL, {'name': name})\n+\texcept HTTPError as err:\n \t\tif err.code == 404:\n \t\t\treturn []\n \t\telse:\n@@ -38,10 +48,7 @@\n \t\"\"\"\n \t# TODO UPSTREAM: Implement bulk search, either support name=X&name=Y or split on space (%20 or ' ')\n \t# TODO: utilize pacman cache first, upstream second.\n-\tssl_context = ssl.create_default_context()\n-\tssl_context.check_hostname = False\n-\tssl_context.verify_mode = ssl.CERT_NONE\n-\tresponse = urllib.request.urlopen(BASE_URL_PKG_SEARCH.format(package=package), context=ssl_context)\n+\tresponse = _make_request(BASE_URL_PKG_SEARCH, {'name': package})\n \n \tif response.code != 200:\n \t\traise PackageError(f\"Could not locate package: [{response.code}] {response}\")\n", "issue": "invalid package names png++\nHi,\r\n\r\nI try to install some package with the \"package\" list in the config file, but if i use the package name png++ (i put the link under the message) it return as invalid package names.\r\n\r\nhttps://archlinux.org/packages/community/any/png++/\r\n\ninvalid package names png++\nHi,\r\n\r\nI try to install some package with the \"package\" list in the config file, but if i use the package name png++ (i put the link under the message) it return as invalid package names.\r\n\r\nhttps://archlinux.org/packages/community/any/png++/\r\n\n", "before_files": [{"content": "import json\nimport ssl\nimport urllib.request\nfrom typing import Dict, Any, Tuple, List\n\nfrom ..exceptions import PackageError, SysCallError\nfrom ..models.dataclasses import PackageSearch, PackageSearchResult, LocalPackage\nfrom ..pacman import run_pacman\n\nBASE_URL_PKG_SEARCH = 'https://archlinux.org/packages/search/json/?name={package}'\n# BASE_URL_PKG_CONTENT = 'https://archlinux.org/packages/search/json/'\nBASE_GROUP_URL = 'https://archlinux.org/groups/search/json/?name={group}'\n\n\ndef 
group_search(name :str) -> List[PackageSearchResult]:\n\t# TODO UPSTREAM: Implement /json/ for the groups search\n\tssl_context = ssl.create_default_context()\n\tssl_context.check_hostname = False\n\tssl_context.verify_mode = ssl.CERT_NONE\n\ttry:\n\t\tresponse = urllib.request.urlopen(BASE_GROUP_URL.format(group=name), context=ssl_context)\n\texcept urllib.error.HTTPError as err:\n\t\tif err.code == 404:\n\t\t\treturn []\n\t\telse:\n\t\t\traise err\n\n\t# Just to be sure some code didn't slip through the exception\n\tdata = response.read().decode('UTF-8')\n\n\treturn [PackageSearchResult(**package) for package in json.loads(data)['results']]\n\n\ndef package_search(package :str) -> PackageSearch:\n\t\"\"\"\n\tFinds a specific package via the package database.\n\tIt makes a simple web-request, which might be a bit slow.\n\t\"\"\"\n\t# TODO UPSTREAM: Implement bulk search, either support name=X&name=Y or split on space (%20 or ' ')\n\t# TODO: utilize pacman cache first, upstream second.\n\tssl_context = ssl.create_default_context()\n\tssl_context.check_hostname = False\n\tssl_context.verify_mode = ssl.CERT_NONE\n\tresponse = urllib.request.urlopen(BASE_URL_PKG_SEARCH.format(package=package), context=ssl_context)\n\n\tif response.code != 200:\n\t\traise PackageError(f\"Could not locate package: [{response.code}] {response}\")\n\n\tdata = response.read().decode('UTF-8')\n\n\treturn PackageSearch(**json.loads(data))\n\n\ndef find_package(package :str) -> List[PackageSearchResult]:\n\tdata = package_search(package)\n\tresults = []\n\n\tfor result in data.results:\n\t\tif result.pkgname == package:\n\t\t\tresults.append(result)\n\n\t# If we didn't find the package in the search results,\n\t# odds are it's a group package\n\tif not results:\n\t\t# Check if the package is actually a group\n\t\tfor result in group_search(package):\n\t\t\tresults.append(result)\n\n\treturn results\n\n\ndef find_packages(*names :str) -> Dict[str, Any]:\n\t\"\"\"\n\tThis function returns the search results for many packages.\n\tThe function itself is rather slow, so consider not sending to\n\tmany packages to the search query.\n\t\"\"\"\n\tresult = {}\n\tfor package in names:\n\t\tfor found_package in find_package(package):\n\t\t\tresult[package] = found_package\n\n\treturn result\n\n\ndef validate_package_list(packages :list) -> Tuple[list, list]:\n\t\"\"\"\n\tValidates a list of given packages.\n\treturn: Tuple of lists containing valid packavges in the first and invalid\n\tpackages in the second entry\n\t\"\"\"\n\tvalid_packages = {package for package in packages if find_package(package)}\n\tinvalid_packages = set(packages) - valid_packages\n\n\treturn list(valid_packages), list(invalid_packages)\n\n\ndef installed_package(package :str) -> LocalPackage:\n\tpackage_info = {}\n\ttry:\n\t\tfor line in run_pacman(f\"-Q --info {package}\"):\n\t\t\tif b':' in line:\n\t\t\t\tkey, value = line.decode().split(':', 1)\n\t\t\t\tpackage_info[key.strip().lower().replace(' ', '_')] = value.strip()\n\texcept SysCallError:\n\t\tpass\n\n\treturn LocalPackage(**package_info)\n", "path": "archinstall/lib/packages/packages.py"}]} | 1,735 | 581 |
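The root cause above is URL construction by string interpolation: a name like `png++` reaches the Arch Linux API with raw `+` characters, which the server decodes as spaces, so the lookup fails and the package is reported as invalid. The accepted patch routes every request through a `_make_request` helper built on `urllib.parse.urlencode`. A minimal standalone sketch of the difference (only the URL constant is taken from the diff; everything else is illustrative):

```python
from urllib.parse import urlencode

BASE = 'https://archlinux.org/packages/search/json/'

# Naive interpolation leaves '+' unescaped; the server decodes '+' as a
# space, so the API is effectively asked about a package named "png  ".
naive_url = BASE + '?name=' + 'png++'

# urlencode percent-encodes reserved characters, preserving the name.
params = urlencode({'name': 'png++'})
assert params == 'name=png%2B%2B'
safe_url = f'{BASE}?{params}'
print(safe_url)  # https://archlinux.org/packages/search/json/?name=png%2B%2B
```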
gh_patches_debug_11945 | rasdani/github-patches | git_diff | mdn__kuma-7185 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django User admin not working (related to stripe_customer_id)
**Summary**
<img width="1450" alt="Screen Shot 2020-05-20 at 1 01 52 PM" src="https://user-images.githubusercontent.com/26739/82475221-28e5a080-9a9a-11ea-948f-be97bf2c15d6.png">
There *exists* a user with that username.
The user is there. I can manually type in the URL http://localhost.org:8000/admin/users/user/1311/change/ and it works.
It's related to `IsStripeCustomer` in `kuma/users/admin.py`.
**Steps To Reproduce (STR)**
1. Have a user with and without a `stripe_customer_id`
2. Go to http://localhost.org:8000/admin/users/user/
3. Is the user appearing?
**Actual behavior**
_What actually happened?_
**Expected behavior**
_What did you expect to happen?_
**Additional context**
Pretty sure it's the `IsStripeCustomer` thing that's filtering on the empty string even if you didn't filter on it.
</issue>
<code>
[start of kuma/users/admin.py]
1 from django.contrib import admin
2 from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
3 from django.utils.html import format_html
4
5 from kuma.core.urlresolvers import reverse
6 from kuma.core.utils import urlparams
7
8 from .models import User, UserBan, UserSubscription
9
10
11 @admin.register(UserBan)
12 class UserBanAdmin(admin.ModelAdmin):
13 fields = ("user", "by", "reason", "is_active")
14 list_display = ("user", "by", "reason", "is_active")
15 list_editable = ("is_active",)
16 list_filter = ("is_active",)
17 raw_id_fields = ("user", "by")
18 search_fields = ("user__username", "reason", "by__username")
19
20
21 class IsStripeCustomer(admin.SimpleListFilter):
22 title = "is Stripe customer"
23 parameter_name = "is_stripe_customer"
24
25 def lookups(self, request, model_admin):
26 return (
27 (True, "Yes"),
28 (False, "No"),
29 )
30
31 def queryset(self, request, queryset):
32 if self.value():
33 return queryset.exclude(stripe_customer_id="")
34 else:
35 return queryset.filter(stripe_customer_id="")
36
37
38 @admin.register(User)
39 class UserAdmin(BaseUserAdmin):
40 """
41 Extends the admin view of users to show date_joined field
42 add a filter on the field too
43 """
44
45 fieldsets = BaseUserAdmin.fieldsets + (
46 ("Subscription", {"fields": ("stripe_customer_id", "subscriber_number")}),
47 )
48 readonly_fields = BaseUserAdmin.readonly_fields + (
49 "stripe_customer_id",
50 "subscriber_number",
51 )
52
53 list_display = (
54 "username",
55 "fullname",
56 "email",
57 "revisions",
58 "date_joined",
59 "is_staff",
60 "is_active",
61 )
62 list_filter = (
63 "is_staff",
64 "is_superuser",
65 "is_active",
66 "date_joined",
67 "groups",
68 IsStripeCustomer,
69 )
70 ordering = ("-date_joined",)
71 search_fields = (
72 "username",
73 "title",
74 "fullname",
75 "organization",
76 "location",
77 "email",
78 )
79
80 def revisions(self, obj):
81 """HTML link to user's revisions with count"""
82 link = urlparams(reverse("dashboards.revisions"), user=obj.username)
83 count = obj.created_revisions.count()
84 return format_html('<a href="{}"><strong>{}</strong></a>', link, count)
85
86
87 @admin.register(UserSubscription)
88 class UserSubscriptionAdmin(admin.ModelAdmin):
89 readonly_fields = ("user", "updated", "created", "stripe_subscription_id")
90 list_display = ("user", "canceled", "updated", "created")
91 search_fields = ("user__username",)
92 list_filter = ("canceled", "created")
93 ordering = ("updated",)
94
[end of kuma/users/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/users/admin.py b/kuma/users/admin.py
--- a/kuma/users/admin.py
+++ b/kuma/users/admin.py
@@ -24,15 +24,18 @@
def lookups(self, request, model_admin):
return (
- (True, "Yes"),
- (False, "No"),
+ ("yes", "Yes"),
+ ("no", "No"),
)
def queryset(self, request, queryset):
- if self.value():
+ value = self.value()
+ if value == "yes":
return queryset.exclude(stripe_customer_id="")
- else:
+ elif value == "no":
return queryset.filter(stripe_customer_id="")
+ else:
+ return queryset
@admin.register(User)
| {"golden_diff": "diff --git a/kuma/users/admin.py b/kuma/users/admin.py\n--- a/kuma/users/admin.py\n+++ b/kuma/users/admin.py\n@@ -24,15 +24,18 @@\n \n def lookups(self, request, model_admin):\n return (\n- (True, \"Yes\"),\n- (False, \"No\"),\n+ (\"yes\", \"Yes\"),\n+ (\"no\", \"No\"),\n )\n \n def queryset(self, request, queryset):\n- if self.value():\n+ value = self.value()\n+ if value == \"yes\":\n return queryset.exclude(stripe_customer_id=\"\")\n- else:\n+ elif value == \"no\":\n return queryset.filter(stripe_customer_id=\"\")\n+ else:\n+ return queryset\n \n \n @admin.register(User)\n", "issue": "Django User admin not working related to stripe_customer_id\n**Summary**\r\n<img width=\"1450\" alt=\"Screen Shot 2020-05-20 at 1 01 52 PM\" src=\"https://user-images.githubusercontent.com/26739/82475221-28e5a080-9a9a-11ea-948f-be97bf2c15d6.png\">\r\n\r\nThere *exists* a user with that username. \r\nThe user is there. I can manually type in the URL http://localhost.org:8000/admin/users/user/1311/change/ and it works. \r\n\r\nIt's related to `IsStripeCustomer` in `kuma/users/admin.py`.\r\n\r\n**Steps To Reproduce (STR)**\r\n\r\n1. Have a user with and without a `stripe_customer_id`\r\n2. Go to http://localhost.org:8000/admin/users/user/\r\n3. Is the user appearing?\r\n\r\n\r\n**Actual behavior**\r\n_What actually happened?_\r\n\r\n\r\n**Expected behavior**\r\n_What did you expect to happen?_\r\n\r\n\r\n**Additional context**\r\nPretty sure it's the `IsStripeCustomer` thing that's filtering on the empty string even if you didn't filter on it. \n", "before_files": [{"content": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.utils.html import format_html\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import urlparams\n\nfrom .models import User, UserBan, UserSubscription\n\n\[email protected](UserBan)\nclass UserBanAdmin(admin.ModelAdmin):\n fields = (\"user\", \"by\", \"reason\", \"is_active\")\n list_display = (\"user\", \"by\", \"reason\", \"is_active\")\n list_editable = (\"is_active\",)\n list_filter = (\"is_active\",)\n raw_id_fields = (\"user\", \"by\")\n search_fields = (\"user__username\", \"reason\", \"by__username\")\n\n\nclass IsStripeCustomer(admin.SimpleListFilter):\n title = \"is Stripe customer\"\n parameter_name = \"is_stripe_customer\"\n\n def lookups(self, request, model_admin):\n return (\n (True, \"Yes\"),\n (False, \"No\"),\n )\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.exclude(stripe_customer_id=\"\")\n else:\n return queryset.filter(stripe_customer_id=\"\")\n\n\[email protected](User)\nclass UserAdmin(BaseUserAdmin):\n \"\"\"\n Extends the admin view of users to show date_joined field\n add a filter on the field too\n \"\"\"\n\n fieldsets = BaseUserAdmin.fieldsets + (\n (\"Subscription\", {\"fields\": (\"stripe_customer_id\", \"subscriber_number\")}),\n )\n readonly_fields = BaseUserAdmin.readonly_fields + (\n \"stripe_customer_id\",\n \"subscriber_number\",\n )\n\n list_display = (\n \"username\",\n \"fullname\",\n \"email\",\n \"revisions\",\n \"date_joined\",\n \"is_staff\",\n \"is_active\",\n )\n list_filter = (\n \"is_staff\",\n \"is_superuser\",\n \"is_active\",\n \"date_joined\",\n \"groups\",\n IsStripeCustomer,\n )\n ordering = (\"-date_joined\",)\n search_fields = (\n \"username\",\n \"title\",\n \"fullname\",\n \"organization\",\n \"location\",\n \"email\",\n )\n\n def revisions(self, obj):\n \"\"\"HTML link to user's revisions with 
count\"\"\"\n link = urlparams(reverse(\"dashboards.revisions\"), user=obj.username)\n count = obj.created_revisions.count()\n return format_html('<a href=\"{}\"><strong>{}</strong></a>', link, count)\n\n\[email protected](UserSubscription)\nclass UserSubscriptionAdmin(admin.ModelAdmin):\n readonly_fields = (\"user\", \"updated\", \"created\", \"stripe_subscription_id\")\n list_display = (\"user\", \"canceled\", \"updated\", \"created\")\n search_fields = (\"user__username\",)\n list_filter = (\"canceled\", \"created\")\n ordering = (\"updated\",)\n", "path": "kuma/users/admin.py"}]} | 1,602 | 172 |
gh_patches_debug_15191 | rasdani/github-patches | git_diff | coqui-ai__TTS-611 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] `tts_models/en/vctk/sc-glow-tts` fails to find `speakers.json` file
**Describe the bug**
`tts_models/en/vctk/sc-glow-tts` fails to find `speakers.json` file.
**To Reproduce**
```
$ tts --model_name tts_models/en/vctk/sc-glow-tts --text "test" --out_path tmp.wav
> tts_models/en/vctk/sc-glow-tts is already downloaded.
> vocoder_models/en/vctk/hifigan_v2 is already downloaded.
> Using model: glow_tts
Traceback (most recent call last):
File "/usr/local/bin/tts", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.8/dist-packages/TTS/bin/synthesize.py", line 226, in main
synthesizer = Synthesizer(
File "/usr/local/lib/python3.8/dist-packages/TTS/utils/synthesizer.py", line 73, in __init__
self._load_tts(tts_checkpoint, tts_config_path, use_cuda)
File "/usr/local/lib/python3.8/dist-packages/TTS/utils/synthesizer.py", line 135, in _load_tts
self.tts_model = setup_tts_model(config=self.tts_config)
File "/usr/local/lib/python3.8/dist-packages/TTS/tts/models/__init__.py", line 27, in setup_model
model = MyModel(config)
File "/usr/local/lib/python3.8/dist-packages/TTS/tts/models/glow_tts.py", line 57, in __init__
self.init_multispeaker(config)
File "/usr/local/lib/python3.8/dist-packages/TTS/tts/models/glow_tts.py", line 106, in init_multispeaker
self.speaker_manager = get_speaker_manager(config, data=data)
File "/usr/local/lib/python3.8/dist-packages/TTS/tts/utils/speakers.py", line 354, in get_speaker_manager
speaker_manager.set_d_vectors_from_file(c.d_vector_file)
File "/usr/local/lib/python3.8/dist-packages/TTS/tts/utils/speakers.py", line 161, in set_d_vectors_from_file
self.d_vectors = self._load_json(file_path)
File "/usr/local/lib/python3.8/dist-packages/TTS/tts/utils/speakers.py", line 85, in _load_json
with open(json_file_path) as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/erogol/.local/share/tts/tts_models--en--vctk--sc-glow-tts/speakers.json'
```
The problem appears to be on line 143 of the `config.json` within the [model ZIP file](https://github.com/coqui-ai/TTS/releases/download/v0.1.0/tts_models--en--vctk--sc-glow-tts.zip):
```
"d_vector_file": "/home/erogol/.local/share/tts/tts_models--en--vctk--sc-glow-tts/speakers.json"
```
If I change that line in `~/.local/share/tts/tts_models--en--vctk--sc-glow-tts/config.json` to my own local path, then it works. I haven't been able to figure out how to make a relative path work.
**Environment (please complete the following information):**
Using the following Dockerfile:
```
FROM ubuntu:focal
ENV DEBIAN_FRONTEND=noninteractive
RUN apt update && \
apt install -y ca-certificates && update-ca-certificates && \
apt install -y espeak-ng git libsndfile1 python3 python3-pip
RUN python3 -m pip install git+git://github.com/coqui-ai/[email protected]
```
</issue>
<code>
[start of TTS/utils/trainer_utils.py]
1 import importlib
2 from typing import Dict
3
4 import torch
5
6 from TTS.utils.training import NoamLR
7
8
9 def is_apex_available():
10 return importlib.util.find_spec("apex") is not None
11
12
13 def setup_torch_training_env(cudnn_enable, cudnn_benchmark):
14 torch.backends.cudnn.enabled = cudnn_enable
15 torch.backends.cudnn.benchmark = cudnn_benchmark
16 torch.manual_seed(54321)
17 use_cuda = torch.cuda.is_available()
18 num_gpus = torch.cuda.device_count()
19 print(" > Using CUDA: ", use_cuda)
20 print(" > Number of GPUs: ", num_gpus)
21 return use_cuda, num_gpus
22
23
24 def get_scheduler(
25 lr_scheduler: str, lr_scheduler_params: Dict, optimizer: torch.optim.Optimizer
26 ) -> torch.optim.lr_scheduler._LRScheduler: # pylint: disable=protected-access
27 """Find, initialize and return a scheduler.
28
29 Args:
30 lr_scheduler (str): Scheduler name.
31 lr_scheduler_params (Dict): Scheduler parameters.
32 optimizer (torch.optim.Optimizer): Optimizer to pass to the scheduler.
33
34 Returns:
35 torch.optim.lr_scheduler._LRScheduler: Functional scheduler.
36 """
37 if lr_scheduler is None:
38 return None
39 if lr_scheduler.lower() == "noamlr":
40 scheduler = NoamLR
41 else:
42 scheduler = getattr(torch.optim.lr_scheduler, lr_scheduler)
43 return scheduler(optimizer, **lr_scheduler_params)
44
45
46 def get_optimizer(
47 optimizer_name: str, optimizer_params: dict, lr: float, model: torch.nn.Module
48 ) -> torch.optim.Optimizer:
49     """Find, initialize and return an optimizer.
50
51 Args:
52 optimizer_name (str): Optimizer name.
53 optimizer_params (dict): Optimizer parameters.
54 lr (float): Initial learning rate.
55 model (torch.nn.Module): Model to pass to the optimizer.
56
57 Returns:
58 torch.optim.Optimizer: Functional optimizer.
59 """
60 if optimizer_name.lower() == "radam":
61 module = importlib.import_module("TTS.utils.radam")
62 optimizer = getattr(module, "RAdam")
63 else:
64 optimizer = getattr(torch.optim, optimizer_name)
65 return optimizer(model.parameters(), lr=lr, **optimizer_params)
66
[end of TTS/utils/trainer_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/TTS/utils/trainer_utils.py b/TTS/utils/trainer_utils.py
--- a/TTS/utils/trainer_utils.py
+++ b/TTS/utils/trainer_utils.py
@@ -11,11 +11,15 @@
def setup_torch_training_env(cudnn_enable, cudnn_benchmark):
+ num_gpus = torch.cuda.device_count()
+ if num_gpus > 1:
+ raise RuntimeError(
+ f" [!] {num_gpus} active GPUs. Define the target GPU by `CUDA_VISIBLE_DEVICES`. For multi-gpu training use `TTS/bin/distribute.py`."
+ )
torch.backends.cudnn.enabled = cudnn_enable
torch.backends.cudnn.benchmark = cudnn_benchmark
torch.manual_seed(54321)
use_cuda = torch.cuda.is_available()
- num_gpus = torch.cuda.device_count()
print(" > Using CUDA: ", use_cuda)
print(" > Number of GPUs: ", num_gpus)
return use_cuda, num_gpus
| {"golden_diff": "diff --git a/TTS/utils/trainer_utils.py b/TTS/utils/trainer_utils.py\n--- a/TTS/utils/trainer_utils.py\n+++ b/TTS/utils/trainer_utils.py\n@@ -11,11 +11,15 @@\n \n \n def setup_torch_training_env(cudnn_enable, cudnn_benchmark):\n+ num_gpus = torch.cuda.device_count()\n+ if num_gpus > 1:\n+ raise RuntimeError(\n+ f\" [!] {num_gpus} active GPUs. Define the target GPU by `CUDA_VISIBLE_DEVICES`. For multi-gpu training use `TTS/bin/distribute.py`.\"\n+ )\n torch.backends.cudnn.enabled = cudnn_enable\n torch.backends.cudnn.benchmark = cudnn_benchmark\n torch.manual_seed(54321)\n use_cuda = torch.cuda.is_available()\n- num_gpus = torch.cuda.device_count()\n print(\" > Using CUDA: \", use_cuda)\n print(\" > Number of GPUs: \", num_gpus)\n return use_cuda, num_gpus\n", "issue": "[Bug] `tts_models/en/vctk/sc-glow-tts` fails to find `speakers.json` file\n**Describe the bug**\r\n`tts_models/en/vctk/sc-glow-tts` fails to find `speakers.json` file.\r\n\r\n**To Reproduce**\r\n```\r\n$ tts --model_name tts_models/en/vctk/sc-glow-tts --text \"test\" --out_path tmp.wav\r\n > tts_models/en/vctk/sc-glow-tts is already downloaded.\r\n > vocoder_models/en/vctk/hifigan_v2 is already downloaded.\r\n > Using model: glow_tts\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/tts\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.8/dist-packages/TTS/bin/synthesize.py\", line 226, in main\r\n synthesizer = Synthesizer(\r\n File \"/usr/local/lib/python3.8/dist-packages/TTS/utils/synthesizer.py\", line 73, in __init__\r\n self._load_tts(tts_checkpoint, tts_config_path, use_cuda)\r\n File \"/usr/local/lib/python3.8/dist-packages/TTS/utils/synthesizer.py\", line 135, in _load_tts\r\n self.tts_model = setup_tts_model(config=self.tts_config)\r\n File \"/usr/local/lib/python3.8/dist-packages/TTS/tts/models/__init__.py\", line 27, in setup_model\r\n model = MyModel(config)\r\n File \"/usr/local/lib/python3.8/dist-packages/TTS/tts/models/glow_tts.py\", line 57, in __init__\r\n self.init_multispeaker(config)\r\n File \"/usr/local/lib/python3.8/dist-packages/TTS/tts/models/glow_tts.py\", line 106, in init_multispeaker\r\n self.speaker_manager = get_speaker_manager(config, data=data)\r\n File \"/usr/local/lib/python3.8/dist-packages/TTS/tts/utils/speakers.py\", line 354, in get_speaker_manager\r\n speaker_manager.set_d_vectors_from_file(c.d_vector_file)\r\n File \"/usr/local/lib/python3.8/dist-packages/TTS/tts/utils/speakers.py\", line 161, in set_d_vectors_from_file\r\n self.d_vectors = self._load_json(file_path)\r\n File \"/usr/local/lib/python3.8/dist-packages/TTS/tts/utils/speakers.py\", line 85, in _load_json\r\n with open(json_file_path) as f:\r\nFileNotFoundError: [Errno 2] No such file or directory: '/home/erogol/.local/share/tts/tts_models--en--vctk--sc-glow-tts/speakers.json'\r\n```\r\n\r\nThe problem appears to be on line 143 of the `config.json` within the [model ZIP file](https://github.com/coqui-ai/TTS/releases/download/v0.1.0/tts_models--en--vctk--sc-glow-tts.zip):\r\n```\r\n\"d_vector_file\": \"/home/erogol/.local/share/tts/tts_models--en--vctk--sc-glow-tts/speakers.json\"\r\n```\r\n\r\nIf I change that line in `~/.local/share/tts/tts_models--en--vctk--sc-glow-tts/config.json` to my own local path, then it works. 
I haven't been able to figure out how to make a relative path work.\r\n\r\n**Environment (please complete the following information):**\r\nUsing the following Dockerfile:\r\n```\r\nFROM ubuntu:focal\r\nENV DEBIAN_FRONTEND=noninteractive \r\nRUN apt update && \\\r\n apt install -y ca-certificates && update-ca-certificates && \\\r\n apt install -y espeak-ng git libsndfile1 python3 python3-pip\r\nRUN python3 -m pip install git+git://github.com/coqui-ai/[email protected]\r\n```\r\n\n", "before_files": [{"content": "import importlib\nfrom typing import Dict\n\nimport torch\n\nfrom TTS.utils.training import NoamLR\n\n\ndef is_apex_available():\n return importlib.util.find_spec(\"apex\") is not None\n\n\ndef setup_torch_training_env(cudnn_enable, cudnn_benchmark):\n torch.backends.cudnn.enabled = cudnn_enable\n torch.backends.cudnn.benchmark = cudnn_benchmark\n torch.manual_seed(54321)\n use_cuda = torch.cuda.is_available()\n num_gpus = torch.cuda.device_count()\n print(\" > Using CUDA: \", use_cuda)\n print(\" > Number of GPUs: \", num_gpus)\n return use_cuda, num_gpus\n\n\ndef get_scheduler(\n lr_scheduler: str, lr_scheduler_params: Dict, optimizer: torch.optim.Optimizer\n) -> torch.optim.lr_scheduler._LRScheduler: # pylint: disable=protected-access\n \"\"\"Find, initialize and return a scheduler.\n\n Args:\n lr_scheduler (str): Scheduler name.\n lr_scheduler_params (Dict): Scheduler parameters.\n optimizer (torch.optim.Optimizer): Optimizer to pass to the scheduler.\n\n Returns:\n torch.optim.lr_scheduler._LRScheduler: Functional scheduler.\n \"\"\"\n if lr_scheduler is None:\n return None\n if lr_scheduler.lower() == \"noamlr\":\n scheduler = NoamLR\n else:\n scheduler = getattr(torch.optim.lr_scheduler, lr_scheduler)\n return scheduler(optimizer, **lr_scheduler_params)\n\n\ndef get_optimizer(\n optimizer_name: str, optimizer_params: dict, lr: float, model: torch.nn.Module\n) -> torch.optim.Optimizer:\n \"\"\"Find, initialize and return a optimizer.\n\n Args:\n optimizer_name (str): Optimizer name.\n optimizer_params (dict): Optimizer parameters.\n lr (float): Initial learning rate.\n model (torch.nn.Module): Model to pass to the optimizer.\n\n Returns:\n torch.optim.Optimizer: Functional optimizer.\n \"\"\"\n if optimizer_name.lower() == \"radam\":\n module = importlib.import_module(\"TTS.utils.radam\")\n optimizer = getattr(module, \"RAdam\")\n else:\n optimizer = getattr(torch.optim, optimizer_name)\n return optimizer(model.parameters(), lr=lr, **optimizer_params)\n", "path": "TTS/utils/trainer_utils.py"}]} | 2,021 | 230 |
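Note that the accepted patch addresses a different failure mode than the hard-coded `d_vector_file` path in the report: it makes `setup_torch_training_env` fail fast when more than one CUDA device is visible, before any cuDNN flags are set. The same guard, lifted into a standalone helper (error text paraphrased from the diff):

```python
import torch

def require_single_visible_gpu() -> int:
    """Raise if several CUDA devices are visible to this process."""
    num_gpus = torch.cuda.device_count()
    if num_gpus > 1:
        raise RuntimeError(
            f"{num_gpus} active GPUs. Select one with CUDA_VISIBLE_DEVICES, "
            "or use the distributed entry point for multi-GPU training."
        )
    return num_gpus
```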
gh_patches_debug_18697 | rasdani/github-patches | git_diff | graspologic-org__graspologic-345 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
deprecate python 3.5
sklearn 0.23 and above don't support 3.5
</issue>
<code>
[start of setup.py]
1 import os
2 import sys
3 from setuptools import setup, find_packages
4 from sys import platform
5
6 PACKAGE_NAME = "graspy"
7 DESCRIPTION = "A set of python modules for graph statistics"
8 with open("README.md", "r") as f:
9 LONG_DESCRIPTION = f.read()
10 AUTHOR = ("Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand",)
11 AUTHOR_EMAIL = "[email protected]"
12 URL = "https://github.com/neurodata/graspy"
13 MINIMUM_PYTHON_VERSION = 3, 5 # Minimum of Python 3.5
14 REQUIRED_PACKAGES = [
15 "networkx>=2.1",
16 "numpy>=1.8.1",
17 "scikit-learn>=0.19.1",
18 "scipy>=1.1.0",
19 "seaborn>=0.9.0",
20 "matplotlib>=3.0.0",
21 ]
22
23 # Find GraSPy version.
24 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
25 for line in open(os.path.join(PROJECT_PATH, "graspy", "__init__.py")):
26 if line.startswith("__version__ = "):
27 VERSION = line.strip().split()[2][1:-1]
28
29
30 def check_python_version():
31 """Exit when the Python version is too low."""
32 if sys.version_info < MINIMUM_PYTHON_VERSION:
33 sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))
34
35
36 check_python_version()
37
38 setup(
39 name=PACKAGE_NAME,
40 version=VERSION,
41 description=DESCRIPTION,
42 long_description=LONG_DESCRIPTION,
43 long_description_content_type="text/markdown",
44 author=AUTHOR,
45 author_email=AUTHOR_EMAIL,
46 install_requires=REQUIRED_PACKAGES,
47 url=URL,
48 license="Apache License 2.0",
49 classifiers=[
50 "Development Status :: 3 - Alpha",
51 "Intended Audience :: Science/Research",
52 "Topic :: Scientific/Engineering :: Mathematics",
53 "License :: OSI Approved :: Apache Software License",
54 "Programming Language :: Python :: 3",
55 "Programming Language :: Python :: 3.5",
56 "Programming Language :: Python :: 3.6",
57 "Programming Language :: Python :: 3.7",
58 ],
59 packages=find_packages(),
60 include_package_data=True,
61 )
62
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
AUTHOR = ("Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand",)
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/neurodata/graspy"
-MINIMUM_PYTHON_VERSION = 3, 5 # Minimum of Python 3.5
+MINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5
REQUIRED_PACKAGES = [
"networkx>=2.1",
"numpy>=1.8.1",
@@ -52,7 +52,6 @@
"Topic :: Scientific/Engineering :: Mathematics",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n AUTHOR = (\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",)\n AUTHOR_EMAIL = \"[email protected]\"\n URL = \"https://github.com/neurodata/graspy\"\n-MINIMUM_PYTHON_VERSION = 3, 5 # Minimum of Python 3.5\n+MINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5\n REQUIRED_PACKAGES = [\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n@@ -52,7 +52,6 @@\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n", "issue": "deprecate python 3.5\nsklearn 0.23 and above don't support 3.5\n", "before_files": [{"content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",)\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/neurodata/graspy\"\nMINIMUM_PYTHON_VERSION = 3, 5 # Minimum of Python 3.5\nREQUIRED_PACKAGES = [\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n]\n\n# Find GraSPy version.\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nfor line in open(os.path.join(PROJECT_PATH, \"graspy\", \"__init__.py\")):\n if line.startswith(\"__version__ = \"):\n VERSION = line.strip().split()[2][1:-1]\n\n\ndef check_python_version():\n \"\"\"Exit when the Python version is too low.\"\"\"\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\n\ncheck_python_version()\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n install_requires=REQUIRED_PACKAGES,\n url=URL,\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n)\n", "path": "setup.py"}]} | 1,170 | 232 |
gh_patches_debug_4061 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-8052 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pin click
resolves #8048
### Description
Pin main to `click>=8.1.1,<8.1.4`
### Checklist
- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] This PR includes tests, or tests are not required/relevant for this PR
- [ ] I have [opened an issue to add/update docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose), or docs changes are not required/relevant for this PR
- [ ] I have run `changie new` to [create a changelog entry](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)
</issue>
<code>
[start of core/setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 7, 2):
6 print("Error: dbt does not support this version of Python.")
7 print("Please upgrade to Python 3.7.2 or higher.")
8 sys.exit(1)
9
10
11 from setuptools import setup
12
13 try:
14 from setuptools import find_namespace_packages
15 except ImportError:
16 # the user has a downlevel version of setuptools.
17 print("Error: dbt requires setuptools v40.1.0 or higher.")
18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again")
19 sys.exit(1)
20
21
22 this_directory = os.path.abspath(os.path.dirname(__file__))
23 with open(os.path.join(this_directory, "README.md")) as f:
24 long_description = f.read()
25
26
27 package_name = "dbt-core"
28 package_version = "1.5.2"
29 description = """With dbt, data analysts and engineers can build analytics \
30 the way engineers build applications."""
31
32
33 setup(
34 name=package_name,
35 version=package_version,
36 description=description,
37 long_description=long_description,
38 long_description_content_type="text/markdown",
39 author="dbt Labs",
40 author_email="[email protected]",
41 url="https://github.com/dbt-labs/dbt-core",
42 packages=find_namespace_packages(include=["dbt", "dbt.*"]),
43 include_package_data=True,
44 test_suite="test",
45 entry_points={
46 "console_scripts": ["dbt = dbt.cli.main:cli"],
47 },
48 install_requires=[
49 "Jinja2==3.1.2",
50 "agate>=1.6,<1.7.1",
51 "click>=7.0,<9",
52 "colorama>=0.3.9,<0.4.7",
53 "hologram>=0.0.14,<=0.0.16",
54 "isodate>=0.6,<0.7",
55 "logbook>=1.5,<1.6",
56 "mashumaro[msgpack]==3.6",
57 "minimal-snowplow-tracker==0.0.2",
58 "networkx>=2.3,<2.8.1;python_version<'3.8'",
59 "networkx>=2.3,<3;python_version>='3.8'",
60 "packaging>20.9",
61 "sqlparse>=0.2.3",
62 "dbt-extractor~=0.4.1",
63 "typing-extensions>=3.7.4",
64 "werkzeug>=1,<3",
65 "pathspec>=0.9,<0.12",
66 "protobuf>=4.0.0",
67 "pytz>=2015.7",
68 # the following are all to match snowflake-connector-python
69 "requests<3.0.0",
70 "idna>=2.5,<4",
71 "cffi>=1.9,<2.0.0",
72 "pyyaml>=6.0",
73 ],
74 zip_safe=False,
75 classifiers=[
76 "Development Status :: 5 - Production/Stable",
77 "License :: OSI Approved :: Apache Software License",
78 "Operating System :: Microsoft :: Windows",
79 "Operating System :: MacOS :: MacOS X",
80 "Operating System :: POSIX :: Linux",
81 "Programming Language :: Python :: 3.7",
82 "Programming Language :: Python :: 3.8",
83 "Programming Language :: Python :: 3.9",
84 "Programming Language :: Python :: 3.10",
85 "Programming Language :: Python :: 3.11",
86 ],
87 python_requires=">=3.7.2",
88 )
89
[end of core/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -48,7 +48,8 @@
install_requires=[
"Jinja2==3.1.2",
"agate>=1.6,<1.7.1",
- "click>=7.0,<9",
+ # temporarily pinning click for mypy failures: https://github.com/pallets/click/issues/2558
+ "click>=7.0,<8.1.4",
"colorama>=0.3.9,<0.4.7",
"hologram>=0.0.14,<=0.0.16",
"isodate>=0.6,<0.7",
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -48,7 +48,8 @@\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.7.1\",\n- \"click>=7.0,<9\",\n+ # temporarily pinning click for mypy failures: https://github.com/pallets/click/issues/2558\n+ \"click>=7.0,<8.1.4\",\n \"colorama>=0.3.9,<0.4.7\",\n \"hologram>=0.0.14,<=0.0.16\",\n \"isodate>=0.6,<0.7\",\n", "issue": "pin click\nresolves #8048 \r\n\r\n### Description\r\n\r\nPin main to `click>=8.1.1,<8.1.4`\r\n\r\n### Checklist\r\n\r\n- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me\r\n- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)\r\n- [ ] I have run this code in development and it appears to resolve the stated issue\r\n- [ ] This PR includes tests, or tests are not required/relevant for this PR\r\n- [ ] I have [opened an issue to add/update docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose), or docs changes are not required/relevant for this PR\r\n- [ ] I have run `changie new` to [create a changelog entry](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.5.2\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.cli.main:cli\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.7.1\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.7\",\n \"hologram>=0.0.14,<=0.0.16\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.6\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>20.9\",\n \"sqlparse>=0.2.3\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec>=0.9,<0.12\",\n \"protobuf>=4.0.0\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software 
License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}]} | 1,731 | 172 |
gh_patches_debug_33773 | rasdani/github-patches | git_diff | netbox-community__netbox-14131 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Removing Module Bays with Children Power Ports breaks Power Ports count
### NetBox version
v3.6.4
### Python version
3.8
### Steps to Reproduce
1. Create module types for the device (for example, PWR-MX960-4100-AC with 2 power ports):

2. In Devices, add a PEM module to the chassis device (for example, an mx960):

3. Check the Power Ports count:

4. Delete the module from the device:

### Expected Behavior
For the Power Ports count to be correct
### Observed Behavior
The port counter has not decreased
</issue>
<code>
[start of netbox/utilities/counters.py]
1 from django.apps import apps
2 from django.db.models import F, Count, OuterRef, Subquery
3 from django.db.models.signals import post_delete, post_save
4
5 from netbox.registry import registry
6 from .fields import CounterCacheField
7
8
9 def get_counters_for_model(model):
10 """
11 Return field mappings for all counters registered to the given model.
12 """
13 return registry['counter_fields'][model].items()
14
15
16 def update_counter(model, pk, counter_name, value):
17 """
18 Increment or decrement a counter field on an object identified by its model and primary key (PK). Positive values
19 will increment; negative values will decrement.
20 """
21 model.objects.filter(pk=pk).update(
22 **{counter_name: F(counter_name) + value}
23 )
24
25
26 def update_counts(model, field_name, related_query):
27 """
28 Perform a bulk update for the given model and counter field. For example,
29
30 update_counts(Device, '_interface_count', 'interfaces')
31
32 will effectively set
33
34 Device.objects.update(_interface_count=Count('interfaces'))
35 """
36 subquery = Subquery(
37 model.objects.filter(pk=OuterRef('pk')).annotate(_count=Count(related_query)).values('_count')
38 )
39 return model.objects.update(**{
40 field_name: subquery
41 })
42
43
44 #
45 # Signal handlers
46 #
47
48 def post_save_receiver(sender, instance, created, **kwargs):
49 """
50 Update counter fields on related objects when a TrackingModelMixin subclass is created or modified.
51 """
52 for field_name, counter_name in get_counters_for_model(sender):
53 parent_model = sender._meta.get_field(field_name).related_model
54 new_pk = getattr(instance, field_name, None)
55 has_old_field = field_name in instance.tracker
56 old_pk = instance.tracker.get(field_name) if has_old_field else None
57
58 # Update the counters on the old and/or new parents as needed
59 if old_pk is not None:
60 update_counter(parent_model, old_pk, counter_name, -1)
61 if new_pk is not None and (has_old_field or created):
62 update_counter(parent_model, new_pk, counter_name, 1)
63
64
65 def post_delete_receiver(sender, instance, origin, **kwargs):
66 """
67 Update counter fields on related objects when a TrackingModelMixin subclass is deleted.
68 """
69 for field_name, counter_name in get_counters_for_model(sender):
70 parent_model = sender._meta.get_field(field_name).related_model
71 parent_pk = getattr(instance, field_name, None)
72
73 # Decrement the parent's counter by one
74 if parent_pk is not None:
75 # MPTT sends two delete signals for child elements so guard against multiple decrements
76 if not origin or origin == instance:
77 update_counter(parent_model, parent_pk, counter_name, -1)
78
79
80 #
81 # Registration
82 #
83
84 def connect_counters(*models):
85 """
86 Register counter fields and connect post_save & post_delete signal handlers for the affected models.
87 """
88 for model in models:
89
90 # Find all CounterCacheFields on the model
91 counter_fields = [
92 field for field in model._meta.get_fields() if type(field) is CounterCacheField
93 ]
94
95 for field in counter_fields:
96 to_model = apps.get_model(field.to_model_name)
97
98 # Register the counter in the registry
99 change_tracking_fields = registry['counter_fields'][to_model]
100 change_tracking_fields[f"{field.to_field_name}_id"] = field.name
101
102 # Connect the post_save and post_delete handlers
103 post_save.connect(
104 post_save_receiver,
105 sender=to_model,
106 weak=False,
107 dispatch_uid=f'{model._meta.label}.{field.name}'
108 )
109 post_delete.connect(
110 post_delete_receiver,
111 sender=to_model,
112 weak=False,
113 dispatch_uid=f'{model._meta.label}.{field.name}'
114 )
115
[end of netbox/utilities/counters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/utilities/counters.py b/netbox/utilities/counters.py
--- a/netbox/utilities/counters.py
+++ b/netbox/utilities/counters.py
@@ -1,6 +1,6 @@
from django.apps import apps
from django.db.models import F, Count, OuterRef, Subquery
-from django.db.models.signals import post_delete, post_save
+from django.db.models.signals import post_delete, post_save, pre_delete
from netbox.registry import registry
from .fields import CounterCacheField
@@ -62,6 +62,12 @@
update_counter(parent_model, new_pk, counter_name, 1)
+def pre_delete_receiver(sender, instance, origin, **kwargs):
+ model = instance._meta.model
+ if not model.objects.filter(pk=instance.pk).exists():
+ instance._previously_removed = True
+
+
def post_delete_receiver(sender, instance, origin, **kwargs):
"""
Update counter fields on related objects when a TrackingModelMixin subclass is deleted.
@@ -71,10 +77,8 @@
parent_pk = getattr(instance, field_name, None)
# Decrement the parent's counter by one
- if parent_pk is not None:
- # MPTT sends two delete signals for child elements so guard against multiple decrements
- if not origin or origin == instance:
- update_counter(parent_model, parent_pk, counter_name, -1)
+ if parent_pk is not None and not hasattr(instance, "_previously_removed"):
+ update_counter(parent_model, parent_pk, counter_name, -1)
#
@@ -106,6 +110,12 @@
weak=False,
dispatch_uid=f'{model._meta.label}.{field.name}'
)
+ pre_delete.connect(
+ pre_delete_receiver,
+ sender=to_model,
+ weak=False,
+ dispatch_uid=f'{model._meta.label}.{field.name}'
+ )
post_delete.connect(
post_delete_receiver,
sender=to_model,
| {"golden_diff": "diff --git a/netbox/utilities/counters.py b/netbox/utilities/counters.py\n--- a/netbox/utilities/counters.py\n+++ b/netbox/utilities/counters.py\n@@ -1,6 +1,6 @@\n from django.apps import apps\n from django.db.models import F, Count, OuterRef, Subquery\n-from django.db.models.signals import post_delete, post_save\n+from django.db.models.signals import post_delete, post_save, pre_delete\n \n from netbox.registry import registry\n from .fields import CounterCacheField\n@@ -62,6 +62,12 @@\n update_counter(parent_model, new_pk, counter_name, 1)\n \n \n+def pre_delete_receiver(sender, instance, origin, **kwargs):\n+ model = instance._meta.model\n+ if not model.objects.filter(pk=instance.pk).exists():\n+ instance._previously_removed = True\n+\n+\n def post_delete_receiver(sender, instance, origin, **kwargs):\n \"\"\"\n Update counter fields on related objects when a TrackingModelMixin subclass is deleted.\n@@ -71,10 +77,8 @@\n parent_pk = getattr(instance, field_name, None)\n \n # Decrement the parent's counter by one\n- if parent_pk is not None:\n- # MPTT sends two delete signals for child elements so guard against multiple decrements\n- if not origin or origin == instance:\n- update_counter(parent_model, parent_pk, counter_name, -1)\n+ if parent_pk is not None and not hasattr(instance, \"_previously_removed\"):\n+ update_counter(parent_model, parent_pk, counter_name, -1)\n \n \n #\n@@ -106,6 +110,12 @@\n weak=False,\n dispatch_uid=f'{model._meta.label}.{field.name}'\n )\n+ pre_delete.connect(\n+ pre_delete_receiver,\n+ sender=to_model,\n+ weak=False,\n+ dispatch_uid=f'{model._meta.label}.{field.name}'\n+ )\n post_delete.connect(\n post_delete_receiver,\n sender=to_model,\n", "issue": "Removing Module Bays with Children Power Ports breaks Power Ports count\n### NetBox version\r\n\r\nv3.6.4\r\n\r\n### Python version\r\n\r\n3.8\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create module types for device (for example PWR-MX960-4100-AC with 2 power ports):\r\n\r\n2. In Devices add to chassis device (for example mx960) PEM module:\r\n\r\n3. Check count Power Ports:\r\n\r\n4. Delete module from device:\r\n\r\n\r\n\r\n### Expected Behavior\r\n\r\nFor the Power Ports count to be correct\r\n\r\n### Observed Behavior\r\n\r\nThe port counter has not decreased\n", "before_files": [{"content": "from django.apps import apps\nfrom django.db.models import F, Count, OuterRef, Subquery\nfrom django.db.models.signals import post_delete, post_save\n\nfrom netbox.registry import registry\nfrom .fields import CounterCacheField\n\n\ndef get_counters_for_model(model):\n \"\"\"\n Return field mappings for all counters registered to the given model.\n \"\"\"\n return registry['counter_fields'][model].items()\n\n\ndef update_counter(model, pk, counter_name, value):\n \"\"\"\n Increment or decrement a counter field on an object identified by its model and primary key (PK). Positive values\n will increment; negative values will decrement.\n \"\"\"\n model.objects.filter(pk=pk).update(\n **{counter_name: F(counter_name) + value}\n )\n\n\ndef update_counts(model, field_name, related_query):\n \"\"\"\n Perform a bulk update for the given model and counter field. 
For example,\n\n update_counts(Device, '_interface_count', 'interfaces')\n\n will effectively set\n\n Device.objects.update(_interface_count=Count('interfaces'))\n \"\"\"\n subquery = Subquery(\n model.objects.filter(pk=OuterRef('pk')).annotate(_count=Count(related_query)).values('_count')\n )\n return model.objects.update(**{\n field_name: subquery\n })\n\n\n#\n# Signal handlers\n#\n\ndef post_save_receiver(sender, instance, created, **kwargs):\n \"\"\"\n Update counter fields on related objects when a TrackingModelMixin subclass is created or modified.\n \"\"\"\n for field_name, counter_name in get_counters_for_model(sender):\n parent_model = sender._meta.get_field(field_name).related_model\n new_pk = getattr(instance, field_name, None)\n has_old_field = field_name in instance.tracker\n old_pk = instance.tracker.get(field_name) if has_old_field else None\n\n # Update the counters on the old and/or new parents as needed\n if old_pk is not None:\n update_counter(parent_model, old_pk, counter_name, -1)\n if new_pk is not None and (has_old_field or created):\n update_counter(parent_model, new_pk, counter_name, 1)\n\n\ndef post_delete_receiver(sender, instance, origin, **kwargs):\n \"\"\"\n Update counter fields on related objects when a TrackingModelMixin subclass is deleted.\n \"\"\"\n for field_name, counter_name in get_counters_for_model(sender):\n parent_model = sender._meta.get_field(field_name).related_model\n parent_pk = getattr(instance, field_name, None)\n\n # Decrement the parent's counter by one\n if parent_pk is not None:\n # MPTT sends two delete signals for child elements so guard against multiple decrements\n if not origin or origin == instance:\n update_counter(parent_model, parent_pk, counter_name, -1)\n\n\n#\n# Registration\n#\n\ndef connect_counters(*models):\n \"\"\"\n Register counter fields and connect post_save & post_delete signal handlers for the affected models.\n \"\"\"\n for model in models:\n\n # Find all CounterCacheFields on the model\n counter_fields = [\n field for field in model._meta.get_fields() if type(field) is CounterCacheField\n ]\n\n for field in counter_fields:\n to_model = apps.get_model(field.to_model_name)\n\n # Register the counter in the registry\n change_tracking_fields = registry['counter_fields'][to_model]\n change_tracking_fields[f\"{field.to_field_name}_id\"] = field.name\n\n # Connect the post_save and post_delete handlers\n post_save.connect(\n post_save_receiver,\n sender=to_model,\n weak=False,\n dispatch_uid=f'{model._meta.label}.{field.name}'\n )\n post_delete.connect(\n post_delete_receiver,\n sender=to_model,\n weak=False,\n dispatch_uid=f'{model._meta.label}.{field.name}'\n )\n", "path": "netbox/utilities/counters.py"}]} | 1,955 | 443 |
gh_patches_debug_1630 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-377 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Execfile does not exist in py3k
<!--
Thanks for reporting issues of python-telegram-bot!
To make it easier for us to help you please enter detailed information below.
Please note, we only support the latest version of python-telegram-bot and
master branch. Please make sure to upgrade & recreate the issue on the latest
version prior to opening an issue.
-->
### Steps to reproduce
1. Use python 3
2. Try to install from git:
`$ pip install -e git+https://github.com/python-telegram-bot/python-telegram-bot.git@555e36ee8036a179f157f60dcb0c3fcf958146f4#egg=telegram`
### Expected behaviour
The library should be installed.
### Actual behaviour
NameError, because `execfile` does not exist in Python 3.
See here for alternatives: https://stackoverflow.com/a/437857
I would fix it myself, but I am unable to actually find the execfile call anywhere .-.
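
For reference, the usual Python 3 shim from that Stack Overflow answer looks roughly like this (a sketch of the workaround, not the project's actual fix):

```python
# Hypothetical drop-in replacement for the removed execfile() built-in
def execfile(path, globals=None, locals=None):
    with open(path) as f:
        code = compile(f.read(), path, 'exec')
    exec(code, globals, locals)
```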
### Configuration
**Operating System:**
Windows 10 Education
**Version of Python, python-telegram-bot & dependencies:**
Python 3.5.2 |Continuum Analytics, Inc.| (default, Jul 5 2016, 11:41:13) [MSC v.1900 64 bit (AMD64)]
### Logs
``````
$ pip install -e git+https://github.com/python-telegram-bot/python-telegram-bot.git@555e36ee8036a179f157f60dcb0c3fcf958146f4#egg=telegram
Obtaining telegram from git+https://github.com/python-telegram-bot/python-telegram-bot.git@555e36ee8036a179f157f60dcb0c3fcf958146f4#egg=telegram
Skipping because already up-to-date.
Complete output from command python setup.py egg_info:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Development\telegram\VocaBot2\src\telegram\setup.py", line 20, in <module>
execfile(os.path.join('telegram', 'version.py'))
NameError: name 'execfile' is not defined
Command "python setup.py egg_info" failed with error code 1```
``````
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """The setup and build script for the python-telegram-bot library."""
3
4 import codecs
5 import os
6 from setuptools import setup, find_packages
7
8
9 def requirements():
10 """Build the requirements list for this project"""
11 requirements_list = []
12
13 with open('requirements.txt') as requirements:
14 for install in requirements:
15 requirements_list.append(install.strip())
16
17 return requirements_list
18
19 with codecs.open('README.rst', 'r', 'utf-8') as fd:
20 execfile(os.path.join('telegram', 'version.py'))
21
22 setup(name='python-telegram-bot',
23 version=__version__,
24 author='Leandro Toledo',
25 author_email='[email protected]',
26 license='LGPLv3',
27 url='https://github.com/python-telegram-bot/python-telegram-bot',
28 keywords='python telegram bot api wrapper',
29 description='Not just a Python wrapper around the Telegram Bot API',
30 long_description=fd.read(),
31 packages=find_packages(exclude=['tests*']),
32 install_requires=requirements(),
33 include_package_data=True,
34 classifiers=[
35 'Development Status :: 5 - Production/Stable',
36 'Intended Audience :: Developers',
37 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
38 'Operating System :: OS Independent',
39 'Topic :: Software Development :: Libraries :: Python Modules',
40 'Topic :: Communications :: Chat',
41 'Topic :: Internet',
42 'Programming Language :: Python',
43 'Programming Language :: Python :: 2',
44 'Programming Language :: Python :: 2.6',
45 'Programming Language :: Python :: 2.7',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.3',
48 'Programming Language :: Python :: 3.4',
49 'Programming Language :: Python :: 3.5',
50 ],)
51
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,6 +16,13 @@
return requirements_list
+
+def execfile(fn):
+ with open(fn) as f:
+ code = compile(f.read(), fn, 'exec')
+ exec(code)
+
+
with codecs.open('README.rst', 'r', 'utf-8') as fd:
execfile(os.path.join('telegram', 'version.py'))
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,6 +16,13 @@\n \n return requirements_list\n \n+\n+def execfile(fn):\n+ with open(fn) as f:\n+ code = compile(f.read(), fn, 'exec')\n+ exec(code)\n+\n+\n with codecs.open('README.rst', 'r', 'utf-8') as fd:\n execfile(os.path.join('telegram', 'version.py'))\n", "issue": "Execfile does not exist in py3k\n<!--\nThanks for reporting issues of python-telegram-bot!\nTo make it easier for us to help you please enter detailed information below.\n\nPlease note, we only support the latest version of python-telegram-bot and\nmaster branch. Please make sure to upgrade & recreate the issue on the latest\nversion prior to opening an issue.\n-->\n### Steps to reproduce\n1. Use python 3\n2. Try to install from git:\n `$ pip install -e git+https://github.com/python-telegram-bot/python-telegram-bot.git@555e36ee8036a179f157f60dcb0c3fcf958146f4#egg=telegram`\n### Expected behaviour\n\nThe library should be installed.\n### Actual behaviour\n\nNameError due to `execfile` not being a thing in python 3.\nSee here for alternatives: https://stackoverflow.com/a/437857\nI would fix it myself, but I am unable to actually find the execfile call anywhere .-.\n### Configuration\n\n**Operating System:**\nWindows 10 Education\n\n**Version of Python, python-telegram-bot & dependencies:**\nPython 3.5.2 |Continuum Analytics, Inc.| (default, Jul 5 2016, 11:41:13) [MSC v.1900 64 bit (AMD64)]\n### Logs\n\n``````\n$ pip install -e git+https://github.com/python-telegram-bot/python-telegram-bot.git@555e36ee8036a179f157f60dcb0c3fcf958146f4#egg=telegram\nObtaining telegram from git+https://github.com/python-telegram-bot/python-telegram-bot.git@555e36ee8036a179f157f60dcb0c3fcf958146f4#egg=telegram\n Skipping because already up-to-date.\n Complete output from command python setup.py egg_info:\n Traceback (most recent call last):\n File \"<string>\", line 1, in <module>\n File \"C:\\Development\\telegram\\VocaBot2\\src\\telegram\\setup.py\", line 20, in <module>\n execfile(os.path.join('telegram', 'version.py'))\n NameError: name 'execfile' is not defined\nCommand \"python setup.py egg_info\" failed with error code 1```\n``````\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n execfile(os.path.join('telegram', 'version.py'))\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='[email protected]',\n license='LGPLv3',\n url='https://github.com/python-telegram-bot/python-telegram-bot',\n keywords='python telegram bot api wrapper',\n description='Not just a Python wrapper around the Telegram Bot API',\n long_description=fd.read(),\n packages=find_packages(exclude=['tests*']),\n install_requires=requirements(),\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: 
Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],)\n", "path": "setup.py"}]} | 1,579 | 108 |
gh_patches_debug_32541 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-324 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Signer client does not support files with '.' in filenames
Many of our artifacts contain '.' in their filenames, for example:
`client-benchmarks-1.0.0-javadoc.jar`
These files need to be signed properly, but the signer currently skips them.
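
The skip check joins *all* of `pathlib.Path(...).suffixes` and compares the result against the whitelist, so dots inside a version number defeat it. A quick demonstration (using the `ACCEPTED_FILE_TYPES` list from the code below):

```python
import pathlib

name = "client-benchmarks-1.0.0-javadoc.jar"
# Every dot-separated segment after the first becomes a "suffix"
print(pathlib.Path(name).suffixes)            # ['.0', '.0-javadoc', '.jar']
print("".join(pathlib.Path(name).suffixes))   # '.0.0-javadoc.jar', never in ACCEPTED_FILE_TYPES
```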
</issue>
<code>
[start of bundle-workflow/src/sign.py]
1 #!/usr/bin/env python
2
3 # SPDX-License-Identifier: Apache-2.0
4 #
5 # The OpenSearch Contributors require contributions made to
6 # this file be licensed under the Apache-2.0 license or a
7 # compatible open source license.
8
9 import argparse
10
11 from manifests.build_manifest import BuildManifest
12 from signing_workflow.signer import Signer
13
14 parser = argparse.ArgumentParser(description="Sign artifacts")
15 parser.add_argument(
16 "manifest", type=argparse.FileType("r"), help="Path to local manifest file."
17 )
18 parser.add_argument("--component", nargs="?", help="Component name")
19 parser.add_argument("--type", nargs="?", help="Artifact type")
20 args = parser.parse_args()
21
22 manifest = BuildManifest.from_file(args.manifest)
23 signer = Signer()
24
25 for component in manifest.components:
26
27 if args.component and args.component != component.name:
28 print(f"\nSkipping {component.name}")
29 continue
30
31 print(f"\nSigning {component.name}")
32 for artifact_type in component.artifacts:
33
34 if args.type and args.type != artifact_type:
35 continue
36
37 signer.sign(component.artifacts[artifact_type])
38
39 print("Done.")
40
[end of bundle-workflow/src/sign.py]
[start of bundle-workflow/src/signing_workflow/signer.py]
1 #!/usr/bin/env python
2
3 # SPDX-License-Identifier: Apache-2.0
4 #
5 # The OpenSearch Contributors require contributions made to
6 # this file be licensed under the Apache-2.0 license or a
7 # compatible open source license.
8
9 import os
10 import pathlib
11
12 from git.git_repository import GitRepository
13
14 """
15 This class is responsible for signing an artifact using the OpenSearch-signer-client and verifying its signature.
16 The signed artifacts will be found in the same location as the original artifacts.
17 """
18
19
20 class Signer:
21
22 ACCEPTED_FILE_TYPES = [".zip", ".jar", ".war", ".pom", ".module", ".tar.gz"]
23
24 def __init__(self):
25 self.git_repo = GitRepository(self.get_repo_url(), "HEAD")
26 self.git_repo.execute("./bootstrap", subdirname="src")
27 self.git_repo.execute("rm config.cfg", subdirname="src")
28
29 def sign_artifacts(self, artifacts, basepath):
30 for artifact in artifacts:
31 if self.is_invalid_file_type(artifact):
32 print(f"Skipping signing of file ${artifact}")
33 continue
34 location = os.path.join(basepath, artifact)
35 self.sign(location)
36 self.verify(location + ".asc")
37
38 def is_invalid_file_type(self, file_name):
39 return (
40 "".join(pathlib.Path(file_name).suffixes) not in Signer.ACCEPTED_FILE_TYPES
41 )
42
43 def get_repo_url(self):
44 if "GITHUB_TOKEN" in os.environ:
45 return "https://${GITHUB_TOKEN}@github.com/opensearch-project/opensearch-signer-client.git"
46 return "https://github.com/opensearch-project/opensearch-signer-client.git"
47
48 def sign(self, filename):
49 signature_file = filename + ".asc"
50 signing_cmd = [
51 "./opensearch-signer-client",
52 "-i",
53 filename,
54 "-o",
55 signature_file,
56 "-p",
57 "pgp",
58 ]
59 self.git_repo.execute(" ".join(signing_cmd), subdirname="src")
60
61 def verify(self, filename):
62 verify_cmd = ["gpg", "--verify-files", filename]
63 self.git_repo.execute(" ".join(verify_cmd))
64
[end of bundle-workflow/src/signing_workflow/signer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bundle-workflow/src/sign.py b/bundle-workflow/src/sign.py
--- a/bundle-workflow/src/sign.py
+++ b/bundle-workflow/src/sign.py
@@ -7,6 +7,7 @@
# compatible open source license.
import argparse
+import os
from manifests.build_manifest import BuildManifest
from signing_workflow.signer import Signer
@@ -20,6 +21,7 @@
args = parser.parse_args()
manifest = BuildManifest.from_file(args.manifest)
+basepath = os.path.dirname(os.path.abspath(manifest.name))
signer = Signer()
for component in manifest.components:
@@ -34,6 +36,6 @@
if args.type and args.type != artifact_type:
continue
- signer.sign(component.artifacts[artifact_type])
+ signer.sign_artifacts(component.artifacts[artifact_type], basepath)
print("Done.")
diff --git a/bundle-workflow/src/signing_workflow/signer.py b/bundle-workflow/src/signing_workflow/signer.py
--- a/bundle-workflow/src/signing_workflow/signer.py
+++ b/bundle-workflow/src/signing_workflow/signer.py
@@ -28,17 +28,15 @@
def sign_artifacts(self, artifacts, basepath):
for artifact in artifacts:
- if self.is_invalid_file_type(artifact):
+ if not self.is_valid_file_type(artifact):
print(f"Skipping signing of file ${artifact}")
continue
location = os.path.join(basepath, artifact)
self.sign(location)
self.verify(location + ".asc")
- def is_invalid_file_type(self, file_name):
- return (
- "".join(pathlib.Path(file_name).suffixes) not in Signer.ACCEPTED_FILE_TYPES
- )
+ def is_valid_file_type(self, file_name):
+ return any(x in [pathlib.Path(file_name).suffix, "".join(pathlib.Path(file_name).suffixes)] for x in Signer.ACCEPTED_FILE_TYPES)
def get_repo_url(self):
if "GITHUB_TOKEN" in os.environ:
| {"golden_diff": "diff --git a/bundle-workflow/src/sign.py b/bundle-workflow/src/sign.py\n--- a/bundle-workflow/src/sign.py\n+++ b/bundle-workflow/src/sign.py\n@@ -7,6 +7,7 @@\n # compatible open source license.\n \n import argparse\n+import os\n \n from manifests.build_manifest import BuildManifest\n from signing_workflow.signer import Signer\n@@ -20,6 +21,7 @@\n args = parser.parse_args()\n \n manifest = BuildManifest.from_file(args.manifest)\n+basepath = os.path.dirname(os.path.abspath(manifest.name))\n signer = Signer()\n \n for component in manifest.components:\n@@ -34,6 +36,6 @@\n if args.type and args.type != artifact_type:\n continue\n \n- signer.sign(component.artifacts[artifact_type])\n+ signer.sign_artifacts(component.artifacts[artifact_type], basepath)\n \n print(\"Done.\")\ndiff --git a/bundle-workflow/src/signing_workflow/signer.py b/bundle-workflow/src/signing_workflow/signer.py\n--- a/bundle-workflow/src/signing_workflow/signer.py\n+++ b/bundle-workflow/src/signing_workflow/signer.py\n@@ -28,17 +28,15 @@\n \n def sign_artifacts(self, artifacts, basepath):\n for artifact in artifacts:\n- if self.is_invalid_file_type(artifact):\n+ if not self.is_valid_file_type(artifact):\n print(f\"Skipping signing of file ${artifact}\")\n continue\n location = os.path.join(basepath, artifact)\n self.sign(location)\n self.verify(location + \".asc\")\n \n- def is_invalid_file_type(self, file_name):\n- return (\n- \"\".join(pathlib.Path(file_name).suffixes) not in Signer.ACCEPTED_FILE_TYPES\n- )\n+ def is_valid_file_type(self, file_name):\n+ return any(x in [pathlib.Path(file_name).suffix, \"\".join(pathlib.Path(file_name).suffixes)] for x in Signer.ACCEPTED_FILE_TYPES)\n \n def get_repo_url(self):\n if \"GITHUB_TOKEN\" in os.environ:\n", "issue": "Signer client does not support files with '.' in filenames\nMany of our artifacts contain '.' 
in the artifact names.\r\n\r\n`client-benchmarks-1.0.0-javadoc.jar`\r\n\r\nThese files need to be signed properly.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport argparse\n\nfrom manifests.build_manifest import BuildManifest\nfrom signing_workflow.signer import Signer\n\nparser = argparse.ArgumentParser(description=\"Sign artifacts\")\nparser.add_argument(\n \"manifest\", type=argparse.FileType(\"r\"), help=\"Path to local manifest file.\"\n)\nparser.add_argument(\"--component\", nargs=\"?\", help=\"Component name\")\nparser.add_argument(\"--type\", nargs=\"?\", help=\"Artifact type\")\nargs = parser.parse_args()\n\nmanifest = BuildManifest.from_file(args.manifest)\nsigner = Signer()\n\nfor component in manifest.components:\n\n if args.component and args.component != component.name:\n print(f\"\\nSkipping {component.name}\")\n continue\n\n print(f\"\\nSigning {component.name}\")\n for artifact_type in component.artifacts:\n\n if args.type and args.type != artifact_type:\n continue\n\n signer.sign(component.artifacts[artifact_type])\n\nprint(\"Done.\")\n", "path": "bundle-workflow/src/sign.py"}, {"content": "#!/usr/bin/env python\n\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\nimport pathlib\n\nfrom git.git_repository import GitRepository\n\n\"\"\"\nThis class is responsible for signing an artifact using the OpenSearch-signer-client and verifying its signature.\nThe signed artifacts will be found in the same location as the original artifacts.\n\"\"\"\n\n\nclass Signer:\n\n ACCEPTED_FILE_TYPES = [\".zip\", \".jar\", \".war\", \".pom\", \".module\", \".tar.gz\"]\n\n def __init__(self):\n self.git_repo = GitRepository(self.get_repo_url(), \"HEAD\")\n self.git_repo.execute(\"./bootstrap\", subdirname=\"src\")\n self.git_repo.execute(\"rm config.cfg\", subdirname=\"src\")\n\n def sign_artifacts(self, artifacts, basepath):\n for artifact in artifacts:\n if self.is_invalid_file_type(artifact):\n print(f\"Skipping signing of file ${artifact}\")\n continue\n location = os.path.join(basepath, artifact)\n self.sign(location)\n self.verify(location + \".asc\")\n\n def is_invalid_file_type(self, file_name):\n return (\n \"\".join(pathlib.Path(file_name).suffixes) not in Signer.ACCEPTED_FILE_TYPES\n )\n\n def get_repo_url(self):\n if \"GITHUB_TOKEN\" in os.environ:\n return \"https://${GITHUB_TOKEN}@github.com/opensearch-project/opensearch-signer-client.git\"\n return \"https://github.com/opensearch-project/opensearch-signer-client.git\"\n\n def sign(self, filename):\n signature_file = filename + \".asc\"\n signing_cmd = [\n \"./opensearch-signer-client\",\n \"-i\",\n filename,\n \"-o\",\n signature_file,\n \"-p\",\n \"pgp\",\n ]\n self.git_repo.execute(\" \".join(signing_cmd), subdirname=\"src\")\n\n def verify(self, filename):\n verify_cmd = [\"gpg\", \"--verify-files\", filename]\n self.git_repo.execute(\" \".join(verify_cmd))\n", "path": "bundle-workflow/src/signing_workflow/signer.py"}]} | 1,511 | 456 |
gh_patches_debug_5178 | rasdani/github-patches | git_diff | pytorch__pytorch-1087 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ProgressMonitor (Trainer plugin) prints incorrect progress value and percentage
Code to replicate the behaviour (extracted from [this Jupyter notebook](https://github.com/recastrodiaz/courses/blob/master/deeplearning1/nbs/lesson1-pytorch.ipynb), cell `In [9]`):
```PYTHON
t = trainer.Trainer(model, criterion, optimizer, train_loader)
t.register_plugin(trainer.plugins.ProgressMonitor())
t.register_plugin(trainer.plugins.AccuracyMonitor())
t.register_plugin(trainer.plugins.LossMonitor())
t.register_plugin(trainer.plugins.TimeMonitor())
t.register_plugin(trainer.plugins.Logger(['progress', 'accuracy', 'loss', 'time']))
epochs = 1
t.run(epochs)
```
Prints:
```LOG
progress: 23000/360 (6388.89%) accuracy: 100.00% (99.31%) loss: 0.0058 (0.0368) time: 705ms (1249ms)
```
Should print instead:
```
progress: 360/360 (100.00%) accuracy: 100.00% (99.31%) loss: 0.0058 (0.0368) time: 705ms (1249ms)
```
I'll send over a PR for this.
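
For anyone tracing the numbers: `epoch_size` comes from `len(trainer.dataset)`, which for a `DataLoader` is the number of *batches* (360 here), while the monitor accumulates `input.size(0)` *samples* per iteration. A rough sanity check against the log above:

```python
# Figures taken from the log output above
epoch_size = 360                 # len(train_loader): number of batches
samples_used = 23000             # accumulated input.size(0) over one epoch
print(100.0 * samples_used / epoch_size)   # 6388.888..., the bogus 6388.89%
```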
</issue>
<code>
[start of torch/utils/trainer/plugins/progress.py]
1 from .plugin import Plugin
2
3
4 class ProgressMonitor(Plugin):
5 stat_name = 'progress'
6
7 def __init__(self):
8 super(ProgressMonitor, self).__init__([(1, 'iteration'), (1, 'epoch')])
9
10 def register(self, trainer):
11 self.trainer = trainer
12 stats = self.trainer.stats.setdefault(self.stat_name, {})
13 stats['samples_used'] = 0
14 stats['epoch_size'] = len(trainer.dataset)
15 stats['log_iter_fields'] = [
16 '{samples_used}/{epoch_size}',
17 '({percent:.2f}%)'
18 ]
19
20 def iteration(self, iteration, input, *args):
21 stats = self.trainer.stats.setdefault(self.stat_name, {})
22 stats['samples_used'] += input.size(0)
23 stats['percent'] = 100. * stats['samples_used'] / stats['epoch_size']
24
25 def epoch(self, *args):
26 stats = self.trainer.stats.setdefault(self.stat_name, {})
27 stats['samples_used'] = 0
28 stats['percent'] = 0
29
[end of torch/utils/trainer/plugins/progress.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torch/utils/trainer/plugins/progress.py b/torch/utils/trainer/plugins/progress.py
--- a/torch/utils/trainer/plugins/progress.py
+++ b/torch/utils/trainer/plugins/progress.py
@@ -19,7 +19,7 @@
def iteration(self, iteration, input, *args):
stats = self.trainer.stats.setdefault(self.stat_name, {})
- stats['samples_used'] += input.size(0)
+ stats['samples_used'] += 1
stats['percent'] = 100. * stats['samples_used'] / stats['epoch_size']
def epoch(self, *args):
| {"golden_diff": "diff --git a/torch/utils/trainer/plugins/progress.py b/torch/utils/trainer/plugins/progress.py\n--- a/torch/utils/trainer/plugins/progress.py\n+++ b/torch/utils/trainer/plugins/progress.py\n@@ -19,7 +19,7 @@\n \n def iteration(self, iteration, input, *args):\n stats = self.trainer.stats.setdefault(self.stat_name, {})\n- stats['samples_used'] += input.size(0)\n+ stats['samples_used'] += 1\n stats['percent'] = 100. * stats['samples_used'] / stats['epoch_size']\n \n def epoch(self, *args):\n", "issue": "ProgressMonitor (Trainer plugin) prints incorrect progress value and percentage\nCode to replicate behaviour, extract of [jupyter notebook](https://github.com/recastrodiaz/courses/blob/master/deeplearning1/nbs/lesson1-pytorch.ipynb) In [9]:\r\n\r\n```PYTHON\r\nt = trainer.Trainer(model, criterion, optimizer, train_loader)\r\nt.register_plugin(trainer.plugins.ProgressMonitor())\r\nt.register_plugin(trainer.plugins.AccuracyMonitor())\r\nt.register_plugin(trainer.plugins.LossMonitor())\r\nt.register_plugin(trainer.plugins.TimeMonitor())\r\nt.register_plugin(trainer.plugins.Logger(['progress', 'accuracy', 'loss', 'time']))\r\n\r\nepochs = 1\r\nt.run(epochs)\r\n```\r\n\r\nPrints:\r\n```LOG\r\nprogress: 23000/360 (6388.89%)\taccuracy: 100.00% (99.31%)\tloss: 0.0058 (0.0368)\ttime: 705ms (1249ms)\r\n```\r\n\r\nShould print instead:\r\n```\r\nprogress: 360/360 (100.00%)\taccuracy: 100.00% (99.31%)\tloss: 0.0058 (0.0368)\ttime: 705ms (1249ms)\r\n```\r\n\r\nI'll send over a PR for this.\n", "before_files": [{"content": "from .plugin import Plugin\n\n\nclass ProgressMonitor(Plugin):\n stat_name = 'progress'\n\n def __init__(self):\n super(ProgressMonitor, self).__init__([(1, 'iteration'), (1, 'epoch')])\n\n def register(self, trainer):\n self.trainer = trainer\n stats = self.trainer.stats.setdefault(self.stat_name, {})\n stats['samples_used'] = 0\n stats['epoch_size'] = len(trainer.dataset)\n stats['log_iter_fields'] = [\n '{samples_used}/{epoch_size}',\n '({percent:.2f}%)'\n ]\n\n def iteration(self, iteration, input, *args):\n stats = self.trainer.stats.setdefault(self.stat_name, {})\n stats['samples_used'] += input.size(0)\n stats['percent'] = 100. * stats['samples_used'] / stats['epoch_size']\n\n def epoch(self, *args):\n stats = self.trainer.stats.setdefault(self.stat_name, {})\n stats['samples_used'] = 0\n stats['percent'] = 0\n", "path": "torch/utils/trainer/plugins/progress.py"}]} | 1,140 | 143 |
gh_patches_debug_17900 | rasdani/github-patches | git_diff | DataBiosphere__toil-4728 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Toil is marking its inputs executable
I think that Toil is marking input files from the local disk as executable. This results in weird changes to the file permissions in my Git tree when I run the Toil tests.
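
A quick way to check whether a file picked up the executable bit (a sketch; the path is made up):

```python
import os
import stat

path = "inputs/data.txt"   # hypothetical, originally non-executable input file
is_exec = os.stat(path).st_mode & stat.S_IXUSR != 0
print(is_exec)             # expected False; True after an import would confirm the report
```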
┆Issue is synchronized with this [Jira Story](https://ucsc-cgl.atlassian.net/browse/TOIL-1462)
┆Issue Number: TOIL-1462
</issue>
<code>
[start of src/toil/fileStores/__init__.py]
1 # Copyright (C) 2015-2021 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15 import stat
16 from typing import Any
17
18
19 class FileID(str):
20 """
21 A small wrapper around Python's builtin string class.
22
23 It is used to represent a file's ID in the file store, and has a size attribute
24 that is the file's size in bytes. This object is returned by importFile and
25 writeGlobalFile.
26
27 Calls into the file store can use bare strings; size will be queried from
28 the job store if unavailable in the ID.
29 """
30
31 def __new__(cls, fileStoreID: str, *args: Any) -> 'FileID':
32 return super().__new__(cls, fileStoreID)
33
34 def __init__(self, fileStoreID: str, size: int, executable: bool = False) -> None:
35 # Don't pass an argument to parent class's __init__.
36 # In Python 3 we can have super(FileID, self) hand us object's __init__ which chokes on any arguments.
37 super().__init__()
38 self.size = size
39 self.executable = executable
40
41 def pack(self) -> str:
42 """Pack the FileID into a string so it can be passed through external code."""
43 return f'{self.size}:{int(self.executable)}:{self}'
44
45 @classmethod
46 def forPath(cls, fileStoreID: str, filePath: str) -> 'FileID':
47 executable = os.stat(filePath).st_mode & stat.S_IXUSR != 0
48 return cls(fileStoreID, os.stat(filePath).st_size, executable)
49
50 @classmethod
51 def unpack(cls, packedFileStoreID: str) -> 'FileID':
52 """Unpack the result of pack() into a FileID object."""
53 # Only separate twice in case the FileID itself has colons in it
54 vals = packedFileStoreID.split(':', 2)
55 # Break up the packed value
56 size = int(vals[0])
57 executable = bool(vals[1])
58 value = vals[2]
59 # Create the FileID
60 return cls(value, size, executable)
61
[end of src/toil/fileStores/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/toil/fileStores/__init__.py b/src/toil/fileStores/__init__.py
--- a/src/toil/fileStores/__init__.py
+++ b/src/toil/fileStores/__init__.py
@@ -40,7 +40,7 @@
def pack(self) -> str:
"""Pack the FileID into a string so it can be passed through external code."""
- return f'{self.size}:{int(self.executable)}:{self}'
+ return f'{self.size}:{"1" if self.executable else "0"}:{self}'
@classmethod
def forPath(cls, fileStoreID: str, filePath: str) -> 'FileID':
@@ -54,7 +54,7 @@
vals = packedFileStoreID.split(':', 2)
# Break up the packed value
size = int(vals[0])
- executable = bool(vals[1])
+ executable = (vals[1] == "1")
value = vals[2]
# Create the FileID
return cls(value, size, executable)
| {"golden_diff": "diff --git a/src/toil/fileStores/__init__.py b/src/toil/fileStores/__init__.py\n--- a/src/toil/fileStores/__init__.py\n+++ b/src/toil/fileStores/__init__.py\n@@ -40,7 +40,7 @@\n \n def pack(self) -> str:\n \"\"\"Pack the FileID into a string so it can be passed through external code.\"\"\"\n- return f'{self.size}:{int(self.executable)}:{self}'\n+ return f'{self.size}:{\"1\" if self.executable else \"0\"}:{self}'\n \n @classmethod\n def forPath(cls, fileStoreID: str, filePath: str) -> 'FileID':\n@@ -54,7 +54,7 @@\n vals = packedFileStoreID.split(':', 2)\n # Break up the packed value\n size = int(vals[0])\n- executable = bool(vals[1])\n+ executable = (vals[1] == \"1\")\n value = vals[2]\n # Create the FileID\n return cls(value, size, executable)\n", "issue": "Toil is marking its inputs executable\nI think that Toil is marking input files from the local disk executable. It is resulting in weird changes to the file permissions in my Git tree when I run Toil tests.\n\n\n\n\u2506Issue is synchronized with this [Jira Story](https://ucsc-cgl.atlassian.net/browse/TOIL-1462)\n\u2506Issue Number: TOIL-1462\n\n", "before_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport stat\nfrom typing import Any\n\n\nclass FileID(str):\n \"\"\"\n A small wrapper around Python's builtin string class.\n\n It is used to represent a file's ID in the file store, and has a size attribute\n that is the file's size in bytes. This object is returned by importFile and\n writeGlobalFile.\n\n Calls into the file store can use bare strings; size will be queried from\n the job store if unavailable in the ID.\n \"\"\"\n\n def __new__(cls, fileStoreID: str, *args: Any) -> 'FileID':\n return super().__new__(cls, fileStoreID)\n\n def __init__(self, fileStoreID: str, size: int, executable: bool = False) -> None:\n # Don't pass an argument to parent class's __init__.\n # In Python 3 we can have super(FileID, self) hand us object's __init__ which chokes on any arguments.\n super().__init__()\n self.size = size\n self.executable = executable\n\n def pack(self) -> str:\n \"\"\"Pack the FileID into a string so it can be passed through external code.\"\"\"\n return f'{self.size}:{int(self.executable)}:{self}'\n\n @classmethod\n def forPath(cls, fileStoreID: str, filePath: str) -> 'FileID':\n executable = os.stat(filePath).st_mode & stat.S_IXUSR != 0\n return cls(fileStoreID, os.stat(filePath).st_size, executable)\n\n @classmethod\n def unpack(cls, packedFileStoreID: str) -> 'FileID':\n \"\"\"Unpack the result of pack() into a FileID object.\"\"\"\n # Only separate twice in case the FileID itself has colons in it\n vals = packedFileStoreID.split(':', 2)\n # Break up the packed value\n size = int(vals[0])\n executable = bool(vals[1])\n value = vals[2]\n # Create the FileID\n return cls(value, size, executable)\n", "path": "src/toil/fileStores/__init__.py"}]} | 1,335 | 237 |
gh_patches_debug_54 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-585 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
v0.43.0 requires pydantic, but is marked optional
Attempting to start a very simple server results in:
```
$ strawberry server app
Traceback (most recent call last):
File "/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/bin/strawberry", line 5, in <module>
from strawberry.cli import run
File "/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/__init__.py", line 1, in <module>
from . import experimental, federation
File "/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/experimental/__init__.py", line 1, in <module>
from . import pydantic
File "/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/experimental/pydantic/__init__.py", line 1, in <module>
from .error_type import error_type
File "/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/experimental/pydantic/error_type.py", line 4, in <module>
from pydantic import BaseModel
ModuleNotFoundError: No module named 'pydantic'
```
Even though pyproject.toml has pydantic marked as optional: https://github.com/strawberry-graphql/strawberry/blob/master/pyproject.toml#L37
app.py is:
```
import asyncio
import strawberry
async def resolve_hello(root, info, name: str) -> str:
await asyncio.sleep(1)
return f"Hello {name}"
@strawberry.type
class Query:
hello: str = strawberry.field(resolver=resolve_hello)
schema = strawberry.Schema(query=Query)
```
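
A common pattern for keeping an optional dependency from breaking the top-level import is to guard it (a sketch of the idea, not necessarily how the maintainers will fix it):

```python
# strawberry/experimental/__init__.py -- hypothetical guarded import
try:
    from . import pydantic
except ImportError:
    # pydantic extra not installed; skip the experimental integration
    pass
else:
    __all__ = ["pydantic"]
```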
</issue>
<code>
[start of strawberry/experimental/__init__.py]
1 from . import pydantic
2
3
4 __all__ = ["pydantic"]
5
[end of strawberry/experimental/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/experimental/__init__.py b/strawberry/experimental/__init__.py
--- a/strawberry/experimental/__init__.py
+++ b/strawberry/experimental/__init__.py
@@ -1,4 +1,6 @@
-from . import pydantic
-
-
-__all__ = ["pydantic"]
+try:
+ from . import pydantic
+except ImportError:
+ pass
+else:
+ __all__ = ["pydantic"]
| {"golden_diff": "diff --git a/strawberry/experimental/__init__.py b/strawberry/experimental/__init__.py\n--- a/strawberry/experimental/__init__.py\n+++ b/strawberry/experimental/__init__.py\n@@ -1,4 +1,6 @@\n-from . import pydantic\n-\n-\n-__all__ = [\"pydantic\"]\n+try:\n+ from . import pydantic\n+except ImportError:\n+ pass\n+else:\n+ __all__ = [\"pydantic\"]\n", "issue": "v0.43.0 requires pydantic, but is marked optional\nAttempting to start a very simple server results in:\r\n\r\n```\r\n$ strawberry server app\r\nTraceback (most recent call last):\r\n File \"/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/bin/strawberry\", line 5, in <module>\r\n from strawberry.cli import run\r\n File \"/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/__init__.py\", line 1, in <module>\r\n from . import experimental, federation\r\n File \"/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/experimental/__init__.py\", line 1, in <module>\r\n from . import pydantic\r\n File \"/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/experimental/pydantic/__init__.py\", line 1, in <module>\r\n from .error_type import error_type\r\n File \"/home/ossareh/.local/share/virtualenvs/vega-IIh8dIOy/lib/python3.9/site-packages/strawberry/experimental/pydantic/error_type.py\", line 4, in <module>\r\n from pydantic import BaseModel\r\nModuleNotFoundError: No module named 'pydantic'\r\n```\r\n\r\nEven though pyproject.toml has pydantic marked as optional: https://github.com/strawberry-graphql/strawberry/blob/master/pyproject.toml#L37\r\n\r\napp.py is:\r\n\r\n```\r\nimport asyncio\r\n\r\nimport strawberry\r\n\r\n\r\nasync def resolve_hello(root, info, name: str) -> str:\r\n await asyncio.sleep(1)\r\n return f\"Hello {name}\"\r\n\r\n\r\[email protected]\r\nclass Query:\r\n hello: str = strawberry.field(resolver=resolve_hello)\r\n\r\n\r\nschema = strawberry.Schema(query=Query)\r\n```\n", "before_files": [{"content": "from . import pydantic\n\n\n__all__ = [\"pydantic\"]\n", "path": "strawberry/experimental/__init__.py"}]} | 1,013 | 116 |
gh_patches_debug_11265 | rasdani/github-patches | git_diff | kubeflow__pipelines-1936 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handling of boolean input types
I have a pipeline that has a boolean parameter:
```
def test(
param: dsl.PipelineParam = dsl.PipelineParam(name='param', value=True, param_type=TypeMeta('bool')),
) -> None:
pass
```
When starting a new pipeline via the UI, the input field is a textbox, prefilled with the input string 'True'. Given that the input value is evaluated using `bool(value)`, the only way to start the pipeline with a `False` value is to leave the input textbox empty.
It seems that the pipeline currently does not handle boolean parameters gracefully.
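
The root cause is that `bool()` on any non-empty string is `True`; the string needs to be parsed instead. A quick demonstration (`strtobool` is one standard-library option):

```python
from distutils.util import strtobool

print(bool("False"))       # True: any non-empty string is truthy
print(strtobool("False"))  # 0, actually parses the boolean-ish string
print(strtobool("True"))   # 1
```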
</issue>
<code>
[start of sdk/python/kfp/components/_data_passing.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 __all__ = [
16 'type_to_type_name',
17 'type_name_to_type',
18 'type_to_deserializer',
19 'type_name_to_deserializer',
20 'type_name_to_serializer',
21 ]
22
23
24 import inspect
25 from typing import Any, Callable, NamedTuple, Sequence
26 import warnings
27
28
29 Converter = NamedTuple('Converter', [
30 ('types', Sequence[str]),
31 ('type_names', Sequence[str]),
32 ('serializer', Callable[[Any], str]),
33 ('deserializer_code', str),
34 ('definitions', str),
35 ])
36
37
38 _converters = [
39 Converter([str], ['String', 'str'], str, 'str', None),
40 Converter([int], ['Integer', 'int'], str, 'int', None),
41 Converter([float], ['Float', 'float'], str, 'float', None),
42 ]
43
44
45 type_to_type_name = {typ: converter.type_names[0] for converter in _converters for typ in converter.types}
46 type_name_to_type = {type_name: converter.types[0] for converter in _converters for type_name in converter.type_names if converter.types}
47 type_to_deserializer = {typ: (converter.deserializer_code, converter.definitions) for converter in _converters for typ in converter.types}
48 type_name_to_deserializer = {type_name: (converter.deserializer_code, converter.definitions) for converter in _converters for type_name in converter.type_names}
49 type_name_to_serializer = {type_name: converter.serializer for converter in _converters for type_name in converter.type_names}
50
51
52 def serialize_value(value, type_name: str) -> str:
53 '''serialize_value converts the passed value to string based on the serializer associated with the passed type_name'''
54 if isinstance(value, str):
55 return value # The value is supposedly already serialized
56
57 if type_name is None:
58 type_name = type_to_type_name.get(type(value), type(value).__name__)
59 warnings.warn('Missing type name was inferred as "{}" based on the value "{}".'.format(type_name, str(value)))
60
61 serializer = type_name_to_serializer.get(type_name, None)
62 if serializer:
63 try:
64 return serializer(value)
65 except Exception as e:
66 raise ValueError('Failed to serialize the value "{}" of type "{}" to type "{}". Exception: {}'.format(
67 str(value),
68 str(type(value).__name__),
69 str(type_name),
70 str(e),
71 ))
72
73 serialized_value = str(value)
74 warnings.warn('There are no registered serializers from type "{}" to type "{}", so the value will be serializers as string "{}".'.format(
75 str(type(value).__name__),
76 str(type_name),
77 serialized_value),
78 )
79 return serialized_value
80
[end of sdk/python/kfp/components/_data_passing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/kfp/components/_data_passing.py b/sdk/python/kfp/components/_data_passing.py
--- a/sdk/python/kfp/components/_data_passing.py
+++ b/sdk/python/kfp/components/_data_passing.py
@@ -35,10 +35,20 @@
])
+def _deserialize_bool(s) -> bool:
+ from distutils.util import strtobool
+ return strtobool(s) == 1
+
+
+_bool_deserializer_definitions = inspect.getsource(_deserialize_bool)
+_bool_deserializer_code = _deserialize_bool.__name__
+
+
_converters = [
Converter([str], ['String', 'str'], str, 'str', None),
Converter([int], ['Integer', 'int'], str, 'int', None),
Converter([float], ['Float', 'float'], str, 'float', None),
+ Converter([bool], ['Boolean', 'bool'], str, _bool_deserializer_code, _bool_deserializer_definitions),
]
| {"golden_diff": "diff --git a/sdk/python/kfp/components/_data_passing.py b/sdk/python/kfp/components/_data_passing.py\n--- a/sdk/python/kfp/components/_data_passing.py\n+++ b/sdk/python/kfp/components/_data_passing.py\n@@ -35,10 +35,20 @@\n ])\n \n \n+def _deserialize_bool(s) -> bool:\n+ from distutils.util import strtobool\n+ return strtobool(s) == 1\n+\n+\n+_bool_deserializer_definitions = inspect.getsource(_deserialize_bool)\n+_bool_deserializer_code = _deserialize_bool.__name__\n+\n+\n _converters = [\n Converter([str], ['String', 'str'], str, 'str', None),\n Converter([int], ['Integer', 'int'], str, 'int', None),\n Converter([float], ['Float', 'float'], str, 'float', None),\n+ Converter([bool], ['Boolean', 'bool'], str, _bool_deserializer_code, _bool_deserializer_definitions),\n ]\n", "issue": "Handling of boolean input types\nI have a pipeline that has a boolean parameter:\r\n\r\n```\r\ndef test(\r\n param: dsl.PipelineParam = dsl.PipelineParam(name='param', value=True, param_type=TypeMeta('bool')),\r\n) -> None:\r\n pass\r\n```\r\n\r\nWhen starting a new pipeline via the UI, the input field is a textbox, prefilled with the input string 'True'. Given that the input value is evaluated using `bool(value)`, the only way to start the pipeline with a `False` value is to leave the input textbox empty.\r\n\r\nIt seems that currently the pipeline does not gracefully handle boolean parameters?\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__all__ = [\n 'type_to_type_name',\n 'type_name_to_type',\n 'type_to_deserializer',\n 'type_name_to_deserializer',\n 'type_name_to_serializer',\n]\n\n\nimport inspect\nfrom typing import Any, Callable, NamedTuple, Sequence\nimport warnings\n\n\nConverter = NamedTuple('Converter', [\n ('types', Sequence[str]),\n ('type_names', Sequence[str]),\n ('serializer', Callable[[Any], str]),\n ('deserializer_code', str),\n ('definitions', str),\n])\n\n\n_converters = [\n Converter([str], ['String', 'str'], str, 'str', None),\n Converter([int], ['Integer', 'int'], str, 'int', None),\n Converter([float], ['Float', 'float'], str, 'float', None),\n]\n\n\ntype_to_type_name = {typ: converter.type_names[0] for converter in _converters for typ in converter.types}\ntype_name_to_type = {type_name: converter.types[0] for converter in _converters for type_name in converter.type_names if converter.types}\ntype_to_deserializer = {typ: (converter.deserializer_code, converter.definitions) for converter in _converters for typ in converter.types}\ntype_name_to_deserializer = {type_name: (converter.deserializer_code, converter.definitions) for converter in _converters for type_name in converter.type_names}\ntype_name_to_serializer = {type_name: converter.serializer for converter in _converters for type_name in converter.type_names}\n\n\ndef serialize_value(value, type_name: str) -> str:\n '''serialize_value converts the passed value to string based on the serializer associated with the passed type_name'''\n if 
isinstance(value, str):\n return value # The value is supposedly already serialized\n\n if type_name is None:\n type_name = type_to_type_name.get(type(value), type(value).__name__)\n warnings.warn('Missing type name was inferred as \"{}\" based on the value \"{}\".'.format(type_name, str(value)))\n\n serializer = type_name_to_serializer.get(type_name, None)\n if serializer:\n try:\n return serializer(value)\n except Exception as e:\n raise ValueError('Failed to serialize the value \"{}\" of type \"{}\" to type \"{}\". Exception: {}'.format(\n str(value),\n str(type(value).__name__),\n str(type_name),\n str(e),\n ))\n\n serialized_value = str(value)\n warnings.warn('There are no registered serializers from type \"{}\" to type \"{}\", so the value will be serializers as string \"{}\".'.format(\n str(type(value).__name__),\n str(type_name),\n serialized_value),\n )\n return serialized_value\n", "path": "sdk/python/kfp/components/_data_passing.py"}]} | 1,528 | 217 |
gh_patches_debug_13531 | rasdani/github-patches | git_diff | mkdocs__mkdocs-417 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Publish personal GitHub pages
I've switched over to using mkdocs for [my personal page](http://jiahao.github.io) and am really liking the output.
I'd like to request that `gh-deploy` be extended to support personal GitHub pages as well. Unlike project pages, personal pages must have repository names of the form `<username>.github.io`, and the site is published to the `master` branch instead of `gh-pages`. Everything works fine if I do it manually, so I presume supporting personal pages automatically should not be too difficult.
Edit: I have a hacky Makefile that emulates `gh-deploy` (assumes that the markdown source is on a branch called `mkdocs` and deploys to `master`):
``` make
site:
git checkout mkdocs
mkdocs build
git checkout master
cp -R site/* .
rm -rf site
echo Ready to commit and push
```
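
Since `gh_deploy` already shells out to `ghp-import`, which accepts a target branch, this could plausibly be a one-flag change (a sketch; `remote_branch` is a hypothetical config key):

```python
import subprocess

site_dir = "site"
remote_branch = "master"   # personal pages deploy to master, project pages to gh-pages
command = ["ghp-import", "-p", site_dir]
command.extend(["-b", remote_branch])
subprocess.check_call(command)
```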
</issue>
<code>
[start of mkdocs/gh_deploy.py]
1 from __future__ import print_function
2 import subprocess
3 import os
4
5
6 def gh_deploy(config):
7 if not os.path.exists('.git'):
8 print('Cannot deploy - this directory does not appear to be a git repository')
9 return
10
11 print("Copying '%s' to `gh-pages` branch and pushing to GitHub." % config['site_dir'])
12 try:
13 subprocess.check_call(['ghp-import', '-p', config['site_dir']])
14 except:
15 return
16
17 # Does this repository have a CNAME set for GitHub pages?
18 if os.path.isfile('CNAME'):
19 # This GitHub pages repository has a CNAME configured.
20 with(open('CNAME', 'r')) as f:
21 cname_host = f.read().strip()
22 print('Based on your CNAME file, your documentation should be available shortly at: http://%s' % cname_host)
23 print('NOTE: Your DNS records must be configured appropriately for your CNAME URL to work.')
24 return
25
26 # No CNAME found. We will use the origin URL to determine the GitHub
27 # pages location.
28 url = subprocess.check_output(["git", "config", "--get", "remote.origin.url"])
29 url = url.decode('utf-8').strip()
30
31 host = None
32 path = None
33 if 'github.com/' in url:
34 host, path = url.split('github.com/', 1)
35 elif 'github.com:' in url:
36 host, path = url.split('github.com:', 1)
37
38 if host is None:
39 # This could be a GitHub Enterprise deployment.
40 print('Your documentation should be available shortly.')
41 else:
42 username, repo = path.split('/', 1)
43 if repo.endswith('.git'):
44 repo = repo[:-len('.git')]
45 url = 'http://%s.github.io/%s' % (username, repo)
46 print('Your documentation should shortly be available at: ' + url)
47
[end of mkdocs/gh_deploy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
</patch> | diff --git a/mkdocs/gh_deploy.py b/mkdocs/gh_deploy.py
--- a/mkdocs/gh_deploy.py
+++ b/mkdocs/gh_deploy.py
@@ -4,13 +4,17 @@
def gh_deploy(config):
+
if not os.path.exists('.git'):
print('Cannot deploy - this directory does not appear to be a git repository')
return
print("Copying '%s' to `gh-pages` branch and pushing to GitHub." % config['site_dir'])
try:
- subprocess.check_call(['ghp-import', '-p', config['site_dir']])
+ command = ['ghp-import', '-p', config['site_dir']]
+ if 'remote_branch' in config:
+ command.extend(['-b', config['remote_branch']])
+ subprocess.check_call(command)
except:
return
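
A quick illustration of what the patch above changes: the deploy command now honors an optional `remote_branch` key, which is exactly what a personal (`<username>.github.io`) site needs, since it publishes to `master` rather than `gh-pages`. The sketch below is illustrative only — the config values are hypothetical, not part of the dataset:

```python
# Mirrors the patched gh_deploy logic; a personal-pages site would set
# remote_branch to "master" (hypothetical config values).
config = {"site_dir": "site", "remote_branch": "master"}

command = ["ghp-import", "-p", config["site_dir"]]
if "remote_branch" in config:
    command.extend(["-b", config["remote_branch"]])

print(" ".join(command))  # -> ghp-import -p site -b master
```
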
| {"golden_diff": "diff --git a/mkdocs/gh_deploy.py b/mkdocs/gh_deploy.py\n--- a/mkdocs/gh_deploy.py\n+++ b/mkdocs/gh_deploy.py\n@@ -4,13 +4,17 @@\n \n \n def gh_deploy(config):\n+\n if not os.path.exists('.git'):\n print('Cannot deploy - this directory does not appear to be a git repository')\n return\n \n print(\"Copying '%s' to `gh-pages` branch and pushing to GitHub.\" % config['site_dir'])\n try:\n- subprocess.check_call(['ghp-import', '-p', config['site_dir']])\n+ command = ['ghp-import', '-p', config['site_dir']]\n+ if 'remote_branch' in config:\n+ command.extend(['-b', config['remote_branch']])\n+ subprocess.check_call(command)\n except:\n return\n", "issue": "Publish personal GitHub pages\nI've switched over to using mkdocs for [my personal page](http://jiahao.github.io) and am really liking the output.\n\nI'd like to request that `gh-deploy` be extended to support personal GitHub pages also. Unlike project pages, personal pages must have repository names `<username>.github.io`, and the project is published to the `master` branch instead of `gh-pages`. Everything works fine if I do it manually so I presume supporting personal pages automatically should not be too difficult.\n\nEdit: I have a hacky Makefile that emulates `gh-deploy` (assumes that the markdown source is on a branch called `mkdocs` and deploys to `master`):\n\n``` make\nsite:\n git checkout mkdocs\n mkdocs build\n git checkout master\n cp -R site/* .\n rm -rf site\n echo Ready to commit and push\n```\n\n", "before_files": [{"content": "from __future__ import print_function\nimport subprocess\nimport os\n\n\ndef gh_deploy(config):\n if not os.path.exists('.git'):\n print('Cannot deploy - this directory does not appear to be a git repository')\n return\n\n print(\"Copying '%s' to `gh-pages` branch and pushing to GitHub.\" % config['site_dir'])\n try:\n subprocess.check_call(['ghp-import', '-p', config['site_dir']])\n except:\n return\n\n # Does this repository have a CNAME set for GitHub pages?\n if os.path.isfile('CNAME'):\n # This GitHub pages repository has a CNAME configured.\n with(open('CNAME', 'r')) as f:\n cname_host = f.read().strip()\n print('Based on your CNAME file, your documentation should be available shortly at: http://%s' % cname_host)\n print('NOTE: Your DNS records must be configured appropriately for your CNAME URL to work.')\n return\n\n # No CNAME found. We will use the origin URL to determine the GitHub\n # pages location.\n url = subprocess.check_output([\"git\", \"config\", \"--get\", \"remote.origin.url\"])\n url = url.decode('utf-8').strip()\n\n host = None\n path = None\n if 'github.com/' in url:\n host, path = url.split('github.com/', 1)\n elif 'github.com:' in url:\n host, path = url.split('github.com:', 1)\n\n if host is None:\n # This could be a GitHub Enterprise deployment.\n print('Your documentation should be available shortly.')\n else:\n username, repo = path.split('/', 1)\n if repo.endswith('.git'):\n repo = repo[:-len('.git')]\n url = 'http://%s.github.io/%s' % (username, repo)\n print('Your documentation should shortly be available at: ' + url)\n", "path": "mkdocs/gh_deploy.py"}]} | 1,228 | 187 |
gh_patches_debug_37248 | rasdani/github-patches | git_diff | ipython__ipython-14256 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
deprecate IPython.utils.tz
It is not used in IPython itself, and it uses functions that are marked for removal in Python.
</issue>
<code>
[start of IPython/utils/tz.py]
1 # encoding: utf-8
2 """
3 Timezone utilities
4
5 Just UTC-awareness right now
6 """
7
8 #-----------------------------------------------------------------------------
9 # Copyright (C) 2013 The IPython Development Team
10 #
11 # Distributed under the terms of the BSD License. The full license is in
12 # the file COPYING, distributed as part of this software.
13 #-----------------------------------------------------------------------------
14
15 #-----------------------------------------------------------------------------
16 # Imports
17 #-----------------------------------------------------------------------------
18
19 from datetime import tzinfo, timedelta, datetime
20
21 #-----------------------------------------------------------------------------
22 # Code
23 #-----------------------------------------------------------------------------
24 # constant for zero offset
25 ZERO = timedelta(0)
26
27 class tzUTC(tzinfo):
28 """tzinfo object for UTC (zero offset)"""
29
30 def utcoffset(self, d):
31 return ZERO
32
33 def dst(self, d):
34 return ZERO
35
36
37 UTC = tzUTC() # type: ignore[abstract]
38
39
40 def utc_aware(unaware):
41 """decorator for adding UTC tzinfo to datetime's utcfoo methods"""
42 def utc_method(*args, **kwargs):
43 dt = unaware(*args, **kwargs)
44 return dt.replace(tzinfo=UTC)
45 return utc_method
46
47 utcfromtimestamp = utc_aware(datetime.utcfromtimestamp)
48 utcnow = utc_aware(datetime.utcnow)
49
[end of IPython/utils/tz.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
(example patch omitted; it is identical to the example patch shown for the first problem above)
</patch> | diff --git a/IPython/utils/tz.py b/IPython/utils/tz.py
--- a/IPython/utils/tz.py
+++ b/IPython/utils/tz.py
@@ -3,29 +3,56 @@
Timezone utilities
Just UTC-awareness right now
+
+Deprecated since IPython 8.19.0.
"""
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# Imports
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
+import warnings
from datetime import tzinfo, timedelta, datetime
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
# Code
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
+__all__ = ["tzUTC", "utc_aware", "utcfromtimestamp", "utcnow"]
+
+
# constant for zero offset
ZERO = timedelta(0)
+
+def __getattr__(name):
+ if name not in __all__:
+ err = f"IPython.utils.tz is deprecated and has no attribute {name}"
+ raise AttributeError(err)
+
+ _warn_deprecated()
+
+ return getattr(name)
+
+
+def _warn_deprecated():
+ msg = "The module `IPython.utils.tz` is deprecated and will be completely removed."
+ warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
+
+
class tzUTC(tzinfo):
- """tzinfo object for UTC (zero offset)"""
+ """tzinfo object for UTC (zero offset)
+
+ Deprecated since IPython 8.19.0.
+ """
+
+ _warn_deprecated()
def utcoffset(self, d):
return ZERO
@@ -38,11 +65,18 @@
def utc_aware(unaware):
- """decorator for adding UTC tzinfo to datetime's utcfoo methods"""
+ """decorator for adding UTC tzinfo to datetime's utcfoo methods
+
+ Deprecated since IPython 8.19.0.
+ """
+
def utc_method(*args, **kwargs):
+ _warn_deprecated()
dt = unaware(*args, **kwargs)
return dt.replace(tzinfo=UTC)
+
return utc_method
+
utcfromtimestamp = utc_aware(datetime.utcfromtimestamp)
utcnow = utc_aware(datetime.utcnow)
| {"golden_diff": "diff --git a/IPython/utils/tz.py b/IPython/utils/tz.py\n--- a/IPython/utils/tz.py\n+++ b/IPython/utils/tz.py\n@@ -3,29 +3,56 @@\n Timezone utilities\n \n Just UTC-awareness right now\n+\n+Deprecated since IPython 8.19.0.\n \"\"\"\n \n-#-----------------------------------------------------------------------------\n+# -----------------------------------------------------------------------------\n # Copyright (C) 2013 The IPython Development Team\n #\n # Distributed under the terms of the BSD License. The full license is in\n # the file COPYING, distributed as part of this software.\n-#-----------------------------------------------------------------------------\n+# -----------------------------------------------------------------------------\n \n-#-----------------------------------------------------------------------------\n+# -----------------------------------------------------------------------------\n # Imports\n-#-----------------------------------------------------------------------------\n+# -----------------------------------------------------------------------------\n \n+import warnings\n from datetime import tzinfo, timedelta, datetime\n \n-#-----------------------------------------------------------------------------\n+# -----------------------------------------------------------------------------\n # Code\n-#-----------------------------------------------------------------------------\n+# -----------------------------------------------------------------------------\n+__all__ = [\"tzUTC\", \"utc_aware\", \"utcfromtimestamp\", \"utcnow\"]\n+\n+\n # constant for zero offset\n ZERO = timedelta(0)\n \n+\n+def __getattr__(name):\n+ if name not in __all__:\n+ err = f\"IPython.utils.tz is deprecated and has no attribute {name}\"\n+ raise AttributeError(err)\n+\n+ _warn_deprecated()\n+\n+ return getattr(name)\n+\n+\n+def _warn_deprecated():\n+ msg = \"The module `IPython.utils.tz` is deprecated and will be completely removed.\"\n+ warnings.warn(msg, category=DeprecationWarning, stacklevel=2)\n+\n+\n class tzUTC(tzinfo):\n- \"\"\"tzinfo object for UTC (zero offset)\"\"\"\n+ \"\"\"tzinfo object for UTC (zero offset)\n+\n+ Deprecated since IPython 8.19.0.\n+ \"\"\"\n+\n+ _warn_deprecated()\n \n def utcoffset(self, d):\n return ZERO\n@@ -38,11 +65,18 @@\n \n \n def utc_aware(unaware):\n- \"\"\"decorator for adding UTC tzinfo to datetime's utcfoo methods\"\"\"\n+ \"\"\"decorator for adding UTC tzinfo to datetime's utcfoo methods\n+\n+ Deprecated since IPython 8.19.0.\n+ \"\"\"\n+\n def utc_method(*args, **kwargs):\n+ _warn_deprecated()\n dt = unaware(*args, **kwargs)\n return dt.replace(tzinfo=UTC)\n+\n return utc_method\n \n+\n utcfromtimestamp = utc_aware(datetime.utcfromtimestamp)\n utcnow = utc_aware(datetime.utcnow)\n", "issue": "deprecate IPython.utils.tz\nIt is not used in IPython itself, and uses functions that are marked for removal in Python\n", "before_files": [{"content": "# encoding: utf-8\n\"\"\"\nTimezone utilities\n\nJust UTC-awareness right now\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2013 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. 
The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nfrom datetime import tzinfo, timedelta, datetime\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n# constant for zero offset\nZERO = timedelta(0)\n\nclass tzUTC(tzinfo):\n \"\"\"tzinfo object for UTC (zero offset)\"\"\"\n\n def utcoffset(self, d):\n return ZERO\n\n def dst(self, d):\n return ZERO\n\n\nUTC = tzUTC() # type: ignore[abstract]\n\n\ndef utc_aware(unaware):\n \"\"\"decorator for adding UTC tzinfo to datetime's utcfoo methods\"\"\"\n def utc_method(*args, **kwargs):\n dt = unaware(*args, **kwargs)\n return dt.replace(tzinfo=UTC)\n return utc_method\n\nutcfromtimestamp = utc_aware(datetime.utcfromtimestamp)\nutcnow = utc_aware(datetime.utcnow)\n", "path": "IPython/utils/tz.py"}]} | 919 | 537 |
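
The patch leans on the module-level `__getattr__` hook (PEP 562) to raise deprecation warnings lazily. One caveat worth flagging: the diff's `return getattr(name)` would itself raise a `TypeError` if it were ever reached, because the built-in `getattr` needs both an object and a name. A self-contained sketch of the intended pattern, with illustrative names:

```python
# Module-level deprecation shim (PEP 562); the module contents are illustrative.
import datetime
import warnings


def _utcnow():
    # The non-deprecated replacement for datetime.utcnow().
    return datetime.datetime.now(datetime.timezone.utc)


_DEPRECATED = {"utcnow"}


def __getattr__(name):
    if name in _DEPRECATED:
        warnings.warn(
            f"{__name__}.{name} is deprecated and will be removed.",
            DeprecationWarning,
            stacklevel=2,
        )
        return globals()[f"_{name}"]  # dispatch to the private implementation
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```
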
gh_patches_debug_8314 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5425 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CKV_AWS_358] - false alarm triggered even when GH Organisation is specified
**Describe the issue**
I believe this check triggers a false alarm.
The GitHub organisation is already included in the condition values for the `token.actions.githubusercontent.com:sub` variable.
**Examples**
```terraform
data "aws_iam_policy_document" "assumerole" {
statement {
sid = "DefaultAssumeRole"
effect = "Allow"
actions = [
"sts:AssumeRoleWithWebIdentity",
]
condition {
test = "StringEquals"
variable = "token.actions.githubusercontent.com:aud"
values = [
"sts.amazonaws.com"
]
}
condition {
test = "StringLike"
variable = "token.actions.githubusercontent.com:sub"
values = [
"repo:GitHub-Organization/${var.github_repo}:*"
]
}
principals {
type = "Federated"
identifiers = [
"arn:aws:iam::${var.aws_account_id}:oidc-provider/token.actions.githubusercontent.com"
]
}
}
}
```
Checkov output:
```sh
Check: CKV_AWS_358: "Ensure GitHub Actions OIDC trust policies only allows actions from a specific known organization"
FAILED for resource: module.ecr_repo.aws_iam_policy_document.assumerole
File: /../../modules/ecr-repository/data_aws_iam_policy_document_assumerole.tf:1-35
Calling File: /module_ecr_repository.tf:2-24
```
**Version:**
- Checkov Version [2.3.359]
**Additional context**
See https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services
</issue>
<code>
[start of checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py]
1 from typing import Dict, List, Any
2 import re
3 from checkov.common.models.enums import CheckResult, CheckCategories
4 from checkov.common.util.type_forcers import force_list
5 from checkov.terraform.checks.data.base_check import BaseDataCheck
6
7 gh_repo_regex = re.compile(r'repo:[^/]+/[^/]+')
8
9
10 class GithubActionsOIDCTrustPolicy(BaseDataCheck):
11 def __init__(self):
12 name = 'Ensure GitHub Actions OIDC trust policies only allows actions from a specific known organization'
13 id = "CKV_AWS_358"
14 supported_data = ("aws_iam_policy_document",)
15 categories = [CheckCategories.IAM]
16 super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)
17
18 def scan_data_conf(self, conf: Dict[str, List[Any]], entity_type: str) -> CheckResult:
19 statements = force_list(conf.get('statement'))
20 for statement in statements:
21 found_federated_gh_oidc = False
22 if isinstance(statement, dict):
23 if statement.get('principals'):
24 principals = statement['principals']
25 for principal in force_list(principals):
26 if 'type' not in principal and 'identifiers' not in principal:
27 continue
28 principal_type = principal['type']
29 principal_identifiers = principal['identifiers']
30 if isinstance(principal_type, list) and len(
31 principal_type) and 'Federated' in principal_type and isinstance(principal_identifiers,
32 list):
33 for identifier in principal_identifiers:
34 if isinstance(identifier,
35 list) and 'oidc-provider/token.actions.githubusercontent.com' in \
36 identifier[0]:
37 found_federated_gh_oidc = True
38 break
39 if not found_federated_gh_oidc:
40 return CheckResult.PASSED
41 if found_federated_gh_oidc and not statement.get('condition'):
42 return CheckResult.FAILED
43 found_sub_condition_variable = False
44 found_sub_condition_value = False
45 for condition in statement.get('condition'):
46 condition_variables = condition.get('variable')
47 condition_values = condition.get('values')
48 if isinstance(condition_variables, list):
49 for condition_variable in condition_variables:
50 if condition_variable == 'token.actions.githubusercontent.com:sub':
51 found_sub_condition_variable = True
52 break
53 for condition_value in condition_values:
54 if isinstance(condition_value, list) and gh_repo_regex.search(condition_value[0]):
55 found_sub_condition_value = True
56 break
57 if found_sub_condition_value and found_sub_condition_variable:
58 return CheckResult.PASSED
59 else:
60 return CheckResult.FAILED
61
62 return CheckResult.PASSED
63
64
65 check = GithubActionsOIDCTrustPolicy()
66
[end of checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
(example patch omitted; it is identical to the example patch shown for the first problem above)
</patch>
</patch> | diff --git a/checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py b/checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py
--- a/checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py
+++ b/checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py
@@ -56,8 +56,9 @@
break
if found_sub_condition_value and found_sub_condition_variable:
return CheckResult.PASSED
- else:
- return CheckResult.FAILED
+
+ # Found a federated GitHub user, but no restirctions
+ return CheckResult.FAILED
return CheckResult.PASSED
| {"golden_diff": "diff --git a/checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py b/checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py\n--- a/checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py\n+++ b/checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py\n@@ -56,8 +56,9 @@\n break\n if found_sub_condition_value and found_sub_condition_variable:\n return CheckResult.PASSED\n- else:\n- return CheckResult.FAILED\n+\n+ # Found a federated GitHub user, but no restirctions\n+ return CheckResult.FAILED\n \n return CheckResult.PASSED\n", "issue": "[CKV_AWS_358] - false alarm triggered even when GH Organisation is specified\n**Describe the issue**\r\n\r\nI guess there is false alarm triggered by this check.\r\nThe GitHub Organisation is already included in the values of condition for the `token.actions.githubusercontent.com:sub` variable.\r\n\r\n**Examples**\r\n```terraform\r\ndata \"aws_iam_policy_document\" \"assumerole\" {\r\n statement {\r\n sid = \"DefaultAssumeRole\"\r\n effect = \"Allow\"\r\n\r\n actions = [\r\n \"sts:AssumeRoleWithWebIdentity\",\r\n ]\r\n\r\n condition {\r\n test = \"StringEquals\"\r\n variable = \"token.actions.githubusercontent.com:aud\"\r\n\r\n values = [\r\n \"sts.amazonaws.com\"\r\n ]\r\n }\r\n\r\n condition {\r\n test = \"StringLike\"\r\n variable = \"token.actions.githubusercontent.com:sub\"\r\n\r\n values = [\r\n \"repo:GitHub-Organization/${var.github_repo}:*\"\r\n ]\r\n }\r\n\r\n principals {\r\n type = \"Federated\"\r\n identifiers = [\r\n \"arn:aws:iam::${var.aws_account_id}:oidc-provider/token.actions.githubusercontent.com\"\r\n ]\r\n }\r\n }\r\n}\r\n```\r\n\r\nCheckov output:\r\n```sh\r\nCheck: CKV_AWS_358: \"Ensure GitHub Actions OIDC trust policies only allows actions from a specific known organization\"\r\n\tFAILED for resource: module.ecr_repo.aws_iam_policy_document.assumerole\r\n\tFile: /../../modules/ecr-repository/data_aws_iam_policy_document_assumerole.tf:1-35\r\n\tCalling File: /module_ecr_repository.tf:2-24\r\n```\r\n\r\n**Version:**\r\n - Checkov Version [2.3.359]\r\n\r\n**Additional context**\r\nSee https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/configuring-openid-connect-in-amazon-web-services\r\n\r\n\n", "before_files": [{"content": "from typing import Dict, List, Any\nimport re\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.terraform.checks.data.base_check import BaseDataCheck\n\ngh_repo_regex = re.compile(r'repo:[^/]+/[^/]+')\n\n\nclass GithubActionsOIDCTrustPolicy(BaseDataCheck):\n def __init__(self):\n name = 'Ensure GitHub Actions OIDC trust policies only allows actions from a specific known organization'\n id = \"CKV_AWS_358\"\n supported_data = (\"aws_iam_policy_document\",)\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n\n def scan_data_conf(self, conf: Dict[str, List[Any]], entity_type: str) -> CheckResult:\n statements = force_list(conf.get('statement'))\n for statement in statements:\n found_federated_gh_oidc = False\n if isinstance(statement, dict):\n if statement.get('principals'):\n principals = statement['principals']\n for principal in force_list(principals):\n if 'type' not in principal and 'identifiers' not in principal:\n continue\n principal_type = principal['type']\n principal_identifiers = principal['identifiers']\n if isinstance(principal_type, list) and 
len(\n principal_type) and 'Federated' in principal_type and isinstance(principal_identifiers,\n list):\n for identifier in principal_identifiers:\n if isinstance(identifier,\n list) and 'oidc-provider/token.actions.githubusercontent.com' in \\\n identifier[0]:\n found_federated_gh_oidc = True\n break\n if not found_federated_gh_oidc:\n return CheckResult.PASSED\n if found_federated_gh_oidc and not statement.get('condition'):\n return CheckResult.FAILED\n found_sub_condition_variable = False\n found_sub_condition_value = False\n for condition in statement.get('condition'):\n condition_variables = condition.get('variable')\n condition_values = condition.get('values')\n if isinstance(condition_variables, list):\n for condition_variable in condition_variables:\n if condition_variable == 'token.actions.githubusercontent.com:sub':\n found_sub_condition_variable = True\n break\n for condition_value in condition_values:\n if isinstance(condition_value, list) and gh_repo_regex.search(condition_value[0]):\n found_sub_condition_value = True\n break\n if found_sub_condition_value and found_sub_condition_variable:\n return CheckResult.PASSED\n else:\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = GithubActionsOIDCTrustPolicy()\n", "path": "checkov/terraform/checks/data/aws/GithubActionsOIDCTrustPolicy.py"}]} | 1,666 | 159 |
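
The behavioural bug the diff fixes is easy to miss: because the `FAILED` return lived in an `else` branch inside the condition loop, the verdict was effectively decided by the first condition examined — an `aud`-only condition (as in the report above) caused an immediate failure before the `sub` condition was ever inspected. Below is a condensed, standalone restatement of the fixed control flow; it is not the real checkov API, and it couples the variable/value checks per condition, whereas the real check tracks them as separate flags:

```python
# Standalone sketch of the fixed pass/fail logic for CKV_AWS_358.
import re

GH_REPO = re.compile(r"repo:[^/]+/[^/]+")


def evaluate(found_federated_gh_oidc: bool, conditions: list) -> str:
    if not found_federated_gh_oidc:
        return "PASSED"
    for condition in conditions:
        if condition.get("variable") == "token.actions.githubusercontent.com:sub" and any(
            GH_REPO.search(value) for value in condition.get("values", [])
        ):
            return "PASSED"  # sub claim is pinned to a known org/repo
    return "FAILED"  # federated GitHub principal with no org restriction


# The reported policy now passes, since all conditions are inspected:
print(evaluate(True, [
    {"variable": "token.actions.githubusercontent.com:aud", "values": ["sts.amazonaws.com"]},
    {"variable": "token.actions.githubusercontent.com:sub", "values": ["repo:Org/my-repo:*"]},
]))  # -> PASSED
```
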
gh_patches_debug_32747 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1967 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[gRPC] Does not work with stream-to-stream gRPC requests
**Describe the bug**: I'm trying to add Elastic APM to my gRPC service, which has unary-to-unary and stream-to-stream endpoints. Everything is fine with unary-to-unary requests, but stream-to-stream requests stop working.
The service stops accepting streaming requests altogether.
I think I saw somewhere that you wrote that Elastic does not support streaming requests for gRPC, but I didn't expect it to be a service-blocking problem.
Is there any way to work around it, e.g., with custom handling specifically for streaming calls?
**To Reproduce**
1. Create a gRPC service with stream-to-stream and unary-to-unary endpoints.
```python
class TestService(test_pb2_grpc.TestService):
async def Streaming(self, request_iterator, context):
count_of_request = 0
async for request in request_iterator:
count_of_request += 1
yield test_pb2.Response(message=f"#{count_of_request } - ok")
async def Unary(self, request, context):
return test_pb2.Response(message="ok")
```
2. Connect Elastic APM.
```python
elasticapm.instrument()
async def run_serve():
apm_client = GRPCApmClient(
{
"SERVICE_NAME": "grpc-test",
"SERVER_URL": "http://localhost:8200",
"ENVIRONMENT": "test",
"TRANSACTION_SAMPLE_RATE": 1.0,
"SECRET_TOKEN": "qwerty",
}
)
server = grpc.aio.server(
futures.ThreadPoolExecutor(max_workers=10),
)
test_pb2_grpc.add_TestServicer_to_server(
TestService(), server
)
server.add_insecure_port("[::]:50051")
await server.start()
await server.wait_for_termination()
if __name__ == "__main__":
asyncio.run(run_serve())
```
3. Make test requests.
4. Result: unary works; stream returns nothing, and there are no logs in the service.
**Environment (please complete the following information)**
- OS: Linux
- Python version: 3.10
- Framework and version: gRPC 1.43.0
- APM Server version: 6.20.0
**Additional context**
Add any other context about the problem here.
- `requirements.txt`:
<details>
<summary>Click to expand</summary>
```
gcloud==0.18.3
google-api-python-client==2.39.0
grpcio-tools==1.43.0
grpcio-health-checking==1.43.0
setuptools==59.5.0
elastic-apm
sentry-sdk[grpcio]==1.31.0
```
</details>
</issue>
<code>
[start of elasticapm/contrib/grpc/async_server_interceptor.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2022, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import inspect
32
33 import grpc
34
35 import elasticapm
36 from elasticapm.contrib.grpc.server_interceptor import _ServicerContextWrapper, _wrap_rpc_behavior, get_trace_parent
37
38
39 class _AsyncServerInterceptor(grpc.aio.ServerInterceptor):
40 async def intercept_service(self, continuation, handler_call_details):
41 def transaction_wrapper(behavior, request_streaming, response_streaming):
42 async def _interceptor(request_or_iterator, context):
43 if request_streaming or response_streaming: # only unary-unary is supported
44 return behavior(request_or_iterator, context)
45 tp = get_trace_parent(handler_call_details)
46 client = elasticapm.get_client()
47 transaction = client.begin_transaction("request", trace_parent=tp)
48 try:
49 result = behavior(request_or_iterator, _ServicerContextWrapper(context, transaction))
50
51 # This is so we can support both sync and async rpc functions
52 if inspect.isawaitable(result):
53 result = await result
54
55 if transaction and not transaction.outcome:
56 transaction.set_success()
57 return result
58 except Exception:
59 if transaction:
60 transaction.set_failure()
61 client.capture_exception(handled=False)
62 raise
63 finally:
64 client.end_transaction(name=handler_call_details.method)
65
66 return _interceptor
67
68 return _wrap_rpc_behavior(await continuation(handler_call_details), transaction_wrapper)
69
[end of elasticapm/contrib/grpc/async_server_interceptor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
(example patch omitted; it is identical to the example patch shown for the first problem above)
</patch>
</patch> | diff --git a/elasticapm/contrib/grpc/async_server_interceptor.py b/elasticapm/contrib/grpc/async_server_interceptor.py
--- a/elasticapm/contrib/grpc/async_server_interceptor.py
+++ b/elasticapm/contrib/grpc/async_server_interceptor.py
@@ -33,20 +33,18 @@
import grpc
import elasticapm
-from elasticapm.contrib.grpc.server_interceptor import _ServicerContextWrapper, _wrap_rpc_behavior, get_trace_parent
+from elasticapm.contrib.grpc.server_interceptor import _ServicerContextWrapper, get_trace_parent
class _AsyncServerInterceptor(grpc.aio.ServerInterceptor):
async def intercept_service(self, continuation, handler_call_details):
- def transaction_wrapper(behavior, request_streaming, response_streaming):
- async def _interceptor(request_or_iterator, context):
- if request_streaming or response_streaming: # only unary-unary is supported
- return behavior(request_or_iterator, context)
+ def wrap_unary_unary(behavior):
+ async def _interceptor(request, context):
tp = get_trace_parent(handler_call_details)
client = elasticapm.get_client()
transaction = client.begin_transaction("request", trace_parent=tp)
try:
- result = behavior(request_or_iterator, _ServicerContextWrapper(context, transaction))
+ result = behavior(request, _ServicerContextWrapper(context, transaction))
# This is so we can support both sync and async rpc functions
if inspect.isawaitable(result):
@@ -65,4 +63,12 @@
return _interceptor
- return _wrap_rpc_behavior(await continuation(handler_call_details), transaction_wrapper)
+ handler = await continuation(handler_call_details)
+ if handler.request_streaming or handler.response_streaming:
+ return handler
+
+ return grpc.unary_unary_rpc_method_handler(
+ wrap_unary_unary(handler.unary_unary),
+ request_deserializer=handler.request_deserializer,
+ response_serializer=handler.response_serializer,
+ )
| {"golden_diff": "diff --git a/elasticapm/contrib/grpc/async_server_interceptor.py b/elasticapm/contrib/grpc/async_server_interceptor.py\n--- a/elasticapm/contrib/grpc/async_server_interceptor.py\n+++ b/elasticapm/contrib/grpc/async_server_interceptor.py\n@@ -33,20 +33,18 @@\n import grpc\n \n import elasticapm\n-from elasticapm.contrib.grpc.server_interceptor import _ServicerContextWrapper, _wrap_rpc_behavior, get_trace_parent\n+from elasticapm.contrib.grpc.server_interceptor import _ServicerContextWrapper, get_trace_parent\n \n \n class _AsyncServerInterceptor(grpc.aio.ServerInterceptor):\n async def intercept_service(self, continuation, handler_call_details):\n- def transaction_wrapper(behavior, request_streaming, response_streaming):\n- async def _interceptor(request_or_iterator, context):\n- if request_streaming or response_streaming: # only unary-unary is supported\n- return behavior(request_or_iterator, context)\n+ def wrap_unary_unary(behavior):\n+ async def _interceptor(request, context):\n tp = get_trace_parent(handler_call_details)\n client = elasticapm.get_client()\n transaction = client.begin_transaction(\"request\", trace_parent=tp)\n try:\n- result = behavior(request_or_iterator, _ServicerContextWrapper(context, transaction))\n+ result = behavior(request, _ServicerContextWrapper(context, transaction))\n \n # This is so we can support both sync and async rpc functions\n if inspect.isawaitable(result):\n@@ -65,4 +63,12 @@\n \n return _interceptor\n \n- return _wrap_rpc_behavior(await continuation(handler_call_details), transaction_wrapper)\n+ handler = await continuation(handler_call_details)\n+ if handler.request_streaming or handler.response_streaming:\n+ return handler\n+\n+ return grpc.unary_unary_rpc_method_handler(\n+ wrap_unary_unary(handler.unary_unary),\n+ request_deserializer=handler.request_deserializer,\n+ response_serializer=handler.response_serializer,\n+ )\n", "issue": "[gRPC] Does not work with stream-to-stream gRPC requests\n**Describe the bug**: I'm trying to add elastic apm for my gRPC service with unary-to-unary requests and stream-to-stream. Everything is fine with unary-to-unary requests. But stream-to-stream requests stop working.\r\n\r\nThe service stops accepting streaming requests altogether.\r\n\r\nI think I saw somewhere that you wrote elastic does not support stream requests for gRPC. But I didn't think it would be a service-blocking problem. \r\n\r\nIs there any way to get around it, to make custom processing for stream specifically?\r\n\r\n**To Reproduce**\r\n1. Create a gRPC service with stream-to-stream and unary-to-unary endpoints.\r\n```python\r\nclass TestService(test_pb2_grpc.TestService):\r\n async def Streaming(self, request_iterator, context):\r\n count_of_request = 0\r\n async for request in request_iterator:\r\n count_of_request += 1\r\n yield test_pb2.Response(message=f\"#{count_of_request } - ok\")\r\n\r\n async def Unary(self, request, context):\r\n return test_pb2.Response(message=\"ok\")\r\n```\r\n3. 
Connect elastic apm.\r\n```python\r\nelasticapm.instrument()\r\n\r\nasync def run_serve():\r\n apm_client = GRPCApmClient(\r\n {\r\n \"SERVICE_NAME\": \"grpc-test\",\r\n \"SERVER_URL\": \"http://localhost:8200\",\r\n \"ENVIRONMENT\": \"test\",\r\n \"TRANSACTION_SAMPLE_RATE\": 1.0,\r\n \"SECRET_TOKEN\": \"qwerty\",\r\n }\r\n )\r\n server = grpc.aio.server(\r\n futures.ThreadPoolExecutor(max_workers=10),\r\n )\r\n test_pb2_grpc.add_TestServicer_to_server(\r\n TestService(), server\r\n )\r\n server.add_insecure_port(\"[::]:50051\")\r\n await server.start()\r\n await server.wait_for_termination()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n asyncio.run(run_serve())\r\n```\r\n5. Make test requests\r\n6. Result: unary - works, stream - returns nothing, no logs in the service\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.10\r\n- Framework and version: gRPC 1.43.0\r\n- APM Server version: 6.20.0\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n\r\n- `requirements.txt`:\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n gcloud==0.18.3\r\n google-api-python-client==2.39.0\r\n grpcio-tools==1.43.0\r\n grpcio-health-checking==1.43.0\r\n setuptools==59.5.0\r\n elastic-apm\r\n sentry-sdk[grpcio]==1.31.0\r\n ```\r\n </details>\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2022, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport inspect\n\nimport grpc\n\nimport elasticapm\nfrom elasticapm.contrib.grpc.server_interceptor import _ServicerContextWrapper, _wrap_rpc_behavior, get_trace_parent\n\n\nclass _AsyncServerInterceptor(grpc.aio.ServerInterceptor):\n async def intercept_service(self, continuation, handler_call_details):\n def transaction_wrapper(behavior, request_streaming, response_streaming):\n async def _interceptor(request_or_iterator, context):\n if request_streaming or response_streaming: # only unary-unary is supported\n return behavior(request_or_iterator, context)\n tp = get_trace_parent(handler_call_details)\n client = elasticapm.get_client()\n transaction = client.begin_transaction(\"request\", trace_parent=tp)\n try:\n result = behavior(request_or_iterator, _ServicerContextWrapper(context, transaction))\n\n # This is so we can support both sync and async rpc functions\n if inspect.isawaitable(result):\n result = await result\n\n if transaction and not transaction.outcome:\n transaction.set_success()\n return result\n except Exception:\n if transaction:\n transaction.set_failure()\n client.capture_exception(handled=False)\n raise\n finally:\n client.end_transaction(name=handler_call_details.method)\n\n return _interceptor\n\n return _wrap_rpc_behavior(await continuation(handler_call_details), transaction_wrapper)\n", "path": "elasticapm/contrib/grpc/async_server_interceptor.py"}]} | 1,914 | 447 |
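
The heart of the fix is visible in the diff's final hunk: rather than trying to wrap every handler (and silently breaking anything streaming), the interceptor now returns streaming handlers untouched and rebuilds only unary-unary ones. A stripped-down sketch of that shape — it assumes `grpcio` is installed, and the APM transaction bookkeeping is elided behind a stand-in `_wrap`:

```python
# Stripped-down sketch of the fixed interceptor shape (APM details elided).
import inspect

import grpc


class SketchInterceptor(grpc.aio.ServerInterceptor):
    async def intercept_service(self, continuation, handler_call_details):
        handler = await continuation(handler_call_details)

        # Hand streaming RPCs back unchanged so stream-to-stream keeps working.
        if handler.request_streaming or handler.response_streaming:
            return handler

        # Only unary-unary handlers are rebuilt around the wrapped behavior.
        return grpc.unary_unary_rpc_method_handler(
            self._wrap(handler.unary_unary),
            request_deserializer=handler.request_deserializer,
            response_serializer=handler.response_serializer,
        )

    def _wrap(self, behavior):
        async def wrapped(request, context):
            # A real implementation would begin/end an APM transaction here.
            result = behavior(request, context)
            if inspect.isawaitable(result):  # support sync and async servicers
                result = await result
            return result

        return wrapped
```
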
gh_patches_debug_26968 | rasdani/github-patches | git_diff | Pyomo__pyomo-2385 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pyomo.environ doesn't initialize when called by cython
See full details on stack overflow:
https://stackoverflow.com/questions/71902579/pyomo-doesnt-load-when-called-via-cython
Also see https://github.com/Pyomo/pyomo/issues/2374#issuecomment-1115424111:
"...(The linked question is probably either a problem with how the pyomo driver script interacts with argparse [it really shouldn't be doing that much when pyomo.environ is imported - that is almost certainly a bug], or with how cython and argparse interact)."
_Originally posted by @jsiirola in https://github.com/Pyomo/pyomo/issues/2374#issuecomment-1115424111_
</issue>
<code>
[start of pyomo/scripting/pyomo_parser.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright (c) 2008-2022
5 # National Technology and Engineering Solutions of Sandia, LLC
6 # Under the terms of Contract DE-NA0003525 with National Technology and
7 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
8 # rights in this software.
9 # This software is distributed under the 3-clause BSD License.
10 # ___________________________________________________________________________
11
12 __all__ = ['add_subparser', 'get_parser', 'subparsers']
13
14 import argparse
15 import sys
16
17 #
18 # Sort sub_parser names, since these are inserted throughout Pyomo
19 #
20 # NOTE: This may not be robust to different versions of argparse. We're
21 # mucking with a non-public API here ...
22 #
23 class CustomHelpFormatter(argparse.RawDescriptionHelpFormatter):
24
25 def _metavar_formatter(self, action, default_metavar):
26 if action.metavar is not None:
27 result = action.metavar
28 elif action.choices is not None:
29 choice_strs = sorted(str(choice) for choice in action.choices)
30 result = '{%s}' % ','.join(choice_strs)
31 else:
32 result = default_metavar
33
34 def format(tuple_size):
35 if isinstance(result, tuple):
36 return result
37 else:
38 return (result, ) * tuple_size
39 return format
40
41 def _iter_indented_subactions(self, action):
42 try:
43 get_subactions = action._get_subactions
44 except AttributeError:
45 pass
46 else:
47 self._indent()
48 if isinstance(action, argparse._SubParsersAction):
49 for subaction in sorted(get_subactions(), key=lambda x: x.dest):
50 yield subaction
51 else:
52 for subaction in get_subactions():
53 yield subaction
54 self._dedent()
55
56
57 def get_version():
58 from pyomo.version import version
59 import platform
60 return "Pyomo %s (%s %s on %s %s)" % (
61 version,
62 platform.python_implementation(),
63 '.'.join( str(x) for x in sys.version_info[:3] ),
64 platform.system(),
65 platform.release() )
66
67 #
68 # Create the argparse parser for Pyomo
69 #
70 doc="This is the main driver for the Pyomo optimization software."
71 epilog="""
72 -------------------------------------------------------------------------
73 Pyomo supports a variety of modeling and optimization capabilities,
74 which are executed either as subcommands of 'pyomo' or as separate
75 commands. Use the 'help' subcommand to get information about the
76 capabilities installed with Pyomo. Additionally, each subcommand
77 supports independent command-line options. Use the -h option to
78 print details for a subcommand. For example, type
79
80 pyomo solve -h
81
82 to print information about the `solve` subcommand.
83 """
84 _pyomo_parser = argparse.ArgumentParser(
85 description=doc, epilog=epilog, formatter_class=CustomHelpFormatter )
86 _pyomo_parser.add_argument("--version", action="version", version=get_version())
87 _pyomo_subparsers = _pyomo_parser.add_subparsers(
88 dest='subparser_name', title='subcommands' )
89
90 subparsers = []
91
92 def add_subparser(name, **args):
93 """
94 Add a subparser to the 'pyomo' command.
95 """
96 global subparsers
97 func = args.pop('func', None)
98 parser = _pyomo_subparsers.add_parser(name, **args)
99 subparsers.append(name)
100 if func is not None:
101 parser.set_defaults(func=func)
102 return parser
103
104 def get_parser():
105 """
106 Return the parser used by the 'pyomo' commmand.
107 """
108 return _pyomo_parser
109
110
[end of pyomo/scripting/pyomo_parser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
(example patch omitted; it is identical to the example patch shown for the first problem above)
</patch>
</patch> | diff --git a/pyomo/scripting/pyomo_parser.py b/pyomo/scripting/pyomo_parser.py
--- a/pyomo/scripting/pyomo_parser.py
+++ b/pyomo/scripting/pyomo_parser.py
@@ -81,11 +81,8 @@
to print information about the `solve` subcommand.
"""
-_pyomo_parser = argparse.ArgumentParser(
- description=doc, epilog=epilog, formatter_class=CustomHelpFormatter )
-_pyomo_parser.add_argument("--version", action="version", version=get_version())
-_pyomo_subparsers = _pyomo_parser.add_subparsers(
- dest='subparser_name', title='subcommands' )
+_pyomo_parser = None
+_pyomo_subparsers = None
subparsers = []
@@ -93,7 +90,9 @@
"""
Add a subparser to the 'pyomo' command.
"""
- global subparsers
+ if _pyomo_subparsers is None:
+ get_parser()
+
func = args.pop('func', None)
parser = _pyomo_subparsers.add_parser(name, **args)
subparsers.append(name)
@@ -105,5 +104,16 @@
"""
Return the parser used by the 'pyomo' commmand.
"""
+ global _pyomo_parser
+ if _pyomo_parser is None:
+ _pyomo_parser = argparse.ArgumentParser(
+ description=doc,
+ epilog=epilog,
+ formatter_class=CustomHelpFormatter
+ )
+ _pyomo_parser.add_argument(
+ "--version", action="version", version=get_version())
+ global _pyomo_subparsers
+ _pyomo_subparsers = _pyomo_parser.add_subparsers(
+ dest='subparser_name', title='subcommands' )
return _pyomo_parser
-
| {"golden_diff": "diff --git a/pyomo/scripting/pyomo_parser.py b/pyomo/scripting/pyomo_parser.py\n--- a/pyomo/scripting/pyomo_parser.py\n+++ b/pyomo/scripting/pyomo_parser.py\n@@ -81,11 +81,8 @@\n \n to print information about the `solve` subcommand.\n \"\"\"\n-_pyomo_parser = argparse.ArgumentParser(\n- description=doc, epilog=epilog, formatter_class=CustomHelpFormatter )\n-_pyomo_parser.add_argument(\"--version\", action=\"version\", version=get_version())\n-_pyomo_subparsers = _pyomo_parser.add_subparsers(\n- dest='subparser_name', title='subcommands' )\n+_pyomo_parser = None\n+_pyomo_subparsers = None\n \n subparsers = []\n \n@@ -93,7 +90,9 @@\n \"\"\"\n Add a subparser to the 'pyomo' command.\n \"\"\"\n- global subparsers\n+ if _pyomo_subparsers is None:\n+ get_parser()\n+\n func = args.pop('func', None)\n parser = _pyomo_subparsers.add_parser(name, **args)\n subparsers.append(name)\n@@ -105,5 +104,16 @@\n \"\"\"\n Return the parser used by the 'pyomo' commmand.\n \"\"\"\n+ global _pyomo_parser\n+ if _pyomo_parser is None:\n+ _pyomo_parser = argparse.ArgumentParser(\n+ description=doc,\n+ epilog=epilog,\n+ formatter_class=CustomHelpFormatter\n+ )\n+ _pyomo_parser.add_argument(\n+ \"--version\", action=\"version\", version=get_version())\n+ global _pyomo_subparsers\n+ _pyomo_subparsers = _pyomo_parser.add_subparsers(\n+ dest='subparser_name', title='subcommands' )\n return _pyomo_parser\n-\n", "issue": "Pyomo.environ doesn't initialize when called by cython\nSee full details on stack overflow:\r\nhttps://stackoverflow.com/questions/71902579/pyomo-doesnt-load-when-called-via-cython\r\n\r\nAlso see\r\n https://github.com/Pyomo/pyomo/issues/2374#issuecomment-1115424111_ :\r\n\r\n\"...(The linked question is probably either a problem with how the pyomo driver script interacts with argparse [it really shouldn't be doing that much when pyomo.environ is imported - that is almost certainly a bug], or with how cython and argparse interact).\"\r\n\r\n_Originally posted by @jsiirola in https://github.com/Pyomo/pyomo/issues/2374#issuecomment-1115424111_\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n__all__ = ['add_subparser', 'get_parser', 'subparsers']\n\nimport argparse\nimport sys\n\n#\n# Sort sub_parser names, since these are inserted throughout Pyomo\n#\n# NOTE: This may not be robust to different versions of argparse. 
We're\n# mucking with a non-public API here ...\n#\nclass CustomHelpFormatter(argparse.RawDescriptionHelpFormatter):\n\n def _metavar_formatter(self, action, default_metavar):\n if action.metavar is not None:\n result = action.metavar\n elif action.choices is not None:\n choice_strs = sorted(str(choice) for choice in action.choices)\n result = '{%s}' % ','.join(choice_strs)\n else:\n result = default_metavar\n\n def format(tuple_size):\n if isinstance(result, tuple):\n return result\n else:\n return (result, ) * tuple_size\n return format\n\n def _iter_indented_subactions(self, action):\n try:\n get_subactions = action._get_subactions\n except AttributeError:\n pass\n else:\n self._indent()\n if isinstance(action, argparse._SubParsersAction):\n for subaction in sorted(get_subactions(), key=lambda x: x.dest):\n yield subaction\n else:\n for subaction in get_subactions():\n yield subaction\n self._dedent()\n\n\ndef get_version():\n from pyomo.version import version\n import platform\n return \"Pyomo %s (%s %s on %s %s)\" % (\n version,\n platform.python_implementation(),\n '.'.join( str(x) for x in sys.version_info[:3] ),\n platform.system(),\n platform.release() )\n\n#\n# Create the argparse parser for Pyomo\n#\ndoc=\"This is the main driver for the Pyomo optimization software.\"\nepilog=\"\"\"\n-------------------------------------------------------------------------\nPyomo supports a variety of modeling and optimization capabilities,\nwhich are executed either as subcommands of 'pyomo' or as separate\ncommands. Use the 'help' subcommand to get information about the\ncapabilities installed with Pyomo. Additionally, each subcommand\nsupports independent command-line options. Use the -h option to\nprint details for a subcommand. For example, type\n\n pyomo solve -h\n\nto print information about the `solve` subcommand.\n\"\"\"\n_pyomo_parser = argparse.ArgumentParser(\n description=doc, epilog=epilog, formatter_class=CustomHelpFormatter )\n_pyomo_parser.add_argument(\"--version\", action=\"version\", version=get_version())\n_pyomo_subparsers = _pyomo_parser.add_subparsers(\n dest='subparser_name', title='subcommands' )\n\nsubparsers = []\n\ndef add_subparser(name, **args):\n \"\"\"\n Add a subparser to the 'pyomo' command.\n \"\"\"\n global subparsers\n func = args.pop('func', None)\n parser = _pyomo_subparsers.add_parser(name, **args)\n subparsers.append(name)\n if func is not None:\n parser.set_defaults(func=func)\n return parser\n\ndef get_parser():\n \"\"\"\n Return the parser used by the 'pyomo' commmand.\n \"\"\"\n return _pyomo_parser\n\n", "path": "pyomo/scripting/pyomo_parser.py"}]} | 1,744 | 402 |
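
The underlying pattern is worth spelling out: the module previously built an `ArgumentParser` (and its subparsers) as an import-time side effect, which is what collided with embedding environments such as a cython-compiled driver; the patch defers construction to first use. A minimal standalone sketch of the same lazy-initialization idiom, with illustrative names:

```python
# Lazy parser construction: nothing argparse-related happens at import time.
import argparse

_parser = None  # built on the first call to get_parser(), not at import


def get_parser() -> argparse.ArgumentParser:
    global _parser
    if _parser is None:
        _parser = argparse.ArgumentParser(description="example driver")
        _parser.add_argument("--version", action="version", version="0.0")
        _parser.add_subparsers(dest="subparser_name", title="subcommands")
    return _parser
```
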
gh_patches_debug_7895 | rasdani/github-patches | git_diff | chainer__chainer-819 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
link.LSTM.to_cpu (resp. to_gpu) does not move state variables to CPU (resp. GPU).
When `link.LSTM` is moved to CPU (resp. GPU), its state variables should also be moved to CPU (resp. GPU), but they are not.
```
In [39]: l = chainer.links.LSTM(10, 10)
In [40]: l.xp
Out[40]: <module 'numpy' from '/home/delta/.pyenv/versions/pyenv-2.7.9/lib/python2.7/site-packages/numpy/__init__.pyc'>
In [41]: x = chainer.Variable(numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32))
In [42]: l(x)
Out[42]: <variable at 0x7fe900a88d90>
In [43]: type(l.h.data)
Out[43]: numpy.ndarray
In [44]: l.to_gpu()
Out[44]: <chainer.links.connection.lstm.LSTM at 0x7fe900a42a10>
In [45]: l.xp
Out[45]: <module 'cupy' from 'cupy/__init__.pyc'>
In [46]: type(l.h.data)
Out[46]: numpy.ndarray
In [47]: type(l.c.data)
Out[47]: numpy.ndarray
```
The outputs of [46] and [47] should be `cupy.ndarray`.
</issue>
<code>
[start of chainer/links/connection/lstm.py]
1 from chainer.functions.activation import lstm
2 from chainer import link
3 from chainer.links.connection import linear
4 from chainer import variable
5
6
7 class LSTM(link.Chain):
8
9 """Fully-connected LSTM layer.
10
11 This is a fully-connected LSTM layer as a chain. Unlike the
12 :func:`~chainer.functions.lstm` function, which is defined as a stateless
13 activation function, this chain holds upward and lateral connections as
14 child links.
15
16 It also maintains *states*, including the cell state and the output
17 at the previous time step. Therefore, it can be used as a *stateful LSTM*.
18
19 Args:
20 in_size (int): Dimensionality of input vectors.
21 out_size (int): Dimensionality of output vectors.
22
23 Attributes:
24 upward (chainer.links.Linear): Linear layer of upward connections.
25 lateral (chainer.links.Linear): Linear layer of lateral connections.
26 c (chainer.Variable): Cell states of LSTM units.
27 h (chainer.Variable): Output at the previous timestep.
28
29 """
30 def __init__(self, in_size, out_size):
31 super(LSTM, self).__init__(
32 upward=linear.Linear(in_size, 4 * out_size),
33 lateral=linear.Linear(out_size, 4 * out_size, nobias=True),
34 )
35 self.state_size = out_size
36 self.reset_state()
37
38 def reset_state(self):
39 """Resets the internal state.
40
41 It sets None to the :attr:`c` and :attr:`h` attributes.
42
43 """
44 self.c = self.h = None
45
46 def __call__(self, x):
47 """Updates the internal state and returns the LSTM outputs.
48
49 Args:
50 x (~chainer.Variable): A new batch from the input sequence.
51
52 Returns:
53 ~chainer.Variable: Outputs of updated LSTM units.
54
55 """
56 lstm_in = self.upward(x)
57 if self.h is not None:
58 lstm_in += self.lateral(self.h)
59 if self.c is None:
60 xp = self.xp
61 self.c = variable.Variable(
62 xp.zeros((len(x.data), self.state_size), dtype=x.data.dtype),
63 volatile='auto')
64 self.c, self.h = lstm.lstm(self.c, lstm_in)
65 return self.h
66
[end of chainer/links/connection/lstm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
(example patch omitted; it is identical to the example patch shown for the first problem above)
</patch>
</patch> | diff --git a/chainer/links/connection/lstm.py b/chainer/links/connection/lstm.py
--- a/chainer/links/connection/lstm.py
+++ b/chainer/links/connection/lstm.py
@@ -35,6 +35,20 @@
self.state_size = out_size
self.reset_state()
+ def to_cpu(self):
+ super(LSTM, self).to_cpu()
+ if self.c is not None:
+ self.c.to_cpu()
+ if self.h is not None:
+ self.h.to_cpu()
+
+ def to_gpu(self, device=None):
+ super(LSTM, self).to_gpu(device)
+ if self.c is not None:
+ self.c.to_gpu(device)
+ if self.h is not None:
+ self.h.to_gpu(device)
+
def reset_state(self):
"""Resets the internal state.
| {"golden_diff": "diff --git a/chainer/links/connection/lstm.py b/chainer/links/connection/lstm.py\n--- a/chainer/links/connection/lstm.py\n+++ b/chainer/links/connection/lstm.py\n@@ -35,6 +35,20 @@\n self.state_size = out_size\n self.reset_state()\n \n+ def to_cpu(self):\n+ super(LSTM, self).to_cpu()\n+ if self.c is not None:\n+ self.c.to_cpu()\n+ if self.h is not None:\n+ self.h.to_cpu()\n+\n+ def to_gpu(self, device=None):\n+ super(LSTM, self).to_gpu(device)\n+ if self.c is not None:\n+ self.c.to_gpu(device)\n+ if self.h is not None:\n+ self.h.to_gpu(device)\n+\n def reset_state(self):\n \"\"\"Resets the internal state.\n", "issue": "link.LSTM.to_cpu (resp. to_gpu) does not move state variables to CPU (resp. GPU).\nWhen `link.LSTM` is move to CPU (resp. GPU), its state variables should also be moved to CPU(resp. GPU). But it does not.\n\n```\nIn [39]: l = chainer.links.LSTM(10, 10)\n\nIn [40]: l.xp\nOut[40]: <module 'numpy' from '/home/delta/.pyenv/versions/pyenv-2.7.9/lib/python2.7/site-packages/numpy/__init__.pyc'>\n\nIn [41]: x = chainer.Variable(numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32))\n\nIn [42]: l(x)\nOut[42]: <variable at 0x7fe900a88d90>\n\nIn [43]: type(l.h.data)\nOut[43]: numpy.ndarray\n\nIn [44]: l.to_gpu()\nOut[44]: <chainer.links.connection.lstm.LSTM at 0x7fe900a42a10>\n\nIn [45]: l.xp\nOut[45]: <module 'cupy' from 'cupy/__init__.pyc'>\n\nIn [46]: type(l.h.data)\nOut[46]: numpy.ndarray\n\nIn [47]: type(l.c.data)\nOut[47]: numpy.ndarray\n```\n\nThe output of [46] and [47] should be `cupy.ndarray`\n\n", "before_files": [{"content": "from chainer.functions.activation import lstm\nfrom chainer import link\nfrom chainer.links.connection import linear\nfrom chainer import variable\n\n\nclass LSTM(link.Chain):\n\n \"\"\"Fully-connected LSTM layer.\n\n This is a fully-connected LSTM layer as a chain. Unlike the\n :func:`~chainer.functions.lstm` function, which is defined as a stateless\n activation function, this chain holds upward and lateral connections as\n child links.\n\n It also maintains *states*, including the cell state and the output\n at the previous time step. 
Therefore, it can be used as a *stateful LSTM*.\n\n Args:\n in_size (int): Dimensionality of input vectors.\n out_size (int): Dimensionality of output vectors.\n\n Attributes:\n upward (chainer.links.Linear): Linear layer of upward connections.\n lateral (chainer.links.Linear): Linear layer of lateral connections.\n c (chainer.Variable): Cell states of LSTM units.\n h (chainer.Variable): Output at the previous timestep.\n\n \"\"\"\n def __init__(self, in_size, out_size):\n super(LSTM, self).__init__(\n upward=linear.Linear(in_size, 4 * out_size),\n lateral=linear.Linear(out_size, 4 * out_size, nobias=True),\n )\n self.state_size = out_size\n self.reset_state()\n\n def reset_state(self):\n \"\"\"Resets the internal state.\n\n It sets None to the :attr:`c` and :attr:`h` attributes.\n\n \"\"\"\n self.c = self.h = None\n\n def __call__(self, x):\n \"\"\"Updates the internal state and returns the LSTM outputs.\n\n Args:\n x (~chainer.Variable): A new batch from the input sequence.\n\n Returns:\n ~chainer.Variable: Outputs of updated LSTM units.\n\n \"\"\"\n lstm_in = self.upward(x)\n if self.h is not None:\n lstm_in += self.lateral(self.h)\n if self.c is None:\n xp = self.xp\n self.c = variable.Variable(\n xp.zeros((len(x.data), self.state_size), dtype=x.data.dtype),\n volatile='auto')\n self.c, self.h = lstm.lstm(self.c, lstm_in)\n return self.h\n", "path": "chainer/links/connection/lstm.py"}]} | 1,492 | 194 |
gh_patches_debug_21024 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-1146 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
create config aliases for pre-hook and post-hook
## Feature
### Feature description
Pre-hooks and post-hooks can be specified in a `config()` block, but they require the use of a dictionary instead of a list of keyword arguments. Instead, dbt should support the specification of these hooks using `pre_hook` and `post_hook` aliases.
Before:
```
{{
config({
"post-hook" : "grant select on {{ this }} to db_reader"
})
}}
```
After:
```
{{
config(
post_hook="grant select on {{ this }} to db_reader"
)
}}
```
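
One way to implement this is to normalize the keyword aliases back onto the existing hyphenated keys before the config dict is applied — a sketch (the helper name is hypothetical):

```python
def _transform_hook_aliases(config):
    # map the pre_hook/post_hook keyword aliases onto the canonical
    # hyphenated keys, refusing configs that set both spellings
    for oldkey in ('pre_hook', 'post_hook'):
        if oldkey in config:
            newkey = oldkey.replace('_', '-')
            if newkey in config:
                raise ValueError(
                    'conflicting keys "{}" and "{}"'.format(oldkey, newkey))
            config[newkey] = config.pop(oldkey)
    return config
```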
### Who will this benefit?
This will be a quality of life improvement for many dbt users :)
</issue>
<code>
[start of dbt/context/parser.py]
1 import dbt.exceptions
2
3 import dbt.context.common
4
5
6 execute = False
7
8
9 def ref(db_wrapper, model, config, manifest):
10
11 def ref(*args):
12 if len(args) == 1 or len(args) == 2:
13 model.refs.append(list(args))
14
15 else:
16 dbt.exceptions.ref_invalid_args(model, args)
17
18 return db_wrapper.adapter.Relation.create_from_node(config, model)
19
20 return ref
21
22
23 def docs(unparsed, docrefs, column_name=None):
24
25 def do_docs(*args):
26 if len(args) != 1 and len(args) != 2:
27 dbt.exceptions.doc_invalid_args(unparsed, args)
28 doc_package_name = ''
29 doc_name = args[0]
30 if len(args) == 2:
31 doc_package_name = args[1]
32
33 docref = {
34 'documentation_package': doc_package_name,
35 'documentation_name': doc_name,
36 }
37 if column_name is not None:
38 docref['column_name'] = column_name
39
40 docrefs.append(docref)
41
42 # IDK
43 return True
44
45 return do_docs
46
47
48 class Config:
49 def __init__(self, model, source_config):
50 self.model = model
51 self.source_config = source_config
52
53 def __call__(self, *args, **kwargs):
54 if len(args) == 1 and len(kwargs) == 0:
55 opts = args[0]
56 elif len(args) == 0 and len(kwargs) > 0:
57 opts = kwargs
58 else:
59 dbt.exceptions.raise_compiler_error(
60 "Invalid inline model config",
61 self.model)
62
63 self.source_config.update_in_model_config(opts)
64 return ''
65
66 def set(self, name, value):
67 return self.__call__({name: value})
68
69 def require(self, name, validator=None):
70 return ''
71
72 def get(self, name, validator=None, default=None):
73 return ''
74
75
76 def generate(model, runtime_config, manifest, source_config):
77 return dbt.context.common.generate(
78 model, runtime_config, manifest, source_config, dbt.context.parser)
79
[end of dbt/context/parser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dbt/context/parser.py b/dbt/context/parser.py
--- a/dbt/context/parser.py
+++ b/dbt/context/parser.py
@@ -50,6 +50,19 @@
self.model = model
self.source_config = source_config
+ def _transform_config(self, config):
+ for oldkey in ('pre_hook', 'post_hook'):
+ if oldkey in config:
+ newkey = oldkey.replace('_', '-')
+ if newkey in config:
+ dbt.exceptions.raise_compiler_error(
+ 'Invalid config, has conflicting keys "{}" and "{}"'
+ .format(oldkey, newkey),
+ self.model
+ )
+ config[newkey] = config.pop(oldkey)
+ return config
+
def __call__(self, *args, **kwargs):
if len(args) == 1 and len(kwargs) == 0:
opts = args[0]
@@ -60,6 +73,8 @@
"Invalid inline model config",
self.model)
+ opts = self._transform_config(opts)
+
self.source_config.update_in_model_config(opts)
return ''
| {"golden_diff": "diff --git a/dbt/context/parser.py b/dbt/context/parser.py\n--- a/dbt/context/parser.py\n+++ b/dbt/context/parser.py\n@@ -50,6 +50,19 @@\n self.model = model\n self.source_config = source_config\n \n+ def _transform_config(self, config):\n+ for oldkey in ('pre_hook', 'post_hook'):\n+ if oldkey in config:\n+ newkey = oldkey.replace('_', '-')\n+ if newkey in config:\n+ dbt.exceptions.raise_compiler_error(\n+ 'Invalid config, has conflicting keys \"{}\" and \"{}\"'\n+ .format(oldkey, newkey),\n+ self.model\n+ )\n+ config[newkey] = config.pop(oldkey)\n+ return config\n+\n def __call__(self, *args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0:\n opts = args[0]\n@@ -60,6 +73,8 @@\n \"Invalid inline model config\",\n self.model)\n \n+ opts = self._transform_config(opts)\n+\n self.source_config.update_in_model_config(opts)\n return ''\n", "issue": "create config aliases for pre-hook and post-hook\n## Feature\r\n\r\n### Feature description\r\nPre-hooks and post-hooks can be specified in a `config()` block, but they require the use of a dictionary instead of a list of keyword arguments. Instead, dbt should support the specification of these hooks using `pre_hook` and `post_hook` aliases.\r\n\r\nBefore:\r\n```\r\n{{\r\n config({\r\n \"post-hook\" : \"grant select on {{ this }} to db_reader\"\r\n })\r\n}}\r\n```\r\n\r\nAfter:\r\n```\r\n{{\r\n config(\r\n post_hook=\"grant select on {{ this }} to db_reader\"\r\n )\r\n}}\r\n```\r\n\r\n\r\n### Who will this benefit?\r\nThis will be a quality of life improvement for many dbt users :)\n", "before_files": [{"content": "import dbt.exceptions\n\nimport dbt.context.common\n\n\nexecute = False\n\n\ndef ref(db_wrapper, model, config, manifest):\n\n def ref(*args):\n if len(args) == 1 or len(args) == 2:\n model.refs.append(list(args))\n\n else:\n dbt.exceptions.ref_invalid_args(model, args)\n\n return db_wrapper.adapter.Relation.create_from_node(config, model)\n\n return ref\n\n\ndef docs(unparsed, docrefs, column_name=None):\n\n def do_docs(*args):\n if len(args) != 1 and len(args) != 2:\n dbt.exceptions.doc_invalid_args(unparsed, args)\n doc_package_name = ''\n doc_name = args[0]\n if len(args) == 2:\n doc_package_name = args[1]\n\n docref = {\n 'documentation_package': doc_package_name,\n 'documentation_name': doc_name,\n }\n if column_name is not None:\n docref['column_name'] = column_name\n\n docrefs.append(docref)\n\n # IDK\n return True\n\n return do_docs\n\n\nclass Config:\n def __init__(self, model, source_config):\n self.model = model\n self.source_config = source_config\n\n def __call__(self, *args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0:\n opts = args[0]\n elif len(args) == 0 and len(kwargs) > 0:\n opts = kwargs\n else:\n dbt.exceptions.raise_compiler_error(\n \"Invalid inline model config\",\n self.model)\n\n self.source_config.update_in_model_config(opts)\n return ''\n\n def set(self, name, value):\n return self.__call__({name: value})\n\n def require(self, name, validator=None):\n return ''\n\n def get(self, name, validator=None, default=None):\n return ''\n\n\ndef generate(model, runtime_config, manifest, source_config):\n return dbt.context.common.generate(\n model, runtime_config, manifest, source_config, dbt.context.parser)\n", "path": "dbt/context/parser.py"}]} | 1,298 | 253 |
gh_patches_debug_13231 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-6028 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect notification title when reporting a billet
**Bug description**
When a member reports a billet (opinion post), the notification title is `(<bound method PublishedContent.title of <PublishedContent: Version publique de "La Belgique a un nouveau gouvernement, ça se fête !">>,)` instead of the content's title.
**How to reproduce?**
Steps to reproduce the bug:
1. Go to the site
2. Log in with the admin account
3. Open a billet
4. Click "Signaler le contenu" (report the content)
5. Observe that the title of the new report notification is incorrect
**Expected behavior**
The title of the new report notification should be the content's title.
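
The likely culprit is the stray trailing comma in `_get_alert_info` (see the `CONTENT` branch of the code below): it turns the title into a one-element tuple, whose `repr` is what ends up displayed. A two-line illustration:

```python
title = "Version publique",   # trailing comma -> ('Version publique',)
title = "Version publique"    # a plain string, as the notification expects
```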
</issue>
<code>
[start of zds/utils/header_notifications.py]
1 from django.contrib.contenttypes.models import ContentType
2 from django.utils.translation import gettext_lazy as _
3
4 from zds.forum.models import Post
5 from zds.mp.models import PrivateTopic
6 from zds.notification.models import Notification
7 from zds.tutorialv2.models.database import ContentReaction, PublishableContent
8 from zds.utils.models import Alert
9
10
11 def _notifications_to_list(notifications_query):
12 query = notifications_query.select_related("sender__profile").order_by("-pubdate")[:10]
13
14 return [{"pubdate": n.pubdate, "author": n.sender, "title": n.title, "url": n.url} for n in query]
15
16
17 def _get_alert_info(alert):
18 if alert.scope == "FORUM":
19 post = Post.objects.select_related("topic").get(pk=alert.comment.pk)
20 return post.topic.title, post.get_absolute_url()
21 elif alert.scope == "CONTENT":
22 published = PublishableContent.objects.select_related("public_version").get(pk=alert.content.pk)
23 title = (published.public_version.title if published.public_version else published.title,)
24 url = published.get_absolute_url_online() if published.public_version else ""
25 return title, url
26 elif alert.scope == "PROFILE":
27 return _("Profil de {}").format(alert.profile.user.username), alert.profile.get_absolute_url() + "#alerts"
28 else:
29 comment = ContentReaction.objects.select_related("related_content").get(pk=alert.comment.pk)
30 return (
31 comment.related_content.title,
32 comment.get_absolute_url(),
33 )
34
35
36 def _alert_to_dict(alert):
37 title, url = _get_alert_info(alert)
38 return {"title": title, "url": url, "pubdate": alert.pubdate, "author": alert.author, "text": alert.text}
39
40
41 def _alerts_to_list(alerts_query):
42 query = alerts_query.select_related("author", "comment", "content").order_by("-pubdate")[:10]
43
44 return [_alert_to_dict(a) for a in query]
45
46
47 def get_header_notifications(user):
48 if not user.is_authenticated:
49 return None
50
51 private_topic = ContentType.objects.get_for_model(PrivateTopic)
52
53 notifications = Notification.objects.filter(subscription__user=user, is_read=False)
54
55 general_notifications = notifications.exclude(subscription__content_type=private_topic)
56
57 private_notifications = notifications.filter(subscription__content_type=private_topic)
58
59 alerts = Alert.objects.filter(solved=False)
60
61 return {
62 "general_notifications": {
63 "total": general_notifications.count(),
64 "list": _notifications_to_list(general_notifications),
65 },
66 "private_topic_notifications": {
67 "total": private_notifications.count(),
68 "list": _notifications_to_list(private_notifications),
69 },
70 "alerts": user.has_perm("forum.change_post")
71 and {
72 "total": alerts.count(),
73 "list": _alerts_to_list(alerts),
74 },
75 }
76
[end of zds/utils/header_notifications.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/utils/header_notifications.py b/zds/utils/header_notifications.py
--- a/zds/utils/header_notifications.py
+++ b/zds/utils/header_notifications.py
@@ -20,7 +20,7 @@
return post.topic.title, post.get_absolute_url()
elif alert.scope == "CONTENT":
published = PublishableContent.objects.select_related("public_version").get(pk=alert.content.pk)
- title = (published.public_version.title if published.public_version else published.title,)
+ title = published.public_version.title if published.public_version else published.title
url = published.get_absolute_url_online() if published.public_version else ""
return title, url
elif alert.scope == "PROFILE":
| {"golden_diff": "diff --git a/zds/utils/header_notifications.py b/zds/utils/header_notifications.py\n--- a/zds/utils/header_notifications.py\n+++ b/zds/utils/header_notifications.py\n@@ -20,7 +20,7 @@\n return post.topic.title, post.get_absolute_url()\n elif alert.scope == \"CONTENT\":\n published = PublishableContent.objects.select_related(\"public_version\").get(pk=alert.content.pk)\n- title = (published.public_version.title if published.public_version else published.title,)\n+ title = published.public_version.title if published.public_version else published.title\n url = published.get_absolute_url_online() if published.public_version else \"\"\n return title, url\n elif alert.scope == \"PROFILE\":\n", "issue": "Mauvais titre de notification pour le signalement de billet\n**Description du bug**\r\n\r\nLorsqu'un membre signale un billet, le titre de la notification est `(<bound method PublishedContent.title of <PublishedContent: Version publique de \"La Belgique a un nouveau gouvernement, \u00e7a se f\u00eate !\">>,)` au lieu d'\u00eatre le nom du contenu.\r\n\r\n**Comment reproduire ?**\r\n\r\nLa liste des \u00e9tapes qui permet de reproduire le bug :\r\n\r\n1. Aller sur le site\r\n2. Connectez vous avec le compte admin\r\n3. Aller sur un billet\r\n4. Cliquez sur \u00ab Signaler le contenu \u00bb\r\n5. Voir que le titre de la nouvelle notification de signalement est incorrect\r\n\r\n**Comportement attendu**\r\n\r\nLe titre de la nouvelle notification de signalement est le titre du contenu.\n", "before_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import gettext_lazy as _\n\nfrom zds.forum.models import Post\nfrom zds.mp.models import PrivateTopic\nfrom zds.notification.models import Notification\nfrom zds.tutorialv2.models.database import ContentReaction, PublishableContent\nfrom zds.utils.models import Alert\n\n\ndef _notifications_to_list(notifications_query):\n query = notifications_query.select_related(\"sender__profile\").order_by(\"-pubdate\")[:10]\n\n return [{\"pubdate\": n.pubdate, \"author\": n.sender, \"title\": n.title, \"url\": n.url} for n in query]\n\n\ndef _get_alert_info(alert):\n if alert.scope == \"FORUM\":\n post = Post.objects.select_related(\"topic\").get(pk=alert.comment.pk)\n return post.topic.title, post.get_absolute_url()\n elif alert.scope == \"CONTENT\":\n published = PublishableContent.objects.select_related(\"public_version\").get(pk=alert.content.pk)\n title = (published.public_version.title if published.public_version else published.title,)\n url = published.get_absolute_url_online() if published.public_version else \"\"\n return title, url\n elif alert.scope == \"PROFILE\":\n return _(\"Profil de {}\").format(alert.profile.user.username), alert.profile.get_absolute_url() + \"#alerts\"\n else:\n comment = ContentReaction.objects.select_related(\"related_content\").get(pk=alert.comment.pk)\n return (\n comment.related_content.title,\n comment.get_absolute_url(),\n )\n\n\ndef _alert_to_dict(alert):\n title, url = _get_alert_info(alert)\n return {\"title\": title, \"url\": url, \"pubdate\": alert.pubdate, \"author\": alert.author, \"text\": alert.text}\n\n\ndef _alerts_to_list(alerts_query):\n query = alerts_query.select_related(\"author\", \"comment\", \"content\").order_by(\"-pubdate\")[:10]\n\n return [_alert_to_dict(a) for a in query]\n\n\ndef get_header_notifications(user):\n if not user.is_authenticated:\n return None\n\n private_topic = 
ContentType.objects.get_for_model(PrivateTopic)\n\n notifications = Notification.objects.filter(subscription__user=user, is_read=False)\n\n general_notifications = notifications.exclude(subscription__content_type=private_topic)\n\n private_notifications = notifications.filter(subscription__content_type=private_topic)\n\n alerts = Alert.objects.filter(solved=False)\n\n return {\n \"general_notifications\": {\n \"total\": general_notifications.count(),\n \"list\": _notifications_to_list(general_notifications),\n },\n \"private_topic_notifications\": {\n \"total\": private_notifications.count(),\n \"list\": _notifications_to_list(private_notifications),\n },\n \"alerts\": user.has_perm(\"forum.change_post\")\n and {\n \"total\": alerts.count(),\n \"list\": _alerts_to_list(alerts),\n },\n }\n", "path": "zds/utils/header_notifications.py"}]} | 1,474 | 150 |
gh_patches_debug_26355 | rasdani/github-patches | git_diff | catalyst-team__catalyst-119 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multiple dataset sources for tag2label
It would be nice if I could choose more than one input dir here: https://github.com/catalyst-team/catalyst/blob/master/catalyst/contrib/scripts/tag2label.py
For example, it could look like:
`catalyst-contrib tag2label --in-dir=dataset1,dataset2`
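
A sketch of the parsing this would need: split the comma-separated argument and, when several dirs are given, keep the last path component as a prefix so identical tags from different datasets stay distinct (a hypothetical adaptation of `prepare_df_from_dirs`):

```python
splitted_dirs = in_dirs.strip(',').split(',')

def process_fn(path, in_dir):
    if len(splitted_dirs) == 1:
        # single dataset: strip the whole in_dir prefix as before
        return path.replace(f"{in_dir}", "")
    # several datasets: keep the dir name so tags don't collide
    return path.replace(f"{in_dir}", f"{in_dir.split('/')[-2]}/")
```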
</issue>
<code>
[start of catalyst/contrib/scripts/tag2label.py]
1 import argparse
2 import json
3 import pandas as pd
4
5 from catalyst.utils.data import create_dataset, create_dataframe, \
6 prepare_dataset_labeling, separate_tags
7
8
9 def prepare_df_from_dirs(in_dir, tag_column_name):
10 if not in_dir.endswith("/"):
11 in_dir = f"{in_dir}/"
12
13 dataset = create_dataset(
14 f"{in_dir}/**", process_fn=lambda x: x.replace(f"{in_dir}", "")
15 )
16 df = create_dataframe(dataset, columns=[tag_column_name, "filepath"])
17
18 return df
19
20
21 def build_args(parser):
22 parser.add_argument(
23 "--in-csv",
24 type=str,
25 default=None,
26 help="Path to data in `.csv`."
27 )
28 parser.add_argument(
29 "--in-dir",
30 type=str,
31 default=None,
32 help="Path to directory with dataset."
33 )
34
35 parser.add_argument(
36 "--out-dataset",
37 type=str,
38 default=None,
39 required=True,
40 help="Path to output dataframe"
41 )
42 parser.add_argument(
43 "--out-labeling",
44 type=str,
45 default=None,
46 required=True,
47 help="Path to output JSON"
48 )
49
50 parser.add_argument(
51 "--tag-column",
52 type=str,
53 default="tag",
54 help="Target column name"
55 )
56 parser.add_argument(
57 "--tag-delim",
58 type=str,
59 default=None,
60 help="Separator if you want to use several target columns"
61 )
62
63 return parser
64
65
66 def parse_args():
67 parser = argparse.ArgumentParser()
68 build_args(parser)
69 args = parser.parse_args()
70 return args
71
72
73 def main(args, _=None):
74 if args.in_csv is not None:
75 df = pd.read_csv(args.in_csv)
76 elif args.in_dir is not None:
77 df = prepare_df_from_dirs(args.in_dir, args.tag_column)
78 else:
79 raise Exception
80
81 if args.tag_delim is not None:
82 df = separate_tags(
83 df, tag_column=args.tag_column, tag_delim=args.tag_delim
84 )
85
86 tag2lbl = prepare_dataset_labeling(df, args.tag_column)
87 print("Num classes: ", len(tag2lbl))
88
89 with open(args.out_labeling, "w") as fout:
90 json.dump(tag2lbl, fout, indent=4)
91
92 if args.out_dataset is not None:
93 df.to_csv(args.out_dataset, index=False)
94
95
96 if __name__ == "__main__":
97 args = parse_args()
98 main(args)
99
[end of catalyst/contrib/scripts/tag2label.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/catalyst/contrib/scripts/tag2label.py b/catalyst/contrib/scripts/tag2label.py
--- a/catalyst/contrib/scripts/tag2label.py
+++ b/catalyst/contrib/scripts/tag2label.py
@@ -6,15 +6,35 @@
prepare_dataset_labeling, separate_tags
-def prepare_df_from_dirs(in_dir, tag_column_name):
- if not in_dir.endswith("/"):
- in_dir = f"{in_dir}/"
-
- dataset = create_dataset(
- f"{in_dir}/**", process_fn=lambda x: x.replace(f"{in_dir}", "")
- )
- df = create_dataframe(dataset, columns=[tag_column_name, "filepath"])
-
+def prepare_df_from_dirs(in_dirs, tag_column_name):
+ dfs = []
+ splitted_dirs = in_dirs.strip(',').split(',')
+
+ def process_fn(x):
+ if len(splitted_dirs) == 1:
+ # remove all in_dir part from path
+ return x.replace(f"{in_dir}", "")
+ else:
+ # leaves last part of in_dir path,
+ # which identifies separate in_dir
+ return x.replace(
+ f"{in_dir}",
+ f"{in_dir.split('/')[-2]}/")
+
+ for in_dir in splitted_dirs:
+ if not in_dir.endswith("/"):
+ in_dir = f"{in_dir}/"
+
+ dataset = create_dataset(
+ f"{in_dir}/**",
+ process_fn=process_fn)
+
+ dfs.append(
+ create_dataframe(
+ dataset,
+ columns=[tag_column_name, "filepath"]))
+
+ df = pd.concat(dfs).reset_index(drop=True)
return df
@@ -29,7 +49,8 @@
"--in-dir",
type=str,
default=None,
- help="Path to directory with dataset."
+ help="Path to directory with dataset"
+ "or paths separated by commas for several datasets"
)
parser.add_argument(
| {"golden_diff": "diff --git a/catalyst/contrib/scripts/tag2label.py b/catalyst/contrib/scripts/tag2label.py\n--- a/catalyst/contrib/scripts/tag2label.py\n+++ b/catalyst/contrib/scripts/tag2label.py\n@@ -6,15 +6,35 @@\n prepare_dataset_labeling, separate_tags\n \n \n-def prepare_df_from_dirs(in_dir, tag_column_name):\n- if not in_dir.endswith(\"/\"):\n- in_dir = f\"{in_dir}/\"\n-\n- dataset = create_dataset(\n- f\"{in_dir}/**\", process_fn=lambda x: x.replace(f\"{in_dir}\", \"\")\n- )\n- df = create_dataframe(dataset, columns=[tag_column_name, \"filepath\"])\n-\n+def prepare_df_from_dirs(in_dirs, tag_column_name):\n+ dfs = []\n+ splitted_dirs = in_dirs.strip(',').split(',')\n+\n+ def process_fn(x):\n+ if len(splitted_dirs) == 1:\n+ # remove all in_dir part from path\n+ return x.replace(f\"{in_dir}\", \"\")\n+ else:\n+ # leaves last part of in_dir path,\n+ # which identifies separate in_dir\n+ return x.replace(\n+ f\"{in_dir}\",\n+ f\"{in_dir.split('/')[-2]}/\")\n+\n+ for in_dir in splitted_dirs:\n+ if not in_dir.endswith(\"/\"):\n+ in_dir = f\"{in_dir}/\"\n+\n+ dataset = create_dataset(\n+ f\"{in_dir}/**\",\n+ process_fn=process_fn)\n+\n+ dfs.append(\n+ create_dataframe(\n+ dataset,\n+ columns=[tag_column_name, \"filepath\"]))\n+\n+ df = pd.concat(dfs).reset_index(drop=True)\n return df\n \n \n@@ -29,7 +49,8 @@\n \"--in-dir\",\n type=str,\n default=None,\n- help=\"Path to directory with dataset.\"\n+ help=\"Path to directory with dataset\"\n+ \"or paths separated by commas for several datasets\"\n )\n \n parser.add_argument(\n", "issue": "Multiple dataset source for tag2label\nIt will be nice if I can choose not one input dir here https://github.com/catalyst-team/catalyst/blob/master/catalyst/contrib/scripts/tag2label.py\r\n\r\nFor example it can looks like:\r\n`catalyst-contrib tag2label --in-dir=dataset1,dataset2`\n", "before_files": [{"content": "import argparse\nimport json\nimport pandas as pd\n\nfrom catalyst.utils.data import create_dataset, create_dataframe, \\\n prepare_dataset_labeling, separate_tags\n\n\ndef prepare_df_from_dirs(in_dir, tag_column_name):\n if not in_dir.endswith(\"/\"):\n in_dir = f\"{in_dir}/\"\n\n dataset = create_dataset(\n f\"{in_dir}/**\", process_fn=lambda x: x.replace(f\"{in_dir}\", \"\")\n )\n df = create_dataframe(dataset, columns=[tag_column_name, \"filepath\"])\n\n return df\n\n\ndef build_args(parser):\n parser.add_argument(\n \"--in-csv\",\n type=str,\n default=None,\n help=\"Path to data in `.csv`.\"\n )\n parser.add_argument(\n \"--in-dir\",\n type=str,\n default=None,\n help=\"Path to directory with dataset.\"\n )\n\n parser.add_argument(\n \"--out-dataset\",\n type=str,\n default=None,\n required=True,\n help=\"Path to output dataframe\"\n )\n parser.add_argument(\n \"--out-labeling\",\n type=str,\n default=None,\n required=True,\n help=\"Path to output JSON\"\n )\n\n parser.add_argument(\n \"--tag-column\",\n type=str,\n default=\"tag\",\n help=\"Target column name\"\n )\n parser.add_argument(\n \"--tag-delim\",\n type=str,\n default=None,\n help=\"Separator if you want to use several target columns\"\n )\n\n return parser\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n build_args(parser)\n args = parser.parse_args()\n return args\n\n\ndef main(args, _=None):\n if args.in_csv is not None:\n df = pd.read_csv(args.in_csv)\n elif args.in_dir is not None:\n df = prepare_df_from_dirs(args.in_dir, args.tag_column)\n else:\n raise Exception\n\n if args.tag_delim is not None:\n df = separate_tags(\n df, tag_column=args.tag_column, 
tag_delim=args.tag_delim\n )\n\n tag2lbl = prepare_dataset_labeling(df, args.tag_column)\n print(\"Num classes: \", len(tag2lbl))\n\n with open(args.out_labeling, \"w\") as fout:\n json.dump(tag2lbl, fout, indent=4)\n\n if args.out_dataset is not None:\n df.to_csv(args.out_dataset, index=False)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n", "path": "catalyst/contrib/scripts/tag2label.py"}]} | 1,341 | 447 |
gh_patches_debug_2691 | rasdani/github-patches | git_diff | hydroshare__hydroshare-2769 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add back Active, Date joined, and last login in mezzanine listing of users
In the 3/19/18 version of HydroShare, the admin user listing included the Active, Date joined, and Last login columns (screenshot missing from this copy).
At present, the admin user listing no longer shows those columns (screenshot missing from this copy).
The fields Active, Date joined, and Last login are needed so that when there are problems with users creating and activating accounts (as occurred this week), an admin can list recent account creations and account-creation attempts to assess the extent of the problem and contact users that may have been impacted.
This regression was noted in https://github.com/hydroshare/hydroshare/pull/2677#issuecomment-374183106
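
For reference, the Django admin controls these columns through `ModelAdmin.list_display`, so restoring them should be a one-liner on the customized `UserAdmin`:

```python
UserAdmin.list_display = [
    'username', 'email', 'first_name', 'last_name', 'is_staff',
    'is_active', 'date_joined', 'last_login',
]
```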
</issue>
<code>
[start of hs_core/admin.py]
1 from django import forms
2 from django.contrib.auth.admin import UserAdmin
3 from django.contrib.auth.forms import UserCreationForm
4 from django.contrib.auth.models import User
5 from django.contrib.gis import admin
6 from django.contrib.contenttypes.admin import GenericTabularInline
7 from django.utils.translation import ugettext_lazy as _
8
9 from mezzanine.pages.admin import PageAdmin
10
11 from .models import *
12
13
14 class UserCreationFormExtended(UserCreationForm):
15 def __init__(self, *args, **kwargs):
16 super(UserCreationFormExtended, self).__init__(*args, **kwargs)
17 self.fields['email'] = forms.EmailField(label=_("E-mail"), max_length=75)
18
19 UserAdmin.add_form = UserCreationFormExtended
20 UserAdmin.add_fieldsets = (
21 (None, {
22 'classes': ('wide',),
23 'fields': ('email', 'username', 'password1', 'password2',)
24 }),
25 )
26
27 class InlineResourceFiles(GenericTabularInline):
28 model = ResourceFile
29
30 class GenericResourceAdmin(PageAdmin):
31 inlines = PageAdmin.inlines + [InlineResourceFiles]
32
33 admin.site.unregister(User)
34 admin.site.register(User, UserAdmin)
35 admin.site.register(GenericResource, GenericResourceAdmin)
36
[end of hs_core/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hs_core/admin.py b/hs_core/admin.py
--- a/hs_core/admin.py
+++ b/hs_core/admin.py
@@ -23,6 +23,10 @@
'fields': ('email', 'username', 'password1', 'password2',)
}),
)
+UserAdmin.list_display = [
+ 'username', 'email', 'first_name', 'last_name', 'is_staff',
+ 'is_active', 'date_joined', 'last_login'
+]
class InlineResourceFiles(GenericTabularInline):
model = ResourceFile
| {"golden_diff": "diff --git a/hs_core/admin.py b/hs_core/admin.py\n--- a/hs_core/admin.py\n+++ b/hs_core/admin.py\n@@ -23,6 +23,10 @@\n 'fields': ('email', 'username', 'password1', 'password2',)\n }),\n )\n+UserAdmin.list_display = [\n+ 'username', 'email', 'first_name', 'last_name', 'is_staff',\n+ 'is_active', 'date_joined', 'last_login'\n+]\n \n class InlineResourceFiles(GenericTabularInline):\n model = ResourceFile\n", "issue": "Add back Active, Date joined, and last login in mezzanine listing of users\nIn the 3/19/18 version of HydroShare when an admin listed users the fields listed were\r\n\r\n\r\nAt present when an admin lists users the fields are\r\n\r\n\r\n\r\nThe fields Active, Date joined and last login are needed so that when there are problems with users creating and activating accounts (as occurred this week) an admin can list recent account creations and account creation attempts to assess the extent of the problem, and contact users that may have been impacted.\r\n\r\nThis regression was noted in https://github.com/hydroshare/hydroshare/pull/2677#issuecomment-374183106\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.gis import admin\nfrom django.contrib.contenttypes.admin import GenericTabularInline\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom mezzanine.pages.admin import PageAdmin\n\nfrom .models import *\n\n\nclass UserCreationFormExtended(UserCreationForm):\n def __init__(self, *args, **kwargs):\n super(UserCreationFormExtended, self).__init__(*args, **kwargs)\n self.fields['email'] = forms.EmailField(label=_(\"E-mail\"), max_length=75)\n\nUserAdmin.add_form = UserCreationFormExtended\nUserAdmin.add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'username', 'password1', 'password2',)\n }),\n)\n\nclass InlineResourceFiles(GenericTabularInline):\n model = ResourceFile\n\nclass GenericResourceAdmin(PageAdmin):\n inlines = PageAdmin.inlines + [InlineResourceFiles]\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(GenericResource, GenericResourceAdmin)\n", "path": "hs_core/admin.py"}]} | 1,120 | 128 |
gh_patches_debug_17207 | rasdani/github-patches | git_diff | pytorch__pytorch-4461 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JIT leaks memory in nets with a dropout layer
If I add a dropout layer to the FC layers, a JIT-compiled forward pass of a net starts to seriously leak memory in the backward pass. I made a simple test case below. When it runs, you will see rapidly increasing GPU/CPU memory usage in nvidia-smi/top, and it will soon crash with an out-of-memory exception. Without the dropout layer or the backward pass, it works fine.
```python
import torch
from torch import jit
import torch.nn as nn
from torch.autograd import Variable
class TestNet(nn.Module):
def __init__(self):
super(TestNet, self).__init__()
self.net1 = nn.Linear(100, 200)
self.net2 = nn.Linear(200, 1)
self.sigmoid = nn.Sigmoid()
self.ReLU = nn.ReLU(inplace=False)
self.drop = nn.Dropout(0.5)
def forward(self, V):
return self.sigmoid(self.net2(self.drop(self.ReLU(self.net1(V))))).squeeze()
use_cuda = True
net = TestNet()
criterion = nn.BCELoss()
if use_cuda:
net.cuda()
criterion.cuda()
V = Variable(torch.randn(100, 100)).cuda()
label = Variable(torch.randn(100)).cuda()
else:
V = Variable(torch.randn(100, 100))
label = Variable(torch.randn(100))
net.train()
fwd = jit.compile(net.forward)
for i in range(0,1000000):
r = fwd(V)
err = criterion(r, label)
err.backward()
```
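
One direction for a fix: restructure `Dropout.forward` so the inactive case returns before anything is saved on `ctx` — a sketch reusing the method's existing pieces (no claim this is the only cause of the leak):

```python
@classmethod
def forward(cls, ctx, input, p=0.5, train=False, inplace=False):
    ctx.p, ctx.train, ctx.inplace = p, train, inplace
    if ctx.p == 0 or not ctx.train:
        return input                      # no-op: nothing stored on ctx
    if ctx.inplace:
        ctx.mark_dirty(input)
        output = input
    else:
        output = input.clone()
    ctx.noise = cls._make_noise(input)
    if ctx.p == 1:
        ctx.noise.fill_(0)
    else:
        ctx.noise.bernoulli_(1 - ctx.p).div_(1 - ctx.p)
    ctx.noise = ctx.noise.expand_as(input)
    output.mul_(ctx.noise)
    return output
```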
</issue>
<code>
[start of torch/nn/_functions/dropout.py]
1 import torch
2 from torch.autograd.function import InplaceFunction
3 from torch.autograd import Variable
4 from itertools import repeat
5
6
7 class Dropout(InplaceFunction):
8
9 @staticmethod
10 def _make_noise(input):
11 return input.new().resize_as_(input)
12
13 @staticmethod
14 def symbolic(g, input, p=0.5, train=False, inplace=False):
15 # See Note [Export inplace]
16 r, _ = g.op("Dropout", input, ratio_f=p, is_test_i=not train, outputs=2)
17 return r
18
19 @classmethod
20 def forward(cls, ctx, input, p=0.5, train=False, inplace=False):
21 if p < 0 or p > 1:
22 raise ValueError("dropout probability has to be between 0 and 1, "
23 "but got {}".format(p))
24 ctx.p = p
25 ctx.train = train
26 ctx.inplace = inplace
27
28 if ctx.inplace:
29 ctx.mark_dirty(input)
30 output = input
31 else:
32 output = input.clone()
33
34 if ctx.p > 0 and ctx.train:
35 ctx.noise = cls._make_noise(input)
36 if ctx.p == 1:
37 ctx.noise.fill_(0)
38 else:
39 ctx.noise.bernoulli_(1 - ctx.p).div_(1 - ctx.p)
40 ctx.noise = ctx.noise.expand_as(input)
41 output.mul_(ctx.noise)
42
43 return output
44
45 @staticmethod
46 def backward(ctx, grad_output):
47 if ctx.p > 0 and ctx.train:
48 return grad_output.mul(Variable(ctx.noise)), None, None, None
49 else:
50 return grad_output, None, None, None
51
52
53 class FeatureDropout(Dropout):
54
55 @staticmethod
56 def symbolic(g, input, p=0.5, train=False, inplace=False):
57 # See Note [Export inplace]
58 # NB: In inference mode, FeatureDropout is exported as an identity op.
59 from torch.onnx.symbolic import _unimplemented
60 if train:
61 return _unimplemented("FeatureDropout", "training mode")
62 return input
63
64 @staticmethod
65 def _make_noise(input):
66 return input.new().resize_(input.size(0), input.size(1),
67 *repeat(1, input.dim() - 2))
68
[end of torch/nn/_functions/dropout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torch/nn/_functions/dropout.py b/torch/nn/_functions/dropout.py
--- a/torch/nn/_functions/dropout.py
+++ b/torch/nn/_functions/dropout.py
@@ -25,20 +25,22 @@
ctx.train = train
ctx.inplace = inplace
+ if ctx.p == 0 or not ctx.train:
+ return input
+
if ctx.inplace:
ctx.mark_dirty(input)
output = input
else:
output = input.clone()
- if ctx.p > 0 and ctx.train:
- ctx.noise = cls._make_noise(input)
- if ctx.p == 1:
- ctx.noise.fill_(0)
- else:
- ctx.noise.bernoulli_(1 - ctx.p).div_(1 - ctx.p)
- ctx.noise = ctx.noise.expand_as(input)
- output.mul_(ctx.noise)
+ ctx.noise = cls._make_noise(input)
+ if ctx.p == 1:
+ ctx.noise.fill_(0)
+ else:
+ ctx.noise.bernoulli_(1 - ctx.p).div_(1 - ctx.p)
+ ctx.noise = ctx.noise.expand_as(input)
+ output.mul_(ctx.noise)
return output
| {"golden_diff": "diff --git a/torch/nn/_functions/dropout.py b/torch/nn/_functions/dropout.py\n--- a/torch/nn/_functions/dropout.py\n+++ b/torch/nn/_functions/dropout.py\n@@ -25,20 +25,22 @@\n ctx.train = train\n ctx.inplace = inplace\n \n+ if ctx.p == 0 or not ctx.train:\n+ return input\n+\n if ctx.inplace:\n ctx.mark_dirty(input)\n output = input\n else:\n output = input.clone()\n \n- if ctx.p > 0 and ctx.train:\n- ctx.noise = cls._make_noise(input)\n- if ctx.p == 1:\n- ctx.noise.fill_(0)\n- else:\n- ctx.noise.bernoulli_(1 - ctx.p).div_(1 - ctx.p)\n- ctx.noise = ctx.noise.expand_as(input)\n- output.mul_(ctx.noise)\n+ ctx.noise = cls._make_noise(input)\n+ if ctx.p == 1:\n+ ctx.noise.fill_(0)\n+ else:\n+ ctx.noise.bernoulli_(1 - ctx.p).div_(1 - ctx.p)\n+ ctx.noise = ctx.noise.expand_as(input)\n+ output.mul_(ctx.noise)\n \n return output\n", "issue": "JIT leaks memory in nets with dropout layer \nif I add a dropout layer in FC layers, a JIT compiled forward pass of a net starts to seriously leak memory in the backward pass. I made a simple test case below. When it runs, you will see a rapidly increasing GPU/CPU memory usage in nvidia-smi/top and it will crash with an out of memory exception soon. Without the dropout layer or the backward pass, it works fine. \r\n\r\n```python\r\nimport torch\r\nfrom torch import jit\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\n\r\nclass TestNet(nn.Module):\r\n def __init__(self):\r\n super(TestNet, self).__init__()\r\n self.net1 = nn.Linear(100, 200)\r\n self.net2 = nn.Linear(200, 1)\r\n self.sigmoid = nn.Sigmoid()\r\n self.ReLU = nn.ReLU(inplace=False)\r\n self.drop = nn.Dropout(0.5)\r\n \r\n def forward(self, V):\r\n return self.sigmoid(self.net2(self.drop(self.ReLU(self.net1(V))))).squeeze() \r\n\r\n\r\nuse_cuda = True\r\nnet = TestNet()\r\ncriterion = nn.BCELoss()\r\nif use_cuda:\r\n net.cuda()\r\n criterion.cuda()\r\n V = Variable(torch.randn(100, 100)).cuda()\r\n label = Variable(torch.randn(100)).cuda()\r\nelse:\r\n V = Variable(torch.randn(100, 100))\r\n label = Variable(torch.randn(100))\r\n\r\nnet.train()\r\nfwd = jit.compile(net.forward)\r\nfor i in range(0,1000000):\r\n r = fwd(V)\r\n err = criterion(r, label)\r\n err.backward() \r\n\r\n```\r\n\r\n\n", "before_files": [{"content": "import torch\nfrom torch.autograd.function import InplaceFunction\nfrom torch.autograd import Variable\nfrom itertools import repeat\n\n\nclass Dropout(InplaceFunction):\n\n @staticmethod\n def _make_noise(input):\n return input.new().resize_as_(input)\n\n @staticmethod\n def symbolic(g, input, p=0.5, train=False, inplace=False):\n # See Note [Export inplace]\n r, _ = g.op(\"Dropout\", input, ratio_f=p, is_test_i=not train, outputs=2)\n return r\n\n @classmethod\n def forward(cls, ctx, input, p=0.5, train=False, inplace=False):\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n ctx.p = p\n ctx.train = train\n ctx.inplace = inplace\n\n if ctx.inplace:\n ctx.mark_dirty(input)\n output = input\n else:\n output = input.clone()\n\n if ctx.p > 0 and ctx.train:\n ctx.noise = cls._make_noise(input)\n if ctx.p == 1:\n ctx.noise.fill_(0)\n else:\n ctx.noise.bernoulli_(1 - ctx.p).div_(1 - ctx.p)\n ctx.noise = ctx.noise.expand_as(input)\n output.mul_(ctx.noise)\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.p > 0 and ctx.train:\n return grad_output.mul(Variable(ctx.noise)), None, None, None\n else:\n return grad_output, None, None, None\n\n\nclass 
FeatureDropout(Dropout):\n\n @staticmethod\n def symbolic(g, input, p=0.5, train=False, inplace=False):\n # See Note [Export inplace]\n # NB: In inference mode, FeatureDropout is exported as an identity op.\n from torch.onnx.symbolic import _unimplemented\n if train:\n return _unimplemented(\"FeatureDropout\", \"training mode\")\n return input\n\n @staticmethod\n def _make_noise(input):\n return input.new().resize_(input.size(0), input.size(1),\n *repeat(1, input.dim() - 2))\n", "path": "torch/nn/_functions/dropout.py"}]} | 1,547 | 299 |
gh_patches_debug_23964 | rasdani/github-patches | git_diff | fidals__shopelectro-582 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix the email template
The email template used when paying online is missing the comments line that the regular template has. Add it.
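
If the fix also needs the online-payment page to render the same form rows (including the comment field) as the regular order page, one assumed approach is to expose the form's field-to-selector mapping to the template — a hypothetical helper:

```python
import json

def raw_order_fields(form):
    # map each form field's HTML name to a CSS selector for its input
    return json.dumps({
        field.html_name: f'#{field.id_for_label}' for field in form
    })
```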
</issue>
<code>
[start of shopelectro/views/ecommerce.py]
1 from django.conf import settings
2 from django.core import serializers
3 from django.http import HttpResponse, JsonResponse
4 from django.shortcuts import get_object_or_404, render
5 from django.views.decorators.http import require_POST
6
7 from ecommerce import mailer, views as ec_views
8 from pages.models import CustomPage
9
10 from shopelectro.cart import SECart
11 from shopelectro.forms import OrderForm
12 from shopelectro.models import Product, Order
13
14
15 # ECOMMERCE VIEWS
16 class OrderPage(ec_views.OrderPage):
17 order_form = OrderForm
18 cart = SECart
19
20 def get_context_data(self, request, **kwargs):
21 return {
22 **super(OrderPage, self).get_context_data(request, **kwargs),
23 'page': CustomPage.objects.get(slug='order'),
24 }
25
26
27 class AddToCart(ec_views.AddToCart):
28 cart = SECart
29 product_model = Product
30 order_form = OrderForm
31
32
33 class RemoveFromCart(ec_views.RemoveFromCart):
34 cart = SECart
35 product_model = Product
36 order_form = OrderForm
37
38
39 class ChangeCount(ec_views.ChangeCount):
40 cart = SECart
41 product_model = Product
42 order_form = OrderForm
43
44
45 class FlushCart(ec_views.FlushCart):
46 product_model = Product
47 order_form = OrderForm
48
49
50 class OrderSuccess(ec_views.OrderSuccess):
51 order = Order.objects.all().prefetch_related('positions')
52
53 def get_context_data(self, **kwargs):
54 context = super().get_context_data(**kwargs)
55 positions_json = serializers.serialize(
56 'json',
57 context['order'].positions.all(),
58 fields=['name', 'quantity', 'price'],
59 )
60
61 return {
62 **context,
63 'positions_json': positions_json,
64 }
65
66
67 @require_POST
68 def one_click_buy(request):
69 """
70 Handle one-click-buy.
71
72 Accept XHR, save Order to DB, send mail about it
73 and return 200 OK.
74 """
75 SECart(request.session).clear()
76
77 cart = SECart(request.session)
78 product = get_object_or_404(Product, id=request.POST['product'])
79 cart.add(product, int(request.POST['quantity']))
80 order = Order(phone=request.POST['phone'])
81 order.set_positions(cart)
82 ec_views.save_order_to_session(request.session, order)
83 mailer.send_order(
84 subject=settings.EMAIL_SUBJECTS['one_click'],
85 order=order,
86 to_customer=False,
87 )
88 return HttpResponse('ok')
89
90
91 @require_POST
92 def order_call(request):
93 """Send email about ordered call."""
94 phone, time, url = ec_views.get_keys_from_post(
95 request, 'phone', 'time', 'url')
96
97 mailer.send_backcall(
98 subject=settings.EMAIL_SUBJECTS['call'],
99 phone=phone,
100 time=time,
101 url=url,
102 )
103
104 return HttpResponse('ok')
105
106
107 class YandexOrder(OrderPage):
108
109 def post(self, request):
110 cart = self.cart(request.session)
111 form = self.order_form(request.POST.dict())
112
113 if not form.is_valid():
114 return render(request, self.template, {'cart': cart, 'form': form})
115
116 order = form.save()
117 order.set_positions(cart)
118 ec_views.save_order_to_session(request.session, order)
119
120 # Took form fields from Yandex docs https://goo.gl/afKfsz
121 response_data = {
122 'yandex_kassa_link': settings.YANDEX_KASSA_LINK, # Required
123 'shopId': settings.SHOP['id'], # Required
124 'scid': settings.SHOP['scid'], # Required
125 'shopSuccessURL': settings.SHOP['success_url'],
126 'shopFailURL': settings.SHOP['fail_url'],
127 'customerNumber': order.id, # Required
128 'sum': order.total_price, # Required
129 'orderNumber': order.fake_order_number,
130 'cps_phone': order.phone,
131 'cps_email': order.email,
132 'paymentType': request.POST.get('payment_type'),
133 }
134
135 return JsonResponse(response_data)
136
[end of shopelectro/views/ecommerce.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/views/ecommerce.py b/shopelectro/views/ecommerce.py
--- a/shopelectro/views/ecommerce.py
+++ b/shopelectro/views/ecommerce.py
@@ -1,3 +1,5 @@
+import json
+
from django.conf import settings
from django.core import serializers
from django.http import HttpResponse, JsonResponse
@@ -18,9 +20,13 @@
cart = SECart
def get_context_data(self, request, **kwargs):
+ data = super().get_context_data(request, **kwargs)
return {
- **super(OrderPage, self).get_context_data(request, **kwargs),
+ **data,
'page': CustomPage.objects.get(slug='order'),
+ 'raw_order_fields': json.dumps({
+ field.html_name: f'#{field.id_for_label}' for field in data['form']
+ }),
}
@@ -108,8 +114,7 @@
def post(self, request):
cart = self.cart(request.session)
- form = self.order_form(request.POST.dict())
-
+ form = self.order_form(request.POST)
if not form.is_valid():
return render(request, self.template, {'cart': cart, 'form': form})
| {"golden_diff": "diff --git a/shopelectro/views/ecommerce.py b/shopelectro/views/ecommerce.py\n--- a/shopelectro/views/ecommerce.py\n+++ b/shopelectro/views/ecommerce.py\n@@ -1,3 +1,5 @@\n+import json\n+\n from django.conf import settings\n from django.core import serializers\n from django.http import HttpResponse, JsonResponse\n@@ -18,9 +20,13 @@\n cart = SECart\n \n def get_context_data(self, request, **kwargs):\n+ data = super().get_context_data(request, **kwargs)\n return {\n- **super(OrderPage, self).get_context_data(request, **kwargs),\n+ **data,\n 'page': CustomPage.objects.get(slug='order'),\n+ 'raw_order_fields': json.dumps({\n+ field.html_name: f'#{field.id_for_label}' for field in data['form']\n+ }),\n }\n \n \n@@ -108,8 +114,7 @@\n \n def post(self, request):\n cart = self.cart(request.session)\n- form = self.order_form(request.POST.dict())\n-\n+ form = self.order_form(request.POST)\n if not form.is_valid():\n return render(request, self.template, {'cart': cart, 'form': form})\n", "issue": "\u041f\u0440\u0430\u0432\u044c \u0448\u0430\u0431\u043b\u043e\u043d \u043f\u0438\u0441\u0435\u043c\n\u0412 \u0448\u0430\u0431\u043b\u043e\u043d\u0435 \u043f\u0438\u0441\u044c\u043c\u0430, \u043a\u043e\u0442\u043e\u0440\u044b\u0439 \u043f\u0440\u0438\u0445\u043e\u0434\u0438\u0442 \u0435\u0441\u043b\u0438 \u043e\u043f\u043b\u0430\u0447\u0438\u0432\u0430\u0442\u044c \u043e\u043d\u043b\u0430\u0439\u043d, \u043d\u0435\u0442 \u0441\u0442\u0440\u043e\u043a\u0438 \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u044f\u043c\u0438. \u041a\u0430\u043a \u0432 \u043e\u0431\u044b\u0447\u043d\u043e\u043c \u0448\u0430\u0431\u043b\u043e\u043d\u0435. \u0414\u043e\u0431\u0430\u0432\u044c.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.core import serializers\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.decorators.http import require_POST\n\nfrom ecommerce import mailer, views as ec_views\nfrom pages.models import CustomPage\n\nfrom shopelectro.cart import SECart\nfrom shopelectro.forms import OrderForm\nfrom shopelectro.models import Product, Order\n\n\n# ECOMMERCE VIEWS\nclass OrderPage(ec_views.OrderPage):\n order_form = OrderForm\n cart = SECart\n\n def get_context_data(self, request, **kwargs):\n return {\n **super(OrderPage, self).get_context_data(request, **kwargs),\n 'page': CustomPage.objects.get(slug='order'),\n }\n\n\nclass AddToCart(ec_views.AddToCart):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass RemoveFromCart(ec_views.RemoveFromCart):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass ChangeCount(ec_views.ChangeCount):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass FlushCart(ec_views.FlushCart):\n product_model = Product\n order_form = OrderForm\n\n\nclass OrderSuccess(ec_views.OrderSuccess):\n order = Order.objects.all().prefetch_related('positions')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n positions_json = serializers.serialize(\n 'json',\n context['order'].positions.all(),\n fields=['name', 'quantity', 'price'],\n )\n\n return {\n **context,\n 'positions_json': positions_json,\n }\n\n\n@require_POST\ndef one_click_buy(request):\n \"\"\"\n Handle one-click-buy.\n\n Accept XHR, save Order to DB, send mail about it\n and return 200 OK.\n \"\"\"\n SECart(request.session).clear()\n\n cart = SECart(request.session)\n product = 
get_object_or_404(Product, id=request.POST['product'])\n cart.add(product, int(request.POST['quantity']))\n order = Order(phone=request.POST['phone'])\n order.set_positions(cart)\n ec_views.save_order_to_session(request.session, order)\n mailer.send_order(\n subject=settings.EMAIL_SUBJECTS['one_click'],\n order=order,\n to_customer=False,\n )\n return HttpResponse('ok')\n\n\n@require_POST\ndef order_call(request):\n \"\"\"Send email about ordered call.\"\"\"\n phone, time, url = ec_views.get_keys_from_post(\n request, 'phone', 'time', 'url')\n\n mailer.send_backcall(\n subject=settings.EMAIL_SUBJECTS['call'],\n phone=phone,\n time=time,\n url=url,\n )\n\n return HttpResponse('ok')\n\n\nclass YandexOrder(OrderPage):\n\n def post(self, request):\n cart = self.cart(request.session)\n form = self.order_form(request.POST.dict())\n\n if not form.is_valid():\n return render(request, self.template, {'cart': cart, 'form': form})\n\n order = form.save()\n order.set_positions(cart)\n ec_views.save_order_to_session(request.session, order)\n\n # Took form fields from Yandex docs https://goo.gl/afKfsz\n response_data = {\n 'yandex_kassa_link': settings.YANDEX_KASSA_LINK, # Required\n 'shopId': settings.SHOP['id'], # Required\n 'scid': settings.SHOP['scid'], # Required\n 'shopSuccessURL': settings.SHOP['success_url'],\n 'shopFailURL': settings.SHOP['fail_url'],\n 'customerNumber': order.id, # Required\n 'sum': order.total_price, # Required\n 'orderNumber': order.fake_order_number,\n 'cps_phone': order.phone,\n 'cps_email': order.email,\n 'paymentType': request.POST.get('payment_type'),\n }\n\n return JsonResponse(response_data)\n", "path": "shopelectro/views/ecommerce.py"}]} | 1,773 | 280 |
gh_patches_debug_13315 | rasdani/github-patches | git_diff | pypi__warehouse-3525 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove X-Forwarded-Proto/X-Forwarded-Port h2o workaround
[In `warehouse/utils/wsgi.py`](https://github.com/pypa/warehouse/blob/master/warehouse/utils/wsgi.py#L51-L58) we note that once https://github.com/h2o/h2o/issues/883 is solved, we can remove a few lines about `X-Forwarded-Port` and `X-Forwarded-Proto`. They resolved that issue in May 2016 and [have released several new versions since then](https://h2o.examp1e.net/). OK to remove workaround?
Followup to b8b9f385382cd659750c694cf8b1b3db6f1f6d35 .
</issue>
<code>
[start of warehouse/utils/wsgi.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import hmac
14
15
16 def _forwarded_value(values, num_proxies):
17 values = [v.strip() for v in values.split(",")]
18 if len(values) >= num_proxies:
19 return values[-num_proxies]
20
21
22 class ProxyFixer:
23
24 def __init__(self, app, token, num_proxies=1):
25 self.app = app
26 self.token = token
27 self.num_proxies = num_proxies
28
29 def __call__(self, environ, start_response):
30 # Determine if the request comes from a trusted proxy or not by looking
31 # for a token in the request.
32 request_token = environ.get("HTTP_WAREHOUSE_TOKEN")
33 if (request_token is not None and
34 hmac.compare_digest(self.token, request_token)):
35 # Compute our values from the environment.
36 proto = environ.get("HTTP_WAREHOUSE_PROTO", "")
37 remote_addr = environ.get("HTTP_WAREHOUSE_IP", "")
38 host = environ.get("HTTP_WAREHOUSE_HOST", "")
39 # If we're not getting headers from a trusted third party via the
40 # specialized Warehouse-* headers, then we'll fall back to looking at
41 # X-Fowarded-* headers, assuming that whatever we have in front of us
42 # will strip invalid ones.
43 else:
44 proto = environ.get("HTTP_X_FORWARDED_PROTO", "")
45 remote_addr = _forwarded_value(
46 environ.get("HTTP_X_FORWARDED_FOR", ""),
47 self.num_proxies,
48 )
49 host = environ.get("HTTP_X_FORWARDED_HOST", "")
50
51 # If we have a X-Forwarded-Port and it disagreed with
52 # X-Forwarded-Proto then we're going to listen to X-Forwarded-Port
53 # instead. This is because h2o overwrites X-Forwarded-Proto but not
54 # X-Forwarded-Port
55 # TODO: Note, this can go away if/once h2o/h2o#883 is solved.
56 port = environ.get("HTTP_X_FORWARDED_PORT", "")
57 if port == "443":
58 proto = "https"
59
60 # Put the new header values into our environment.
61 if remote_addr:
62 environ["REMOTE_ADDR"] = remote_addr
63 if host:
64 environ["HTTP_HOST"] = host
65 if proto:
66 environ["wsgi.url_scheme"] = proto
67
68 # Remove any of the forwarded or warehouse headers from the environment
69 for header in {
70 "HTTP_X_FORWARDED_PROTO", "HTTP_X_FORWARDED_FOR",
71 "HTTP_X_FORWARDED_HOST", "HTTP_X_FORWARDED_PORT",
72 "HTTP_WAREHOUSE_TOKEN", "HTTP_WAREHOUSE_PROTO",
73 "HTTP_WAREHOUSE_IP", "HTTP_WAREHOUSE_HOST"}:
74 if header in environ:
75 del environ[header]
76
77 # Dispatch to the real underlying application.
78 return self.app(environ, start_response)
79
80
81 class VhmRootRemover:
82
83 def __init__(self, app):
84 self.app = app
85
86 def __call__(self, environ, start_response):
87 # Delete the X-Vhm-Root header if it exists.
88 if "HTTP_X_VHM_ROOT" in environ:
89 del environ["HTTP_X_VHM_ROOT"]
90
91 return self.app(environ, start_response)
92
93
94 class HostRewrite:
95
96 # TODO: This entire class should not be required.
97
98 def __init__(self, app):
99 self.app = app
100
101 def __call__(self, environ, start_response):
102 # If the host header matches upload.pypi.io, then we want to rewrite it
103 # so that it is instead upload.pypi.org.
104 if environ.get("HTTP_HOST", "").lower() == "upload.pypi.io":
105 environ["HTTP_HOST"] = "upload.pypi.org"
106
107 return self.app(environ, start_response)
108
[end of warehouse/utils/wsgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/utils/wsgi.py b/warehouse/utils/wsgi.py
--- a/warehouse/utils/wsgi.py
+++ b/warehouse/utils/wsgi.py
@@ -48,15 +48,6 @@
)
host = environ.get("HTTP_X_FORWARDED_HOST", "")
- # If we have a X-Forwarded-Port and it disagreed with
- # X-Forwarded-Proto then we're going to listen to X-Forwarded-Port
- # instead. This is because h2o overwrites X-Forwarded-Proto but not
- # X-Forwarded-Port
- # TODO: Note, this can go away if/once h2o/h2o#883 is solved.
- port = environ.get("HTTP_X_FORWARDED_PORT", "")
- if port == "443":
- proto = "https"
-
# Put the new header values into our environment.
if remote_addr:
environ["REMOTE_ADDR"] = remote_addr
| {"golden_diff": "diff --git a/warehouse/utils/wsgi.py b/warehouse/utils/wsgi.py\n--- a/warehouse/utils/wsgi.py\n+++ b/warehouse/utils/wsgi.py\n@@ -48,15 +48,6 @@\n )\n host = environ.get(\"HTTP_X_FORWARDED_HOST\", \"\")\n \n- # If we have a X-Forwarded-Port and it disagreed with\n- # X-Forwarded-Proto then we're going to listen to X-Forwarded-Port\n- # instead. This is because h2o overwrites X-Forwarded-Proto but not\n- # X-Forwarded-Port\n- # TODO: Note, this can go away if/once h2o/h2o#883 is solved.\n- port = environ.get(\"HTTP_X_FORWARDED_PORT\", \"\")\n- if port == \"443\":\n- proto = \"https\"\n-\n # Put the new header values into our environment.\n if remote_addr:\n environ[\"REMOTE_ADDR\"] = remote_addr\n", "issue": "Remove X-Forwarded-Proto/X-Forwarded-Port h2o workaround\n[In `warehouse/utils/wsgi.py`](https://github.com/pypa/warehouse/blob/master/warehouse/utils/wsgi.py#L51-L58) we note that once https://github.com/h2o/h2o/issues/883 is solved, we can remove a few lines about `X-Forwarded-Port` and `X-Forwarded-Proto`. They resolved that issue in May 2016 and [have released several new versions since then](https://h2o.examp1e.net/). OK to remove workaround?\r\n\r\nFollowup to b8b9f385382cd659750c694cf8b1b3db6f1f6d35 .\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport hmac\n\n\ndef _forwarded_value(values, num_proxies):\n values = [v.strip() for v in values.split(\",\")]\n if len(values) >= num_proxies:\n return values[-num_proxies]\n\n\nclass ProxyFixer:\n\n def __init__(self, app, token, num_proxies=1):\n self.app = app\n self.token = token\n self.num_proxies = num_proxies\n\n def __call__(self, environ, start_response):\n # Determine if the request comes from a trusted proxy or not by looking\n # for a token in the request.\n request_token = environ.get(\"HTTP_WAREHOUSE_TOKEN\")\n if (request_token is not None and\n hmac.compare_digest(self.token, request_token)):\n # Compute our values from the environment.\n proto = environ.get(\"HTTP_WAREHOUSE_PROTO\", \"\")\n remote_addr = environ.get(\"HTTP_WAREHOUSE_IP\", \"\")\n host = environ.get(\"HTTP_WAREHOUSE_HOST\", \"\")\n # If we're not getting headers from a trusted third party via the\n # specialized Warehouse-* headers, then we'll fall back to looking at\n # X-Fowarded-* headers, assuming that whatever we have in front of us\n # will strip invalid ones.\n else:\n proto = environ.get(\"HTTP_X_FORWARDED_PROTO\", \"\")\n remote_addr = _forwarded_value(\n environ.get(\"HTTP_X_FORWARDED_FOR\", \"\"),\n self.num_proxies,\n )\n host = environ.get(\"HTTP_X_FORWARDED_HOST\", \"\")\n\n # If we have a X-Forwarded-Port and it disagreed with\n # X-Forwarded-Proto then we're going to listen to X-Forwarded-Port\n # instead. 
This is because h2o overwrites X-Forwarded-Proto but not\n # X-Forwarded-Port\n # TODO: Note, this can go away if/once h2o/h2o#883 is solved.\n port = environ.get(\"HTTP_X_FORWARDED_PORT\", \"\")\n if port == \"443\":\n proto = \"https\"\n\n # Put the new header values into our environment.\n if remote_addr:\n environ[\"REMOTE_ADDR\"] = remote_addr\n if host:\n environ[\"HTTP_HOST\"] = host\n if proto:\n environ[\"wsgi.url_scheme\"] = proto\n\n # Remove any of the forwarded or warehouse headers from the environment\n for header in {\n \"HTTP_X_FORWARDED_PROTO\", \"HTTP_X_FORWARDED_FOR\",\n \"HTTP_X_FORWARDED_HOST\", \"HTTP_X_FORWARDED_PORT\",\n \"HTTP_WAREHOUSE_TOKEN\", \"HTTP_WAREHOUSE_PROTO\",\n \"HTTP_WAREHOUSE_IP\", \"HTTP_WAREHOUSE_HOST\"}:\n if header in environ:\n del environ[header]\n\n # Dispatch to the real underlying application.\n return self.app(environ, start_response)\n\n\nclass VhmRootRemover:\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n # Delete the X-Vhm-Root header if it exists.\n if \"HTTP_X_VHM_ROOT\" in environ:\n del environ[\"HTTP_X_VHM_ROOT\"]\n\n return self.app(environ, start_response)\n\n\nclass HostRewrite:\n\n # TODO: This entire class should not be required.\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n # If the host header matches upload.pypi.io, then we want to rewrite it\n # so that it is instead upload.pypi.org.\n if environ.get(\"HTTP_HOST\", \"\").lower() == \"upload.pypi.io\":\n environ[\"HTTP_HOST\"] = \"upload.pypi.org\"\n\n return self.app(environ, start_response)\n", "path": "warehouse/utils/wsgi.py"}]} | 1,878 | 224 |
gh_patches_debug_20909 | rasdani/github-patches | git_diff | pypi__warehouse-821 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Display how long ago the release was made
We'll want to display how long ago the release was made on the project/release detail page. Probably we'll want to implement this in javascript?
</issue>
<code>
[start of warehouse/i18n/filters.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import babel.dates
14 import jinja2
15
16 from pyramid.threadlocal import get_current_request
17
18
19 @jinja2.contextfilter
20 def format_date(ctx, *args, **kwargs):
21 request = ctx.get("request") or get_current_request()
22 kwargs.setdefault("locale", request.locale)
23 return babel.dates.format_date(*args, **kwargs)
24
[end of warehouse/i18n/filters.py]
[start of warehouse/i18n/__init__.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import os.path
14
15 from babel.core import Locale
16 from babel.support import Translations
17
18 from warehouse.i18n.translations import (
19 JinjaRequestTranslation, translate_value, gettext, ngettext,
20 )
21
22
23 __all__ = ["gettext", "ngettext", "includeme"]
24
25
26 GETTEXT_DOMAIN = "warehouse"
27
28 LOCALE_DIR = os.path.abspath(
29 os.path.join(os.path.dirname(__file__), "..", "translations")
30 )
31
32
33 def _locale(request):
34 """
35 Computes a babel.core:Locale() object for this request.
36 """
37 return Locale.parse(request.locale_name)
38
39
40 def _translation(request):
41 """
42 Loads a translation object for this request.
43 """
44 # TODO: Should we cache these in memory?
45 return Translations.load(LOCALE_DIR, request.locale, domain=GETTEXT_DOMAIN)
46
47
48 def includeme(config):
49 # Add the request attributes
50 config.add_request_method(_locale, name="locale", reify=True)
51 config.add_request_method(_translation, name="translation", reify=True)
52
53 # Register our i18n/l10n filters for Jinja2
54 filters = config.get_settings().setdefault("jinja2.filters", {})
55 filters.setdefault("format_date", "warehouse.i18n.filters:format_date")
56
57 # Register our finalize function for Jinja2
58 config.get_settings()["jinja2.finalize"] = translate_value
59
60 # Configure Jinja2 for translation
61 config.get_settings()["jinja2.i18n.domain"] = GETTEXT_DOMAIN
62 config.get_settings()["jinja2.i18n.gettext"] = JinjaRequestTranslation
63
[end of warehouse/i18n/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py
--- a/warehouse/i18n/__init__.py
+++ b/warehouse/i18n/__init__.py
@@ -53,6 +53,10 @@
# Register our i18n/l10n filters for Jinja2
filters = config.get_settings().setdefault("jinja2.filters", {})
filters.setdefault("format_date", "warehouse.i18n.filters:format_date")
+ filters.setdefault(
+ "format_datetime",
+ "warehouse.i18n.filters:format_datetime",
+ )
# Register our finalize function for Jinja2
config.get_settings()["jinja2.finalize"] = translate_value
diff --git a/warehouse/i18n/filters.py b/warehouse/i18n/filters.py
--- a/warehouse/i18n/filters.py
+++ b/warehouse/i18n/filters.py
@@ -21,3 +21,10 @@
request = ctx.get("request") or get_current_request()
kwargs.setdefault("locale", request.locale)
return babel.dates.format_date(*args, **kwargs)
+
+
[email protected]
+def format_datetime(ctx, *args, **kwargs):
+ request = ctx.get("request") or get_current_request()
+ kwargs.setdefault("locale", request.locale)
+ return babel.dates.format_datetime(*args, **kwargs)
| {"golden_diff": "diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py\n--- a/warehouse/i18n/__init__.py\n+++ b/warehouse/i18n/__init__.py\n@@ -53,6 +53,10 @@\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", \"warehouse.i18n.filters:format_date\")\n+ filters.setdefault(\n+ \"format_datetime\",\n+ \"warehouse.i18n.filters:format_datetime\",\n+ )\n \n # Register our finalize function for Jinja2\n config.get_settings()[\"jinja2.finalize\"] = translate_value\ndiff --git a/warehouse/i18n/filters.py b/warehouse/i18n/filters.py\n--- a/warehouse/i18n/filters.py\n+++ b/warehouse/i18n/filters.py\n@@ -21,3 +21,10 @@\n request = ctx.get(\"request\") or get_current_request()\n kwargs.setdefault(\"locale\", request.locale)\n return babel.dates.format_date(*args, **kwargs)\n+\n+\[email protected]\n+def format_datetime(ctx, *args, **kwargs):\n+ request = ctx.get(\"request\") or get_current_request()\n+ kwargs.setdefault(\"locale\", request.locale)\n+ return babel.dates.format_datetime(*args, **kwargs)\n", "issue": "Display how long ago the release was made\nWe'll want to display how long ago the release was made on the project/release detail page. Probably we'll want to implement this in javascript?\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport babel.dates\nimport jinja2\n\nfrom pyramid.threadlocal import get_current_request\n\n\[email protected]\ndef format_date(ctx, *args, **kwargs):\n request = ctx.get(\"request\") or get_current_request()\n kwargs.setdefault(\"locale\", request.locale)\n return babel.dates.format_date(*args, **kwargs)\n", "path": "warehouse/i18n/filters.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\n\nfrom babel.core import Locale\nfrom babel.support import Translations\n\nfrom warehouse.i18n.translations import (\n JinjaRequestTranslation, translate_value, gettext, ngettext,\n)\n\n\n__all__ = [\"gettext\", \"ngettext\", \"includeme\"]\n\n\nGETTEXT_DOMAIN = \"warehouse\"\n\nLOCALE_DIR = os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"..\", \"translations\")\n)\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name)\n\n\ndef _translation(request):\n \"\"\"\n Loads a translation object for this request.\n \"\"\"\n # TODO: Should we cache these in memory?\n return 
Translations.load(LOCALE_DIR, request.locale, domain=GETTEXT_DOMAIN)\n\n\ndef includeme(config):\n # Add the request attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n config.add_request_method(_translation, name=\"translation\", reify=True)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", \"warehouse.i18n.filters:format_date\")\n\n # Register our finalize function for Jinja2\n config.get_settings()[\"jinja2.finalize\"] = translate_value\n\n # Configure Jinja2 for translation\n config.get_settings()[\"jinja2.i18n.domain\"] = GETTEXT_DOMAIN\n config.get_settings()[\"jinja2.i18n.gettext\"] = JinjaRequestTranslation\n", "path": "warehouse/i18n/__init__.py"}]} | 1,428 | 328 |
gh_patches_debug_7923 | rasdani/github-patches | git_diff | plotly__dash-2734 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Feature Request] Python 3.10 support
**Is your feature request related to a problem? Please describe.**
Hi, I'm wondering if Dash supports Python 3.10 or if this is on the roadmap. Thanks for all your great work!
**Describe the solution you'd like**
As a user of Dash, I would like to be able to install Dash and/or Plotly in a Python 3.10 environment.
**Describe alternatives you've considered**
I tried [searching the forums](https://community.plotly.com/search?q=python%203.10), but didn't find any results. I haven't tried building it myself yet, but plan to.
**Additional context**
n/a
</issue>
<code>
[start of setup.py]
1 import io
2 from setuptools import setup, find_packages
3
4 main_ns = {}
5 exec(open("dash/version.py", encoding="utf-8").read(), main_ns) # pylint: disable=exec-used, consider-using-with
6
7
8 def read_req_file(req_type):
9 with open(f"requires-{req_type}.txt", encoding="utf-8") as fp:
10 requires = (line.strip() for line in fp)
11 return [req for req in requires if req and not req.startswith("#")]
12
13
14 setup(
15 name="dash",
16 version=main_ns["__version__"],
17 author="Chris Parmer",
18 author_email="[email protected]",
19 packages=find_packages(exclude=["tests*"]),
20 include_package_data=True,
21 license="MIT",
22 description=(
23 "A Python framework for building reactive web-apps. "
24 "Developed by Plotly."
25 ),
26 long_description=io.open("README.md", encoding="utf-8").read(), # pylint: disable=consider-using-with
27 long_description_content_type="text/markdown",
28 install_requires=read_req_file("install"),
29 python_requires=">=3.6",
30 extras_require={
31 "ci": read_req_file("ci"),
32 "dev": read_req_file("dev"),
33 "testing": read_req_file("testing"),
34 "celery": read_req_file("celery"),
35 "diskcache": read_req_file("diskcache"),
36 "compress": read_req_file("compress")
37 },
38 entry_points={
39 "console_scripts": [
40 "dash-generate-components = "
41 "dash.development.component_generator:cli",
42 "renderer = dash.development.build_process:renderer",
43 "dash-update-components = dash.development.update_components:cli"
44 ],
45 "pytest11": ["dash = dash.testing.plugin"],
46 },
47 url="https://plotly.com/dash",
48 project_urls={
49 "Documentation": "https://dash.plotly.com",
50 "Source": "https://github.com/plotly/dash",
51 "Issue Tracker": "https://github.com/plotly/dash/issues",
52 },
53 classifiers=[
54 "Development Status :: 5 - Production/Stable",
55 "Environment :: Web Environment",
56 "Framework :: Dash",
57 "Framework :: Flask",
58 "Intended Audience :: Developers",
59 "Intended Audience :: Education",
60 "Intended Audience :: Financial and Insurance Industry",
61 "Intended Audience :: Healthcare Industry",
62 "Intended Audience :: Manufacturing",
63 "Intended Audience :: Science/Research",
64 "License :: OSI Approved :: MIT License",
65 "Programming Language :: Python",
66 "Programming Language :: Python :: 3",
67 "Programming Language :: Python :: 3.6",
68 "Programming Language :: Python :: 3.7",
69 "Programming Language :: Python :: 3.8",
70 "Programming Language :: Python :: 3.9",
71 "Topic :: Database :: Front-Ends",
72 "Topic :: Office/Business :: Financial :: Spreadsheet",
73 "Topic :: Scientific/Engineering :: Visualization",
74 "Topic :: Software Development :: Libraries :: Application Frameworks",
75 "Topic :: Software Development :: Widget Sets",
76 ],
77 data_files=[
78 # like `jupyter nbextension install --sys-prefix`
79 ("share/jupyter/nbextensions/dash", [
80 "dash/nbextension/main.js",
81 ]),
82 # like `jupyter nbextension enable --sys-prefix`
83 ("etc/jupyter/nbconfig/notebook.d", [
84 "dash/nbextension/dash.json"
85 ]),
86 # Place jupyterlab extension in extension directory
87 ("share/jupyter/lab/extensions", [
88 "dash/labextension/dist/dash-jupyterlab.tgz"
89 ]),
90 ],
91 )
92
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,6 +68,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Topic :: Database :: Front-Ends",
"Topic :: Office/Business :: Financial :: Spreadsheet",
"Topic :: Scientific/Engineering :: Visualization",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,6 +68,7 @@\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.10\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n", "issue": "[Feature Request] Python 3.10 support\n**Is your feature request related to a problem? Please describe.**\r\nHi, I'm wondering if Dash supports Python 3.10 or if this is on the roadmap. Thanks for all your great work!\r\n\r\n**Describe the solution you'd like**\r\nAs a user of Dash, I would like to be able to install Dash and/or Plotly in a Python 3.10 environment.\r\n\r\n**Describe alternatives you've considered**\r\nI tried [searching the forums](https://community.plotly.com/search?q=python%203.10), but didn't find any results. I haven't tried building it myself yet, but plan to.\r\n\r\n**Additional context**\r\nn/a\r\n\n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open(\"dash/version.py\", encoding=\"utf-8\").read(), main_ns) # pylint: disable=exec-used, consider-using-with\n\n\ndef read_req_file(req_type):\n with open(f\"requires-{req_type}.txt\", encoding=\"utf-8\") as fp:\n requires = (line.strip() for line in fp)\n return [req for req in requires if req and not req.startswith(\"#\")]\n\n\nsetup(\n name=\"dash\",\n version=main_ns[\"__version__\"],\n author=\"Chris Parmer\",\n author_email=\"[email protected]\",\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n license=\"MIT\",\n description=(\n \"A Python framework for building reactive web-apps. 
\"\n \"Developed by Plotly.\"\n ),\n long_description=io.open(\"README.md\", encoding=\"utf-8\").read(), # pylint: disable=consider-using-with\n long_description_content_type=\"text/markdown\",\n install_requires=read_req_file(\"install\"),\n python_requires=\">=3.6\",\n extras_require={\n \"ci\": read_req_file(\"ci\"),\n \"dev\": read_req_file(\"dev\"),\n \"testing\": read_req_file(\"testing\"),\n \"celery\": read_req_file(\"celery\"),\n \"diskcache\": read_req_file(\"diskcache\"),\n \"compress\": read_req_file(\"compress\")\n },\n entry_points={\n \"console_scripts\": [\n \"dash-generate-components = \"\n \"dash.development.component_generator:cli\",\n \"renderer = dash.development.build_process:renderer\",\n \"dash-update-components = dash.development.update_components:cli\"\n ],\n \"pytest11\": [\"dash = dash.testing.plugin\"],\n },\n url=\"https://plotly.com/dash\",\n project_urls={\n \"Documentation\": \"https://dash.plotly.com\",\n \"Source\": \"https://github.com/plotly/dash\",\n \"Issue Tracker\": \"https://github.com/plotly/dash/issues\",\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Dash\",\n \"Framework :: Flask\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Manufacturing\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Database :: Front-Ends\",\n \"Topic :: Office/Business :: Financial :: Spreadsheet\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Widget Sets\",\n ],\n data_files=[\n # like `jupyter nbextension install --sys-prefix`\n (\"share/jupyter/nbextensions/dash\", [\n \"dash/nbextension/main.js\",\n ]),\n # like `jupyter nbextension enable --sys-prefix`\n (\"etc/jupyter/nbconfig/notebook.d\", [\n \"dash/nbextension/dash.json\"\n ]),\n # Place jupyterlab extension in extension directory\n (\"share/jupyter/lab/extensions\", [\n \"dash/labextension/dist/dash-jupyterlab.tgz\"\n ]),\n ],\n)\n", "path": "setup.py"}]} | 1,645 | 116 |
gh_patches_debug_1562 | rasdani/github-patches | git_diff | pytorch__ignite-1629 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Images not rendered on PyPI
## 📚 Documentation
The logo and other assets on the project README.md are not rendered on the PyPI page of [pytorch-ignite](https://pypi.org/project/pytorch-ignite/)
This is because PyPI does not read the repo for the images, and we'll have to use external links for the image. (reference [here](https://stackoverflow.com/questions/41983209/how-do-i-add-images-to-a-pypi-readme-that-works-on-github))
We could use the `https://raw.githubusercontent.com/*` counterparts for the assets instead of specifying the path
By specifying the path, I mean the following
https://github.com/pytorch/ignite/blob/6753b19b74fd8d128188dd0a75b405d19aa515b5/README.md#L5
Just a suggestion, please ignore if it is unnecessary :)
</issue>
<code>
[start of setup.py]
1 import io
2 import os
3 import re
4
5 from setuptools import find_packages, setup
6
7
8 def read(*names, **kwargs):
9 with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
10 return fp.read()
11
12
13 def find_version(*file_paths):
14 version_file = read(*file_paths)
15 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
16 if version_match:
17 return version_match.group(1)
18 raise RuntimeError("Unable to find version string.")
19
20
21 readme = read("README.md")
22
23 VERSION = find_version("ignite", "__init__.py")
24
25 requirements = [
26 "torch>=1.3,<2",
27 ]
28
29 setup(
30 # Metadata
31 name="pytorch-ignite",
32 version=VERSION,
33 author="PyTorch Core Team",
34 author_email="[email protected]",
35 url="https://github.com/pytorch/ignite",
36 description="A lightweight library to help with training neural networks in PyTorch.",
37 long_description_content_type="text/markdown",
38 long_description=readme,
39 license="BSD",
40 # Package info
41 packages=find_packages(exclude=("tests", "tests.*",)),
42 zip_safe=True,
43 install_requires=requirements,
44 )
45
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,9 @@
raise RuntimeError("Unable to find version string.")
-readme = read("README.md")
+readme = read("README.md").replace(
+ 'src="assets/', 'src="https://raw.githubusercontent.com/pytorch/ignite/master/assets/'
+)
VERSION = find_version("ignite", "__init__.py")
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,9 @@\n raise RuntimeError(\"Unable to find version string.\")\n \n \n-readme = read(\"README.md\")\n+readme = read(\"README.md\").replace(\n+ 'src=\"assets/', 'src=\"https://raw.githubusercontent.com/pytorch/ignite/master/assets/'\n+)\n \n VERSION = find_version(\"ignite\", \"__init__.py\")\n", "issue": "Images not rendered on PyPI\n## \ud83d\udcda Documentation\r\nThe logo and other assets on the project README.md are not rendered on the PyPI page of [pytorch-ignite](https://pypi.org/project/pytorch-ignite/)\r\nThis is because PyPI does not read the repo for the images, and we'll have to use external links for the image. (reference [here](https://stackoverflow.com/questions/41983209/how-do-i-add-images-to-a-pypi-readme-that-works-on-github))\r\nWe could use the `https://raw.githubusercontent.com/*` counterparts for the assets instead of specifying the path\r\n\r\nBy specifying the path, I mean the following\r\nhttps://github.com/pytorch/ignite/blob/6753b19b74fd8d128188dd0a75b405d19aa515b5/README.md#L5\r\n\r\nJust a suggestion, please ignore if it is unnecessary :)\r\n\n", "before_files": [{"content": "import io\nimport os\nimport re\n\nfrom setuptools import find_packages, setup\n\n\ndef read(*names, **kwargs):\n with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nreadme = read(\"README.md\")\n\nVERSION = find_version(\"ignite\", \"__init__.py\")\n\nrequirements = [\n \"torch>=1.3,<2\",\n]\n\nsetup(\n # Metadata\n name=\"pytorch-ignite\",\n version=VERSION,\n author=\"PyTorch Core Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pytorch/ignite\",\n description=\"A lightweight library to help with training neural networks in PyTorch.\",\n long_description_content_type=\"text/markdown\",\n long_description=readme,\n license=\"BSD\",\n # Package info\n packages=find_packages(exclude=(\"tests\", \"tests.*\",)),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}]} | 1,114 | 100 |
gh_patches_debug_2725 | rasdani/github-patches | git_diff | beeware__toga-569 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error looking for icon for tutorial for 0.3.0.dev9
This is with Python 3.6.5 in a clean venv:
```
(.venv) PS C:\Users\_\Desktop\toga_tutorial> python .\helloworld.py
[Winforms] No valid icon format available for C:\Users\brcan\Desktop\toga_tutorial\.venv\lib\site-packages\toga\resources\tiberius; fall back on Tiberius instead
Unhandled Exception: Python.Runtime.PythonException: FileNotFoundException : Could not find file 'C:\Users\brcan\Desktop\toga_tutorial\.venv\lib\site-packages\toga\resources\tiberius.ico'.
at System.IO.__Error.WinIOError(Int32 errorCode, String maybeFullPath)
at System.IO.FileStream.Init(String path, FileMode mode, FileAccess access, Int32 rights, Boolean useRights, FileShare share, Int32 bufferSize, FileOptions options, SECURITY_ATTRIBUTES secAttrs, String msgPath, Boolean bFromProxy, Boolean useLongPath, Boolean checkHost)
at System.IO.FileStream..ctor(String path, FileMode mode, FileAccess access, FileShare share)
at System.Drawing.Icon..ctor(String fileName, Int32 width, Int32 height)
at Python.Runtime.Dispatcher.Dispatch(ArrayList args)
at System.Threading.ExecutionContext.RunInternal(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)
at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)
at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state)
at System.Threading.ThreadHelper.ThreadStart()
```
</issue>
<code>
[start of src/core/setup.py]
1 #/usr/bin/env python
2 import io
3 import re
4
5 from setuptools import setup, find_packages
6
7 with io.open('toga/__init__.py', encoding='utf8') as version_file:
8 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M)
9 if version_match:
10 version = version_match.group(1)
11 else:
12 raise RuntimeError("Unable to find version string.")
13
14
15 with io.open('README.rst', encoding='utf8') as readme:
16 long_description = readme.read()
17
18
19 setup(
20 name='toga-core',
21 version=version,
22 description='A Python native, OS native GUI toolkit.',
23 long_description=long_description,
24 author='Russell Keith-Magee',
25 author_email='[email protected]',
26 url='http://pybee.org/toga',
27 packages=find_packages(exclude='tests'),
28 python_requires='>=3.5',
29 package_data={
30 'toga': ['resources/*.icns', 'resources/*.png'],
31 },
32 include_package_data=True,
33 install_requires=[
34 'travertino>=0.1.0'
35 ],
36 tests_require=[
37 'toga-dummy==%s' % version
38 ],
39 license='New BSD',
40 classifiers=[
41 'Development Status :: 3 - Alpha',
42 'Intended Audience :: Developers',
43 'License :: OSI Approved :: BSD License',
44 'Operating System :: OS Independent',
45 'Programming Language :: Python :: 3',
46 'Programming Language :: Python :: 3.5',
47 'Programming Language :: Python :: 3.6',
48 'Programming Language :: Python :: 3.7',
49 'Programming Language :: Python :: 3 :: Only',
50 'Topic :: Software Development',
51 'Topic :: Software Development :: User Interfaces',
52 'Topic :: Software Development :: Widget Sets',
53 ],
54 test_suite='tests',
55 zip_safe=False,
56 )
57
[end of src/core/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/core/setup.py b/src/core/setup.py
--- a/src/core/setup.py
+++ b/src/core/setup.py
@@ -27,7 +27,7 @@
packages=find_packages(exclude='tests'),
python_requires='>=3.5',
package_data={
- 'toga': ['resources/*.icns', 'resources/*.png'],
+ 'toga': ['resources/*.icns', 'resources/*.ico', 'resources/*.png'],
},
include_package_data=True,
install_requires=[
| {"golden_diff": "diff --git a/src/core/setup.py b/src/core/setup.py\n--- a/src/core/setup.py\n+++ b/src/core/setup.py\n@@ -27,7 +27,7 @@\n packages=find_packages(exclude='tests'),\n python_requires='>=3.5',\n package_data={\n- 'toga': ['resources/*.icns', 'resources/*.png'],\n+ 'toga': ['resources/*.icns', 'resources/*.ico', 'resources/*.png'],\n },\n include_package_data=True,\n install_requires=[\n", "issue": "Error looking for icon for tutorial for 0.3.0.dev9\nThis is with Python 3.6.5 in a clean venv:\r\n```\r\n(.venv) PS C:\\Users\\_\\Desktop\\toga_tutorial> python .\\helloworld.py\r\n[Winforms] No valid icon format available for C:\\Users\\brcan\\Desktop\\toga_tutorial\\.venv\\lib\\site-packages\\toga\\resources\\tiberius; fall back on Tiberius instead\r\n\r\nUnhandled Exception: Python.Runtime.PythonException: FileNotFoundException : Could not find file 'C:\\Users\\brcan\\Desktop\\toga_tutorial\\.venv\\lib\\site-packages\\toga\\resources\\tiberius.ico'.\r\n at System.IO.__Error.WinIOError(Int32 errorCode, String maybeFullPath)\r\n at System.IO.FileStream.Init(String path, FileMode mode, FileAccess access, Int32 rights, Boolean useRights, FileShare share, Int32 bufferSize, FileOptions options, SECURITY_ATTRIBUTES secAttrs, String msgPath, Boolean bFromProxy, Boolean useLongPath, Boolean checkHost)\r\n at System.IO.FileStream..ctor(String path, FileMode mode, FileAccess access, FileShare share)\r\n at System.Drawing.Icon..ctor(String fileName, Int32 width, Int32 height)\r\n at Python.Runtime.Dispatcher.Dispatch(ArrayList args)\r\n at System.Threading.ExecutionContext.RunInternal(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)\r\n at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state, Boolean preserveSyncCtx)\r\n at System.Threading.ExecutionContext.Run(ExecutionContext executionContext, ContextCallback callback, Object state)\r\n at System.Threading.ThreadHelper.ThreadStart()\r\n```\n", "before_files": [{"content": "#/usr/bin/env python\nimport io\nimport re\n\nfrom setuptools import setup, find_packages\n\nwith io.open('toga/__init__.py', encoding='utf8') as version_file:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M)\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\n\nwith io.open('README.rst', encoding='utf8') as readme:\n long_description = readme.read()\n\n\nsetup(\n name='toga-core',\n version=version,\n description='A Python native, OS native GUI toolkit.',\n long_description=long_description,\n author='Russell Keith-Magee',\n author_email='[email protected]',\n url='http://pybee.org/toga',\n packages=find_packages(exclude='tests'),\n python_requires='>=3.5',\n package_data={\n 'toga': ['resources/*.icns', 'resources/*.png'],\n },\n include_package_data=True,\n install_requires=[\n 'travertino>=0.1.0'\n ],\n tests_require=[\n 'toga-dummy==%s' % version\n ],\n license='New BSD',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: User 
Interfaces',\n 'Topic :: Software Development :: Widget Sets',\n ],\n test_suite='tests',\n zip_safe=False,\n)\n", "path": "src/core/setup.py"}]} | 1,415 | 114 |