Dataset schema (column, viewer dtype, value range; "stringlengths" means a string column with the shown min/max length, "stringclasses" a string column with the shown number of distinct values):

    problem_id           stringlengths   18 – 22
    source               stringclasses   1 value
    task_type            stringclasses   1 value
    in_source_id         stringlengths   13 – 58
    prompt               stringlengths   1.71k – 9.01k
    golden_diff          stringlengths   151 – 4.94k
    verification_info    stringlengths   465 – 11.3k
    num_tokens_prompt    int64           557 – 2.05k
    num_tokens_diff      int64           48 – 1.02k
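Each record below is exported in the field order given above: problem_id, source, task_type, in_source_id, prompt, golden_diff, verification_info, num_tokens_prompt, num_tokens_diff. A minimal sketch of loading and inspecting the dataset with the Hugging Face `datasets` library follows; the repository id comes from the `source` column, while the split name "train" is an assumption, since this excerpt does not name a split.

```python
# Sketch: load the dataset and inspect one record. The repository id is
# taken from the `source` column above; split="train" is an assumption
# not confirmed by this excerpt.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])      # e.g. "gh_patches_debug_5571"
print(row["in_source_id"])    # upstream issue id, e.g. "certbot__certbot-6099"
print(row["num_tokens_prompt"], row["num_tokens_diff"])

# `prompt` carries the full task text (issue statement plus code context);
# `golden_diff` carries the reference patch in unified diff format.
print(row["prompt"][:500])
print(row["golden_diff"])
```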
gh_patches_debug_5571
rasdani/github-patches
git_diff
certbot__certbot-6099
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> certbot-nginx requires acme >= 0.25 Because of the import of symbols `from acme.magic_typing`, the nginx plugin released in 0.25 depends on acme 0.25 or better. However, setup.py only lists `acme>0.21.1`, leading to a failure to build from source (and potential run-time failures). </issue> <code> [start of certbot-nginx/setup.py] 1 from setuptools import setup 2 from setuptools import find_packages 3 4 5 version = '0.26.0.dev0' 6 7 # Remember to update local-oldest-requirements.txt when changing the minimum 8 # acme/certbot version. 9 install_requires = [ 10 # This plugin works with an older version of acme, but Certbot does not. 11 # 0.22.0 is specified here to work around 12 # https://github.com/pypa/pip/issues/988. 13 'acme>0.21.1', 14 'certbot>0.21.1', 15 'mock', 16 'PyOpenSSL', 17 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary? 18 'setuptools', 19 'zope.interface', 20 ] 21 22 docs_extras = [ 23 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags 24 'sphinx_rtd_theme', 25 ] 26 27 setup( 28 name='certbot-nginx', 29 version=version, 30 description="Nginx plugin for Certbot", 31 url='https://github.com/letsencrypt/letsencrypt', 32 author="Certbot Project", 33 author_email='[email protected]', 34 license='Apache License 2.0', 35 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', 36 classifiers=[ 37 'Development Status :: 3 - Alpha', 38 'Environment :: Plugins', 39 'Intended Audience :: System Administrators', 40 'License :: OSI Approved :: Apache Software License', 41 'Operating System :: POSIX :: Linux', 42 'Programming Language :: Python', 43 'Programming Language :: Python :: 2', 44 'Programming Language :: Python :: 2.7', 45 'Programming Language :: Python :: 3', 46 'Programming Language :: Python :: 3.4', 47 'Programming Language :: Python :: 3.5', 48 'Programming Language :: Python :: 3.6', 49 'Topic :: Internet :: WWW/HTTP', 50 'Topic :: Security', 51 'Topic :: System :: Installation/Setup', 52 'Topic :: System :: Networking', 53 'Topic :: System :: Systems Administration', 54 'Topic :: Utilities', 55 ], 56 57 packages=find_packages(), 58 include_package_data=True, 59 install_requires=install_requires, 60 extras_require={ 61 'docs': docs_extras, 62 }, 63 entry_points={ 64 'certbot.plugins': [ 65 'nginx = certbot_nginx.configurator:NginxConfigurator', 66 ], 67 }, 68 test_suite='certbot_nginx', 69 ) 70 [end of certbot-nginx/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py --- a/certbot-nginx/setup.py +++ b/certbot-nginx/setup.py @@ -7,10 +7,7 @@ # Remember to update local-oldest-requirements.txt when changing the minimum # acme/certbot version. install_requires = [ - # This plugin works with an older version of acme, but Certbot does not. - # 0.22.0 is specified here to work around - # https://github.com/pypa/pip/issues/988. - 'acme>0.21.1', + 'acme>=0.25.0', 'certbot>0.21.1', 'mock', 'PyOpenSSL',
{"golden_diff": "diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py\n--- a/certbot-nginx/setup.py\n+++ b/certbot-nginx/setup.py\n@@ -7,10 +7,7 @@\n # Remember to update local-oldest-requirements.txt when changing the minimum\n # acme/certbot version.\n install_requires = [\n- # This plugin works with an older version of acme, but Certbot does not.\n- # 0.22.0 is specified here to work around\n- # https://github.com/pypa/pip/issues/988.\n- 'acme>0.21.1',\n+ 'acme>=0.25.0',\n 'certbot>0.21.1',\n 'mock',\n 'PyOpenSSL',\n", "issue": "certbot-nginx requires acme >= 0.25\nBecause of the import of symbols `from acme.magic_typing`, the nginx plugin released in 0.25 depends on acme 0.25 or better. However, setup.py only lists `acme>0.21.1`, leading to a failure to build from source (and potential run-time failures).\n", "before_files": [{"content": "from setuptools import setup\nfrom setuptools import find_packages\n\n\nversion = '0.26.0.dev0'\n\n# Remember to update local-oldest-requirements.txt when changing the minimum\n# acme/certbot version.\ninstall_requires = [\n # This plugin works with an older version of acme, but Certbot does not.\n # 0.22.0 is specified here to work around\n # https://github.com/pypa/pip/issues/988.\n 'acme>0.21.1',\n 'certbot>0.21.1',\n 'mock',\n 'PyOpenSSL',\n 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?\n 'setuptools',\n 'zope.interface',\n]\n\ndocs_extras = [\n 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags\n 'sphinx_rtd_theme',\n]\n\nsetup(\n name='certbot-nginx',\n version=version,\n description=\"Nginx plugin for Certbot\",\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Plugins',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(),\n include_package_data=True,\n install_requires=install_requires,\n extras_require={\n 'docs': docs_extras,\n },\n entry_points={\n 'certbot.plugins': [\n 'nginx = certbot_nginx.configurator:NginxConfigurator',\n ],\n },\n test_suite='certbot_nginx',\n)\n", "path": "certbot-nginx/setup.py"}]}
1,316
183
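As the row above shows, each record's verification_info field is a JSON object bundling the golden_diff, the issue text, and before_files (each entry a path plus its pre-patch file content), which is enough to check a candidate patch offline. A hedged sketch of one such check with `git apply` follows; the throwaway repository and the helper's name are illustrative choices, not something the dataset prescribes.

```python
# Sketch: write before_files into a scratch git repository and dry-run a
# candidate patch with `git apply --check`. Hypothetical helper; the
# dataset does not prescribe a verification procedure.
import json
import pathlib
import subprocess
import tempfile

def patch_applies(verification_info: str, candidate_diff: str) -> bool:
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as tmp:
        repo = pathlib.Path(tmp)
        subprocess.run(["git", "init", "-q"], cwd=repo, check=True)
        for f in info["before_files"]:
            target = repo / f["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"])
        (repo / "candidate.patch").write_text(candidate_diff)
        # --check validates applicability without touching the tree; the
        # diffs use the default a/ b/ prefixes, so no -p override is needed.
        result = subprocess.run(
            ["git", "apply", "--check", "candidate.patch"], cwd=repo
        )
        return result.returncode == 0
```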
gh_patches_debug_20003
rasdani/github-patches
git_diff
searx__searx-925
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add a software categorie and add the Free software directory search engine Shame on me I forgot to ask this. I am a volunteer on the [FSD](https://directory.fsf.org/wiki/Main_Page) (Free software directory) It would be nice if people could look for free/libre software in the searx engine. When possible could someone please add the free software directory so that people can easily find free software. </issue> <code> [start of searx/engines/mediawiki.py] 1 """ 2 general mediawiki-engine (Web) 3 4 @website websites built on mediawiki (https://www.mediawiki.org) 5 @provide-api yes (http://www.mediawiki.org/wiki/API:Search) 6 7 @using-api yes 8 @results JSON 9 @stable yes 10 @parse url, title 11 12 @todo content 13 """ 14 15 from json import loads 16 from string import Formatter 17 from searx.url_utils import urlencode, quote 18 19 # engine dependent config 20 categories = ['general'] 21 language_support = True 22 paging = True 23 number_of_results = 1 24 25 # search-url 26 base_url = 'https://{language}.wikipedia.org/' 27 search_postfix = 'w/api.php?action=query'\ 28 '&list=search'\ 29 '&{query}'\ 30 '&format=json'\ 31 '&sroffset={offset}'\ 32 '&srlimit={limit}'\ 33 '&srwhat=nearmatch' # search for a near match in the title 34 35 36 # do search-request 37 def request(query, params): 38 offset = (params['pageno'] - 1) * number_of_results 39 40 string_args = dict(query=urlencode({'srsearch': query}), 41 offset=offset, 42 limit=number_of_results) 43 44 format_strings = list(Formatter().parse(base_url)) 45 46 if params['language'] == 'all': 47 language = 'en' 48 else: 49 language = params['language'].split('-')[0] 50 51 # format_string [('https://', 'language', '', None), ('.wikipedia.org/', None, None, None)] 52 if any(x[1] == 'language' for x in format_strings): 53 string_args['language'] = language 54 55 # write search-language back to params, required in response 56 params['language'] = language 57 58 search_url = base_url + search_postfix 59 60 params['url'] = search_url.format(**string_args) 61 62 return params 63 64 65 # get response from search-request 66 def response(resp): 67 results = [] 68 69 search_results = loads(resp.text) 70 71 # return empty array if there are no results 72 if not search_results.get('query', {}).get('search'): 73 return [] 74 75 # parse results 76 for result in search_results['query']['search']: 77 if result.get('snippet', '').startswith('#REDIRECT'): 78 continue 79 url = base_url.format(language=resp.search_params['language']) +\ 80 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8')) 81 82 # append result 83 results.append({'url': url, 84 'title': result['title'], 85 'content': ''}) 86 87 # return results 88 return results 89 [end of searx/engines/mediawiki.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/engines/mediawiki.py b/searx/engines/mediawiki.py --- a/searx/engines/mediawiki.py +++ b/searx/engines/mediawiki.py @@ -21,6 +21,7 @@ language_support = True paging = True number_of_results = 1 +search_type = 'nearmatch' # possible values: title, text, nearmatch # search-url base_url = 'https://{language}.wikipedia.org/' @@ -30,7 +31,7 @@ '&format=json'\ '&sroffset={offset}'\ '&srlimit={limit}'\ - '&srwhat=nearmatch' # search for a near match in the title + '&srwhat={searchtype}' # do search-request @@ -39,7 +40,8 @@ string_args = dict(query=urlencode({'srsearch': query}), offset=offset, - limit=number_of_results) + limit=number_of_results, + searchtype=search_type) format_strings = list(Formatter().parse(base_url))
{"golden_diff": "diff --git a/searx/engines/mediawiki.py b/searx/engines/mediawiki.py\n--- a/searx/engines/mediawiki.py\n+++ b/searx/engines/mediawiki.py\n@@ -21,6 +21,7 @@\n language_support = True\n paging = True\n number_of_results = 1\n+search_type = 'nearmatch' # possible values: title, text, nearmatch\n \n # search-url\n base_url = 'https://{language}.wikipedia.org/'\n@@ -30,7 +31,7 @@\n '&format=json'\\\n '&sroffset={offset}'\\\n '&srlimit={limit}'\\\n- '&srwhat=nearmatch' # search for a near match in the title\n+ '&srwhat={searchtype}'\n \n \n # do search-request\n@@ -39,7 +40,8 @@\n \n string_args = dict(query=urlencode({'srsearch': query}),\n offset=offset,\n- limit=number_of_results)\n+ limit=number_of_results,\n+ searchtype=search_type)\n \n format_strings = list(Formatter().parse(base_url))\n", "issue": "Add a software categorie and add the Free software directory search engine\nShame on me I forgot to ask this.\r\nI am a volunteer on the [FSD](https://directory.fsf.org/wiki/Main_Page) (Free software directory)\r\nIt would be nice if people could look for free/libre software in the searx engine.\r\nWhen possible could someone please add the free software directory so that people can easily find free software.\n", "before_files": [{"content": "\"\"\"\n general mediawiki-engine (Web)\n\n @website websites built on mediawiki (https://www.mediawiki.org)\n @provide-api yes (http://www.mediawiki.org/wiki/API:Search)\n\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title\n\n @todo content\n\"\"\"\n\nfrom json import loads\nfrom string import Formatter\nfrom searx.url_utils import urlencode, quote\n\n# engine dependent config\ncategories = ['general']\nlanguage_support = True\npaging = True\nnumber_of_results = 1\n\n# search-url\nbase_url = 'https://{language}.wikipedia.org/'\nsearch_postfix = 'w/api.php?action=query'\\\n '&list=search'\\\n '&{query}'\\\n '&format=json'\\\n '&sroffset={offset}'\\\n '&srlimit={limit}'\\\n '&srwhat=nearmatch' # search for a near match in the title\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * number_of_results\n\n string_args = dict(query=urlencode({'srsearch': query}),\n offset=offset,\n limit=number_of_results)\n\n format_strings = list(Formatter().parse(base_url))\n\n if params['language'] == 'all':\n language = 'en'\n else:\n language = params['language'].split('-')[0]\n\n # format_string [('https://', 'language', '', None), ('.wikipedia.org/', None, None, None)]\n if any(x[1] == 'language' for x in format_strings):\n string_args['language'] = language\n\n # write search-language back to params, required in response\n params['language'] = language\n\n search_url = base_url + search_postfix\n\n params['url'] = search_url.format(**string_args)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n search_results = loads(resp.text)\n\n # return empty array if there are no results\n if not search_results.get('query', {}).get('search'):\n return []\n\n # parse results\n for result in search_results['query']['search']:\n if result.get('snippet', '').startswith('#REDIRECT'):\n continue\n url = base_url.format(language=resp.search_params['language']) +\\\n 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))\n\n # append result\n results.append({'url': url,\n 'title': result['title'],\n 'content': ''})\n\n # return results\n return results\n", "path": "searx/engines/mediawiki.py"}]}
1,395
259
gh_patches_debug_63962
rasdani/github-patches
git_diff
redis__redis-py-1678
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CI run to install the built package In light of bug #1645 we should amend our CI run to install the built package, in a new virtual env and run something simple like a redis.Redis().ping(). Eventually we could build up to running the full integration test against the package. CI run to install the built package In light of bug #1645 we should amend our CI run to install the built package, in a new virtual env and run something simple like a redis.Redis().ping(). Eventually we could build up to running the full integration test against the package. </issue> <code> [start of tasks.py] 1 import os 2 import shutil 3 from invoke import task, run 4 5 with open('tox.ini') as fp: 6 lines = fp.read().split("\n") 7 dockers = [line.split("=")[1].strip() for line in lines 8 if line.find("name") != -1] 9 10 11 @task 12 def devenv(c): 13 """Builds a development environment: downloads, and starts all dockers 14 specified in the tox.ini file. 15 """ 16 clean(c) 17 cmd = 'tox -e devenv' 18 for d in dockers: 19 cmd += " --docker-dont-stop={}".format(d) 20 run(cmd) 21 22 23 @task 24 def linters(c): 25 """Run code linters""" 26 run("tox -e linters") 27 28 29 @task 30 def all_tests(c): 31 """Run all linters, and tests in redis-py. This assumes you have all 32 the python versions specified in the tox.ini file. 33 """ 34 linters(c) 35 tests(c) 36 37 38 @task 39 def tests(c): 40 """Run the redis-py test suite against the current python, 41 with and without hiredis. 42 """ 43 run("tox -e plain -e hiredis") 44 45 46 @task 47 def clean(c): 48 """Stop all dockers, and clean up the built binaries, if generated.""" 49 if os.path.isdir("build"): 50 shutil.rmtree("build") 51 if os.path.isdir("dist"): 52 shutil.rmtree("dist") 53 run("docker rm -f {}".format(' '.join(dockers))) 54 55 56 @task 57 def package(c): 58 """Create the python packages""" 59 run("python setup.py build install") 60 [end of tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tasks.py b/tasks.py --- a/tasks.py +++ b/tasks.py @@ -56,4 +56,4 @@ @task def package(c): """Create the python packages""" - run("python setup.py build install") + run("python setup.py sdist bdist_wheel")
{"golden_diff": "diff --git a/tasks.py b/tasks.py\n--- a/tasks.py\n+++ b/tasks.py\n@@ -56,4 +56,4 @@\n @task\n def package(c):\n \"\"\"Create the python packages\"\"\"\n- run(\"python setup.py build install\")\n+ run(\"python setup.py sdist bdist_wheel\")\n", "issue": "CI run to install the built package\nIn light of bug #1645 we should amend our CI run to install the built package, in a new virtual env and run something simple like a redis.Redis().ping(). Eventually we could build up to running the full integration test against the package.\nCI run to install the built package\nIn light of bug #1645 we should amend our CI run to install the built package, in a new virtual env and run something simple like a redis.Redis().ping(). Eventually we could build up to running the full integration test against the package.\n", "before_files": [{"content": "import os\nimport shutil\nfrom invoke import task, run\n\nwith open('tox.ini') as fp:\n lines = fp.read().split(\"\\n\")\n dockers = [line.split(\"=\")[1].strip() for line in lines\n if line.find(\"name\") != -1]\n\n\n@task\ndef devenv(c):\n \"\"\"Builds a development environment: downloads, and starts all dockers\n specified in the tox.ini file.\n \"\"\"\n clean(c)\n cmd = 'tox -e devenv'\n for d in dockers:\n cmd += \" --docker-dont-stop={}\".format(d)\n run(cmd)\n\n\n@task\ndef linters(c):\n \"\"\"Run code linters\"\"\"\n run(\"tox -e linters\")\n\n\n@task\ndef all_tests(c):\n \"\"\"Run all linters, and tests in redis-py. This assumes you have all\n the python versions specified in the tox.ini file.\n \"\"\"\n linters(c)\n tests(c)\n\n\n@task\ndef tests(c):\n \"\"\"Run the redis-py test suite against the current python,\n with and without hiredis.\n \"\"\"\n run(\"tox -e plain -e hiredis\")\n\n\n@task\ndef clean(c):\n \"\"\"Stop all dockers, and clean up the built binaries, if generated.\"\"\"\n if os.path.isdir(\"build\"):\n shutil.rmtree(\"build\")\n if os.path.isdir(\"dist\"):\n shutil.rmtree(\"dist\")\n run(\"docker rm -f {}\".format(' '.join(dockers)))\n\n\n@task\ndef package(c):\n \"\"\"Create the python packages\"\"\"\n run(\"python setup.py build install\")\n", "path": "tasks.py"}]}
1,123
70
gh_patches_debug_1769
rasdani/github-patches
git_diff
googleapis__google-auth-library-python-697
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Missing pyOpenSSL Dependency Thanks for stopping by to let us know something could be better! **PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. Please run down the following list and make sure you've tried the usual "quick fixes": - Search the issues already opened: https://github.com/googleapis/google-auth-library-python/issues If you are still having issues, please be sure to include as much information as possible: #### Environment details - OS: - Python version: - pip version: - `google-auth` version: #### Steps to reproduce 1. Missing pyOpenSSL dependency in setup.py For the tests there is a requirement in https://github.com/googleapis/google-auth-library-python/blob/master/noxfile.py against pyOpenSSL. But there are imports for pyOpenSSL in multiple modules in the code. Should pyOpenSSL be added to the requirements in setup.py? I created https://github.com/googleapis/google-auth-library-python/pull/550 with the proposal but wanted to get feedback from an issue first as I don't see this showing up in previous issues or pull requests. Making sure to follow these steps will guarantee the quickest resolution possible. Thanks! </issue> <code> [start of setup.py] 1 # Copyright 2014 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import io 16 17 from setuptools import find_packages 18 from setuptools import setup 19 20 21 DEPENDENCIES = ( 22 "cachetools>=2.0.0,<5.0", 23 "pyasn1-modules>=0.2.1", 24 # rsa==4.5 is the last version to support 2.7 25 # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233 26 'rsa<4.6; python_version < "3.6"', 27 'rsa>=3.1.4,<5; python_version >= "3.6"', 28 "setuptools>=40.3.0", 29 "six>=1.9.0", 30 ) 31 32 extras = {"aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'"} 33 34 with io.open("README.rst", "r") as fh: 35 long_description = fh.read() 36 37 version = "1.26.1" 38 39 setup( 40 name="google-auth", 41 version=version, 42 author="Google Cloud Platform", 43 author_email="[email protected]", 44 description="Google Authentication Library", 45 long_description=long_description, 46 url="https://github.com/googleapis/google-auth-library-python", 47 packages=find_packages(exclude=("tests*", "system_tests*")), 48 namespace_packages=("google",), 49 install_requires=DEPENDENCIES, 50 extras_require=extras, 51 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*", 52 license="Apache 2.0", 53 keywords="google auth oauth client", 54 classifiers=[ 55 "Programming Language :: Python :: 2", 56 "Programming Language :: Python :: 2.7", 57 "Programming Language :: Python :: 3", 58 "Programming Language :: Python :: 3.6", 59 "Programming Language :: Python :: 3.7", 60 "Programming Language :: Python :: 3.8", 61 "Programming Language :: Python :: 3.9", 62 "Development Status :: 5 - Production/Stable", 63 "Intended Audience :: Developers", 64 "License :: OSI Approved :: Apache Software License", 65 "Operating System :: POSIX", 66 "Operating System :: Microsoft :: Windows", 67 "Operating System :: MacOS :: MacOS X", 68 "Operating System :: OS Independent", 69 "Topic :: Internet :: WWW/HTTP", 70 ], 71 ) 72 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -29,7 +29,10 @@ "six>=1.9.0", ) -extras = {"aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'"} +extras = { + "aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'", + "pyopenssl": "pyopenssl>=20.0.0", +} with io.open("README.rst", "r") as fh: long_description = fh.read()
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,10 @@\n \"six>=1.9.0\",\n )\n \n-extras = {\"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\"}\n+extras = {\n+ \"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\",\n+ \"pyopenssl\": \"pyopenssl>=20.0.0\",\n+}\n \n with io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n", "issue": "Missing pyOpenSSL Dependency\nThanks for stopping by to let us know something could be better!\r\n\r\n**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response.\r\n\r\nPlease run down the following list and make sure you've tried the usual \"quick fixes\":\r\n\r\n - Search the issues already opened: https://github.com/googleapis/google-auth-library-python/issues\r\n\r\nIf you are still having issues, please be sure to include as much information as possible:\r\n\r\n#### Environment details\r\n\r\n - OS:\r\n - Python version:\r\n - pip version:\r\n - `google-auth` version:\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Missing pyOpenSSL dependency in setup.py\r\n\r\nFor the tests there is a requirement in https://github.com/googleapis/google-auth-library-python/blob/master/noxfile.py against pyOpenSSL. But there are imports for pyOpenSSL in multiple modules in the code. Should pyOpenSSL be added to the requirements in setup.py?\r\n\r\nI created https://github.com/googleapis/google-auth-library-python/pull/550 with the proposal but wanted to get feedback from an issue first as I don't see this showing up in previous issues or pull requests.\r\n\r\nMaking sure to follow these steps will guarantee the quickest resolution possible.\r\n\r\nThanks!\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.6\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n)\n\nextras = {\"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\"}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\nversion = \"1.26.1\"\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n 
python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]}
1,625
165
gh_patches_debug_235
rasdani/github-patches
git_diff
scikit-hep__pyhf-1460
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Logging configuration in contrib/utils # Question `pyhf.contrib.utils` sets up logging: https://github.com/scikit-hep/pyhf/blob/6b769fd6f5e1473deba2b4c55d49ebdb3db5b447/src/pyhf/contrib/utils.py#L9 This interferes with custom logging users may want to set up. To achieve this now, they would have to do so before `from pyhf.contrib.utils import download`. To avoid this issue, the logging should not be configured in this part of the code (and only for the CLI). # Relevant Issues and Pull Requests #865 User-defined log formatting # Description `pyhf` uses `logging` for outputs, and calls `logging.basicConfig()` in a few places. This has the effect of preventing the user to set their desired logging behavior after `pyhf` import. While calling this a bug might be a bit of a stretch, I think it might be unintentional since `pyhf` does not apply any logging formatting as far as I can tell. # Expected Behavior I expect no calls to `logging.basicConfig()` within `pyhf` to leave the formatting fully up to the user, no matter whether they want to set it before or after importing `pyhf`. # Actual Behavior User-defined `logging` formatting only works before importing `pyhf`. # Steps to Reproduce importing `pyhf` before formatting: ``` import logging import pyhf print(pyhf.__version__) logging.basicConfig(level=logging.INFO) log = logging.getLogger(__name__) log.info("message") ``` output: ``` 0.4.1 ``` and when applying formatting before input, the expected behavior: ``` import logging logging.basicConfig(level=logging.INFO) import pyhf print(pyhf.__version__) log = logging.getLogger(__name__) log.info("message") ``` output: ``` 0.4.1 INFO:__main__:message ``` # Checklist - [ ] Run `git fetch` to get the most up to date version of `master` - no, but checked code on master to confirm that the relevant part is unchanged - [X] Searched through existing Issues to confirm this is not a duplicate issue - [X] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue </issue> <code> [start of src/pyhf/contrib/utils.py] 1 """Helper utilities for common tasks.""" 2 3 from urllib.parse import urlparse 4 import tarfile 5 from io import BytesIO 6 import logging 7 from .. import exceptions 8 9 logging.basicConfig() 10 log = logging.getLogger(__name__) 11 12 __all__ = ["download"] 13 14 15 def __dir__(): 16 return __all__ 17 18 19 try: 20 import requests 21 22 def download(archive_url, output_directory, force=False, compress=False): 23 """ 24 Download the patchset archive from the remote URL and extract it in a 25 directory at the path given. 26 27 Example: 28 29 >>> from pyhf.contrib.utils import download 30 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods") 31 >>> import os 32 >>> sorted(os.listdir("1Lbb-likelihoods")) 33 ['BkgOnly.json', 'README.md', 'patchset.json'] 34 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods.tar.gz", compress=True) 35 >>> import glob 36 >>> glob.glob("1Lbb-likelihoods.tar.gz") 37 ['1Lbb-likelihoods.tar.gz'] 38 39 Args: 40 archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download. 41 output_directory (:obj:`str`): Name of the directory to unpack the archive into. 42 force (:obj:`bool`): Force download from non-approved host. Default is ``False``. 
43 compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``. 44 45 Raises: 46 :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid 47 """ 48 if not force: 49 valid_hosts = ["www.hepdata.net", "doi.org"] 50 netloc = urlparse(archive_url).netloc 51 if netloc not in valid_hosts: 52 raise exceptions.InvalidArchiveHost( 53 f"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\n" 54 + "To download an archive from this host use the --force option." 55 ) 56 57 with requests.get(archive_url) as response: 58 if compress: 59 with open(output_directory, "wb") as archive: 60 archive.write(response.content) 61 else: 62 with tarfile.open( 63 mode="r|gz", fileobj=BytesIO(response.content) 64 ) as archive: 65 archive.extractall(output_directory) 66 67 68 except ModuleNotFoundError: 69 log.error( 70 "\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download" 71 + "\nPlease install with: python -m pip install pyhf[contrib]\n", 72 exc_info=True, 73 ) 74 [end of src/pyhf/contrib/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py --- a/src/pyhf/contrib/utils.py +++ b/src/pyhf/contrib/utils.py @@ -6,7 +6,6 @@ import logging from .. import exceptions -logging.basicConfig() log = logging.getLogger(__name__) __all__ = ["download"]
{"golden_diff": "diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py\n--- a/src/pyhf/contrib/utils.py\n+++ b/src/pyhf/contrib/utils.py\n@@ -6,7 +6,6 @@\n import logging\n from .. import exceptions\n \n-logging.basicConfig()\n log = logging.getLogger(__name__)\n \n __all__ = [\"download\"]\n", "issue": "Logging configuration in contrib/utils\n# Question\r\n\r\n`pyhf.contrib.utils` sets up logging:\r\nhttps://github.com/scikit-hep/pyhf/blob/6b769fd6f5e1473deba2b4c55d49ebdb3db5b447/src/pyhf/contrib/utils.py#L9 \r\n\r\nThis interferes with custom logging users may want to set up. To achieve this now, they would have to do so before `from pyhf.contrib.utils import download`. To avoid this issue, the logging should not be configured in this part of the code (and only for the CLI).\r\n\r\n# Relevant Issues and Pull Requests\r\n\r\n#865\r\n\nUser-defined log formatting\n# Description\r\n\r\n`pyhf` uses `logging` for outputs, and calls `logging.basicConfig()` in a few places.\r\nThis has the effect of preventing the user to set their desired logging behavior after `pyhf` import.\r\nWhile calling this a bug might be a bit of a stretch, I think it might be unintentional since `pyhf` does not apply any logging formatting as far as I can tell.\r\n\r\n# Expected Behavior\r\n\r\nI expect no calls to `logging.basicConfig()` within `pyhf` to leave the formatting fully up to the user, no matter whether they want to set it before or after importing `pyhf`.\r\n\r\n# Actual Behavior\r\n\r\nUser-defined `logging` formatting only works before importing `pyhf`.\r\n\r\n# Steps to Reproduce\r\n\r\nimporting `pyhf` before formatting:\r\n```\r\nimport logging\r\nimport pyhf\r\nprint(pyhf.__version__)\r\nlogging.basicConfig(level=logging.INFO)\r\nlog = logging.getLogger(__name__)\r\nlog.info(\"message\")\r\n```\r\noutput:\r\n```\r\n0.4.1\r\n```\r\nand when applying formatting before input, the expected behavior:\r\n```\r\nimport logging\r\nlogging.basicConfig(level=logging.INFO)\r\nimport pyhf\r\nprint(pyhf.__version__)\r\nlog = logging.getLogger(__name__)\r\nlog.info(\"message\")\r\n```\r\noutput:\r\n```\r\n0.4.1\r\nINFO:__main__:message\r\n``` \r\n\r\n# Checklist\r\n\r\n- [ ] Run `git fetch` to get the most up to date version of `master`\r\n - no, but checked code on master to confirm that the relevant part is unchanged\r\n- [X] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [X] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "\"\"\"Helper utilities for common tasks.\"\"\"\n\nfrom urllib.parse import urlparse\nimport tarfile\nfrom io import BytesIO\nimport logging\nfrom .. 
import exceptions\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n__all__ = [\"download\"]\n\n\ndef __dir__():\n return __all__\n\n\ntry:\n import requests\n\n def download(archive_url, output_directory, force=False, compress=False):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n >>> from pyhf.contrib.utils import download\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods\")\n >>> import os\n >>> sorted(os.listdir(\"1Lbb-likelihoods\"))\n ['BkgOnly.json', 'README.md', 'patchset.json']\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods.tar.gz\", compress=True)\n >>> import glob\n >>> glob.glob(\"1Lbb-likelihoods.tar.gz\")\n ['1Lbb-likelihoods.tar.gz']\n\n Args:\n archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.\n output_directory (:obj:`str`): Name of the directory to unpack the archive into.\n force (:obj:`bool`): Force download from non-approved host. Default is ``False``.\n compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n if not force:\n valid_hosts = [\"www.hepdata.net\", \"doi.org\"]\n netloc = urlparse(archive_url).netloc\n if netloc not in valid_hosts:\n raise exceptions.InvalidArchiveHost(\n f\"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\\n\"\n + \"To download an archive from this host use the --force option.\"\n )\n\n with requests.get(archive_url) as response:\n if compress:\n with open(output_directory, \"wb\") as archive:\n archive.write(response.content)\n else:\n with tarfile.open(\n mode=\"r|gz\", fileobj=BytesIO(response.content)\n ) as archive:\n archive.extractall(output_directory)\n\n\nexcept ModuleNotFoundError:\n log.error(\n \"\\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\",\n exc_info=True,\n )\n", "path": "src/pyhf/contrib/utils.py"}]}
1,829
78
gh_patches_debug_8585
rasdani/github-patches
git_diff
scrapy__scrapy-4759
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add a job for Python 3.9 to .travis.yml It looks like Travis support specifying such a Python version as `3.9-dev`. While I’m not sure we should officially support Python 3.9 it until its release, running tests on it will allow us to catch any issue early. </issue> <code> [start of setup.py] 1 from os.path import dirname, join 2 from pkg_resources import parse_version 3 from setuptools import setup, find_packages, __version__ as setuptools_version 4 5 6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f: 7 version = f.read().decode('ascii').strip() 8 9 10 def has_environment_marker_platform_impl_support(): 11 """Code extracted from 'pytest/setup.py' 12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31 13 14 The first known release to support environment marker with range operators 15 it is 18.5, see: 16 https://setuptools.readthedocs.io/en/latest/history.html#id235 17 """ 18 return parse_version(setuptools_version) >= parse_version('18.5') 19 20 21 install_requires = [ 22 'Twisted>=17.9.0', 23 'cryptography>=2.0', 24 'cssselect>=0.9.1', 25 'itemloaders>=1.0.1', 26 'parsel>=1.5.0', 27 'pyOpenSSL>=16.2.0', 28 'queuelib>=1.4.2', 29 'service_identity>=16.0.0', 30 'w3lib>=1.17.0', 31 'zope.interface>=4.1.3', 32 'protego>=0.1.15', 33 'itemadapter>=0.1.0', 34 ] 35 extras_require = {} 36 cpython_dependencies = [ 37 'lxml>=3.5.0', 38 'PyDispatcher>=2.0.5', 39 ] 40 if has_environment_marker_platform_impl_support(): 41 extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies 42 extras_require[':platform_python_implementation == "PyPy"'] = [ 43 # Earlier lxml versions are affected by 44 # https://foss.heptapod.net/pypy/pypy/-/issues/2498, 45 # which was fixed in Cython 0.26, released on 2017-06-19, and used to 46 # generate the C headers of lxml release tarballs published since then, the 47 # first of which was: 48 'lxml>=4.0.0', 49 'PyPyDispatcher>=2.1.0', 50 ] 51 else: 52 install_requires.extend(cpython_dependencies) 53 54 55 setup( 56 name='Scrapy', 57 version=version, 58 url='https://scrapy.org', 59 project_urls={ 60 'Documentation': 'https://docs.scrapy.org/', 61 'Source': 'https://github.com/scrapy/scrapy', 62 'Tracker': 'https://github.com/scrapy/scrapy/issues', 63 }, 64 description='A high-level Web Crawling and Web Scraping framework', 65 long_description=open('README.rst').read(), 66 author='Scrapy developers', 67 maintainer='Pablo Hoffman', 68 maintainer_email='[email protected]', 69 license='BSD', 70 packages=find_packages(exclude=('tests', 'tests.*')), 71 include_package_data=True, 72 zip_safe=False, 73 entry_points={ 74 'console_scripts': ['scrapy = scrapy.cmdline:execute'] 75 }, 76 classifiers=[ 77 'Framework :: Scrapy', 78 'Development Status :: 5 - Production/Stable', 79 'Environment :: Console', 80 'Intended Audience :: Developers', 81 'License :: OSI Approved :: BSD License', 82 'Operating System :: OS Independent', 83 'Programming Language :: Python', 84 'Programming Language :: Python :: 3', 85 'Programming Language :: Python :: 3.6', 86 'Programming Language :: Python :: 3.7', 87 'Programming Language :: Python :: 3.8', 88 'Programming Language :: Python :: Implementation :: CPython', 89 'Programming Language :: Python :: Implementation :: PyPy', 90 'Topic :: Internet :: WWW/HTTP', 91 'Topic :: Software Development :: Libraries :: Application Frameworks', 92 'Topic :: Software Development :: Libraries :: Python 
Modules', 93 ], 94 python_requires='>=3.6', 95 install_requires=install_requires, 96 extras_require=extras_require, 97 ) 98 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -85,6 +85,7 @@ 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Internet :: WWW/HTTP',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -85,6 +85,7 @@\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n+ 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n", "issue": "Add a job for Python 3.9 to .travis.yml\nIt looks like Travis support specifying such a Python version as `3.9-dev`.\r\n\r\nWhile I\u2019m not sure we should officially support Python 3.9 it until its release, running tests on it will allow us to catch any issue early.\n", "before_files": [{"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=17.9.0',\n 'cryptography>=2.0',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=16.2.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=4.1.3',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n]\nextras_require = {}\ncpython_dependencies = [\n 'lxml>=3.5.0',\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n # Earlier lxml versions are affected by\n # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n # generate the C headers of lxml release tarballs published since then, the\n # first of which was:\n 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python 
:: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}]}
1,672
116
gh_patches_debug_1345
rasdani/github-patches
git_diff
castorini__pyserini-667
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Switch to jnius_config.add_classpath Currently, pyserini replaces any previously registered jars on the classpath in its setup code. Is there any reason to not use add_classpath() instead of set_classpath()? Here is the pyjnius relevant code: ```python def set_classpath(*path): """ Sets the classpath for the JVM to use. Replaces any existing classpath, overriding the CLASSPATH environment variable. """ check_vm_running() global classpath classpath = list(path) def add_classpath(*path): """ Appends items to the classpath for the JVM to use. Replaces any existing classpath, overriding the CLASSPATH environment variable. """ check_vm_running() global classpath if classpath is None: classpath = list(path) else: classpath.extend(path) ``` </issue> <code> [start of pyserini/setup.py] 1 # 2 # Pyserini: Reproducible IR research with sparse and dense representations 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 # 16 17 """ 18 Module for adding Anserini jar to classpath for pyjnius usage 19 """ 20 21 import glob 22 import os 23 24 import jnius_config 25 26 27 def configure_classpath(anserini_root="."): 28 """ 29 Parameters 30 ---------- 31 anserini_root : str 32 (Optional) path to root anserini directory. 33 34 """ 35 paths = glob.glob(os.path.join(anserini_root, 'anserini-*-fatjar.jar')) 36 if not paths: 37 raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root))) 38 39 latest = max(paths, key=os.path.getctime) 40 jnius_config.set_classpath(latest) 41 [end of pyserini/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyserini/setup.py b/pyserini/setup.py --- a/pyserini/setup.py +++ b/pyserini/setup.py @@ -37,4 +37,4 @@ raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root))) latest = max(paths, key=os.path.getctime) - jnius_config.set_classpath(latest) + jnius_config.add_classpath(latest)
{"golden_diff": "diff --git a/pyserini/setup.py b/pyserini/setup.py\n--- a/pyserini/setup.py\n+++ b/pyserini/setup.py\n@@ -37,4 +37,4 @@\n raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root)))\n \n latest = max(paths, key=os.path.getctime)\n- jnius_config.set_classpath(latest)\n+ jnius_config.add_classpath(latest)\n", "issue": "Switch to jnius_config.add_classpath\nCurrently, pyserini replaces any previously registered jars on the classpath in its setup code. Is there any reason to not use add_classpath() instead of set_classpath()?\r\n\r\nHere is the pyjnius relevant code:\r\n```python\r\ndef set_classpath(*path):\r\n \"\"\"\r\n Sets the classpath for the JVM to use. Replaces any existing classpath, overriding the CLASSPATH environment variable.\r\n \"\"\"\r\n check_vm_running()\r\n global classpath\r\n classpath = list(path)\r\n\r\n\r\ndef add_classpath(*path):\r\n \"\"\"\r\n Appends items to the classpath for the JVM to use.\r\n Replaces any existing classpath, overriding the CLASSPATH environment variable.\r\n \"\"\"\r\n check_vm_running()\r\n global classpath\r\n if classpath is None:\r\n classpath = list(path)\r\n else:\r\n classpath.extend(path)\r\n```\n", "before_files": [{"content": "#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nModule for adding Anserini jar to classpath for pyjnius usage\n\"\"\"\n\nimport glob\nimport os\n\nimport jnius_config\n\n\ndef configure_classpath(anserini_root=\".\"):\n \"\"\"\n Parameters\n ----------\n anserini_root : str\n (Optional) path to root anserini directory.\n\n \"\"\"\n paths = glob.glob(os.path.join(anserini_root, 'anserini-*-fatjar.jar'))\n if not paths:\n raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root)))\n\n latest = max(paths, key=os.path.getctime)\n jnius_config.set_classpath(latest)\n", "path": "pyserini/setup.py"}]}
1,080
102
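For reference, a minimal sketch of `configure_classpath` after the golden diff above is applied. It assumes pyjnius is installed and that the JVM has not been started yet (so `jnius_config` is still mutable); the only behavioural change is that `add_classpath` appends the jar instead of clobbering entries registered earlier by other libraries.

```python
# Sketch of pyserini/setup.py after the fix (assumes pyjnius is installed
# and the JVM hasn't started, so the classpath can still be modified).
import glob
import os

import jnius_config


def configure_classpath(anserini_root="."):
    paths = glob.glob(os.path.join(anserini_root, 'anserini-*-fatjar.jar'))
    if not paths:
        raise Exception('No matching jar file found in {}'.format(
            os.path.abspath(anserini_root)))
    latest = max(paths, key=os.path.getctime)
    # add_classpath() extends the global classpath list; set_classpath()
    # would have replaced jars registered by other callers.
    jnius_config.add_classpath(latest)
```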
gh_patches_debug_21488
rasdani/github-patches
git_diff
WeblateOrg__weblate-10189
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "Add missing languages" add-on not working ### Describe the issue I have enabled the "Add missing languages" add-on on https://hosted.weblate.org/projects/catima/. However, despite waiting over 24 hours as the documentation on https://docs.weblate.org/en/latest/admin/addons.html#addon-weblate-consistency-languages states, it has not put the different components of the same project in sync. This is most noticeable when comparing https://hosted.weblate.org/projects/catima/catima/ with https://hosted.weblate.org/projects/catima/android-debug/ ### I already tried - [X] I've read and searched [the documentation](https://docs.weblate.org/). - [X] I've searched for similar issues in this repository. ### Steps to reproduce the behavior 1. Enable the "Add missing languages" add-on in a project with multiple components where one component has less languages than the other 2. Wait at least 24 hours as the add-on states ### Expected behavior All components have the same languages, missing languages on components get created ### Screenshots Android component: ![image](https://github.com/WeblateOrg/weblate/assets/1885159/bae7c490-381f-4e13-a025-97c0a1494c01) Android (Debug) component: ![image](https://github.com/WeblateOrg/weblate/assets/1885159/03230fe9-c0a4-44e6-9dbe-ced43868fb69) ### Exception traceback _No response_ ### How do you run Weblate? weblate.org service ### Weblate versions _No response_ ### Weblate deploy checks _No response_ ### Additional context _No response_ </issue> <code> [start of weblate/addons/consistency.py] 1 # Copyright © Michal Čihař <[email protected]> 2 # 3 # SPDX-License-Identifier: GPL-3.0-or-later 4 5 from django.db.models import Q 6 from django.utils.translation import gettext_lazy 7 8 from weblate.addons.base import BaseAddon 9 from weblate.addons.events import EVENT_DAILY, EVENT_POST_ADD 10 from weblate.addons.tasks import language_consistency 11 from weblate.lang.models import Language 12 13 14 class LangaugeConsistencyAddon(BaseAddon): 15 events = (EVENT_DAILY, EVENT_POST_ADD) 16 name = "weblate.consistency.languages" 17 verbose = gettext_lazy("Add missing languages") 18 description = gettext_lazy( 19 "Ensures a consistent set of languages is used for all components " 20 "within a project." 21 ) 22 icon = "language.svg" 23 project_scope = True 24 25 def daily(self, component): 26 language_consistency.delay( 27 component.project_id, 28 list( 29 Language.objects.filter( 30 Q(translation__component=component) | Q(component=component) 31 ).values_list("pk", flat=True) 32 ), 33 ) 34 35 def post_add(self, translation): 36 language_consistency.delay( 37 translation.component.project_id, 38 [translation.language_id], 39 ) 40 [end of weblate/addons/consistency.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/weblate/addons/consistency.py b/weblate/addons/consistency.py --- a/weblate/addons/consistency.py +++ b/weblate/addons/consistency.py @@ -2,13 +2,11 @@ # # SPDX-License-Identifier: GPL-3.0-or-later -from django.db.models import Q from django.utils.translation import gettext_lazy from weblate.addons.base import BaseAddon from weblate.addons.events import EVENT_DAILY, EVENT_POST_ADD from weblate.addons.tasks import language_consistency -from weblate.lang.models import Language class LangaugeConsistencyAddon(BaseAddon): @@ -25,11 +23,7 @@ def daily(self, component): language_consistency.delay( component.project_id, - list( - Language.objects.filter( - Q(translation__component=component) | Q(component=component) - ).values_list("pk", flat=True) - ), + [language.id for language in component.project.languages], ) def post_add(self, translation):
{"golden_diff": "diff --git a/weblate/addons/consistency.py b/weblate/addons/consistency.py\n--- a/weblate/addons/consistency.py\n+++ b/weblate/addons/consistency.py\n@@ -2,13 +2,11 @@\n #\n # SPDX-License-Identifier: GPL-3.0-or-later\n \n-from django.db.models import Q\n from django.utils.translation import gettext_lazy\n \n from weblate.addons.base import BaseAddon\n from weblate.addons.events import EVENT_DAILY, EVENT_POST_ADD\n from weblate.addons.tasks import language_consistency\n-from weblate.lang.models import Language\n \n \n class LangaugeConsistencyAddon(BaseAddon):\n@@ -25,11 +23,7 @@\n def daily(self, component):\n language_consistency.delay(\n component.project_id,\n- list(\n- Language.objects.filter(\n- Q(translation__component=component) | Q(component=component)\n- ).values_list(\"pk\", flat=True)\n- ),\n+ [language.id for language in component.project.languages],\n )\n \n def post_add(self, translation):\n", "issue": "\"Add missing languages\" add-on not working\n### Describe the issue\n\nI have enabled the \"Add missing languages\" add-on on https://hosted.weblate.org/projects/catima/. However, despite waiting over 24 hours as the documentation on https://docs.weblate.org/en/latest/admin/addons.html#addon-weblate-consistency-languages states, it has not put the different components of the same project in sync.\r\n\r\nThis is most noticeable when comparing https://hosted.weblate.org/projects/catima/catima/ with https://hosted.weblate.org/projects/catima/android-debug/\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar issues in this repository.\n\n### Steps to reproduce the behavior\n\n1. Enable the \"Add missing languages\" add-on in a project with multiple components where one component has less languages than the other\r\n2. 
Wait at least 24 hours as the add-on states\n\n### Expected behavior\n\nAll components have the same languages, missing languages on components get created\n\n### Screenshots\n\nAndroid component:\r\n![image](https://github.com/WeblateOrg/weblate/assets/1885159/bae7c490-381f-4e13-a025-97c0a1494c01)\r\n\r\nAndroid (Debug) component:\r\n![image](https://github.com/WeblateOrg/weblate/assets/1885159/03230fe9-c0a4-44e6-9dbe-ced43868fb69)\r\n\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nweblate.org service\n\n### Weblate versions\n\n_No response_\n\n### Weblate deploy checks\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom django.db.models import Q\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.addons.base import BaseAddon\nfrom weblate.addons.events import EVENT_DAILY, EVENT_POST_ADD\nfrom weblate.addons.tasks import language_consistency\nfrom weblate.lang.models import Language\n\n\nclass LangaugeConsistencyAddon(BaseAddon):\n events = (EVENT_DAILY, EVENT_POST_ADD)\n name = \"weblate.consistency.languages\"\n verbose = gettext_lazy(\"Add missing languages\")\n description = gettext_lazy(\n \"Ensures a consistent set of languages is used for all components \"\n \"within a project.\"\n )\n icon = \"language.svg\"\n project_scope = True\n\n def daily(self, component):\n language_consistency.delay(\n component.project_id,\n list(\n Language.objects.filter(\n Q(translation__component=component) | Q(component=component)\n ).values_list(\"pk\", flat=True)\n ),\n )\n\n def post_add(self, translation):\n language_consistency.delay(\n translation.component.project_id,\n [translation.language_id],\n )\n", "path": "weblate/addons/consistency.py"}]}
1,296
243
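The behavioural change in the diff above is easiest to see without the Django/Celery machinery. In the sketch below, `Language` and `Project` are hypothetical stand-ins for Weblate's models; only the list comprehension mirrors the real fix.

```python
# Stand-in sketch: after the fix, daily() passes every language already used
# in the project, rather than a per-component database query.
from dataclasses import dataclass, field


@dataclass
class Language:
    id: int
    code: str


@dataclass
class Project:
    languages: list = field(default_factory=list)


def daily_language_ids(project):
    # Mirrors the replacement line in the golden diff.
    return [language.id for language in project.languages]


project = Project(languages=[Language(1, "cs"), Language(2, "de")])
assert daily_language_ids(project) == [1, 2]
```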
gh_patches_debug_3297
rasdani/github-patches
git_diff
liberapay__liberapay.com-1484
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Requests access to GitHub private repos? Hi. I'm a brand-new user. I have a question I didn't see in the FAQ or when I searched issues here. I was going to connect my GitHub account and saw this: > Liberapay by liberapay > wants to access your greghendershott account > > Organizations and teams > Read-only access > > This application will be able to read your organization and team membership and private Projects. I almost clicked OK, but noticed "**private** Projects". I stopped. I don't want to do that. Is this as-intended? </issue> <code> [start of liberapay/elsewhere/github.py] 1 from liberapay.elsewhere._base import PlatformOAuth2 2 from liberapay.elsewhere._exceptions import CantReadMembership 3 from liberapay.elsewhere._extractors import key, drop_keys 4 from liberapay.elsewhere._paginators import header_links_paginator 5 6 7 class GitHub(PlatformOAuth2): 8 9 # Platform attributes 10 name = 'github' 11 display_name = 'GitHub' 12 fontawesome_name = name 13 account_url = 'https://github.com/{user_name}' 14 repo_url = 'https://github.com/{slug}' 15 has_teams = True 16 17 # Auth attributes 18 auth_url = 'https://github.com/login/oauth/authorize' 19 access_token_url = 'https://github.com/login/oauth/access_token' 20 oauth_email_scope = 'user:email' 21 oauth_default_scope = ['read:org'] 22 23 # API attributes 24 api_format = 'json' 25 api_paginator = header_links_paginator() 26 api_url = 'https://api.github.com' 27 api_app_auth_params = 'client_id={api_key}&client_secret={api_secret}' 28 api_user_info_path = '/user/{user_id}' 29 api_user_name_info_path = '/users/{user_name}' 30 api_user_self_info_path = '/user' 31 api_team_members_path = '/orgs/{user_name}/public_members' 32 api_friends_path = '/users/{user_name}/following' 33 api_repos_path = '/users/{user_name}/repos?type=owner&sort=updated&per_page=100' 34 api_starred_path = '/users/{user_name}/starred' 35 ratelimit_headers_prefix = 'x-ratelimit-' 36 37 # User info extractors 38 x_user_id = key('id') 39 x_user_name = key('login') 40 x_display_name = key('name') 41 x_email = key('email') 42 x_gravatar_id = key('gravatar_id') 43 x_avatar_url = key('avatar_url') 44 x_is_team = key('type', clean=lambda t: t.lower() == 'organization') 45 x_description = key('bio') 46 x_extra_info_drop = drop_keys(lambda k: k.endswith('_url')) 47 48 # Repo info extractors 49 x_repo_id = key('id') 50 x_repo_name = key('name') 51 x_repo_slug = key('full_name') 52 x_repo_description = key('description') 53 x_repo_last_update = key('pushed_at') 54 x_repo_is_fork = key('fork') 55 x_repo_stars_count = key('stargazers_count') 56 x_repo_owner_id = key('owner', clean=lambda d: d['id']) 57 x_repo_extra_info_drop = drop_keys(lambda k: k.endswith('_url')) 58 59 def get_CantReadMembership_url(self, **kw): 60 return 'https://github.com/settings/connections/applications/'+self.api_key 61 62 def is_team_member(self, org_name, sess, account): 63 org_name = org_name.lower() 64 65 # Check public membership first 66 response = self.api_get( 67 '', '/orgs/'+org_name+'/public_members/'+account.user_name, 68 sess=sess, error_handler=None 69 ) 70 if response.status_code == 204: 71 return True 72 elif response.status_code != 404: 73 self.api_error_handler(response, True, self.domain) 74 75 # Check private membership 76 response = self.api_get( 77 '', '/user/memberships/orgs/'+org_name, sess=sess, error_handler=None 78 ) 79 if response.status_code == 403: 80 raise 
CantReadMembership 81 elif response.status_code >= 400: 82 self.api_error_handler(response, True, self.domain) 83 membership = self.api_parser(response) 84 if membership['state'] == 'active': 85 return True 86 87 # Try the endpoint we were using before 88 user_orgs = self.api_parser(self.api_get('', '/user/orgs', sess=sess)) 89 return any(org.get('login') == org_name for org in user_orgs) 90 [end of liberapay/elsewhere/github.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/liberapay/elsewhere/github.py b/liberapay/elsewhere/github.py --- a/liberapay/elsewhere/github.py +++ b/liberapay/elsewhere/github.py @@ -18,7 +18,6 @@ auth_url = 'https://github.com/login/oauth/authorize' access_token_url = 'https://github.com/login/oauth/access_token' oauth_email_scope = 'user:email' - oauth_default_scope = ['read:org'] # API attributes api_format = 'json'
{"golden_diff": "diff --git a/liberapay/elsewhere/github.py b/liberapay/elsewhere/github.py\n--- a/liberapay/elsewhere/github.py\n+++ b/liberapay/elsewhere/github.py\n@@ -18,7 +18,6 @@\n auth_url = 'https://github.com/login/oauth/authorize'\n access_token_url = 'https://github.com/login/oauth/access_token'\n oauth_email_scope = 'user:email'\n- oauth_default_scope = ['read:org']\n \n # API attributes\n api_format = 'json'\n", "issue": "Requests access to GitHub private repos?\nHi. I'm a brand-new user. I have a question I didn't see in the FAQ or when I searched issues here.\r\n\r\nI was going to connect my GitHub account and saw this:\r\n\r\n> Liberapay by liberapay\r\n> wants to access your greghendershott account\r\n> \r\n> Organizations and teams\r\n> Read-only access\r\n>\r\n> This application will be able to read your organization and team membership and private Projects.\r\n\r\nI almost clicked OK, but noticed \"**private** Projects\". I stopped. I don't want to do that.\r\n\r\nIs this as-intended?\n", "before_files": [{"content": "from liberapay.elsewhere._base import PlatformOAuth2\nfrom liberapay.elsewhere._exceptions import CantReadMembership\nfrom liberapay.elsewhere._extractors import key, drop_keys\nfrom liberapay.elsewhere._paginators import header_links_paginator\n\n\nclass GitHub(PlatformOAuth2):\n\n # Platform attributes\n name = 'github'\n display_name = 'GitHub'\n fontawesome_name = name\n account_url = 'https://github.com/{user_name}'\n repo_url = 'https://github.com/{slug}'\n has_teams = True\n\n # Auth attributes\n auth_url = 'https://github.com/login/oauth/authorize'\n access_token_url = 'https://github.com/login/oauth/access_token'\n oauth_email_scope = 'user:email'\n oauth_default_scope = ['read:org']\n\n # API attributes\n api_format = 'json'\n api_paginator = header_links_paginator()\n api_url = 'https://api.github.com'\n api_app_auth_params = 'client_id={api_key}&client_secret={api_secret}'\n api_user_info_path = '/user/{user_id}'\n api_user_name_info_path = '/users/{user_name}'\n api_user_self_info_path = '/user'\n api_team_members_path = '/orgs/{user_name}/public_members'\n api_friends_path = '/users/{user_name}/following'\n api_repos_path = '/users/{user_name}/repos?type=owner&sort=updated&per_page=100'\n api_starred_path = '/users/{user_name}/starred'\n ratelimit_headers_prefix = 'x-ratelimit-'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('login')\n x_display_name = key('name')\n x_email = key('email')\n x_gravatar_id = key('gravatar_id')\n x_avatar_url = key('avatar_url')\n x_is_team = key('type', clean=lambda t: t.lower() == 'organization')\n x_description = key('bio')\n x_extra_info_drop = drop_keys(lambda k: k.endswith('_url'))\n\n # Repo info extractors\n x_repo_id = key('id')\n x_repo_name = key('name')\n x_repo_slug = key('full_name')\n x_repo_description = key('description')\n x_repo_last_update = key('pushed_at')\n x_repo_is_fork = key('fork')\n x_repo_stars_count = key('stargazers_count')\n x_repo_owner_id = key('owner', clean=lambda d: d['id'])\n x_repo_extra_info_drop = drop_keys(lambda k: k.endswith('_url'))\n\n def get_CantReadMembership_url(self, **kw):\n return 'https://github.com/settings/connections/applications/'+self.api_key\n\n def is_team_member(self, org_name, sess, account):\n org_name = org_name.lower()\n\n # Check public membership first\n response = self.api_get(\n '', '/orgs/'+org_name+'/public_members/'+account.user_name,\n sess=sess, error_handler=None\n )\n if response.status_code == 204:\n return 
True\n elif response.status_code != 404:\n self.api_error_handler(response, True, self.domain)\n\n # Check private membership\n response = self.api_get(\n '', '/user/memberships/orgs/'+org_name, sess=sess, error_handler=None\n )\n if response.status_code == 403:\n raise CantReadMembership\n elif response.status_code >= 400:\n self.api_error_handler(response, True, self.domain)\n membership = self.api_parser(response)\n if membership['state'] == 'active':\n return True\n\n # Try the endpoint we were using before\n user_orgs = self.api_parser(self.api_get('', '/user/orgs', sess=sess))\n return any(org.get('login') == org_name for org in user_orgs)\n", "path": "liberapay/elsewhere/github.py"}]}
1,704
124
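The fix itself is a one-line scope removal. The toy helper below is purely illustrative (it is not Liberapay's real scope-assembly code) and only shows what the authorization request stops asking for once `read:org` is dropped.

```python
# Hypothetical illustration of the OAuth scope string before and after the fix.
def build_scope(email_scope, default_scope=None):
    scopes = list(default_scope or [])
    scopes.append(email_scope)
    return ' '.join(scopes)


# Before: organization access (including private membership) is requested.
assert build_scope('user:email', ['read:org']) == 'read:org user:email'
# After: only the e-mail scope is requested at authorization time; private
# org membership is probed later per-org and may raise CantReadMembership.
assert build_scope('user:email') == 'user:email'
```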
gh_patches_debug_7347
rasdani/github-patches
git_diff
wagtail__wagtail-10645
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 🎛️ Migrate header search Stimulus Controller > ℹ️ **Part of the [Stimulus 🎛️ RFC 78](https://github.com/wagtail/rfcs/pull/78)** ### Is your proposal related to a problem? We have a core.js implementations of JavaScript code that, when a matching search input receives changes, will trigger an async request to the relevant search results listing. Once the endpoint returns with HTML, it will be patched into the results container HTML element. ### Describe the solution you'd like * Create a stimulus controller `w-search` that will replace the existing ad-hoc JS implementation * The behaviour should be exactly the same as current state but using Stimulus data attributes for the behaviour & classes declaration (note: likely we will drop the `autofocus` and may not re-introduce the `slide` jQuery animation) * Controller should be written in TypeScript * Ensure that the existing unit tests are created to reflect this new behaviour * We will need to document an upgrade consideration that the previous `window.headerSearch` approach will not work in a future release. * We may want to introduce a console warning once all the Wagtail usage of `window.headerSearch` has been removed * Nice to have - a Storybook story for this component ### Additional context * Implementation https://github.com/wagtail/wagtail/blob/main/client/src/entrypoints/admin/core.js#L251-L306 * There is a very similar (almost cut & paste) of logic that is used in the chooser modals for searching here https://github.com/wagtail/wagtail/blob/main/client/src/includes/chooserModal.js#L109-L176 (the Stimulus will likely replace this but may be out of scope for this issue ### Potential approach #### Support `input` only usage (with using `window.headerSearch` config) ```JS window.headerSearch = { url: "{% url 'wagtailimages:listing_results' %}", targetOutput: "#image-results" } ``` ```html <div class="w-field__input" data-field-input=""> <svg class="icon icon-search w-field__icon" aria-hidden="true"> <use href="#icon-search"></use> </svg> <input type="text" name="q" placeholder="Search images" data-controller="w-search" data-action="change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search" id="id_q" /> </div> ``` #### Support `input` only usage ```html <div class="w-field__input" data-field-input=""> <svg class="icon icon-search w-field__icon" aria-hidden="true"> <use href="#icon-search"></use> </svg> <input type="text" name="q" placeholder="Search images" data-controller="w-search" data-action="change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search" id="id_q" data-w-search-results-value="#image-results" data-w-search-url-value="/admin/images/results/" /> </div> ``` #### Support controlled form with search input as a target ```html <form class="col search-form" action="/admin/images/" method="get" novalidate="" role="search" data-controller="w-search" data-w-search-url-value="/admin/images/results/" > <div class="w-field__wrapper w-mb-0" data-field-wrapper=""> <label class="w-field__label w-sr-only" for="id_q" id="id_q-label">Search term</label> <div class="w-field w-field--char_field w-field--text_input"> <div class="w-field__input" data-field-input=""> <svg class="icon icon-search w-field__icon" aria-hidden="true"><use href="#icon-search"></use></svg> <input type="text" name="q" placeholder="Search images" data-w-search-target="input" 
data-action="change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search" id="id_q" /> </div> </div> </div> <div class="visuallyhidden"><input disabled="" type="submit" aria-hidden="true" /></div> </form> ``` </issue> <code> [start of wagtail/admin/forms/search.py] 1 from django import forms 2 from django.utils.translation import gettext as _ 3 from django.utils.translation import gettext_lazy 4 5 6 class SearchForm(forms.Form): 7 def __init__(self, *args, **kwargs): 8 placeholder = kwargs.pop("placeholder", _("Search")) 9 super().__init__(*args, **kwargs) 10 self.fields["q"].widget.attrs = {"placeholder": placeholder} 11 12 q = forms.CharField( 13 label=gettext_lazy("Search term"), 14 widget=forms.TextInput(), 15 required=False, 16 ) 17 [end of wagtail/admin/forms/search.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/admin/forms/search.py b/wagtail/admin/forms/search.py --- a/wagtail/admin/forms/search.py +++ b/wagtail/admin/forms/search.py @@ -7,7 +7,10 @@ def __init__(self, *args, **kwargs): placeholder = kwargs.pop("placeholder", _("Search")) super().__init__(*args, **kwargs) - self.fields["q"].widget.attrs = {"placeholder": placeholder} + self.fields["q"].widget.attrs = { + "placeholder": placeholder, + "data-w-swap-target": "input", + } q = forms.CharField( label=gettext_lazy("Search term"),
{"golden_diff": "diff --git a/wagtail/admin/forms/search.py b/wagtail/admin/forms/search.py\n--- a/wagtail/admin/forms/search.py\n+++ b/wagtail/admin/forms/search.py\n@@ -7,7 +7,10 @@\n def __init__(self, *args, **kwargs):\n placeholder = kwargs.pop(\"placeholder\", _(\"Search\"))\n super().__init__(*args, **kwargs)\n- self.fields[\"q\"].widget.attrs = {\"placeholder\": placeholder}\n+ self.fields[\"q\"].widget.attrs = {\n+ \"placeholder\": placeholder,\n+ \"data-w-swap-target\": \"input\",\n+ }\n \n q = forms.CharField(\n label=gettext_lazy(\"Search term\"),\n", "issue": "\ud83c\udf9b\ufe0f Migrate header search Stimulus Controller\n> \u2139\ufe0f **Part of the [Stimulus \ud83c\udf9b\ufe0f RFC 78](https://github.com/wagtail/rfcs/pull/78)**\r\n\r\n### Is your proposal related to a problem?\r\n\r\nWe have a core.js implementations of JavaScript code that, when a matching search input receives changes, will trigger an async request to the relevant search results listing. Once the endpoint returns with HTML, it will be patched into the results container HTML element.\r\n\r\n### Describe the solution you'd like\r\n\r\n* Create a stimulus controller `w-search` that will replace the existing ad-hoc JS implementation\r\n* The behaviour should be exactly the same as current state but using Stimulus data attributes for the behaviour & classes declaration (note: likely we will drop the `autofocus` and may not re-introduce the `slide` jQuery animation)\r\n* Controller should be written in TypeScript\r\n* Ensure that the existing unit tests are created to reflect this new behaviour\r\n* We will need to document an upgrade consideration that the previous `window.headerSearch` approach will not work in a future release.\r\n* We may want to introduce a console warning once all the Wagtail usage of `window.headerSearch` has been removed\r\n* Nice to have - a Storybook story for this component\r\n\r\n### Additional context\r\n\r\n* Implementation https://github.com/wagtail/wagtail/blob/main/client/src/entrypoints/admin/core.js#L251-L306\r\n* There is a very similar (almost cut & paste) of logic that is used in the chooser modals for searching here https://github.com/wagtail/wagtail/blob/main/client/src/includes/chooserModal.js#L109-L176 (the Stimulus will likely replace this but may be out of scope for this issue\r\n\r\n### Potential approach\r\n\r\n#### Support `input` only usage (with using `window.headerSearch` config)\r\n\r\n```JS\r\nwindow.headerSearch = {\r\n url: \"{% url 'wagtailimages:listing_results' %}\",\r\n targetOutput: \"#image-results\"\r\n}\r\n```\r\n\r\n```html\r\n<div class=\"w-field__input\" data-field-input=\"\">\r\n <svg class=\"icon icon-search w-field__icon\" aria-hidden=\"true\">\r\n <use href=\"#icon-search\"></use>\r\n </svg>\r\n <input\r\n type=\"text\"\r\n name=\"q\"\r\n placeholder=\"Search images\"\r\n data-controller=\"w-search\"\r\n data-action=\"change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search\"\r\n id=\"id_q\"\r\n />\r\n</div>\r\n```\r\n\r\n#### Support `input` only usage\r\n\r\n```html\r\n<div class=\"w-field__input\" data-field-input=\"\">\r\n <svg class=\"icon icon-search w-field__icon\" aria-hidden=\"true\">\r\n <use href=\"#icon-search\"></use>\r\n </svg>\r\n <input\r\n type=\"text\"\r\n name=\"q\"\r\n placeholder=\"Search images\"\r\n data-controller=\"w-search\"\r\n data-action=\"change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search\"\r\n id=\"id_q\"\r\n 
data-w-search-results-value=\"#image-results\"\r\n data-w-search-url-value=\"/admin/images/results/\"\r\n />\r\n</div>\r\n```\r\n\r\n#### Support controlled form with search input as a target\r\n\r\n```html\r\n<form\r\n class=\"col search-form\"\r\n action=\"/admin/images/\"\r\n method=\"get\"\r\n novalidate=\"\"\r\n role=\"search\"\r\n data-controller=\"w-search\"\r\n data-w-search-url-value=\"/admin/images/results/\"\r\n>\r\n <div class=\"w-field__wrapper w-mb-0\" data-field-wrapper=\"\">\r\n <label class=\"w-field__label w-sr-only\" for=\"id_q\" id=\"id_q-label\">Search term</label>\r\n <div class=\"w-field w-field--char_field w-field--text_input\">\r\n <div class=\"w-field__input\" data-field-input=\"\">\r\n <svg class=\"icon icon-search w-field__icon\" aria-hidden=\"true\"><use href=\"#icon-search\"></use></svg>\r\n <input\r\n type=\"text\"\r\n name=\"q\"\r\n placeholder=\"Search images\"\r\n data-w-search-target=\"input\"\r\n data-action=\"change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search\"\r\n id=\"id_q\"\r\n />\r\n </div>\r\n </div>\r\n </div>\r\n <div class=\"visuallyhidden\"><input disabled=\"\" type=\"submit\" aria-hidden=\"true\" /></div>\r\n</form>\r\n```\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\n\n\nclass SearchForm(forms.Form):\n def __init__(self, *args, **kwargs):\n placeholder = kwargs.pop(\"placeholder\", _(\"Search\"))\n super().__init__(*args, **kwargs)\n self.fields[\"q\"].widget.attrs = {\"placeholder\": placeholder}\n\n q = forms.CharField(\n label=gettext_lazy(\"Search term\"),\n widget=forms.TextInput(),\n required=False,\n )\n", "path": "wagtail/admin/forms/search.py"}]}
1,649
150
gh_patches_debug_23092
rasdani/github-patches
git_diff
freedomofpress__securedrop-6430
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tor2web warning page still using outdated pre-SI-redesign resources ## Description In the SI redesign, we overlooked the Tor2web page which tries to render an old location for the icon and does not show an icon for the flash warning message. ## Steps to Reproduce Visit https://demo-source.securedrop.org/tor2web-warning ## Expected Behavior ![image](https://user-images.githubusercontent.com/201369/166803350-c246a3f4-b648-4787-9e41-d4ba4b39385f.png) ## Actual Behavior ![image](https://user-images.githubusercontent.com/201369/166803550-cb4e380f-7da9-402b-9668-e4605af3f1e5.png) "Tor Browser" link in tor2web warning is broken ## Description The "Tor Browser" link in the tor2web warning is broken because it does not specify a protocol, so the browser treats it as a relative link. ## Steps to Reproduce * Visit `/tor2web-warning` in the SI * Hover over or click on the "Tor Browser" link, it should send you to a non-existent `/www.torproject.org/projects/torbrowser.html` on the SI's domain. ## Expected Behavior * Link takes you to Tor Project website. ## Comments Fix should be as simple as adding "https://" in front. </issue> <code> [start of securedrop/source_app/info.py] 1 # -*- coding: utf-8 -*- 2 import flask 3 from flask import Blueprint, render_template, send_file, redirect, url_for, flash 4 from flask_babel import gettext 5 import werkzeug 6 7 from io import BytesIO # noqa 8 9 from encryption import EncryptionManager 10 from sdconfig import SDConfig 11 from source_app.utils import get_sourcev3_url 12 13 14 def make_blueprint(config: SDConfig) -> Blueprint: 15 view = Blueprint('info', __name__) 16 17 @view.route('/tor2web-warning') 18 def tor2web_warning() -> flask.Response: 19 flash(gettext("Your connection is not anonymous right now!"), "error") 20 return flask.Response( 21 render_template("tor2web-warning.html", source_url=get_sourcev3_url()), 22 403) 23 24 @view.route('/use-tor') 25 def recommend_tor_browser() -> str: 26 return render_template("use-tor-browser.html") 27 28 @view.route('/public-key') 29 def download_public_key() -> flask.Response: 30 journalist_pubkey = EncryptionManager.get_default().get_journalist_public_key() 31 data = BytesIO(journalist_pubkey.encode('utf-8')) 32 return send_file(data, 33 mimetype="application/pgp-keys", 34 attachment_filename=config.JOURNALIST_KEY + ".asc", 35 as_attachment=True) 36 37 @view.route('/journalist-key') 38 def download_journalist_key() -> werkzeug.wrappers.Response: 39 return redirect(url_for('.download_public_key'), code=301) 40 41 @view.route('/why-public-key') 42 def why_download_public_key() -> str: 43 return render_template("why-public-key.html") 44 45 return view 46 [end of securedrop/source_app/info.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/securedrop/source_app/info.py b/securedrop/source_app/info.py --- a/securedrop/source_app/info.py +++ b/securedrop/source_app/info.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- import flask -from flask import Blueprint, render_template, send_file, redirect, url_for, flash +from flask import Blueprint, render_template, send_file, redirect, url_for from flask_babel import gettext import werkzeug @@ -8,7 +8,7 @@ from encryption import EncryptionManager from sdconfig import SDConfig -from source_app.utils import get_sourcev3_url +from source_app.utils import get_sourcev3_url, flash_msg def make_blueprint(config: SDConfig) -> Blueprint: @@ -16,7 +16,7 @@ @view.route('/tor2web-warning') def tor2web_warning() -> flask.Response: - flash(gettext("Your connection is not anonymous right now!"), "error") + flash_msg("error", None, gettext("Your connection is not anonymous right now!")) return flask.Response( render_template("tor2web-warning.html", source_url=get_sourcev3_url()), 403)
{"golden_diff": "diff --git a/securedrop/source_app/info.py b/securedrop/source_app/info.py\n--- a/securedrop/source_app/info.py\n+++ b/securedrop/source_app/info.py\n@@ -1,6 +1,6 @@\n # -*- coding: utf-8 -*-\n import flask\n-from flask import Blueprint, render_template, send_file, redirect, url_for, flash\n+from flask import Blueprint, render_template, send_file, redirect, url_for\n from flask_babel import gettext\n import werkzeug\n \n@@ -8,7 +8,7 @@\n \n from encryption import EncryptionManager\n from sdconfig import SDConfig\n-from source_app.utils import get_sourcev3_url\n+from source_app.utils import get_sourcev3_url, flash_msg\n \n \n def make_blueprint(config: SDConfig) -> Blueprint:\n@@ -16,7 +16,7 @@\n \n @view.route('/tor2web-warning')\n def tor2web_warning() -> flask.Response:\n- flash(gettext(\"Your connection is not anonymous right now!\"), \"error\")\n+ flash_msg(\"error\", None, gettext(\"Your connection is not anonymous right now!\"))\n return flask.Response(\n render_template(\"tor2web-warning.html\", source_url=get_sourcev3_url()),\n 403)\n", "issue": "Tor2web warning page still using outdated pre-SI-redesign resources\n## Description\r\n\r\nIn the SI redesign, we overlooked the Tor2web page which tries to render an old location for the icon and does not show an icon for the flash warning message.\r\n\r\n## Steps to Reproduce\r\n\r\nVisit https://demo-source.securedrop.org/tor2web-warning\r\n\r\n## Expected Behavior\r\n\r\n![image](https://user-images.githubusercontent.com/201369/166803350-c246a3f4-b648-4787-9e41-d4ba4b39385f.png)\r\n\r\n## Actual Behavior\r\n\r\n![image](https://user-images.githubusercontent.com/201369/166803550-cb4e380f-7da9-402b-9668-e4605af3f1e5.png)\r\n\n\"Tor Browser\" link in tor2web warning is broken\n## Description\r\n\r\nThe \"Tor Browser\" link in the tor2web warning is broken because it does not specify a protocol, so the browser treats it as a relative link.\r\n\r\n## Steps to Reproduce\r\n\r\n* Visit `/tor2web-warning` in the SI\r\n* Hover over or click on the \"Tor Browser\" link, it should send you to a non-existent `/www.torproject.org/projects/torbrowser.html` on the SI's domain.\r\n\r\n## Expected Behavior\r\n\r\n* Link takes you to Tor Project website.\r\n\r\n## Comments\r\n\r\nFix should be as simple as adding \"https://\" in front.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport flask\nfrom flask import Blueprint, render_template, send_file, redirect, url_for, flash\nfrom flask_babel import gettext\nimport werkzeug\n\nfrom io import BytesIO # noqa\n\nfrom encryption import EncryptionManager\nfrom sdconfig import SDConfig\nfrom source_app.utils import get_sourcev3_url\n\n\ndef make_blueprint(config: SDConfig) -> Blueprint:\n view = Blueprint('info', __name__)\n\n @view.route('/tor2web-warning')\n def tor2web_warning() -> flask.Response:\n flash(gettext(\"Your connection is not anonymous right now!\"), \"error\")\n return flask.Response(\n render_template(\"tor2web-warning.html\", source_url=get_sourcev3_url()),\n 403)\n\n @view.route('/use-tor')\n def recommend_tor_browser() -> str:\n return render_template(\"use-tor-browser.html\")\n\n @view.route('/public-key')\n def download_public_key() -> flask.Response:\n journalist_pubkey = EncryptionManager.get_default().get_journalist_public_key()\n data = BytesIO(journalist_pubkey.encode('utf-8'))\n return send_file(data,\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n 
@view.route('/journalist-key')\n def download_journalist_key() -> werkzeug.wrappers.Response:\n return redirect(url_for('.download_public_key'), code=301)\n\n @view.route('/why-public-key')\n def why_download_public_key() -> str:\n return render_template(\"why-public-key.html\")\n\n return view\n", "path": "securedrop/source_app/info.py"}]}
1,315
271
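A stand-in for `flash_msg`, the helper the diff switches to. The real implementation lives in `source_app.utils` and renders an icon alongside the message; the storage and the meaning of the second argument below are assumptions for illustration only, matching the call site in the diff.

```python
# Minimal stand-in: flash_msg records a category so the template can pick the
# right icon; the (category, declarative, *contents) signature matches the
# patched call site but is otherwise an assumption.
_flashes = []


def flash_msg(category, declarative, *msg_contents):
    _flashes.append((category, declarative, " ".join(msg_contents)))


flash_msg("error", None, "Your connection is not anonymous right now!")
assert _flashes[0][0] == "error"
```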
gh_patches_debug_10569
rasdani/github-patches
git_diff
gratipay__gratipay.com-2047
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Username change fails silently
When you change your username, you're querying [`username.json.spt`](https://github.com/gittip/www.gittip.com/blob/0464c57465aed49a95a2c546f0d9987ad5b9b3fa/www/%25username/username.json.spt). If the desired username is invalid, we respond back with a user-friendly message (though the UI is ugly). Unfortunately, this behavior is currently broken because it returns HTML instead of the expected JSON.

[IRC](https://botbot.me/freenode/gittip/msg/9518377/), from working on #1849

Thanks to @thomasboyt for [reporting the problem](https://botbot.me/freenode/gittip/msg/9517625/) :heart:

The Aspen ticket for this is: gittip/aspen-python#279

</issue> <code> [start of gittip/exceptions.py] 1 """ 2 This module contains exceptions shared across application code. 3 """ 4 5 from __future__ import print_function, unicode_literals 6 7 8 9 class UnknownPlatform(Exception): pass 10 11 class ProblemChangingUsername(Exception): 12 def __str__(self): 13 return self.msg.format(self.args[0]) 14 15 class UsernameIsEmpty(ProblemChangingUsername): 16 msg = "You need to provide a username!" 17 18 class UsernameTooLong(ProblemChangingUsername): 19 msg = "The username '{}' is too long." 20 21 # Not passing the potentially unicode characters back because of: 22 # https://github.com/gittip/aspen-python/issues/177 23 class UsernameContainsInvalidCharacters(ProblemChangingUsername): 24 msg = "That username contains invalid characters." 25 26 class UsernameIsRestricted(ProblemChangingUsername): 27 msg = "The username '{}' is restricted." 28 29 class UsernameAlreadyTaken(ProblemChangingUsername): 30 msg = "The username '{}' is already taken." 31 32 class TooGreedy(Exception): pass 33 class NoSelfTipping(Exception): pass 34 class BadAmount(Exception): pass 35 [end of gittip/exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gittip/exceptions.py b/gittip/exceptions.py --- a/gittip/exceptions.py +++ b/gittip/exceptions.py @@ -18,10 +18,8 @@ class UsernameTooLong(ProblemChangingUsername): msg = "The username '{}' is too long." -# Not passing the potentially unicode characters back because of: -# https://github.com/gittip/aspen-python/issues/177 class UsernameContainsInvalidCharacters(ProblemChangingUsername): - msg = "That username contains invalid characters." + msg = "The username '{}' contains invalid characters." class UsernameIsRestricted(ProblemChangingUsername): msg = "The username '{}' is restricted."
{"golden_diff": "diff --git a/gittip/exceptions.py b/gittip/exceptions.py\n--- a/gittip/exceptions.py\n+++ b/gittip/exceptions.py\n@@ -18,10 +18,8 @@\n class UsernameTooLong(ProblemChangingUsername):\n msg = \"The username '{}' is too long.\"\n \n-# Not passing the potentially unicode characters back because of:\n-# https://github.com/gittip/aspen-python/issues/177\n class UsernameContainsInvalidCharacters(ProblemChangingUsername):\n- msg = \"That username contains invalid characters.\"\n+ msg = \"The username '{}' contains invalid characters.\"\n \n class UsernameIsRestricted(ProblemChangingUsername):\n msg = \"The username '{}' is restricted.\"\n", "issue": "Username change fails silently\nWhen you change your username, you're querying [`username.json.spt`](https://github.com/gittip/www.gittip.com/blob/0464c57465aed49a95a2c546f0d9987ad5b9b3fa/www/%25username/username.json.spt). If the desired username is invalid, we respond back with a user-friendly message (though the UI is ugly). Unfortunately, this behavior is currently broken because it returns HTML instead of the expected JSON.\n\n[IRC](https://botbot.me/freenode/gittip/msg/9518377/), from working on #1849\n\nThanks to @thomasboyt for [reporting the problem](https://botbot.me/freenode/gittip/msg/9517625/) :heart:\n\nThe Aspen ticket for this is: gittip/aspen-python#279\n\nUsername change fails silently\nWhen you change your username, you're querying [`username.json.spt`](https://github.com/gittip/www.gittip.com/blob/0464c57465aed49a95a2c546f0d9987ad5b9b3fa/www/%25username/username.json.spt). If the desired username is invalid, we respond back with a user-friendly message (though the UI is ugly). Unfortunately, this behavior is currently broken because it returns HTML instead of the expected JSON.\n\n[IRC](https://botbot.me/freenode/gittip/msg/9518377/), from working on #1849\n\nThanks to @thomasboyt for [reporting the problem](https://botbot.me/freenode/gittip/msg/9517625/) :heart:\n\nThe Aspen ticket for this is: gittip/aspen-python#279\n\n", "before_files": [{"content": "\"\"\"\nThis module contains exceptions shared across application code.\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\n\n\nclass UnknownPlatform(Exception): pass\n\nclass ProblemChangingUsername(Exception):\n def __str__(self):\n return self.msg.format(self.args[0])\n\nclass UsernameIsEmpty(ProblemChangingUsername):\n msg = \"You need to provide a username!\"\n\nclass UsernameTooLong(ProblemChangingUsername):\n msg = \"The username '{}' is too long.\"\n\n# Not passing the potentially unicode characters back because of:\n# https://github.com/gittip/aspen-python/issues/177\nclass UsernameContainsInvalidCharacters(ProblemChangingUsername):\n msg = \"That username contains invalid characters.\"\n\nclass UsernameIsRestricted(ProblemChangingUsername):\n msg = \"The username '{}' is restricted.\"\n\nclass UsernameAlreadyTaken(ProblemChangingUsername):\n msg = \"The username '{}' is already taken.\"\n\nclass TooGreedy(Exception): pass\nclass NoSelfTipping(Exception): pass\nclass BadAmount(Exception): pass\n", "path": "gittip/exceptions.py"}]}
1,232
154
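The patched exception hierarchy can be exercised standalone. The snippet below copies the two relevant classes to show how the offending username now round-trips into the JSON-friendly message instead of the old generic wording.

```python
# Self-contained check of the patched message formatting.
class ProblemChangingUsername(Exception):
    def __str__(self):
        return self.msg.format(self.args[0])


class UsernameContainsInvalidCharacters(ProblemChangingUsername):
    msg = "The username '{}' contains invalid characters."


err = UsernameContainsInvalidCharacters('bad/name')
assert str(err) == "The username 'bad/name' contains invalid characters."
```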
gh_patches_debug_47848
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-404
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Rate stars don't work You should be able to click to give a star rating to a book on the book page, it doesn't do anything. </issue> <code> [start of bookwyrm/activitypub/note.py] 1 ''' note serializer and children thereof ''' 2 from dataclasses import dataclass, field 3 from typing import Dict, List 4 5 from .base_activity import ActivityObject, Link 6 from .image import Image 7 8 @dataclass(init=False) 9 class Tombstone(ActivityObject): 10 ''' the placeholder for a deleted status ''' 11 published: str 12 deleted: str 13 type: str = 'Tombstone' 14 15 16 @dataclass(init=False) 17 class Note(ActivityObject): 18 ''' Note activity ''' 19 published: str 20 attributedTo: str 21 content: str 22 to: List[str] = field(default_factory=lambda: []) 23 cc: List[str] = field(default_factory=lambda: []) 24 replies: Dict = field(default_factory=lambda: {}) 25 inReplyTo: str = '' 26 summary: str = '' 27 tag: List[Link] = field(default_factory=lambda: []) 28 attachment: List[Image] = field(default_factory=lambda: []) 29 sensitive: bool = False 30 type: str = 'Note' 31 32 33 @dataclass(init=False) 34 class Article(Note): 35 ''' what's an article except a note with more fields ''' 36 name: str 37 type: str = 'Article' 38 39 40 @dataclass(init=False) 41 class GeneratedNote(Note): 42 ''' just a re-typed note ''' 43 type: str = 'GeneratedNote' 44 45 46 @dataclass(init=False) 47 class Comment(Note): 48 ''' like a note but with a book ''' 49 inReplyToBook: str 50 type: str = 'Comment' 51 52 53 @dataclass(init=False) 54 class Review(Comment): 55 ''' a full book review ''' 56 name: str 57 rating: int = None 58 type: str = 'Review' 59 60 61 @dataclass(init=False) 62 class Quotation(Comment): 63 ''' a quote and commentary on a book ''' 64 quote: str 65 type: str = 'Quotation' 66 [end of bookwyrm/activitypub/note.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bookwyrm/activitypub/note.py b/bookwyrm/activitypub/note.py --- a/bookwyrm/activitypub/note.py +++ b/bookwyrm/activitypub/note.py @@ -53,7 +53,7 @@ @dataclass(init=False) class Review(Comment): ''' a full book review ''' - name: str + name: str = None rating: int = None type: str = 'Review'
{"golden_diff": "diff --git a/bookwyrm/activitypub/note.py b/bookwyrm/activitypub/note.py\n--- a/bookwyrm/activitypub/note.py\n+++ b/bookwyrm/activitypub/note.py\n@@ -53,7 +53,7 @@\n @dataclass(init=False)\n class Review(Comment):\n ''' a full book review '''\n- name: str\n+ name: str = None\n rating: int = None\n type: str = 'Review'\n", "issue": "Rate stars don't work\nYou should be able to click to give a star rating to a book on the book page, it doesn't do anything.\n", "before_files": [{"content": "''' note serializer and children thereof '''\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List\n\nfrom .base_activity import ActivityObject, Link\nfrom .image import Image\n\n@dataclass(init=False)\nclass Tombstone(ActivityObject):\n ''' the placeholder for a deleted status '''\n published: str\n deleted: str\n type: str = 'Tombstone'\n\n\n@dataclass(init=False)\nclass Note(ActivityObject):\n ''' Note activity '''\n published: str\n attributedTo: str\n content: str\n to: List[str] = field(default_factory=lambda: [])\n cc: List[str] = field(default_factory=lambda: [])\n replies: Dict = field(default_factory=lambda: {})\n inReplyTo: str = ''\n summary: str = ''\n tag: List[Link] = field(default_factory=lambda: [])\n attachment: List[Image] = field(default_factory=lambda: [])\n sensitive: bool = False\n type: str = 'Note'\n\n\n@dataclass(init=False)\nclass Article(Note):\n ''' what's an article except a note with more fields '''\n name: str\n type: str = 'Article'\n\n\n@dataclass(init=False)\nclass GeneratedNote(Note):\n ''' just a re-typed note '''\n type: str = 'GeneratedNote'\n\n\n@dataclass(init=False)\nclass Comment(Note):\n ''' like a note but with a book '''\n inReplyToBook: str\n type: str = 'Comment'\n\n\n@dataclass(init=False)\nclass Review(Comment):\n ''' a full book review '''\n name: str\n rating: int = None\n type: str = 'Review'\n\n\n@dataclass(init=False)\nclass Quotation(Comment):\n ''' a quote and commentary on a book '''\n quote: str\n type: str = 'Quotation'\n", "path": "bookwyrm/activitypub/note.py"}]}
1,091
104
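Why the default matters: a rating-only review posted from the book page has no title, so `name` needs a default for the activity object to be constructed at all. The sketch below is simplified — the real class uses `@dataclass(init=False)` plus `ActivityObject` plumbing — but the field defaults are the point.

```python
# Simplified sketch: with a default, a bare star rating deserializes even
# though no review title was supplied.
from dataclasses import dataclass


@dataclass
class Review:
    rating: int = None
    name: str = None
    type: str = 'Review'


assert Review(rating=4).name is None
```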
gh_patches_debug_18295
rasdani/github-patches
git_diff
avocado-framework__avocado-5562
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Handle "could not import module" errors gracefully **Describe the bug** Avocado does not handle "could not import module" errors very gracefully, with error messages that are quite cryptic. **Steps to reproduce** Write a valid `avocado-instrumented` test, but with an invalid import. Example: ```python from avocado import Test import foo class PassTest(Test): """ Example test that passes. :avocado: tags=fast """ def test(self): """ A test simply doesn't have to fail in order to pass """ ``` And run it: ``` $ avocado run examples/tests/passtest.py JOB ID : 3fee9803715e414a16c3dcf1ddb9ff2f6dc6c0bd JOB LOG : /home/cleber/avocado/job-results/job-2022-08-11T10.24-3fee980/job.log (1/1) examples/tests/passtest.py:PassTest.test: STARTED (1/1) examples/tests/passtest.py:PassTest.test: ERROR: Test.__init__() got an unexpected keyword argument 'run.results_dir' (0.01 s) RESULTS : PASS 0 | ERROR 1 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0 JOB HTML : /home/cleber/avocado/job-results/job-2022-08-11T10.24-3fee980/results.html JOB TIME : 1.47 s ``` **Expected behavior** Instead of "unexpected argument..." a more clear error message such as: "failed to import the file containing the test" or something similar. **Current behavior** From original reporter @jnsnow: ``` (08/27) tests/protocol.py:Connect.testBadUNIX: ERROR: Test.__init__() got an unexpected keyword argument 'run.results_dir' (0.01 s) ``` **System information (please complete the following information):** - OS: ```LSB Version: :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch Distributor ID: Fedora Description: Fedora release 36 (Thirty Six) Release: 36 Codename: ThirtySix``` - Avocado version: 5a0c5b2348da450397287a0954e4c335c0d590a9 - Avocado installation method: git </issue> <code> [start of avocado/core/utils/loader.py] 1 import importlib 2 import inspect 3 import os 4 import sys 5 6 from avocado.core import test 7 from avocado.utils import stacktrace 8 9 10 class TestError(test.Test): 11 """ 12 Generic test error. 13 """ 14 15 def __init__(self, *args, **kwargs): 16 exception = kwargs.pop("exception") 17 test.Test.__init__(self, *args, **kwargs) 18 self.exception = exception 19 20 def test(self): 21 self.error(self.exception) 22 23 24 def load_test(test_factory): 25 """ 26 Load test from the test factory. 27 28 :param test_factory: a pair of test class and parameters. 29 :type test_factory: tuple 30 :return: an instance of :class:`avocado.core.test.Test`. 31 """ 32 test_class, test_parameters = test_factory 33 if "modulePath" in test_parameters: 34 test_path = test_parameters.pop("modulePath") 35 else: 36 test_path = None 37 if isinstance(test_class, str): 38 module_name = os.path.basename(test_path).split(".")[0] 39 test_module_dir = os.path.abspath(os.path.dirname(test_path)) 40 # Tests with local dir imports need this 41 try: 42 sys.path.insert(0, test_module_dir) 43 test_module = importlib.import_module(module_name) 44 except: # pylint: disable=W0702 45 # On load_module exception we fake the test class and pass 46 # the exc_info as parameter to be logged. 
47 test_parameters["methodName"] = "test" 48 exception = stacktrace.prepare_exc_info(sys.exc_info()) 49 test_parameters["exception"] = exception 50 return TestError(**test_parameters) 51 finally: 52 if test_module_dir in sys.path: 53 sys.path.remove(test_module_dir) 54 for _, obj in inspect.getmembers(test_module): 55 if ( 56 inspect.isclass(obj) 57 and obj.__name__ == test_class 58 and inspect.getmodule(obj) == test_module 59 ): 60 if issubclass(obj, test.Test): 61 test_class = obj 62 break 63 if "run.results_dir" in test_parameters: 64 test_parameters["base_logdir"] = test_parameters.pop("run.results_dir") 65 test_instance = test_class(**test_parameters) 66 67 return test_instance 68 [end of avocado/core/utils/loader.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/avocado/core/utils/loader.py b/avocado/core/utils/loader.py --- a/avocado/core/utils/loader.py +++ b/avocado/core/utils/loader.py @@ -30,6 +30,8 @@ :return: an instance of :class:`avocado.core.test.Test`. """ test_class, test_parameters = test_factory + if "run.results_dir" in test_parameters: + test_parameters["base_logdir"] = test_parameters.pop("run.results_dir") if "modulePath" in test_parameters: test_path = test_parameters.pop("modulePath") else: @@ -60,8 +62,6 @@ if issubclass(obj, test.Test): test_class = obj break - if "run.results_dir" in test_parameters: - test_parameters["base_logdir"] = test_parameters.pop("run.results_dir") test_instance = test_class(**test_parameters) return test_instance
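The fix above works because the dotted config key is remapped before any early-return path can hand the raw parameters to the error-reporting fallback. A minimal, self-contained sketch of the failure mode — the class and paths are illustrative stand-ins, not avocado's real API:

```python
# Minimal sketch of the bug the diff above fixes. `FakeTestError` stands in
# for avocado's TestError fallback; names and paths are illustrative only.
class FakeTestError:
    def __init__(self, methodName="test", base_logdir=None, exception=None):
        self.base_logdir = base_logdir
        self.exception = exception

params = {"methodName": "test", "run.results_dir": "/tmp/results"}

# Pre-patch order: on an import failure, the fallback instantiated the error
# test while the dotted key was still present, so users saw
#   TypeError: __init__() got an unexpected keyword argument 'run.results_dir'
# instead of the real import traceback.

# Post-patch order: remap the key *before* anything can consume the params.
if "run.results_dir" in params:
    params["base_logdir"] = params.pop("run.results_dir")

FakeTestError(exception="stubbed exc_info", **params)  # now constructs cleanly
```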
{"golden_diff": "diff --git a/avocado/core/utils/loader.py b/avocado/core/utils/loader.py\n--- a/avocado/core/utils/loader.py\n+++ b/avocado/core/utils/loader.py\n@@ -30,6 +30,8 @@\n :return: an instance of :class:`avocado.core.test.Test`.\n \"\"\"\n test_class, test_parameters = test_factory\n+ if \"run.results_dir\" in test_parameters:\n+ test_parameters[\"base_logdir\"] = test_parameters.pop(\"run.results_dir\")\n if \"modulePath\" in test_parameters:\n test_path = test_parameters.pop(\"modulePath\")\n else:\n@@ -60,8 +62,6 @@\n if issubclass(obj, test.Test):\n test_class = obj\n break\n- if \"run.results_dir\" in test_parameters:\n- test_parameters[\"base_logdir\"] = test_parameters.pop(\"run.results_dir\")\n test_instance = test_class(**test_parameters)\n \n return test_instance\n", "issue": "Handle \"could not import module\" errors gracefully\n**Describe the bug**\r\nAvocado does not handle \"could not import module\" errors very gracefully, with error messages that are quite cryptic.\r\n\r\n**Steps to reproduce**\r\nWrite a valid `avocado-instrumented` test, but with an invalid import. Example:\r\n\r\n```python\r\nfrom avocado import Test\r\n\r\nimport foo\r\n\r\n\r\nclass PassTest(Test):\r\n\r\n \"\"\"\r\n Example test that passes.\r\n\r\n :avocado: tags=fast\r\n \"\"\"\r\n\r\n def test(self):\r\n \"\"\"\r\n A test simply doesn't have to fail in order to pass\r\n \"\"\"\r\n```\r\n\r\nAnd run it:\r\n\r\n```\r\n$ avocado run examples/tests/passtest.py \r\nJOB ID : 3fee9803715e414a16c3dcf1ddb9ff2f6dc6c0bd\r\nJOB LOG : /home/cleber/avocado/job-results/job-2022-08-11T10.24-3fee980/job.log\r\n (1/1) examples/tests/passtest.py:PassTest.test: STARTED\r\n (1/1) examples/tests/passtest.py:PassTest.test: ERROR: Test.__init__() got an unexpected keyword argument 'run.results_dir' (0.01 s)\r\nRESULTS : PASS 0 | ERROR 1 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0\r\nJOB HTML : /home/cleber/avocado/job-results/job-2022-08-11T10.24-3fee980/results.html\r\nJOB TIME : 1.47 s\r\n```\r\n\r\n**Expected behavior**\r\nInstead of \"unexpected argument...\" a more clear error message such as: \"failed to import the file containing the test\" or something similar. 
\r\n\r\n**Current behavior**\r\n\r\nFrom original reporter @jnsnow:\r\n\r\n```\r\n(08/27) tests/protocol.py:Connect.testBadUNIX: ERROR:\r\n Test.__init__() got an unexpected keyword argument 'run.results_dir'\r\n (0.01 s)\r\n```\r\n\r\n**System information (please complete the following information):**\r\n - OS: ```LSB Version:\t:core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch\r\nDistributor ID:\tFedora\r\nDescription:\tFedora release 36 (Thirty Six)\r\nRelease:\t36\r\nCodename:\tThirtySix```\r\n - Avocado version: 5a0c5b2348da450397287a0954e4c335c0d590a9\r\n - Avocado installation method: git\r\n\n", "before_files": [{"content": "import importlib\nimport inspect\nimport os\nimport sys\n\nfrom avocado.core import test\nfrom avocado.utils import stacktrace\n\n\nclass TestError(test.Test):\n \"\"\"\n Generic test error.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n exception = kwargs.pop(\"exception\")\n test.Test.__init__(self, *args, **kwargs)\n self.exception = exception\n\n def test(self):\n self.error(self.exception)\n\n\ndef load_test(test_factory):\n \"\"\"\n Load test from the test factory.\n\n :param test_factory: a pair of test class and parameters.\n :type test_factory: tuple\n :return: an instance of :class:`avocado.core.test.Test`.\n \"\"\"\n test_class, test_parameters = test_factory\n if \"modulePath\" in test_parameters:\n test_path = test_parameters.pop(\"modulePath\")\n else:\n test_path = None\n if isinstance(test_class, str):\n module_name = os.path.basename(test_path).split(\".\")[0]\n test_module_dir = os.path.abspath(os.path.dirname(test_path))\n # Tests with local dir imports need this\n try:\n sys.path.insert(0, test_module_dir)\n test_module = importlib.import_module(module_name)\n except: # pylint: disable=W0702\n # On load_module exception we fake the test class and pass\n # the exc_info as parameter to be logged.\n test_parameters[\"methodName\"] = \"test\"\n exception = stacktrace.prepare_exc_info(sys.exc_info())\n test_parameters[\"exception\"] = exception\n return TestError(**test_parameters)\n finally:\n if test_module_dir in sys.path:\n sys.path.remove(test_module_dir)\n for _, obj in inspect.getmembers(test_module):\n if (\n inspect.isclass(obj)\n and obj.__name__ == test_class\n and inspect.getmodule(obj) == test_module\n ):\n if issubclass(obj, test.Test):\n test_class = obj\n break\n if \"run.results_dir\" in test_parameters:\n test_parameters[\"base_logdir\"] = test_parameters.pop(\"run.results_dir\")\n test_instance = test_class(**test_parameters)\n\n return test_instance\n", "path": "avocado/core/utils/loader.py"}]}
1,798
211
gh_patches_debug_56084
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-5611
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests </issue> <code> [start of examples/inference/benchmark_ops/benchmark_rmsnorm.py] 1 import torch 2 3 from colossalai.kernel.kernel_loader import InferenceOpsLoader 4 from colossalai.kernel.triton import rms_layernorm 5 6 try: 7 import triton # noqa 8 except ImportError: 9 print("please install triton from https://github.com/openai/triton") 10 11 inference_ops = InferenceOpsLoader().load() 12 13 # Triton benchmark plot attributions 14 configs = [ 15 triton.testing.Benchmark( 16 x_names=["SEQUENCE_TOTAL"], 17 x_vals=[i for i in range(128, 1025, 128)], 18 line_arg="provider", 19 line_vals=[ 20 "vllm_rms_layernorm", 21 "triton_rms_layernorm", 22 "cuda_rms_layernorm", 23 "vllm_rms_layernorm_with_residual", 24 "triton_rms_layernorm_with_residual", 25 "cuda_rms_layernorm_with_residual", 26 ], 27 line_names=[ 28 "vllm_rms_layernorm", 29 "triton_rms_layernorm", 30 "cuda_rms_layernorm", 31 "vllm_rms_layernorm_with_residual", 32 "triton_rms_layernorm_with_residual", 33 "cuda_rms_layernorm_with_residual", 34 ], 35 styles=[("red", "-"), ("blue", "-"), ("yellow", "-"), ("red", "--"), ("blue", "--"), ("yellow", "--")], 36 ylabel="ms", 37 plot_name=f"RMSNorm benchmarking results", 38 args={"HIDDEN_SIZE": 1024}, 39 ) 40 ] 41 42 43 @triton.testing.perf_report(configs) 44 def benchmark_rms_layernorm( 45 provider: str, 46 SEQUENCE_TOTAL: int, 47 HIDDEN_SIZE: int, 48 ): 49 try: 50 from vllm.model_executor.layers.layernorm import RMSNorm 51 except ImportError: 52 raise ImportError("Please install vllm from https://github.com/vllm-project/vllm") 53 54 warmup = 10 55 rep = 1000 56 57 dtype = torch.float16 58 eps = 1e-5 59 x_shape = (SEQUENCE_TOTAL, HIDDEN_SIZE) 60 w_shape = (x_shape[-1],) 61 residual = torch.rand(x_shape, dtype=dtype, device="cuda") 62 weight = torch.ones(w_shape, dtype=dtype, device="cuda") 63 vllm_norm = RMSNorm(hidden_size=HIDDEN_SIZE, eps=eps).to(dtype=dtype, device="cuda") 64 x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device="cuda") 65 if provider == "vllm_rms_layernorm": 66 fn = lambda: vllm_norm(x) 67 elif provider == "triton_rms_layernorm": 68 fn = lambda: rms_layernorm(x, weight, eps=eps) 69 elif provider == "cuda_rms_layernorm": 70 out = torch.empty_like(x) 71 fn = lambda: inference_ops.rms_layernorm(out, x, weight, eps) 72 elif provider == "vllm_rms_layernorm_with_residual": 73 fn = lambda: vllm_norm(x, residual=residual) 74 elif provider == "triton_rms_layernorm_with_residual": 75 fn = lambda: rms_layernorm(x, weight, eps=eps, residual=residual) 76 elif provider == "cuda_rms_layernorm_with_residual": 77 fn = lambda: inference_ops.fused_add_rms_layernorm(x, residual, weight, eps) 78 else: 79 raise ValueError("Undefined provider.") 80 81 ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep) 82 83 return ms 84 85 86 if __name__ == "__main__": 87 benchmark_rms_layernorm.run(save_path=".", print_data=True) 88 [end of examples/inference/benchmark_ops/benchmark_rmsnorm.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/inference/benchmark_ops/benchmark_rmsnorm.py b/examples/inference/benchmark_ops/benchmark_rmsnorm.py --- a/examples/inference/benchmark_ops/benchmark_rmsnorm.py +++ b/examples/inference/benchmark_ops/benchmark_rmsnorm.py @@ -35,7 +35,7 @@ styles=[("red", "-"), ("blue", "-"), ("yellow", "-"), ("red", "--"), ("blue", "--"), ("yellow", "--")], ylabel="ms", plot_name=f"RMSNorm benchmarking results", - args={"HIDDEN_SIZE": 1024}, + args={"HIDDEN_SIZE": 5120}, ) ]
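The functional change is a single value: the fixed kwargs that `triton.testing.Benchmark` forwards to every benchmarked provider. A trimmed sketch of the same config (assuming `triton` is installed; everything other than `HIDDEN_SIZE` is shortened for illustration):

```python
import triton

# Trimmed sketch of the changed config: `args` supplies fixed keyword
# arguments to every provider line, so raising HIDDEN_SIZE to 5120 (e.g. a
# 13B-class hidden size) changes the workload for all six RMSNorm variants.
config = triton.testing.Benchmark(
    x_names=["SEQUENCE_TOTAL"],
    x_vals=[128, 256, 384],
    line_arg="provider",
    line_vals=["triton_rms_layernorm"],
    line_names=["triton_rms_layernorm"],
    styles=[("blue", "-")],
    ylabel="ms",
    plot_name="rmsnorm_sketch",
    args={"HIDDEN_SIZE": 5120},  # was 1024 before the patch
)
```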
{"golden_diff": "diff --git a/examples/inference/benchmark_ops/benchmark_rmsnorm.py b/examples/inference/benchmark_ops/benchmark_rmsnorm.py\n--- a/examples/inference/benchmark_ops/benchmark_rmsnorm.py\n+++ b/examples/inference/benchmark_ops/benchmark_rmsnorm.py\n@@ -35,7 +35,7 @@\n styles=[(\"red\", \"-\"), (\"blue\", \"-\"), (\"yellow\", \"-\"), (\"red\", \"--\"), (\"blue\", \"--\"), (\"yellow\", \"--\")],\n ylabel=\"ms\",\n plot_name=f\"RMSNorm benchmarking results\",\n- args={\"HIDDEN_SIZE\": 1024},\n+ args={\"HIDDEN_SIZE\": 5120},\n )\n ]\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import torch\n\nfrom colossalai.kernel.kernel_loader import InferenceOpsLoader\nfrom colossalai.kernel.triton import rms_layernorm\n\ntry:\n import triton # noqa\nexcept ImportError:\n print(\"please install triton from https://github.com/openai/triton\")\n\ninference_ops = InferenceOpsLoader().load()\n\n# Triton benchmark plot attributions\nconfigs = [\n triton.testing.Benchmark(\n x_names=[\"SEQUENCE_TOTAL\"],\n x_vals=[i for i in range(128, 1025, 128)],\n line_arg=\"provider\",\n line_vals=[\n \"vllm_rms_layernorm\",\n \"triton_rms_layernorm\",\n \"cuda_rms_layernorm\",\n \"vllm_rms_layernorm_with_residual\",\n \"triton_rms_layernorm_with_residual\",\n \"cuda_rms_layernorm_with_residual\",\n ],\n line_names=[\n \"vllm_rms_layernorm\",\n \"triton_rms_layernorm\",\n \"cuda_rms_layernorm\",\n \"vllm_rms_layernorm_with_residual\",\n \"triton_rms_layernorm_with_residual\",\n \"cuda_rms_layernorm_with_residual\",\n ],\n styles=[(\"red\", \"-\"), (\"blue\", \"-\"), (\"yellow\", \"-\"), (\"red\", \"--\"), (\"blue\", \"--\"), (\"yellow\", \"--\")],\n ylabel=\"ms\",\n plot_name=f\"RMSNorm benchmarking results\",\n args={\"HIDDEN_SIZE\": 1024},\n )\n]\n\n\[email protected]_report(configs)\ndef benchmark_rms_layernorm(\n provider: str,\n SEQUENCE_TOTAL: int,\n HIDDEN_SIZE: int,\n):\n try:\n from vllm.model_executor.layers.layernorm import RMSNorm\n except ImportError:\n raise ImportError(\"Please install vllm from https://github.com/vllm-project/vllm\")\n\n warmup = 10\n rep = 1000\n\n dtype = torch.float16\n eps = 1e-5\n x_shape = (SEQUENCE_TOTAL, HIDDEN_SIZE)\n w_shape = (x_shape[-1],)\n residual = torch.rand(x_shape, dtype=dtype, device=\"cuda\")\n weight = torch.ones(w_shape, dtype=dtype, device=\"cuda\")\n vllm_norm = RMSNorm(hidden_size=HIDDEN_SIZE, eps=eps).to(dtype=dtype, device=\"cuda\")\n x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=\"cuda\")\n if provider == \"vllm_rms_layernorm\":\n fn = lambda: vllm_norm(x)\n elif provider == \"triton_rms_layernorm\":\n fn = lambda: rms_layernorm(x, weight, eps=eps)\n elif provider == \"cuda_rms_layernorm\":\n out = torch.empty_like(x)\n fn = lambda: inference_ops.rms_layernorm(out, x, weight, eps)\n elif provider == \"vllm_rms_layernorm_with_residual\":\n fn = lambda: vllm_norm(x, residual=residual)\n elif provider == \"triton_rms_layernorm_with_residual\":\n fn = lambda: rms_layernorm(x, weight, eps=eps, residual=residual)\n elif provider == \"cuda_rms_layernorm_with_residual\":\n fn = lambda: inference_ops.fused_add_rms_layernorm(x, residual, weight, eps)\n else:\n raise ValueError(\"Undefined provider.\")\n\n ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)\n\n return ms\n\n\nif __name__ == \"__main__\":\n benchmark_rms_layernorm.run(save_path=\".\", print_data=True)\n", "path": "examples/inference/benchmark_ops/benchmark_rmsnorm.py"}]}
1,598
155
gh_patches_debug_33315
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-666
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Idea: have a way to specify context via command line Something like repeat arguments: ``` cookiecutter mytemplate -Cname=my-project -Cgithub-user=ionelmc ``` Or maybe the whole json? ``` cookiecutter mytemplate --context='{"name": "my-project", "github-user": "ionelmc"}' ``` Or variable arguments? ``` cookiecutter mytemplate --context-name=my-project --context-github-user=ionelmc ``` </issue> <code> [start of cookiecutter/cli.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 cookiecutter.cli 6 ----------------- 7 8 Main `cookiecutter` CLI. 9 """ 10 11 import os 12 import sys 13 import logging 14 import json 15 16 import click 17 18 from cookiecutter import __version__ 19 from cookiecutter.config import USER_CONFIG_PATH 20 from cookiecutter.main import cookiecutter 21 from cookiecutter.exceptions import ( 22 OutputDirExistsException, 23 InvalidModeException, 24 FailedHookException, 25 UndefinedVariableInTemplate, 26 UnknownExtension, 27 RepositoryNotFound 28 ) 29 30 logger = logging.getLogger(__name__) 31 32 33 def version_msg(): 34 """Returns the Cookiecutter version, location and Python powering it.""" 35 python_version = sys.version[:3] 36 location = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 37 message = u'Cookiecutter %(version)s from {} (Python {})' 38 return message.format(location, python_version) 39 40 41 @click.command(context_settings=dict(help_option_names=[u'-h', u'--help'])) 42 @click.version_option(__version__, u'-V', u'--version', message=version_msg()) 43 @click.argument(u'template') 44 @click.option( 45 u'--no-input', is_flag=True, 46 help=u'Do not prompt for parameters and only use cookiecutter.json ' 47 u'file content', 48 ) 49 @click.option( 50 u'-c', u'--checkout', 51 help=u'branch, tag or commit to checkout after git clone', 52 ) 53 @click.option( 54 '-v', '--verbose', 55 is_flag=True, help='Print debug information', default=False 56 ) 57 @click.option( 58 u'--replay', is_flag=True, 59 help=u'Do not prompt for parameters and only use information entered ' 60 u'previously', 61 ) 62 @click.option( 63 u'-f', u'--overwrite-if-exists', is_flag=True, 64 help=u'Overwrite the contents of the output directory if it already exists' 65 ) 66 @click.option( 67 u'-o', u'--output-dir', default='.', type=click.Path(), 68 help=u'Where to output the generated project dir into' 69 ) 70 @click.option( 71 u'--config-file', type=click.Path(), default=USER_CONFIG_PATH, 72 help=u'User configuration file' 73 ) 74 @click.option( 75 u'--default-config', is_flag=True, 76 help=u'Do not load a config file. Use the defaults instead' 77 ) 78 def main(template, no_input, checkout, verbose, replay, overwrite_if_exists, 79 output_dir, config_file, default_config): 80 """Create a project from a Cookiecutter project template (TEMPLATE).""" 81 if verbose: 82 logging.basicConfig( 83 format=u'%(levelname)s %(filename)s: %(message)s', 84 level=logging.DEBUG 85 ) 86 else: 87 # Log info and above to console 88 logging.basicConfig( 89 format=u'%(levelname)s: %(message)s', 90 level=logging.INFO 91 ) 92 93 try: 94 # If you _need_ to support a local template in a directory 95 # called 'help', use a qualified path to the directory. 
96 if template == u'help': 97 click.echo(click.get_current_context().get_help()) 98 sys.exit(0) 99 100 user_config = None if default_config else config_file 101 102 cookiecutter( 103 template, checkout, no_input, 104 replay=replay, 105 overwrite_if_exists=overwrite_if_exists, 106 output_dir=output_dir, 107 config_file=user_config 108 ) 109 except (OutputDirExistsException, 110 InvalidModeException, 111 FailedHookException, 112 UnknownExtension, 113 RepositoryNotFound) as e: 114 click.echo(e) 115 sys.exit(1) 116 except UndefinedVariableInTemplate as undefined_err: 117 click.echo('{}'.format(undefined_err.message)) 118 click.echo('Error message: {}'.format(undefined_err.error.message)) 119 120 context_str = json.dumps( 121 undefined_err.context, 122 indent=4, 123 sort_keys=True 124 ) 125 click.echo('Context: {}'.format(context_str)) 126 sys.exit(1) 127 128 129 if __name__ == "__main__": 130 main() 131 [end of cookiecutter/cli.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cookiecutter/cli.py b/cookiecutter/cli.py --- a/cookiecutter/cli.py +++ b/cookiecutter/cli.py @@ -38,9 +38,23 @@ return message.format(location, python_version) +def validate_extra_context(ctx, param, value): + for s in value: + if '=' not in s: + raise click.BadParameter( + 'EXTRA_CONTEXT should contain items of the form key=value; ' + "'{}' doesn't match that form".format(s) + ) + + # Convert tuple -- e.g.: (u'program_name=foobar', u'startsecs=66') + # to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'} + return dict(s.split('=', 1) for s in value) or None + + @click.command(context_settings=dict(help_option_names=[u'-h', u'--help'])) @click.version_option(__version__, u'-V', u'--version', message=version_msg()) @click.argument(u'template') [email protected](u'extra_context', nargs=-1, callback=validate_extra_context) @click.option( u'--no-input', is_flag=True, help=u'Do not prompt for parameters and only use cookiecutter.json ' @@ -75,8 +89,8 @@ u'--default-config', is_flag=True, help=u'Do not load a config file. Use the defaults instead' ) -def main(template, no_input, checkout, verbose, replay, overwrite_if_exists, - output_dir, config_file, default_config): +def main(template, extra_context, no_input, checkout, verbose, replay, + overwrite_if_exists, output_dir, config_file, default_config): """Create a project from a Cookiecutter project template (TEMPLATE).""" if verbose: logging.basicConfig( @@ -101,6 +115,7 @@ cookiecutter( template, checkout, no_input, + extra_context=extra_context, replay=replay, overwrite_if_exists=overwrite_if_exists, output_dir=output_dir,
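The heart of the diff is the `nargs=-1` positional argument plus a validation callback. The same parsing logic, pulled out as a standalone function so it runs without click — `ValueError` stands in for `click.BadParameter`:

```python
# Standalone sketch of the validation the patch adds; ValueError stands in
# for click.BadParameter so the snippet runs without click installed.
def parse_extra_context(values):
    for s in values:
        if "=" not in s:
            raise ValueError(
                "EXTRA_CONTEXT should contain items of the form key=value; "
                "'{}' doesn't match that form".format(s)
            )
    # ('name=my-project', 'github-user=ionelmc') -> dict; empty tuple -> None
    return dict(s.split("=", 1) for s in values) or None

assert parse_extra_context(("name=my-project", "github-user=ionelmc")) == {
    "name": "my-project",
    "github-user": "ionelmc",
}
assert parse_extra_context(()) is None
```

With the patch applied, the syntax floated in the issue becomes positional: `cookiecutter mytemplate name=my-project github-user=ionelmc`.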
{"golden_diff": "diff --git a/cookiecutter/cli.py b/cookiecutter/cli.py\n--- a/cookiecutter/cli.py\n+++ b/cookiecutter/cli.py\n@@ -38,9 +38,23 @@\n return message.format(location, python_version)\n \n \n+def validate_extra_context(ctx, param, value):\n+ for s in value:\n+ if '=' not in s:\n+ raise click.BadParameter(\n+ 'EXTRA_CONTEXT should contain items of the form key=value; '\n+ \"'{}' doesn't match that form\".format(s)\n+ )\n+\n+ # Convert tuple -- e.g.: (u'program_name=foobar', u'startsecs=66')\n+ # to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'}\n+ return dict(s.split('=', 1) for s in value) or None\n+\n+\n @click.command(context_settings=dict(help_option_names=[u'-h', u'--help']))\n @click.version_option(__version__, u'-V', u'--version', message=version_msg())\n @click.argument(u'template')\[email protected](u'extra_context', nargs=-1, callback=validate_extra_context)\n @click.option(\n u'--no-input', is_flag=True,\n help=u'Do not prompt for parameters and only use cookiecutter.json '\n@@ -75,8 +89,8 @@\n u'--default-config', is_flag=True,\n help=u'Do not load a config file. Use the defaults instead'\n )\n-def main(template, no_input, checkout, verbose, replay, overwrite_if_exists,\n- output_dir, config_file, default_config):\n+def main(template, extra_context, no_input, checkout, verbose, replay,\n+ overwrite_if_exists, output_dir, config_file, default_config):\n \"\"\"Create a project from a Cookiecutter project template (TEMPLATE).\"\"\"\n if verbose:\n logging.basicConfig(\n@@ -101,6 +115,7 @@\n \n cookiecutter(\n template, checkout, no_input,\n+ extra_context=extra_context,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir,\n", "issue": "Idea: have a way to specify context via command line\nSomething like repeat arguments:\n\n```\ncookiecutter mytemplate -Cname=my-project -Cgithub-user=ionelmc\n```\n\nOr maybe the whole json?\n\n```\ncookiecutter mytemplate --context='{\"name\": \"my-project\", \"github-user\": \"ionelmc\"}'\n```\n\nOr variable arguments?\n\n```\ncookiecutter mytemplate --context-name=my-project --context-github-user=ionelmc\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.cli\n-----------------\n\nMain `cookiecutter` CLI.\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport json\n\nimport click\n\nfrom cookiecutter import __version__\nfrom cookiecutter.config import USER_CONFIG_PATH\nfrom cookiecutter.main import cookiecutter\nfrom cookiecutter.exceptions import (\n OutputDirExistsException,\n InvalidModeException,\n FailedHookException,\n UndefinedVariableInTemplate,\n UnknownExtension,\n RepositoryNotFound\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef version_msg():\n \"\"\"Returns the Cookiecutter version, location and Python powering it.\"\"\"\n python_version = sys.version[:3]\n location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n message = u'Cookiecutter %(version)s from {} (Python {})'\n return message.format(location, python_version)\n\n\[email protected](context_settings=dict(help_option_names=[u'-h', u'--help']))\[email protected]_option(__version__, u'-V', u'--version', message=version_msg())\[email protected](u'template')\[email protected](\n u'--no-input', is_flag=True,\n help=u'Do not prompt for parameters and only use cookiecutter.json '\n u'file content',\n)\[email protected](\n u'-c', u'--checkout',\n help=u'branch, tag or commit to checkout after git clone',\n)\[email protected](\n '-v', 
'--verbose',\n is_flag=True, help='Print debug information', default=False\n)\[email protected](\n u'--replay', is_flag=True,\n help=u'Do not prompt for parameters and only use information entered '\n u'previously',\n)\[email protected](\n u'-f', u'--overwrite-if-exists', is_flag=True,\n help=u'Overwrite the contents of the output directory if it already exists'\n)\[email protected](\n u'-o', u'--output-dir', default='.', type=click.Path(),\n help=u'Where to output the generated project dir into'\n)\[email protected](\n u'--config-file', type=click.Path(), default=USER_CONFIG_PATH,\n help=u'User configuration file'\n)\[email protected](\n u'--default-config', is_flag=True,\n help=u'Do not load a config file. Use the defaults instead'\n)\ndef main(template, no_input, checkout, verbose, replay, overwrite_if_exists,\n output_dir, config_file, default_config):\n \"\"\"Create a project from a Cookiecutter project template (TEMPLATE).\"\"\"\n if verbose:\n logging.basicConfig(\n format=u'%(levelname)s %(filename)s: %(message)s',\n level=logging.DEBUG\n )\n else:\n # Log info and above to console\n logging.basicConfig(\n format=u'%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n try:\n # If you _need_ to support a local template in a directory\n # called 'help', use a qualified path to the directory.\n if template == u'help':\n click.echo(click.get_current_context().get_help())\n sys.exit(0)\n\n user_config = None if default_config else config_file\n\n cookiecutter(\n template, checkout, no_input,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir,\n config_file=user_config\n )\n except (OutputDirExistsException,\n InvalidModeException,\n FailedHookException,\n UnknownExtension,\n RepositoryNotFound) as e:\n click.echo(e)\n sys.exit(1)\n except UndefinedVariableInTemplate as undefined_err:\n click.echo('{}'.format(undefined_err.message))\n click.echo('Error message: {}'.format(undefined_err.error.message))\n\n context_str = json.dumps(\n undefined_err.context,\n indent=4,\n sort_keys=True\n )\n click.echo('Context: {}'.format(context_str))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "cookiecutter/cli.py"}]}
1,792
476
gh_patches_debug_4402
rasdani/github-patches
git_diff
aws__aws-cli-1769
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> JSON Cache for AssumeRoleProvider not truncating files When we open a file for writing, if we're reusing the same file (same cache key) we don't truncate the file before writing. If the second JSON response is smaller it will result in extra data at the end of the JSON document. This will trigger a json parsing error, which raises a KeyError, which causes the cred provider to retrieve a new set of temporary credentials because it thinks the file is not in the cache. </issue> <code> [start of awscli/customizations/assumerole.py] 1 import os 2 import json 3 import logging 4 5 from botocore.exceptions import ProfileNotFound 6 7 LOG = logging.getLogger(__name__) 8 9 10 def register_assume_role_provider(event_handlers): 11 event_handlers.register('session-initialized', 12 inject_assume_role_provider_cache, 13 unique_id='inject_assume_role_cred_provider_cache') 14 15 16 def inject_assume_role_provider_cache(session, **kwargs): 17 try: 18 cred_chain = session.get_component('credential_provider') 19 except ProfileNotFound: 20 # If a user has provided a profile that does not exist, 21 # trying to retrieve components/config on the session 22 # will raise ProfileNotFound. Sometimes this is invalid: 23 # 24 # "ec2 describe-instances --profile unknown" 25 # 26 # and sometimes this is perfectly valid: 27 # 28 # "configure set region us-west-2 --profile brand-new-profile" 29 # 30 # Because we can't know (and don't want to know) whether 31 # the customer is trying to do something valid, we just 32 # immediately return. If it's invalid something else 33 # up the stack will raise ProfileNotFound, otherwise 34 # the configure (and other) commands will work as expected. 35 LOG.debug("ProfileNotFound caught when trying to inject " 36 "assume-role cred provider cache. Not configuring " 37 "JSONFileCache for assume-role.") 38 return 39 provider = cred_chain.get_provider('assume-role') 40 provider.cache = JSONFileCache() 41 42 43 class JSONFileCache(object): 44 """JSON file cache. 45 46 This provides a dict like interface that stores JSON serializable 47 objects. 48 49 The objects are serialized to JSON and stored in a file. These 50 values can be retrieved at a later time. 
51 52 """ 53 54 CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'cli', 'cache')) 55 56 def __init__(self, working_dir=CACHE_DIR): 57 self._working_dir = working_dir 58 59 def __contains__(self, cache_key): 60 actual_key = self._convert_cache_key(cache_key) 61 return os.path.isfile(actual_key) 62 63 def __getitem__(self, cache_key): 64 """Retrieve value from a cache key.""" 65 actual_key = self._convert_cache_key(cache_key) 66 try: 67 with open(actual_key) as f: 68 return json.load(f) 69 except (OSError, ValueError, IOError): 70 raise KeyError(cache_key) 71 72 def __setitem__(self, cache_key, value): 73 full_key = self._convert_cache_key(cache_key) 74 try: 75 file_content = json.dumps(value) 76 except (TypeError, ValueError): 77 raise ValueError("Value cannot be cached, must be " 78 "JSON serializable: %s" % value) 79 if not os.path.isdir(self._working_dir): 80 os.makedirs(self._working_dir) 81 with os.fdopen(os.open(full_key, 82 os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f: 83 f.write(file_content) 84 85 def _convert_cache_key(self, cache_key): 86 full_path = os.path.join(self._working_dir, cache_key + '.json') 87 return full_path 88 [end of awscli/customizations/assumerole.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/awscli/customizations/assumerole.py b/awscli/customizations/assumerole.py --- a/awscli/customizations/assumerole.py +++ b/awscli/customizations/assumerole.py @@ -80,6 +80,7 @@ os.makedirs(self._working_dir) with os.fdopen(os.open(full_key, os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f: + f.truncate() f.write(file_content) def _convert_cache_key(self, cache_key):
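A small repro of the stale-tail corruption the one-line `f.truncate()` addresses — `os.O_WRONLY` without `O_TRUNC` rewrites from offset 0 but keeps any old bytes past the new end, and wrapping the fd with `fdopen(..., "w")` does not truncate either. The final `json.load` is expected to fail:

```python
import json
import os
import tempfile

# Reuse one cache file for a longer payload, then a shorter one, mirroring
# the AssumeRole cache key being written twice.
path = os.path.join(tempfile.mkdtemp(), "cache-key.json")
for value in ({"Credentials": "a-longer-first-payload"}, {"Credentials": "short"}):
    with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0o600), "w") as f:
        # f.truncate()  # <- uncommenting this (the patch) fixes the corruption
        f.write(json.dumps(value))

with open(path) as f:
    json.load(f)  # raises ValueError ("Extra data") without the truncate
```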
{"golden_diff": "diff --git a/awscli/customizations/assumerole.py b/awscli/customizations/assumerole.py\n--- a/awscli/customizations/assumerole.py\n+++ b/awscli/customizations/assumerole.py\n@@ -80,6 +80,7 @@\n os.makedirs(self._working_dir)\n with os.fdopen(os.open(full_key,\n os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:\n+ f.truncate()\n f.write(file_content)\n \n def _convert_cache_key(self, cache_key):\n", "issue": "JSON Cache for AssumeRoleProvider not truncating files\nWhen we open a file for writing, if we're reusing the same file (same cache key) we don't truncate the file before writing. If the second JSON response is smaller it will result in extra data at the end of the JSON document.\n\nThis will trigger a json parsing error, which raises a KeyError, which causes the cred provider to retrieve a new set of temporary credentials because it thinks the file is not in the cache.\n\n", "before_files": [{"content": "import os\nimport json\nimport logging\n\nfrom botocore.exceptions import ProfileNotFound\n\nLOG = logging.getLogger(__name__)\n\n\ndef register_assume_role_provider(event_handlers):\n event_handlers.register('session-initialized',\n inject_assume_role_provider_cache,\n unique_id='inject_assume_role_cred_provider_cache')\n\n\ndef inject_assume_role_provider_cache(session, **kwargs):\n try:\n cred_chain = session.get_component('credential_provider')\n except ProfileNotFound:\n # If a user has provided a profile that does not exist,\n # trying to retrieve components/config on the session\n # will raise ProfileNotFound. Sometimes this is invalid:\n #\n # \"ec2 describe-instances --profile unknown\"\n #\n # and sometimes this is perfectly valid:\n #\n # \"configure set region us-west-2 --profile brand-new-profile\"\n #\n # Because we can't know (and don't want to know) whether\n # the customer is trying to do something valid, we just\n # immediately return. If it's invalid something else\n # up the stack will raise ProfileNotFound, otherwise\n # the configure (and other) commands will work as expected.\n LOG.debug(\"ProfileNotFound caught when trying to inject \"\n \"assume-role cred provider cache. Not configuring \"\n \"JSONFileCache for assume-role.\")\n return\n provider = cred_chain.get_provider('assume-role')\n provider.cache = JSONFileCache()\n\n\nclass JSONFileCache(object):\n \"\"\"JSON file cache.\n\n This provides a dict like interface that stores JSON serializable\n objects.\n\n The objects are serialized to JSON and stored in a file. 
These\n values can be retrieved at a later time.\n\n \"\"\"\n\n CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'cli', 'cache'))\n\n def __init__(self, working_dir=CACHE_DIR):\n self._working_dir = working_dir\n\n def __contains__(self, cache_key):\n actual_key = self._convert_cache_key(cache_key)\n return os.path.isfile(actual_key)\n\n def __getitem__(self, cache_key):\n \"\"\"Retrieve value from a cache key.\"\"\"\n actual_key = self._convert_cache_key(cache_key)\n try:\n with open(actual_key) as f:\n return json.load(f)\n except (OSError, ValueError, IOError):\n raise KeyError(cache_key)\n\n def __setitem__(self, cache_key, value):\n full_key = self._convert_cache_key(cache_key)\n try:\n file_content = json.dumps(value)\n except (TypeError, ValueError):\n raise ValueError(\"Value cannot be cached, must be \"\n \"JSON serializable: %s\" % value)\n if not os.path.isdir(self._working_dir):\n os.makedirs(self._working_dir)\n with os.fdopen(os.open(full_key,\n os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:\n f.write(file_content)\n\n def _convert_cache_key(self, cache_key):\n full_path = os.path.join(self._working_dir, cache_key + '.json')\n return full_path\n", "path": "awscli/customizations/assumerole.py"}]}
1,482
125
gh_patches_debug_49851
rasdani/github-patches
git_diff
netbox-community__netbox-15890
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> OpenIDC SSO through apache stopped working after update to 3.7.6 ### Deployment Type Self-hosted ### NetBox Version v3.7.6 ### Python Version 3.9 ### Steps to Reproduce This is a longstanding NetBox instance. It runs under gunicorn, proxied through apache which is configured to use mod_auth_openid for authentication. NetBox's configuration includes: REMOTE_AUTH_ENABLED = True REMOTE_AUTH_BACKEND = 'netbox.authentication.RemoteUserBackend' REMOTE_AUTH_HEADER = 'HTTP_OIDC_CLAIM_PREFERRED_USERNAME' REMOTE_AUTH_AUTO_CREATE_USER = True This was working fine until the update to 3.7.6 following our usual procedure: Pull and checkout v3.7.6. Run upgrade.sh Restart NetBox gunicorn service, netbox-rq and apache Since the upgrade, NetBox has presented a login box instead of logging in as the REMOTE_AUTH_HEADER user. Using tcpdump, I can see the "OIDC_CLAIM_preferred_username" header is being sent to gunicorn. Other instances using the same OpenIDC configuration are working. ### Expected Behavior REMOTE_AUTH login using OpenIDC credentials. ### Observed Behavior The web frontend prompts for username and password. </issue> <code> [start of contrib/gunicorn.py] 1 # The IP address (typically localhost) and port that the NetBox WSGI process should listen on 2 bind = '127.0.0.1:8001' 3 4 # Number of gunicorn workers to spawn. This should typically be 2n+1, where 5 # n is the number of CPU cores present. 6 workers = 5 7 8 # Number of threads per worker process 9 threads = 3 10 11 # Timeout (in seconds) for a request to complete 12 timeout = 120 13 14 # The maximum number of requests a worker can handle before being respawned 15 max_requests = 5000 16 max_requests_jitter = 500 17 [end of contrib/gunicorn.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/contrib/gunicorn.py b/contrib/gunicorn.py --- a/contrib/gunicorn.py +++ b/contrib/gunicorn.py @@ -14,3 +14,7 @@ # The maximum number of requests a worker can handle before being respawned max_requests = 5000 max_requests_jitter = 500 + +# Uncomment this line to accept HTTP headers containing underscores, e.g. for remote +# authentication support. See https://docs.gunicorn.org/en/stable/settings.html#header-map +# header-map = 'dangerous'
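Context for the commented line, hedged: gunicorn's settings docs describe a `header_map` option (added around gunicorn 22) whose default drops header fields containing underscores — exactly the shape of `OIDC_CLAIM_preferred_username` — which would explain remote auth breaking after an upgrade that pulled in a newer gunicorn. If uncommenting, note that a `gunicorn.py` config file is plain Python, so the setting is written with an underscore; `header-map` is only the docs anchor:

```python
# Sketch of opting back in, in Python config-file syntax. "dangerous" is
# gunicorn's own name for the legacy pass-through mode; only enable it when
# a trusted proxy in front of gunicorn strips client-supplied headers, so an
# attacker cannot smuggle OIDC_CLAIM_* values past the proxy.
header_map = "dangerous"
```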
{"golden_diff": "diff --git a/contrib/gunicorn.py b/contrib/gunicorn.py\n--- a/contrib/gunicorn.py\n+++ b/contrib/gunicorn.py\n@@ -14,3 +14,7 @@\n # The maximum number of requests a worker can handle before being respawned\n max_requests = 5000\n max_requests_jitter = 500\n+\n+# Uncomment this line to accept HTTP headers containing underscores, e.g. for remote\n+# authentication support. See https://docs.gunicorn.org/en/stable/settings.html#header-map\n+# header-map = 'dangerous'\n", "issue": "OpenIDC SSO through apache stopped working after update to 3.7.6\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv3.7.6\n\n### Python Version\n\n3.9\n\n### Steps to Reproduce\n\nThis is a longstanding NetBox instance. It runs under gunicorn, proxied through apache which is configured to use mod_auth_openid for authentication. \r\n\r\nNetBox's configuration includes:\r\nREMOTE_AUTH_ENABLED = True\r\nREMOTE_AUTH_BACKEND = 'netbox.authentication.RemoteUserBackend'\r\nREMOTE_AUTH_HEADER = 'HTTP_OIDC_CLAIM_PREFERRED_USERNAME'\r\nREMOTE_AUTH_AUTO_CREATE_USER = True\r\n\r\nThis was working fine until the update to 3.7.6 following our usual procedure:\r\n\r\nPull and checkout v3.7.6.\r\n\r\nRun upgrade.sh\r\n\r\nRestart NetBox gunicorn service, netbox-rq and apache\r\n\r\nSince the upgrade, NetBox has presented a login box instead of logging in as the REMOTE_AUTH_HEADER user. Using tcpdump, I can see the \"OIDC_CLAIM_preferred_username\" header is being sent to gunicorn. Other instances using the same OpenIDC configuration are working.\r\n\n\n### Expected Behavior\n\nREMOTE_AUTH login using OpenIDC credentials.\n\n### Observed Behavior\n\nThe web frontend prompts for username and password.\n", "before_files": [{"content": "# The IP address (typically localhost) and port that the NetBox WSGI process should listen on\nbind = '127.0.0.1:8001'\n\n# Number of gunicorn workers to spawn. This should typically be 2n+1, where\n# n is the number of CPU cores present.\nworkers = 5\n\n# Number of threads per worker process\nthreads = 3\n\n# Timeout (in seconds) for a request to complete\ntimeout = 120\n\n# The maximum number of requests a worker can handle before being respawned\nmax_requests = 5000\nmax_requests_jitter = 500\n", "path": "contrib/gunicorn.py"}]}
971
125
gh_patches_debug_30377
rasdani/github-patches
git_diff
goauthentik__authentik-2845
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Using 'Have-I-been-pwned' policy breaks flows in Authentik 2022.4.1 **Describe the bug** Using a 'Have-I-been-pwned' policy on a password prompt within a flow breaks the flow. **To Reproduce** Steps to reproduce the behavior: 1. Use Authentik 2022.3.3 2. Use all the default settings/flows, so a clean install 3. Add a have-i-been-pwned policy to the default-password-change flow on the default-password-change-prompt stage. 4. This stage binding has the following settings: - _Evaluate on plan: True_ - _Re-evaluate policies: False_ - _Invalid respones action: RETRY returns the error message and a similar challenge to the executor._ - _Policy engine mode: ALL, all policies must match to include this stage access._ 5. Go to the Flow Overview and Execute flow with current user, see that the have-i-been pwned policy works correctly. 6. Use Authentik 2022.4.1 7. Repeat steps 2 - 5 described above 8. See that you will receive an error message 'Password not set in context'. **Expected behavior** The password should be checked, and the flow should not crash with the error 'Password not set in context'. **Version and Deployment (please complete the following information):** - authentik version: 2022.4.1 - Deployment: tested both Docker & K8S **Additional context** I repeated these steps multiple times and I keep getting the same issue. Therefore I think it is safe to assume that this is a bug introduced in the update from version 2022.3.3 to version 2022.4.1 </issue> <code> [start of authentik/policies/hibp/models.py] 1 """authentik HIBP Models""" 2 from hashlib import sha1 3 4 from django.db import models 5 from django.utils.translation import gettext as _ 6 from rest_framework.serializers import BaseSerializer 7 from structlog.stdlib import get_logger 8 9 from authentik.lib.utils.http import get_http_session 10 from authentik.policies.models import Policy, PolicyResult 11 from authentik.policies.types import PolicyRequest 12 13 LOGGER = get_logger() 14 15 16 class HaveIBeenPwendPolicy(Policy): 17 """Check if password is on HaveIBeenPwned's list by uploading the first 18 5 characters of the SHA1 Hash.""" 19 20 password_field = models.TextField( 21 default="password", 22 help_text=_("Field key to check, field keys defined in Prompt stages are available."), 23 ) 24 25 allowed_count = models.IntegerField(default=0) 26 27 @property 28 def serializer(self) -> BaseSerializer: 29 from authentik.policies.hibp.api import HaveIBeenPwendPolicySerializer 30 31 return HaveIBeenPwendPolicySerializer 32 33 @property 34 def component(self) -> str: 35 return "ak-policy-hibp-form" 36 37 def passes(self, request: PolicyRequest) -> PolicyResult: 38 """Check if password is in HIBP DB. Hashes given Password with SHA1, uses the first 5 39 characters of Password in request and checks if full hash is in response. 
Returns 0 40 if Password is not in result otherwise the count of how many times it was used.""" 41 if self.password_field not in request.context: 42 LOGGER.warning( 43 "Password field not set in Policy Request", 44 field=self.password_field, 45 fields=request.context.keys(), 46 ) 47 return PolicyResult(False, _("Password not set in context")) 48 password = str(request.context[self.password_field]) 49 50 pw_hash = sha1(password.encode("utf-8")).hexdigest() # nosec 51 url = f"https://api.pwnedpasswords.com/range/{pw_hash[:5]}" 52 result = get_http_session().get(url).text 53 final_count = 0 54 for line in result.split("\r\n"): 55 full_hash, count = line.split(":") 56 if pw_hash[5:] == full_hash.lower(): 57 final_count = int(count) 58 LOGGER.debug("got hibp result", count=final_count, hash=pw_hash[:5]) 59 if final_count > self.allowed_count: 60 message = _("Password exists on %(count)d online lists." % {"count": final_count}) 61 return PolicyResult(False, message) 62 return PolicyResult(True) 63 64 class Meta: 65 66 verbose_name = _("Have I Been Pwned Policy") 67 verbose_name_plural = _("Have I Been Pwned Policies") 68 [end of authentik/policies/hibp/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/authentik/policies/hibp/models.py b/authentik/policies/hibp/models.py --- a/authentik/policies/hibp/models.py +++ b/authentik/policies/hibp/models.py @@ -9,6 +9,7 @@ from authentik.lib.utils.http import get_http_session from authentik.policies.models import Policy, PolicyResult from authentik.policies.types import PolicyRequest +from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT LOGGER = get_logger() @@ -38,14 +39,17 @@ """Check if password is in HIBP DB. Hashes given Password with SHA1, uses the first 5 characters of Password in request and checks if full hash is in response. Returns 0 if Password is not in result otherwise the count of how many times it was used.""" - if self.password_field not in request.context: + password = request.context.get(PLAN_CONTEXT_PROMPT, {}).get( + self.password_field, request.context.get(self.password_field) + ) + if not password: LOGGER.warning( "Password field not set in Policy Request", field=self.password_field, fields=request.context.keys(), ) return PolicyResult(False, _("Password not set in context")) - password = str(request.context[self.password_field]) + password = str(password) pw_hash = sha1(password.encode("utf-8")).hexdigest() # nosec url = f"https://api.pwnedpasswords.com/range/{pw_hash[:5]}"
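What the diff implies: by 2022.4.x, prompt answers live in a nested dict under the plan-context key rather than at the top level of the policy request context. A sketch of both lookups — the value of `PLAN_CONTEXT_PROMPT` is assumed here; in authentik it is imported from `authentik.stages.prompt.stage`:

```python
# Sketch of the lookup change, with context shapes inlined.
PLAN_CONTEXT_PROMPT = "prompt_data"  # assumed value of the imported constant
password_field = "password"

# Newer flow executors nest prompt answers under the prompt-data key...
context = {PLAN_CONTEXT_PROMPT: {"password": "hunter2"}}

# ...so the pre-patch top-level lookup found nothing:
assert password_field not in context  # -> "Password not set in context"

# The patched lookup checks the nested dict first, then falls back to the
# old top-level location for callers that still populate it:
password = context.get(PLAN_CONTEXT_PROMPT, {}).get(
    password_field, context.get(password_field)
)
assert password == "hunter2"
```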
{"golden_diff": "diff --git a/authentik/policies/hibp/models.py b/authentik/policies/hibp/models.py\n--- a/authentik/policies/hibp/models.py\n+++ b/authentik/policies/hibp/models.py\n@@ -9,6 +9,7 @@\n from authentik.lib.utils.http import get_http_session\n from authentik.policies.models import Policy, PolicyResult\n from authentik.policies.types import PolicyRequest\n+from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\n \n LOGGER = get_logger()\n \n@@ -38,14 +39,17 @@\n \"\"\"Check if password is in HIBP DB. Hashes given Password with SHA1, uses the first 5\n characters of Password in request and checks if full hash is in response. Returns 0\n if Password is not in result otherwise the count of how many times it was used.\"\"\"\n- if self.password_field not in request.context:\n+ password = request.context.get(PLAN_CONTEXT_PROMPT, {}).get(\n+ self.password_field, request.context.get(self.password_field)\n+ )\n+ if not password:\n LOGGER.warning(\n \"Password field not set in Policy Request\",\n field=self.password_field,\n fields=request.context.keys(),\n )\n return PolicyResult(False, _(\"Password not set in context\"))\n- password = str(request.context[self.password_field])\n+ password = str(password)\n \n pw_hash = sha1(password.encode(\"utf-8\")).hexdigest() # nosec\n url = f\"https://api.pwnedpasswords.com/range/{pw_hash[:5]}\"\n", "issue": "Using 'Have-I-been-pwned' policy breaks flows in Authentik 2022.4.1\n**Describe the bug**\r\nUsing a 'Have-I-been-pwned' policy on a password prompt within a flow breaks the flow.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Use Authentik 2022.3.3\r\n2. Use all the default settings/flows, so a clean install\r\n3. Add a have-i-been-pwned policy to the default-password-change flow on the default-password-change-prompt stage.\r\n4. This stage binding has the following settings:\r\n- _Evaluate on plan: True_\r\n- _Re-evaluate policies: False_\r\n- _Invalid respones action: RETRY returns the error message and a similar challenge to the executor._\r\n- _Policy engine mode: ALL, all policies must match to include this stage access._\r\n5. Go to the Flow Overview and Execute flow with current user, see that the have-i-been pwned policy works correctly.\r\n6. Use Authentik 2022.4.1\r\n7. Repeat steps 2 - 5 described above\r\n8. See that you will receive an error message 'Password not set in context'.\r\n\r\n**Expected behavior**\r\nThe password should be checked, and the flow should not crash with the error 'Password not set in context'.\r\n\r\n**Version and Deployment (please complete the following information):**\r\n - authentik version: 2022.4.1\r\n - Deployment: tested both Docker & K8S\r\n\r\n**Additional context**\r\nI repeated these steps multiple times and I keep getting the same issue. 
Therefore I think it is safe to assume that this is a bug introduced in the update from version 2022.3.3 to version 2022.4.1\r\n\n", "before_files": [{"content": "\"\"\"authentik HIBP Models\"\"\"\nfrom hashlib import sha1\n\nfrom django.db import models\nfrom django.utils.translation import gettext as _\nfrom rest_framework.serializers import BaseSerializer\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.policies.models import Policy, PolicyResult\nfrom authentik.policies.types import PolicyRequest\n\nLOGGER = get_logger()\n\n\nclass HaveIBeenPwendPolicy(Policy):\n \"\"\"Check if password is on HaveIBeenPwned's list by uploading the first\n 5 characters of the SHA1 Hash.\"\"\"\n\n password_field = models.TextField(\n default=\"password\",\n help_text=_(\"Field key to check, field keys defined in Prompt stages are available.\"),\n )\n\n allowed_count = models.IntegerField(default=0)\n\n @property\n def serializer(self) -> BaseSerializer:\n from authentik.policies.hibp.api import HaveIBeenPwendPolicySerializer\n\n return HaveIBeenPwendPolicySerializer\n\n @property\n def component(self) -> str:\n return \"ak-policy-hibp-form\"\n\n def passes(self, request: PolicyRequest) -> PolicyResult:\n \"\"\"Check if password is in HIBP DB. Hashes given Password with SHA1, uses the first 5\n characters of Password in request and checks if full hash is in response. Returns 0\n if Password is not in result otherwise the count of how many times it was used.\"\"\"\n if self.password_field not in request.context:\n LOGGER.warning(\n \"Password field not set in Policy Request\",\n field=self.password_field,\n fields=request.context.keys(),\n )\n return PolicyResult(False, _(\"Password not set in context\"))\n password = str(request.context[self.password_field])\n\n pw_hash = sha1(password.encode(\"utf-8\")).hexdigest() # nosec\n url = f\"https://api.pwnedpasswords.com/range/{pw_hash[:5]}\"\n result = get_http_session().get(url).text\n final_count = 0\n for line in result.split(\"\\r\\n\"):\n full_hash, count = line.split(\":\")\n if pw_hash[5:] == full_hash.lower():\n final_count = int(count)\n LOGGER.debug(\"got hibp result\", count=final_count, hash=pw_hash[:5])\n if final_count > self.allowed_count:\n message = _(\"Password exists on %(count)d online lists.\" % {\"count\": final_count})\n return PolicyResult(False, message)\n return PolicyResult(True)\n\n class Meta:\n\n verbose_name = _(\"Have I Been Pwned Policy\")\n verbose_name_plural = _(\"Have I Been Pwned Policies\")\n", "path": "authentik/policies/hibp/models.py"}]}
1,647
347
gh_patches_debug_19797
rasdani/github-patches
git_diff
DataDog__dd-trace-py-4220
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `ddtrace.opentracer` incorrectly raises `SpanContextCorruptedException` on `extract` of missing span context The documentation for `SpanContextCorruptedException` [says](https://opentracing-python.readthedocs.io/en/1.3.0/api.html#opentracing.SpanContextCorruptedException): > SpanContextCorruptedException should be used when the underlying span context state is seemingly present but not well-formed. `ddtrace.opentracer`'s `extract` is throwing an error whenever it fails to recover a span, whether or not it was malformed or simply missing. This completely breaks the normal pattern of "I received an HTTP request, so I'll throw the headers at `extract` and pass the result to `child_of` for my new span, expecting to get `None` and therefore make a new root span if I was called without tracing info". ### Which version of dd-trace-py are you using? Python 3.7 ddtrace 0.46.0 ### How can we reproduce your problem? ```py In [1]: from opentracing import Format In [2]: from ddtrace.opentracer import Tracer In [3]: tracer = Tracer() In [4]: tracer.extract(Format.HTTP_HEADERS, {}) --------------------------------------------------------------------------- SpanContextCorruptedException Traceback (most recent call last) <ipython-input-4-f497fe0c23a2> in <module> ----> 1 tracer.extract(Format.HTTP_HEADERS, {}) ~/projects/granular/analysis/analysis-api/.venv/lib/python3.7/site-packages/ddtrace/opentracer/tracer.py in extract(self, format, carrier) 326 # we have to manually activate the returned context from a distributed 327 # trace --> 328 ot_span_ctx = propagator.extract(carrier) 329 dd_span_ctx = ot_span_ctx._dd_context 330 self._dd_tracer.context_provider.activate(dd_span_ctx) ~/projects/granular/analysis/analysis-api/.venv/lib/python3.7/site-packages/ddtrace/opentracer/propagation/http.py in extract(self, carrier) 70 # if this occurs. 71 if not ddspan_ctx.trace_id: ---> 72 raise SpanContextCorruptedException("failed to extract span context") 73 74 baggage = {} SpanContextCorruptedException: failed to extract span context ``` ### What is the result that you expected? I expect to get a clean `None` with no error if no DataDog span context material was present. See Jaeger: ```py In [1]: from opentracing import Format In [2]: import jaeger_client In [3]: tracer = jaeger_client.Config({"service_name": "foo"}).initialize_tracer() In [4]: tracer.extract(Format.HTTP_HEADERS, {}) In [5]: print(tracer.extract(Format.HTTP_HEADERS, {})) None ``` </issue> <code> [start of ddtrace/opentracer/propagation/http.py] 1 from typing import Dict 2 3 from opentracing import InvalidCarrierException 4 from opentracing import SpanContextCorruptedException 5 6 from ddtrace.propagation.http import HTTPPropagator as DDHTTPPropagator 7 8 from ...internal.logger import get_logger 9 from ..span_context import SpanContext 10 from .propagator import Propagator 11 12 13 log = get_logger(__name__) 14 15 HTTP_BAGGAGE_PREFIX = "ot-baggage-" 16 HTTP_BAGGAGE_PREFIX_LEN = len(HTTP_BAGGAGE_PREFIX) 17 18 19 class HTTPPropagator(Propagator): 20 """OpenTracing compatible HTTP_HEADER and TEXT_MAP format propagator. 21 22 `HTTPPropagator` provides compatibility by using existing OpenTracing 23 compatible methods from the ddtracer along with new logic supporting the 24 outstanding OpenTracing-defined functionality. 
25 """ 26 27 @staticmethod 28 def inject(span_context, carrier): 29 # type: (SpanContext, Dict[str, str]) -> None 30 """Inject a span context into a carrier. 31 32 *span_context* is injected into the carrier by first using an 33 :class:`ddtrace.propagation.http.HTTPPropagator` to inject the ddtracer 34 specific fields. 35 36 Then the baggage is injected into *carrier*. 37 38 :param span_context: span context to inject. 39 40 :param carrier: carrier to inject into. 41 """ 42 if not isinstance(carrier, dict): 43 raise InvalidCarrierException("propagator expects carrier to be a dict") 44 45 DDHTTPPropagator.inject(span_context._dd_context, carrier) 46 47 # Add the baggage 48 if span_context.baggage is not None: 49 for key in span_context.baggage: 50 carrier[HTTP_BAGGAGE_PREFIX + key] = span_context.baggage[key] 51 52 @staticmethod 53 def extract(carrier): 54 # type: (Dict[str, str]) -> SpanContext 55 """Extract a span context from a carrier. 56 57 :class:`ddtrace.propagation.http.HTTPPropagator` is used to extract 58 ddtracer supported fields into a `ddtrace.Context` context which is 59 combined with new logic to extract the baggage which is returned in an 60 OpenTracing compatible span context. 61 62 :param carrier: carrier to extract from. 63 64 :return: extracted span context. 65 """ 66 if not isinstance(carrier, dict): 67 raise InvalidCarrierException("propagator expects carrier to be a dict") 68 69 ddspan_ctx = DDHTTPPropagator.extract(carrier) 70 71 # if the dd propagator fails then it will return a new empty span 72 # context (with trace_id=None), we however want to raise an exception 73 # if this occurs. 74 if not ddspan_ctx.trace_id: 75 raise SpanContextCorruptedException("failed to extract span context") 76 77 baggage = {} 78 for key in carrier: 79 if key.startswith(HTTP_BAGGAGE_PREFIX): 80 baggage[key[HTTP_BAGGAGE_PREFIX_LEN:]] = carrier[key] 81 82 return SpanContext(ddcontext=ddspan_ctx, baggage=baggage) 83 [end of ddtrace/opentracer/propagation/http.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/opentracer/propagation/http.py b/ddtrace/opentracer/propagation/http.py --- a/ddtrace/opentracer/propagation/http.py +++ b/ddtrace/opentracer/propagation/http.py @@ -1,7 +1,6 @@ from typing import Dict from opentracing import InvalidCarrierException -from opentracing import SpanContextCorruptedException from ddtrace.propagation.http import HTTPPropagator as DDHTTPPropagator @@ -67,13 +66,6 @@ raise InvalidCarrierException("propagator expects carrier to be a dict") ddspan_ctx = DDHTTPPropagator.extract(carrier) - - # if the dd propagator fails then it will return a new empty span - # context (with trace_id=None), we however want to raise an exception - # if this occurs. - if not ddspan_ctx.trace_id: - raise SpanContextCorruptedException("failed to extract span context") - baggage = {} for key in carrier: if key.startswith(HTTP_BAGGAGE_PREFIX):
{"golden_diff": "diff --git a/ddtrace/opentracer/propagation/http.py b/ddtrace/opentracer/propagation/http.py\n--- a/ddtrace/opentracer/propagation/http.py\n+++ b/ddtrace/opentracer/propagation/http.py\n@@ -1,7 +1,6 @@\n from typing import Dict\n \n from opentracing import InvalidCarrierException\n-from opentracing import SpanContextCorruptedException\n \n from ddtrace.propagation.http import HTTPPropagator as DDHTTPPropagator\n \n@@ -67,13 +66,6 @@\n raise InvalidCarrierException(\"propagator expects carrier to be a dict\")\n \n ddspan_ctx = DDHTTPPropagator.extract(carrier)\n-\n- # if the dd propagator fails then it will return a new empty span\n- # context (with trace_id=None), we however want to raise an exception\n- # if this occurs.\n- if not ddspan_ctx.trace_id:\n- raise SpanContextCorruptedException(\"failed to extract span context\")\n-\n baggage = {}\n for key in carrier:\n if key.startswith(HTTP_BAGGAGE_PREFIX):\n", "issue": "`ddtrace.opentracer` incorrectly raises `SpanContextCorruptedException` on `extract` of missing span context\nThe documentation for `SpanContextCorruptedException` [says](https://opentracing-python.readthedocs.io/en/1.3.0/api.html#opentracing.SpanContextCorruptedException):\r\n\r\n> SpanContextCorruptedException should be used when the underlying span context state is seemingly present but not well-formed.\r\n\r\n`ddtrace.opentracer`'s `extract` is throwing an error whenever it fails to recover a span, whether or not it was malformed or simply missing. This completely breaks the normal pattern of \"I received an HTTP request, so I'll throw the headers at `extract` and pass the result to `child_of` for my new span, expecting to get `None` and therefore make a new root span if I was called without tracing info\".\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\nPython 3.7\r\nddtrace 0.46.0\r\n\r\n### How can we reproduce your problem?\r\n\r\n```py\r\nIn [1]: from opentracing import Format\r\n\r\nIn [2]: from ddtrace.opentracer import Tracer\r\n\r\nIn [3]: tracer = Tracer()\r\n\r\nIn [4]: tracer.extract(Format.HTTP_HEADERS, {})\r\n---------------------------------------------------------------------------\r\nSpanContextCorruptedException Traceback (most recent call last)\r\n<ipython-input-4-f497fe0c23a2> in <module>\r\n----> 1 tracer.extract(Format.HTTP_HEADERS, {})\r\n\r\n~/projects/granular/analysis/analysis-api/.venv/lib/python3.7/site-packages/ddtrace/opentracer/tracer.py in extract(self, format, carrier)\r\n 326 # we have to manually activate the returned context from a distributed\r\n 327 # trace\r\n--> 328 ot_span_ctx = propagator.extract(carrier)\r\n 329 dd_span_ctx = ot_span_ctx._dd_context\r\n 330 self._dd_tracer.context_provider.activate(dd_span_ctx)\r\n\r\n~/projects/granular/analysis/analysis-api/.venv/lib/python3.7/site-packages/ddtrace/opentracer/propagation/http.py in extract(self, carrier)\r\n 70 # if this occurs.\r\n 71 if not ddspan_ctx.trace_id:\r\n---> 72 raise SpanContextCorruptedException(\"failed to extract span context\")\r\n 73 \r\n 74 baggage = {}\r\n\r\nSpanContextCorruptedException: failed to extract span context\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\nI expect to get a clean `None` with no error if no DataDog span context material was present. 
See Jaeger:\r\n\r\n```py\r\nIn [1]: from opentracing import Format\r\n\r\nIn [2]: import jaeger_client\r\n\r\nIn [3]: tracer = jaeger_client.Config({\"service_name\": \"foo\"}).initialize_tracer()\r\n\r\nIn [4]: tracer.extract(Format.HTTP_HEADERS, {})\r\n\r\nIn [5]: print(tracer.extract(Format.HTTP_HEADERS, {}))\r\nNone\r\n```\r\n\n", "before_files": [{"content": "from typing import Dict\n\nfrom opentracing import InvalidCarrierException\nfrom opentracing import SpanContextCorruptedException\n\nfrom ddtrace.propagation.http import HTTPPropagator as DDHTTPPropagator\n\nfrom ...internal.logger import get_logger\nfrom ..span_context import SpanContext\nfrom .propagator import Propagator\n\n\nlog = get_logger(__name__)\n\nHTTP_BAGGAGE_PREFIX = \"ot-baggage-\"\nHTTP_BAGGAGE_PREFIX_LEN = len(HTTP_BAGGAGE_PREFIX)\n\n\nclass HTTPPropagator(Propagator):\n \"\"\"OpenTracing compatible HTTP_HEADER and TEXT_MAP format propagator.\n\n `HTTPPropagator` provides compatibility by using existing OpenTracing\n compatible methods from the ddtracer along with new logic supporting the\n outstanding OpenTracing-defined functionality.\n \"\"\"\n\n @staticmethod\n def inject(span_context, carrier):\n # type: (SpanContext, Dict[str, str]) -> None\n \"\"\"Inject a span context into a carrier.\n\n *span_context* is injected into the carrier by first using an\n :class:`ddtrace.propagation.http.HTTPPropagator` to inject the ddtracer\n specific fields.\n\n Then the baggage is injected into *carrier*.\n\n :param span_context: span context to inject.\n\n :param carrier: carrier to inject into.\n \"\"\"\n if not isinstance(carrier, dict):\n raise InvalidCarrierException(\"propagator expects carrier to be a dict\")\n\n DDHTTPPropagator.inject(span_context._dd_context, carrier)\n\n # Add the baggage\n if span_context.baggage is not None:\n for key in span_context.baggage:\n carrier[HTTP_BAGGAGE_PREFIX + key] = span_context.baggage[key]\n\n @staticmethod\n def extract(carrier):\n # type: (Dict[str, str]) -> SpanContext\n \"\"\"Extract a span context from a carrier.\n\n :class:`ddtrace.propagation.http.HTTPPropagator` is used to extract\n ddtracer supported fields into a `ddtrace.Context` context which is\n combined with new logic to extract the baggage which is returned in an\n OpenTracing compatible span context.\n\n :param carrier: carrier to extract from.\n\n :return: extracted span context.\n \"\"\"\n if not isinstance(carrier, dict):\n raise InvalidCarrierException(\"propagator expects carrier to be a dict\")\n\n ddspan_ctx = DDHTTPPropagator.extract(carrier)\n\n # if the dd propagator fails then it will return a new empty span\n # context (with trace_id=None), we however want to raise an exception\n # if this occurs.\n if not ddspan_ctx.trace_id:\n raise SpanContextCorruptedException(\"failed to extract span context\")\n\n baggage = {}\n for key in carrier:\n if key.startswith(HTTP_BAGGAGE_PREFIX):\n baggage[key[HTTP_BAGGAGE_PREFIX_LEN:]] = carrier[key]\n\n return SpanContext(ddcontext=ddspan_ctx, baggage=baggage)\n", "path": "ddtrace/opentracer/propagation/http.py"}]}
2024
241
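The behavioral change in the ddtrace golden diff above, sketched as a usage example; this assumes `ddtrace` at or above the patched version and `opentracing` installed, and treats an empty carrier as the "no inbound trace" case:

```python
from opentracing import Format
from ddtrace.opentracer import Tracer

tracer = Tracer()

# Before the patch this raised SpanContextCorruptedException; after it,
# extracting from an empty carrier returns an OpenTracing SpanContext
# whose underlying Datadog context has no trace_id.
ctx = tracer.extract(Format.HTTP_HEADERS, {})

# The usual server-side pattern then works unchanged: with no inbound
# trace material, the new span starts a fresh trace.
with tracer.start_span("handle_request", child_of=ctx) as span:
    span.set_tag("note", "no parent context was propagated")
```

Note the patched `extract` still differs from Jaeger's, which returns `None` outright; a caller relying on an explicit `None` check would need to probe `trace_id` itself.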
gh_patches_debug_15487
rasdani/github-patches
git_diff
mkdocs__mkdocs-3511
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Building MkDocs' documentation When I began working on #3493 I needed to be able to run the dev server with MkDocs' own documentation, which was more difficult than it should have been. First, let me say that I always work from a venv, and created a new one to start my work. In the past, one could simply do `pip install -r requirements/docs.txt` and then `mkdocs serve` and it worked. But I had to jump through a lot of hoops and learn new tools to get things working this time. To be clear, I am not suggesting that the old tools should be brought back. Nor am I suggesting that my preferred tools be used. However, I could find no documentation about how to proceed. Eventually, I did find documentation that hatch is used for tests, and looking at `pyproject.toml` I could see a config for a hatch `docs` env. But not having ever used that tool before, it took be multiple tries (and searches) to work out how to even use that env. But, I'm getting ahead of myself... After realizing that there were no requirements.txt files, I next looked for optional dependencies defined in `pyproject.toml`. I was hoping to maybe do `pip install .[docs]` (`.` rather than `markdown` because I was working from the working tree of the git repo). When I determined that that wasn't an option, I began looking into the hatch options. Finally in some random question in some forum I found an explanation of how to run the `shell` subcommand with an alternate env: `hatch -e docs shell`. And then I could finally run `mkdocs serve`. Except that I got an error about missing `po` files, which is weird because I am using English, which should work without any translations being defined. Finally, after generating `po` and `mo` files, I could run the dev server and begin my work. All of this led me to believe that the current maintainers are not ever running the dev server with MkDocs documentation. And I also could not find any automations for deploying the documentation, so I couldn't even use that as a reference. Again, I am not being critical of the tool choices. I see that a switch was made from tox to hatch. That's fine if that is the tool that the maintainers what to use. But for an occasional contributor, I would prefer to not need to learn these tools. I would prefer to be able to use standard Python tools that work with any project. Or if I do need to use your tool of choice, then I would expect the specific commands I would need to use to all be documented clearly. I could submit a PR which updated the documentation, but I'm not sure what the recommended best practices are here. I am simply bringing this to the attention of the maintainers with the hopes that more consideration will be given to this in the future. 
</issue> <code> [start of docs/hooks.py] 1 import re 2 from pathlib import Path 3 4 from mkdocs.config.defaults import MkDocsConfig 5 from mkdocs.structure.nav import Page 6 7 8 def _get_language_of_translation_file(path: Path) -> str: 9 with path.open(encoding='utf-8') as f: 10 translation_line = f.readline() 11 m = re.search('^# (.+) translations ', translation_line) 12 assert m 13 return m[1] 14 15 16 def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs): 17 if page.file.src_uri == 'user-guide/choosing-your-theme.md': 18 here = Path(config.config_file_path).parent 19 20 def replacement(m: re.Match) -> str: 21 lines = [] 22 for d in sorted(here.glob(m[2])): 23 lang = _get_language_of_translation_file(Path(d, 'LC_MESSAGES', 'messages.po')) 24 lines.append(f'{m[1]}`{d.name}`: {lang}') 25 return '\n'.join(lines) 26 27 return re.sub( 28 r'^( *\* )\(see the list of existing directories `(.+)`\)$', 29 replacement, 30 markdown, 31 flags=re.MULTILINE, 32 ) 33 [end of docs/hooks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/hooks.py b/docs/hooks.py --- a/docs/hooks.py +++ b/docs/hooks.py @@ -1,8 +1,12 @@ +from __future__ import annotations + import re from pathlib import Path +from typing import TYPE_CHECKING -from mkdocs.config.defaults import MkDocsConfig -from mkdocs.structure.nav import Page +if TYPE_CHECKING: + from mkdocs.config.defaults import MkDocsConfig + from mkdocs.structure.nav import Page def _get_language_of_translation_file(path: Path) -> str: @@ -13,7 +17,7 @@ return m[1] -def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs): +def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs) -> str | None: if page.file.src_uri == 'user-guide/choosing-your-theme.md': here = Path(config.config_file_path).parent
{"golden_diff": "diff --git a/docs/hooks.py b/docs/hooks.py\n--- a/docs/hooks.py\n+++ b/docs/hooks.py\n@@ -1,8 +1,12 @@\n+from __future__ import annotations\n+\n import re\n from pathlib import Path\n+from typing import TYPE_CHECKING\n \n-from mkdocs.config.defaults import MkDocsConfig\n-from mkdocs.structure.nav import Page\n+if TYPE_CHECKING:\n+ from mkdocs.config.defaults import MkDocsConfig\n+ from mkdocs.structure.nav import Page\n \n \n def _get_language_of_translation_file(path: Path) -> str:\n@@ -13,7 +17,7 @@\n return m[1]\n \n \n-def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs):\n+def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs) -> str | None:\n if page.file.src_uri == 'user-guide/choosing-your-theme.md':\n here = Path(config.config_file_path).parent\n", "issue": "Building MkDocs' documentation\nWhen I began working on #3493 I needed to be able to run the dev server with MkDocs' own documentation, which was more difficult than it should have been.\r\n\r\nFirst, let me say that I always work from a venv, and created a new one to start my work. In the past, one could simply do `pip install -r requirements/docs.txt` and then `mkdocs serve` and it worked. But I had to jump through a lot of hoops and learn new tools to get things working this time.\r\n\r\nTo be clear, I am not suggesting that the old tools should be brought back. Nor am I suggesting that my preferred tools be used. However, I could find no documentation about how to proceed. Eventually, I did find documentation that hatch is used for tests, and looking at `pyproject.toml` I could see a config for a hatch `docs` env. But not having ever used that tool before, it took be multiple tries (and searches) to work out how to even use that env. But, I'm getting ahead of myself...\r\n\r\nAfter realizing that there were no requirements.txt files, I next looked for optional dependencies defined in `pyproject.toml`. I was hoping to maybe do `pip install .[docs]` (`.` rather than `markdown` because I was working from the working tree of the git repo). When I determined that that wasn't an option, I began looking into the hatch options. Finally in some random question in some forum I found an explanation of how to run the `shell` subcommand with an alternate env: `hatch -e docs shell`.\r\n\r\nAnd then I could finally run `mkdocs serve`. Except that I got an error about missing `po` files, which is weird because I am using English, which should work without any translations being defined. Finally, after generating `po` and `mo` files, I could run the dev server and begin my work.\r\n\r\nAll of this led me to believe that the current maintainers are not ever running the dev server with MkDocs documentation. And I also could not find any automations for deploying the documentation, so I couldn't even use that as a reference.\r\n\r\nAgain, I am not being critical of the tool choices. I see that a switch was made from tox to hatch. That's fine if that is the tool that the maintainers what to use. But for an occasional contributor, I would prefer to not need to learn these tools. I would prefer to be able to use standard Python tools that work with any project. Or if I do need to use your tool of choice, then I would expect the specific commands I would need to use to all be documented clearly.\r\n\r\nI could submit a PR which updated the documentation, but I'm not sure what the recommended best practices are here. 
I am simply bringing this to the attention of the maintainers with the hopes that more consideration will be given to this in the future.\n", "before_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom mkdocs.config.defaults import MkDocsConfig\nfrom mkdocs.structure.nav import Page\n\n\ndef _get_language_of_translation_file(path: Path) -> str:\n with path.open(encoding='utf-8') as f:\n translation_line = f.readline()\n m = re.search('^# (.+) translations ', translation_line)\n assert m\n return m[1]\n\n\ndef on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs):\n if page.file.src_uri == 'user-guide/choosing-your-theme.md':\n here = Path(config.config_file_path).parent\n\n def replacement(m: re.Match) -> str:\n lines = []\n for d in sorted(here.glob(m[2])):\n lang = _get_language_of_translation_file(Path(d, 'LC_MESSAGES', 'messages.po'))\n lines.append(f'{m[1]}`{d.name}`: {lang}')\n return '\\n'.join(lines)\n\n return re.sub(\n r'^( *\\* )\\(see the list of existing directories `(.+)`\\)$',\n replacement,\n markdown,\n flags=re.MULTILINE,\n )\n", "path": "docs/hooks.py"}]}
1454
221
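The mkdocs golden diff above is an instance of the standard type-checking-only import pattern. A minimal self-contained sketch of that pattern, with a trivial `return markdown` body standing in for the real hook logic:

```python
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Resolved only by static type checkers; never imported at runtime,
    # so the hook module stays importable without these dependencies.
    from mkdocs.config.defaults import MkDocsConfig
    from mkdocs.structure.nav import Page


def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs) -> str | None:
    # With `from __future__ import annotations`, the annotations above are
    # plain strings at runtime, so the guarded imports are sufficient.
    return markdown
```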
gh_patches_debug_38652
rasdani/github-patches
git_diff
sagemath__sage-37422
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> make sage.parallel.ncpus.ncpus() use os.cpu_count() <div id="comment:0"></div> Currently, `sage.parallel.ncpus.ncpus()` uses platform-specific code to determine the number of available CPUs for some specific systems. This functionality is now available in the standard `os` module as `cpu_count()`. Component: **misc** Author: **Lorenz Panny** Branch/Commit: **[public/change_ncpus_to_os_module](https://github.com/sagemath/sagetrac-mirror/tree/public/change_ncpus_to_os_module) @ [`a509210`](https://github.com/sagemath/sagetrac-mirror/commit/a509210125fc50baf72dcb7f2248e96cddf61c8f)** _Issue created by migration from https://trac.sagemath.org/ticket/34328_ </issue> <code> [start of src/sage/parallel/ncpus.py] 1 """ 2 CPU Detection 3 """ 4 # Parallel Python Software: http://www.parallelpython.com 5 # Copyright (c) 2005-2008, Vitalii Vanovschi 6 # All rights reserved. 7 # Redistribution and use in source and binary forms, with or without 8 # modification, are permitted provided that the following conditions are met: 9 # * Redistributions of source code must retain the above copyright notice, 10 # this list of conditions and the following disclaimer. 11 # * Redistributions in binary form must reproduce the above copyright 12 # notice, this list of conditions and the following disclaimer in the 13 # documentation and/or other materials provided with the distribution. 14 # * Neither the name of the author nor the names of its contributors 15 # may be used to endorse or promote products derived from this software 16 # without specific prior written permission. 17 # 18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 # THE POSSIBILITY OF SUCH DAMAGE. 29 30 ###### 31 # This is from ParallelPython (the pp.py file). 32 33 import os 34 import subprocess 35 36 37 def ncpus(): 38 """ 39 Detects the number of effective CPUs in the system. 40 41 EXAMPLES:: 42 43 sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine. 
44 2 45 """ 46 # Support Sage environment variable SAGE_NUM_THREADS 47 # NOTE: while doctesting, this is forced to be 2 by the 48 # sage-runtests script 49 try: 50 n = os.environ["SAGE_NUM_THREADS"] 51 except KeyError: 52 pass 53 else: 54 return int(n) 55 56 # for Linux, Unix and MacOS 57 if hasattr(os, "sysconf"): 58 if "SC_NPROCESSORS_ONLN" in os.sysconf_names: 59 # Linux and Unix 60 ncpus = os.sysconf("SC_NPROCESSORS_ONLN") 61 if isinstance(ncpus, int) and ncpus > 0: 62 return ncpus 63 else: 64 # MacOS X 65 # deprecated: return int(os.popen2("sysctl -n hw.ncpu")[1].read()) 66 process = subprocess.Popen("sysctl -n hw.ncpu", shell=True, 67 stdin=subprocess.PIPE, 68 stdout=subprocess.PIPE, 69 stderr=subprocess.PIPE, close_fds=True) 70 return int(process.stdout.read()) 71 # for Windows 72 if "NUMBER_OF_PROCESSORS" in os.environ: 73 ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]) 74 if ncpus > 0: 75 return ncpus 76 # return the default value 77 return 1 78 [end of src/sage/parallel/ncpus.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/sage/parallel/ncpus.py b/src/sage/parallel/ncpus.py --- a/src/sage/parallel/ncpus.py +++ b/src/sage/parallel/ncpus.py @@ -1,46 +1,19 @@ """ CPU Detection """ -# Parallel Python Software: http://www.parallelpython.com -# Copyright (c) 2005-2008, Vitalii Vanovschi -# All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the author nor the names of its contributors -# may be used to endorse or promote products derived from this software -# without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -# THE POSSIBILITY OF SUCH DAMAGE. - -###### -# This is from ParallelPython (the pp.py file). import os -import subprocess def ncpus(): """ - Detects the number of effective CPUs in the system. + Return the number of available CPUs in the system. + + ALGORITHM: :func:`os.sched_getaffinity` or :func:`os.cpu_count` EXAMPLES:: - sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine. + sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine 2 """ # Support Sage environment variable SAGE_NUM_THREADS @@ -53,25 +26,9 @@ else: return int(n) - # for Linux, Unix and MacOS - if hasattr(os, "sysconf"): - if "SC_NPROCESSORS_ONLN" in os.sysconf_names: - # Linux and Unix - ncpus = os.sysconf("SC_NPROCESSORS_ONLN") - if isinstance(ncpus, int) and ncpus > 0: - return ncpus - else: - # MacOS X - # deprecated: return int(os.popen2("sysctl -n hw.ncpu")[1].read()) - process = subprocess.Popen("sysctl -n hw.ncpu", shell=True, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, close_fds=True) - return int(process.stdout.read()) - # for Windows - if "NUMBER_OF_PROCESSORS" in os.environ: - ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]) - if ncpus > 0: - return ncpus - # return the default value - return 1 + n = None + + if hasattr(os, 'sched_getaffinity'): + n = len(os.sched_getaffinity(0)) + + return n or os.cpu_count() or 1
{"golden_diff": "diff --git a/src/sage/parallel/ncpus.py b/src/sage/parallel/ncpus.py\n--- a/src/sage/parallel/ncpus.py\n+++ b/src/sage/parallel/ncpus.py\n@@ -1,46 +1,19 @@\n \"\"\"\n CPU Detection\n \"\"\"\n-# Parallel Python Software: http://www.parallelpython.com\n-# Copyright (c) 2005-2008, Vitalii Vanovschi\n-# All rights reserved.\n-# Redistribution and use in source and binary forms, with or without\n-# modification, are permitted provided that the following conditions are met:\n-# * Redistributions of source code must retain the above copyright notice,\n-# this list of conditions and the following disclaimer.\n-# * Redistributions in binary form must reproduce the above copyright\n-# notice, this list of conditions and the following disclaimer in the\n-# documentation and/or other materials provided with the distribution.\n-# * Neither the name of the author nor the names of its contributors\n-# may be used to endorse or promote products derived from this software\n-# without specific prior written permission.\n-#\n-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n-# THE POSSIBILITY OF SUCH DAMAGE.\n-\n-######\n-# This is from ParallelPython (the pp.py file).\n \n import os\n-import subprocess\n \n \n def ncpus():\n \"\"\"\n- Detects the number of effective CPUs in the system.\n+ Return the number of available CPUs in the system.\n+\n+ ALGORITHM: :func:`os.sched_getaffinity` or :func:`os.cpu_count`\n \n EXAMPLES::\n \n- sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.\n+ sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine\n 2\n \"\"\"\n # Support Sage environment variable SAGE_NUM_THREADS\n@@ -53,25 +26,9 @@\n else:\n return int(n)\n \n- # for Linux, Unix and MacOS\n- if hasattr(os, \"sysconf\"):\n- if \"SC_NPROCESSORS_ONLN\" in os.sysconf_names:\n- # Linux and Unix\n- ncpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n- if isinstance(ncpus, int) and ncpus > 0:\n- return ncpus\n- else:\n- # MacOS X\n- # deprecated: return int(os.popen2(\"sysctl -n hw.ncpu\")[1].read())\n- process = subprocess.Popen(\"sysctl -n hw.ncpu\", shell=True,\n- stdin=subprocess.PIPE,\n- stdout=subprocess.PIPE,\n- stderr=subprocess.PIPE, close_fds=True)\n- return int(process.stdout.read())\n- # for Windows\n- if \"NUMBER_OF_PROCESSORS\" in os.environ:\n- ncpus = int(os.environ[\"NUMBER_OF_PROCESSORS\"])\n- if ncpus > 0:\n- return ncpus\n- # return the default value\n- return 1\n+ n = None\n+\n+ if hasattr(os, 'sched_getaffinity'):\n+ n = len(os.sched_getaffinity(0))\n+\n+ return n or os.cpu_count() or 1\n", "issue": "make sage.parallel.ncpus.ncpus() use os.cpu_count()\n<div id=\"comment:0\"></div>\n\nCurrently, `sage.parallel.ncpus.ncpus()` uses platform-specific code to determine the number of available CPUs for some specific systems. 
This functionality is now available in the standard `os` module as `cpu_count()`.\n\nComponent: **misc**\n\nAuthor: **Lorenz Panny**\n\nBranch/Commit: **[public/change_ncpus_to_os_module](https://github.com/sagemath/sagetrac-mirror/tree/public/change_ncpus_to_os_module) @ [`a509210`](https://github.com/sagemath/sagetrac-mirror/commit/a509210125fc50baf72dcb7f2248e96cddf61c8f)**\n\n_Issue created by migration from https://trac.sagemath.org/ticket/34328_\n\n\n", "before_files": [{"content": "\"\"\"\nCPU Detection\n\"\"\"\n# Parallel Python Software: http://www.parallelpython.com\n# Copyright (c) 2005-2008, Vitalii Vanovschi\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the author nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n\n######\n# This is from ParallelPython (the pp.py file).\n\nimport os\nimport subprocess\n\n\ndef ncpus():\n \"\"\"\n Detects the number of effective CPUs in the system.\n\n EXAMPLES::\n\n sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.\n 2\n \"\"\"\n # Support Sage environment variable SAGE_NUM_THREADS\n # NOTE: while doctesting, this is forced to be 2 by the\n # sage-runtests script\n try:\n n = os.environ[\"SAGE_NUM_THREADS\"]\n except KeyError:\n pass\n else:\n return int(n)\n\n # for Linux, Unix and MacOS\n if hasattr(os, \"sysconf\"):\n if \"SC_NPROCESSORS_ONLN\" in os.sysconf_names:\n # Linux and Unix\n ncpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n if isinstance(ncpus, int) and ncpus > 0:\n return ncpus\n else:\n # MacOS X\n # deprecated: return int(os.popen2(\"sysctl -n hw.ncpu\")[1].read())\n process = subprocess.Popen(\"sysctl -n hw.ncpu\", shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n return int(process.stdout.read())\n # for Windows\n if \"NUMBER_OF_PROCESSORS\" in os.environ:\n ncpus = int(os.environ[\"NUMBER_OF_PROCESSORS\"])\n if ncpus > 0:\n return ncpus\n # return the default value\n return 1\n", "path": "src/sage/parallel/ncpus.py"}]}
1583
868
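Condensed, the replacement logic in the sage golden diff is as follows; this mirrors the diff, with the `SAGE_NUM_THREADS` environment override that the full function keeps omitted for brevity:

```python
import os


def ncpus() -> int:
    # Prefer the CPUs actually available to this process where the platform
    # exposes a scheduler affinity mask (Linux); otherwise fall back to the
    # total logical CPU count, and to 1 if even that is unknown.
    n = None
    if hasattr(os, "sched_getaffinity"):
        n = len(os.sched_getaffinity(0))
    return n or os.cpu_count() or 1
```

Both calls are standard library (`os.sched_getaffinity` since Python 3.3, `os.cpu_count` since 3.4), which is exactly the point of the ticket.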
gh_patches_debug_38751
rasdani/github-patches
git_diff
kserve__kserve-116
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Python Model download for GCS and S3 Downloading from GCS and S3 needs to be completed. https://github.com/kubeflow/kfserving/blob/2f8d33d1a9773c5694a22ba749192163251fe287/python/kfserving/kfserving/storage.py#L27-L33 </issue> <code> [start of python/kfserving/kfserving/storage.py] 1 # Copyright 2019 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import logging 16 import tempfile 17 import os 18 19 _GCS_PREFIX = "gs://" 20 _S3_PREFIX = "s3://" 21 _LOCAL_PREFIX = "file://" 22 23 24 class Storage(object): 25 @staticmethod 26 def download(uri: str) -> str: 27 logging.info("Copying contents of %s to local" % uri) 28 if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri): 29 return Storage._download_local(uri) 30 31 temp_dir = tempfile.mkdtemp() 32 if uri.startswith(_GCS_PREFIX): 33 Storage._download_gcs(uri, temp_dir) 34 elif uri.startswith(_S3_PREFIX): 35 Storage._download_s3(uri, temp_dir) 36 else: 37 raise Exception("Cannot recognize storage type for " + uri + 38 "\n'%s', '%s', and '%s' are the current available storage type." % 39 (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX)) 40 41 logging.info("Successfully copied %s to %s" % (uri, temp_dir)) 42 return temp_dir 43 44 @staticmethod 45 def _download_s3(uri, temp_dir: str): 46 raise NotImplementedError 47 48 @staticmethod 49 def _download_gcs(uri, temp_dir: str): 50 raise NotImplementedError 51 52 @staticmethod 53 def _download_local(uri): 54 local_path = uri.replace(_LOCAL_PREFIX, "", 1) 55 if not os.path.exists(local_path): 56 raise Exception("Local path %s does not exist." % (uri)) 57 return local_path 58 [end of python/kfserving/kfserving/storage.py] [start of python/kfserving/setup.py] 1 # Copyright 2019 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 from setuptools import setup, find_packages 16 17 tests_require = [ 18 'pytest', 19 'pytest-tornasync', 20 'mypy' 21 ] 22 23 setup( 24 name='kfserver', 25 version='0.1.0', 26 author_email='[email protected]', 27 license='../../LICENSE.txt', 28 url='https://github.com/kubeflow/kfserving/python/kfserving/kfserving', 29 description='Model Server for arbitrary python ML frameworks.', 30 long_description=open('README.md').read(), 31 python_requires='>3.4', 32 packages=find_packages("kfserving"), 33 install_requires=[ 34 "tornado >= 1.4.1", 35 "argparse >= 1.4.0", 36 "numpy" 37 ], 38 tests_require=tests_require, 39 extras_require={'test': tests_require} 40 ) 41 [end of python/kfserving/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py --- a/python/kfserving/kfserving/storage.py +++ b/python/kfserving/kfserving/storage.py @@ -15,6 +15,10 @@ import logging import tempfile import os +import re +from minio import Minio +from google.cloud import storage +from google.auth import exceptions _GCS_PREFIX = "gs://" _S3_PREFIX = "s3://" @@ -43,11 +47,36 @@ @staticmethod def _download_s3(uri, temp_dir: str): - raise NotImplementedError + client = Storage._create_minio_client() + bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1) + bucket_name = bucket_args[0] + bucket_path = bucket_args[1] if len(bucket_args) > 1 else "" + objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True) + for obj in objects: + # Replace any prefix from the object key with temp_dir + subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/") + client.fget_object(bucket_name, obj.object_name, os.path.join(temp_dir, subdir_object_key)) @staticmethod def _download_gcs(uri, temp_dir: str): - raise NotImplementedError + try: + storage_client = storage.Client() + except exceptions.DefaultCredentialsError as e: + storage_client = storage.Client.create_anonymous_client() + bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1) + bucket_name = bucket_args[0] + bucket_path = bucket_args[1] if len(bucket_args) > 1 else "" + bucket = storage_client.bucket(bucket_name) + blobs = bucket.list_blobs(prefix=bucket_path) + for blob in blobs: + # Replace any prefix from the object key with temp_dir + subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/") + # Create necessary subdirectory to store the object locally + if "/" in subdir_object_key: + local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0]) + if not os.path.isdir(local_object_dir): + os.makedirs(local_object_dir, exist_ok=True) + blob.download_to_filename(os.path.join(temp_dir, subdir_object_key)) @staticmethod def _download_local(uri): @@ -55,3 +84,13 @@ if not os.path.exists(local_path): raise Exception("Local path %s does not exist." % (uri)) return local_path + + @staticmethod + def _create_minio_client(): + # Remove possible http scheme for Minio + url = re.compile(r"https?://") + minioClient = Minio(url.sub("", os.getenv("S3_ENDPOINT", "")), + access_key=os.getenv("AWS_ACCESS_KEY_ID", ""), + secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""), + secure=True) + return minioClient diff --git a/python/kfserving/setup.py b/python/kfserving/setup.py --- a/python/kfserving/setup.py +++ b/python/kfserving/setup.py @@ -33,6 +33,8 @@ install_requires=[ "tornado >= 1.4.1", "argparse >= 1.4.0", + "minio >= 4.0.9", + "google-cloud-storage >= 1.16.0", "numpy" ], tests_require=tests_require,
{"golden_diff": "diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py\n--- a/python/kfserving/kfserving/storage.py\n+++ b/python/kfserving/kfserving/storage.py\n@@ -15,6 +15,10 @@\n import logging\n import tempfile\n import os\n+import re\n+from minio import Minio\n+from google.cloud import storage\n+from google.auth import exceptions\n \n _GCS_PREFIX = \"gs://\"\n _S3_PREFIX = \"s3://\"\n@@ -43,11 +47,36 @@\n \n @staticmethod\n def _download_s3(uri, temp_dir: str):\n- raise NotImplementedError\n+ client = Storage._create_minio_client()\n+ bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n+ bucket_name = bucket_args[0]\n+ bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n+ objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n+ for obj in objects:\n+ # Replace any prefix from the object key with temp_dir\n+ subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n+ client.fget_object(bucket_name, obj.object_name, os.path.join(temp_dir, subdir_object_key))\n \n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n- raise NotImplementedError\n+ try:\n+ storage_client = storage.Client()\n+ except exceptions.DefaultCredentialsError as e:\n+ storage_client = storage.Client.create_anonymous_client()\n+ bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n+ bucket_name = bucket_args[0]\n+ bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n+ bucket = storage_client.bucket(bucket_name)\n+ blobs = bucket.list_blobs(prefix=bucket_path)\n+ for blob in blobs:\n+ # Replace any prefix from the object key with temp_dir\n+ subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n+ # Create necessary subdirectory to store the object locally\n+ if \"/\" in subdir_object_key:\n+ local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n+ if not os.path.isdir(local_object_dir):\n+ os.makedirs(local_object_dir, exist_ok=True)\n+ blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))\n \n @staticmethod\n def _download_local(uri):\n@@ -55,3 +84,13 @@\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return local_path\n+\n+ @staticmethod\n+ def _create_minio_client():\n+ # Remove possible http scheme for Minio\n+ url = re.compile(r\"https?://\")\n+ minioClient = Minio(url.sub(\"\", os.getenv(\"S3_ENDPOINT\", \"\")),\n+ access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n+ secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n+ secure=True)\n+ return minioClient\ndiff --git a/python/kfserving/setup.py b/python/kfserving/setup.py\n--- a/python/kfserving/setup.py\n+++ b/python/kfserving/setup.py\n@@ -33,6 +33,8 @@\n install_requires=[\n \"tornado >= 1.4.1\",\n \"argparse >= 1.4.0\",\n+ \"minio >= 4.0.9\",\n+ \"google-cloud-storage >= 1.16.0\",\n \"numpy\"\n ],\n tests_require=tests_require,\n", "issue": "Python Model download for GCS and S3\nDownloading from GCS and S3 needs to be completed.\r\n\r\nhttps://github.com/kubeflow/kfserving/blob/2f8d33d1a9773c5694a22ba749192163251fe287/python/kfserving/kfserving/storage.py#L27-L33\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in 
writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport tempfile\nimport os\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_LOCAL_PREFIX = \"file://\"\n\n\nclass Storage(object):\n @staticmethod\n def download(uri: str) -> str:\n logging.info(\"Copying contents of %s to local\" % uri)\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n return Storage._download_local(uri)\n\n temp_dir = tempfile.mkdtemp()\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, temp_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, temp_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\" % (uri, temp_dir))\n return temp_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n raise NotImplementedError\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n raise NotImplementedError\n\n @staticmethod\n def _download_local(uri):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return local_path\n", "path": "python/kfserving/kfserving/storage.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='kfserver',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/kfserving',\n description='Model Server for arbitrary python ML frameworks.',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"kfserving\"),\n install_requires=[\n \"tornado >= 1.4.1\",\n \"argparse >= 1.4.0\",\n \"numpy\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/kfserving/setup.py"}]}
1601
817
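A trimmed sketch of the GCS half of the kfserving golden diff, assuming the `google-cloud-storage` package; the function name and prefix parsing here are illustrative restructurings, while the anonymous-client fallback is the one the diff itself adds:

```python
import os

from google.auth import exceptions
from google.cloud import storage


def download_gcs(uri: str, temp_dir: str) -> None:
    try:
        client = storage.Client()
    except exceptions.DefaultCredentialsError:
        client = storage.Client.create_anonymous_client()
    bucket_name, _, prefix = uri.replace("gs://", "", 1).partition("/")
    for blob in client.bucket(bucket_name).list_blobs(prefix=prefix):
        rel = blob.name.replace(prefix, "", 1).strip("/")
        if not rel:  # skip directory placeholder objects
            continue
        dest = os.path.join(temp_dir, rel)
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        blob.download_to_filename(dest)
```

The S3 half of the diff follows the same shape with a `minio` client listing objects by prefix and calling `fget_object` per key.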
gh_patches_debug_17010
rasdani/github-patches
git_diff
mlflow__mlflow-4880
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Alembic migration for metrics table uses incorrect server default ## MLflow Roadmap Item This is an MLflow Roadmap item that has been prioritized by the MLflow maintainers. We're seeking help with the implementation of roadmap items tagged with the `help wanted` label. For requirements clarifications and implementation questions, or to request a PR review, please tag @harupy in your communications related to this issue. ### System information - **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: miniconda container - debian buster - **MLflow installed from (source or binary)**: mflow from pypi - **MLflow version (run ``mlflow --version``)**: mlflow 1.14.1 trying to upgrade to 1.16 or 1.17 - **Python version**: Python 3.9.2 - **npm version, if running the dev UI**: NA - **Exact command to reproduce**: mlflow db upgrade <MSSQL connection string > - Tracking server DB: Azure Microsoft SQL DB ### Describe the problem When I upgrade the database from 1.14.1 to a higher version I get an error. Currently use an Azure MSFT DB. Would like to upgrade to 1.16 or 1.17 ### Code to reproduce issue mlflow db upgrade "mssql+pyodbc://_rest_of_conn_string" ### Other info / logs sqlalchemy.exc.ProgrammingError: (pyodbc.ProgrammingError) ('42000', '[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Column already has a DEFAULT bound to it. (1781) (SQLExecDirectW)') [SQL: ALTER TABLE metrics ADD DEFAULT '0' FOR is_nan] (Background on this error at: http://sqlalche.me/e/14/f405) ### What component(s), interfaces, languages, and integrations does this bug affect? Components - [ ] `area/artifacts`: Artifact stores and artifact logging - [ ] `area/build`: Build and test infrastructure for MLflow - [ ] `area/docs`: MLflow documentation pages - [ ] `area/examples`: Example code - [x] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry - [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors - [ ] `area/projects`: MLproject format, project running backends - [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs - [ ] `area/server-infra`: MLflow server, JavaScript dev server - [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging Interface - [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting - [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models - [x] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry - [ ] `area/windows`: Windows support Language - [ ] `language/r`: R APIs and clients - [ ] `language/java`: Java APIs and clients - [ ] `language/new`: Proposals for new client languages Integrations - [ ] `integrations/azure`: Azure and Azure ML integrations - [ ] `integrations/sagemaker`: SageMaker integrations - [ ] `integrations/databricks`: Databricks integrations </issue> <code> [start of mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py] 1 """reset_default_value_for_is_nan_in_metrics_table_for_mysql 2 3 Revision ID: c48cb773bb87 4 Revises: 39d1c3be5f05 5 Create Date: 2021-04-02 15:43:28.466043 6 7 """ 8 from alembic import op 9 import sqlalchemy as sa 10 11 12 # revision identifiers, used by Alembic. 
13 revision = "c48cb773bb87" 14 down_revision = "39d1c3be5f05" 15 branch_labels = None 16 depends_on = None 17 18 19 def upgrade(): 20 # This part of the migration is only relevant for MySQL. 21 # In 39d1c3be5f05_add_is_nan_constraint_for_metrics_tables_if_necessary.py 22 # (added in MLflow 1.15.0), `alter_column` is called on the `is_nan` column in the `metrics` 23 # table without specifying `existing_server_default`. This alters the column default value to 24 # NULL in MySQL (see the doc below). 25 # 26 # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column 27 # 28 # To revert this change, set the default column value to "0" by specifying `server_default` 29 with op.batch_alter_table("metrics") as batch_op: 30 batch_op.alter_column( 31 "is_nan", 32 type_=sa.types.Boolean(create_constraint=True), 33 nullable=False, 34 server_default="0", 35 ) 36 37 38 def downgrade(): 39 pass 40 [end of mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py --- a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py +++ b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py @@ -26,13 +26,15 @@ # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column # # To revert this change, set the default column value to "0" by specifying `server_default` - with op.batch_alter_table("metrics") as batch_op: - batch_op.alter_column( - "is_nan", - type_=sa.types.Boolean(create_constraint=True), - nullable=False, - server_default="0", - ) + bind = op.get_bind() + if bind.engine.name == "mysql": + with op.batch_alter_table("metrics") as batch_op: + batch_op.alter_column( + "is_nan", + type_=sa.types.Boolean(create_constraint=True), + nullable=False, + server_default="0", + ) def downgrade():
{"golden_diff": "diff --git a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py\n--- a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py\n+++ b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py\n@@ -26,13 +26,15 @@\n # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column\n #\n # To revert this change, set the default column value to \"0\" by specifying `server_default`\n- with op.batch_alter_table(\"metrics\") as batch_op:\n- batch_op.alter_column(\n- \"is_nan\",\n- type_=sa.types.Boolean(create_constraint=True),\n- nullable=False,\n- server_default=\"0\",\n- )\n+ bind = op.get_bind()\n+ if bind.engine.name == \"mysql\":\n+ with op.batch_alter_table(\"metrics\") as batch_op:\n+ batch_op.alter_column(\n+ \"is_nan\",\n+ type_=sa.types.Boolean(create_constraint=True),\n+ nullable=False,\n+ server_default=\"0\",\n+ )\n \n \n def downgrade():\n", "issue": "[BUG] Alembic migration for metrics table uses incorrect server default\n## MLflow Roadmap Item\r\n\r\nThis is an MLflow Roadmap item that has been prioritized by the MLflow maintainers. We're seeking help with the implementation of roadmap items tagged with the `help wanted` label.\r\n\r\nFor requirements clarifications and implementation questions, or to request a PR review, please tag @harupy in your communications related to this issue.\r\n\r\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: miniconda container - debian buster\r\n- **MLflow installed from (source or binary)**: mflow from pypi\r\n- **MLflow version (run ``mlflow --version``)**: mlflow 1.14.1 trying to upgrade to 1.16 or 1.17\r\n- **Python version**: Python 3.9.2\r\n- **npm version, if running the dev UI**: NA\r\n- **Exact command to reproduce**: mlflow db upgrade <MSSQL connection string >\r\n- Tracking server DB: Azure Microsoft SQL DB\r\n\r\n### Describe the problem\r\nWhen I upgrade the database from 1.14.1 to a higher version I get an error. Currently use an Azure MSFT DB. Would like to upgrade to 1.16 or 1.17\r\n\r\n### Code to reproduce issue\r\nmlflow db upgrade \"mssql+pyodbc://_rest_of_conn_string\"\r\n\r\n### Other info / logs\r\nsqlalchemy.exc.ProgrammingError: (pyodbc.ProgrammingError) ('42000', '[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Column already has a DEFAULT bound to it. 
(1781) (SQLExecDirectW)')\r\n[SQL: ALTER TABLE metrics ADD DEFAULT '0' FOR is_nan]\r\n(Background on this error at: http://sqlalche.me/e/14/f405)\r\n\r\n\r\n### What component(s), interfaces, languages, and integrations does this bug affect?\r\nComponents \r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [x] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs\r\n- [ ] `area/server-infra`: MLflow server, JavaScript dev server\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [x] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguage \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\n", "before_files": [{"content": "\"\"\"reset_default_value_for_is_nan_in_metrics_table_for_mysql\n\nRevision ID: c48cb773bb87\nRevises: 39d1c3be5f05\nCreate Date: 2021-04-02 15:43:28.466043\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"c48cb773bb87\"\ndown_revision = \"39d1c3be5f05\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # This part of the migration is only relevant for MySQL.\n # In 39d1c3be5f05_add_is_nan_constraint_for_metrics_tables_if_necessary.py\n # (added in MLflow 1.15.0), `alter_column` is called on the `is_nan` column in the `metrics`\n # table without specifying `existing_server_default`. This alters the column default value to\n # NULL in MySQL (see the doc below).\n #\n # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column\n #\n # To revert this change, set the default column value to \"0\" by specifying `server_default`\n with op.batch_alter_table(\"metrics\") as batch_op:\n batch_op.alter_column(\n \"is_nan\",\n type_=sa.types.Boolean(create_constraint=True),\n nullable=False,\n server_default=\"0\",\n )\n\n\ndef downgrade():\n pass\n", "path": "mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py"}]}
num_tokens_prompt: 1,815
num_tokens_diff: 329
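The accepted diff in the record above gates the Alembic column change on the active SQL dialect. Below is a minimal sketch of that dialect-guard pattern, assuming it runs inside an Alembic migration environment; the table and column names are copied from the record itself.

```
import sqlalchemy as sa
from alembic import op


def upgrade():
    # op.get_bind() returns the connection the migration runs on; its
    # engine name identifies the dialect ("mysql", "sqlite", "mssql", ...).
    bind = op.get_bind()
    if bind.engine.name == "mysql":
        # Only MySQL lost the server default in the earlier migration,
        # so only MySQL gets it restored.
        with op.batch_alter_table("metrics") as batch_op:
            batch_op.alter_column(
                "is_nan",
                type_=sa.types.Boolean(create_constraint=True),
                nullable=False,
                server_default="0",
            )
```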
problem_id: gh_patches_debug_24334
source: rasdani/github-patches
task_type: git_diff
in_source_id: freedomofpress__securedrop-3688
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [reply refactor] Allow journalists to download replies from journalist interface After #3673 is implemented, we should allow journalists to download replies from the journalist interface UI. Note that for long-running SecureDrop instances, there will be old replies encrypted only to the source key that should be unavailable for download. Epic: #3097 </issue> <code> [start of securedrop/journalist_app/col.py] 1 # -*- coding: utf-8 -*- 2 3 from flask import (Blueprint, redirect, url_for, render_template, flash, 4 request, abort, send_file, current_app) 5 from flask_babel import gettext 6 from sqlalchemy.orm.exc import NoResultFound 7 8 from db import db 9 from models import Submission 10 from journalist_app.forms import ReplyForm 11 from journalist_app.utils import (make_star_true, make_star_false, get_source, 12 delete_collection, col_download_unread, 13 col_download_all, col_star, col_un_star, 14 col_delete) 15 16 17 def make_blueprint(config): 18 view = Blueprint('col', __name__) 19 20 @view.route('/add_star/<filesystem_id>', methods=('POST',)) 21 def add_star(filesystem_id): 22 make_star_true(filesystem_id) 23 db.session.commit() 24 return redirect(url_for('main.index')) 25 26 @view.route("/remove_star/<filesystem_id>", methods=('POST',)) 27 def remove_star(filesystem_id): 28 make_star_false(filesystem_id) 29 db.session.commit() 30 return redirect(url_for('main.index')) 31 32 @view.route('/<filesystem_id>') 33 def col(filesystem_id): 34 form = ReplyForm() 35 source = get_source(filesystem_id) 36 source.has_key = current_app.crypto_util.getkey(filesystem_id) 37 return render_template("col.html", filesystem_id=filesystem_id, 38 source=source, form=form) 39 40 @view.route('/delete/<filesystem_id>', methods=('POST',)) 41 def delete_single(filesystem_id): 42 """deleting a single collection from its /col page""" 43 source = get_source(filesystem_id) 44 delete_collection(filesystem_id) 45 flash(gettext("{source_name}'s collection deleted") 46 .format(source_name=source.journalist_designation), 47 "notification") 48 return redirect(url_for('main.index')) 49 50 @view.route('/process', methods=('POST',)) 51 def process(): 52 actions = {'download-unread': col_download_unread, 53 'download-all': col_download_all, 'star': col_star, 54 'un-star': col_un_star, 'delete': col_delete} 55 if 'cols_selected' not in request.form: 56 flash(gettext('No collections selected.'), 'error') 57 return redirect(url_for('main.index')) 58 59 # getlist is cgi.FieldStorage.getlist 60 cols_selected = request.form.getlist('cols_selected') 61 action = request.form['action'] 62 63 if action not in actions: 64 return abort(500) 65 66 method = actions[action] 67 return method(cols_selected) 68 69 @view.route('/<filesystem_id>/<fn>') 70 def download_single_submission(filesystem_id, fn): 71 """Sends a client the contents of a single submission.""" 72 if '..' 
in fn or fn.startswith('/'): 73 abort(404) 74 75 try: 76 Submission.query.filter( 77 Submission.filename == fn).one().downloaded = True 78 db.session.commit() 79 except NoResultFound as e: 80 current_app.logger.error( 81 "Could not mark " + fn + " as downloaded: %s" % (e,)) 82 83 return send_file(current_app.storage.path(filesystem_id, fn), 84 mimetype="application/pgp-encrypted") 85 86 return view 87 [end of securedrop/journalist_app/col.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/securedrop/journalist_app/col.py b/securedrop/journalist_app/col.py --- a/securedrop/journalist_app/col.py +++ b/securedrop/journalist_app/col.py @@ -67,18 +67,21 @@ return method(cols_selected) @view.route('/<filesystem_id>/<fn>') - def download_single_submission(filesystem_id, fn): - """Sends a client the contents of a single submission.""" + def download_single_file(filesystem_id, fn): + """Sends a client the contents of a single file, either a submission + or a journalist reply""" if '..' in fn or fn.startswith('/'): abort(404) - try: - Submission.query.filter( - Submission.filename == fn).one().downloaded = True - db.session.commit() - except NoResultFound as e: - current_app.logger.error( - "Could not mark " + fn + " as downloaded: %s" % (e,)) + # only mark as read when it's a submission (and not a journalist reply) + if not fn.endswith('reply.gpg'): + try: + Submission.query.filter( + Submission.filename == fn).one().downloaded = True + db.session.commit() + except NoResultFound as e: + current_app.logger.error( + "Could not mark " + fn + " as downloaded: %s" % (e,)) return send_file(current_app.storage.path(filesystem_id, fn), mimetype="application/pgp-encrypted")
{"golden_diff": "diff --git a/securedrop/journalist_app/col.py b/securedrop/journalist_app/col.py\n--- a/securedrop/journalist_app/col.py\n+++ b/securedrop/journalist_app/col.py\n@@ -67,18 +67,21 @@\n return method(cols_selected)\n \n @view.route('/<filesystem_id>/<fn>')\n- def download_single_submission(filesystem_id, fn):\n- \"\"\"Sends a client the contents of a single submission.\"\"\"\n+ def download_single_file(filesystem_id, fn):\n+ \"\"\"Sends a client the contents of a single file, either a submission\n+ or a journalist reply\"\"\"\n if '..' in fn or fn.startswith('/'):\n abort(404)\n \n- try:\n- Submission.query.filter(\n- Submission.filename == fn).one().downloaded = True\n- db.session.commit()\n- except NoResultFound as e:\n- current_app.logger.error(\n- \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n+ # only mark as read when it's a submission (and not a journalist reply)\n+ if not fn.endswith('reply.gpg'):\n+ try:\n+ Submission.query.filter(\n+ Submission.filename == fn).one().downloaded = True\n+ db.session.commit()\n+ except NoResultFound as e:\n+ current_app.logger.error(\n+ \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n \n return send_file(current_app.storage.path(filesystem_id, fn),\n mimetype=\"application/pgp-encrypted\")\n", "issue": "[reply refactor] Allow journalists to download replies from journalist interface\nAfter #3673 is implemented, we should allow journalists to download replies from the journalist interface UI. Note that for long-running SecureDrop instances, there will be old replies encrypted only to the source key that should be unavailable for download.\r\n\r\nEpic: #3097\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom flask import (Blueprint, redirect, url_for, render_template, flash,\n request, abort, send_file, current_app)\nfrom flask_babel import gettext\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import db\nfrom models import Submission\nfrom journalist_app.forms import ReplyForm\nfrom journalist_app.utils import (make_star_true, make_star_false, get_source,\n delete_collection, col_download_unread,\n col_download_all, col_star, col_un_star,\n col_delete)\n\n\ndef make_blueprint(config):\n view = Blueprint('col', __name__)\n\n @view.route('/add_star/<filesystem_id>', methods=('POST',))\n def add_star(filesystem_id):\n make_star_true(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route(\"/remove_star/<filesystem_id>\", methods=('POST',))\n def remove_star(filesystem_id):\n make_star_false(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route('/<filesystem_id>')\n def col(filesystem_id):\n form = ReplyForm()\n source = get_source(filesystem_id)\n source.has_key = current_app.crypto_util.getkey(filesystem_id)\n return render_template(\"col.html\", filesystem_id=filesystem_id,\n source=source, form=form)\n\n @view.route('/delete/<filesystem_id>', methods=('POST',))\n def delete_single(filesystem_id):\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(filesystem_id)\n delete_collection(filesystem_id)\n flash(gettext(\"{source_name}'s collection deleted\")\n .format(source_name=source.journalist_designation),\n \"notification\")\n return redirect(url_for('main.index'))\n\n @view.route('/process', methods=('POST',))\n def process():\n actions = {'download-unread': col_download_unread,\n 'download-all': col_download_all, 'star': col_star,\n 'un-star': col_un_star, 'delete': col_delete}\n 
if 'cols_selected' not in request.form:\n flash(gettext('No collections selected.'), 'error')\n return redirect(url_for('main.index'))\n\n # getlist is cgi.FieldStorage.getlist\n cols_selected = request.form.getlist('cols_selected')\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n @view.route('/<filesystem_id>/<fn>')\n def download_single_submission(filesystem_id, fn):\n \"\"\"Sends a client the contents of a single submission.\"\"\"\n if '..' in fn or fn.startswith('/'):\n abort(404)\n\n try:\n Submission.query.filter(\n Submission.filename == fn).one().downloaded = True\n db.session.commit()\n except NoResultFound as e:\n current_app.logger.error(\n \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n\n return send_file(current_app.storage.path(filesystem_id, fn),\n mimetype=\"application/pgp-encrypted\")\n\n return view\n", "path": "securedrop/journalist_app/col.py"}]}
num_tokens_prompt: 1,462
num_tokens_diff: 355
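The core of the fix in the record above is a filename-suffix gate: files ending in "reply.gpg" are journalist replies and are served without flipping the downloaded flag. The gate in isolation, as a sketch (the example filenames are illustrative):

```
def should_mark_downloaded(filename: str) -> bool:
    # Journalist replies end in "reply.gpg"; only source submissions
    # should be marked as read when fetched.
    return not filename.endswith("reply.gpg")


assert should_mark_downloaded("1-melodic_bee-msg.gpg")
assert not should_mark_downloaded("1-melodic_bee-reply.gpg")
```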
problem_id: gh_patches_debug_42292
source: rasdani/github-patches
task_type: git_diff
in_source_id: azavea__raster-vision-178
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add metrics for counting to eval.py We should add a new metric to the detection evaluation script so that it computes how close the counts are compared to ground truth. </issue> <code> [start of src/rv/detection/commands/eval_predictions.py] 1 import json 2 from os.path import join 3 4 import numpy as np 5 import rasterio 6 import click 7 8 from object_detection.utils import object_detection_evaluation, label_map_util 9 10 from rv.utils import ( 11 download_if_needed, make_empty_dir, get_local_path, upload_if_needed, 12 get_boxes_from_geojson, download_and_build_vrt) 13 from rv.detection.commands.settings import max_num_classes, temp_root_dir 14 15 16 def get_eval_result(ground_truth_path, predictions_path, image_dataset): 17 gt_boxes, gt_classes, _ = \ 18 get_boxes_from_geojson(ground_truth_path, image_dataset) 19 # Subtract one because class id's start at 1, but evaluation api assumes 20 # the start at 0. You might think we could just write the label_map.pbtxt 21 # so the class ids start at 0, but that throws an exception. 22 gt_classes -= 1 23 24 pred_boxes, pred_classes, pred_scores = \ 25 get_boxes_from_geojson(predictions_path, image_dataset) 26 pred_classes -= 1 27 28 nb_gt_classes = len(set(gt_classes)) 29 od_eval = object_detection_evaluation.ObjectDetectionEvaluation( 30 nb_gt_classes, matching_iou_threshold=0.1) 31 image_key = 'image' 32 od_eval.add_single_ground_truth_image_info( 33 image_key, gt_boxes, gt_classes) 34 od_eval.add_single_detected_image_info( 35 image_key, pred_boxes, pred_scores, pred_classes) 36 37 od_eval.evaluate() 38 return od_eval.get_eval_result() 39 40 41 def write_results(output_path, label_map_path, eval_result): 42 label_map = label_map_util.load_labelmap(label_map_path) 43 categories = label_map_util.convert_label_map_to_categories( 44 label_map, max_num_classes=max_num_classes, use_display_name=True) 45 category_index = label_map_util.create_category_index(categories) 46 47 results = [] 48 for class_id in range(1, len(category_index) + 1): 49 class_name = category_index[class_id]['name'] 50 # Subtract one to account for fact that class id's start at 1. 51 # precisions and recalls are lists with one element for each 52 # predicted box, assuming they are sorted by score. Each element is 53 # the precision or recall assuming that all predicted boxes with that 54 # score or above are used. So, the last element is the value assuming 55 # that all predictions are used. 56 57 precisions = eval_result.precisions[class_id - 1] 58 recalls = eval_result.recalls[class_id - 1] 59 # Get precision and recall assuming all predicted boxes are used. 
60 class_results = { 61 'name': class_name, 62 'precision': precisions[-1], 63 'recall': recalls[-1] 64 } 65 results.append(class_results) 66 67 with open(output_path, 'w') as output_file: 68 output_file.write(json.dumps(results, indent=4)) 69 70 71 def _eval_predictions(image_uris, label_map_uri, ground_truth_uri, 72 predictions_uri, output_uri): 73 temp_dir = join(temp_root_dir, 'eval_predictions') 74 make_empty_dir(temp_dir) 75 76 image_path = download_and_build_vrt(temp_dir, image_uris) 77 image_dataset = rasterio.open(image_path) 78 79 ground_truth_path = download_if_needed(temp_dir, ground_truth_uri) 80 predictions_path = download_if_needed(temp_dir, predictions_uri) 81 label_map_path = download_if_needed(temp_dir, label_map_uri) 82 83 eval_result = get_eval_result( 84 ground_truth_path, predictions_path, image_dataset) 85 86 output_path = get_local_path(temp_dir, output_uri) 87 write_results(output_path, label_map_path, eval_result) 88 upload_if_needed(output_path, output_uri) 89 90 91 @click.command() 92 @click.argument('image_uris', nargs=-1) 93 @click.argument('label_map_uri') 94 @click.argument('ground_truth_uri') 95 @click.argument('predictions_uri') 96 @click.argument('output_uri') 97 def eval_predictions(image_uris, label_map_uri, ground_truth_uri, 98 predictions_uri, output_uri): 99 """Evaluate predictions against ground truth for a single predictions file. 100 101 Args: 102 ground_truth_uri: GeoJSON file with ground truth bounding boxes 103 predictions_uri: GeoJSON file with predicted bounding boxes 104 output_uri: JSON file with metrics 105 """ 106 _eval_predictions(image_uris, label_map_uri, ground_truth_uri, 107 predictions_uri, output_uri) 108 109 110 if __name__ == '__main__': 111 eval_predictions() 112 [end of src/rv/detection/commands/eval_predictions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/rv/detection/commands/eval_predictions.py b/src/rv/detection/commands/eval_predictions.py --- a/src/rv/detection/commands/eval_predictions.py +++ b/src/rv/detection/commands/eval_predictions.py @@ -1,7 +1,6 @@ import json -from os.path import join +from os.path import join, dirname -import numpy as np import rasterio import click @@ -13,7 +12,7 @@ from rv.detection.commands.settings import max_num_classes, temp_root_dir -def get_eval_result(ground_truth_path, predictions_path, image_dataset): +def get_od_eval(ground_truth_path, predictions_path, image_dataset): gt_boxes, gt_classes, _ = \ get_boxes_from_geojson(ground_truth_path, image_dataset) # Subtract one because class id's start at 1, but evaluation api assumes @@ -35,10 +34,12 @@ image_key, pred_boxes, pred_scores, pred_classes) od_eval.evaluate() - return od_eval.get_eval_result() + return od_eval -def write_results(output_path, label_map_path, eval_result): +def write_results(output_path, label_map_path, od_eval): + make_empty_dir(dirname(output_path), empty_dir=False) + label_map = label_map_util.load_labelmap(label_map_path) categories = label_map_util.convert_label_map_to_categories( label_map, max_num_classes=max_num_classes, use_display_name=True) @@ -53,19 +54,30 @@ # the precision or recall assuming that all predicted boxes with that # score or above are used. So, the last element is the value assuming # that all predictions are used. - + eval_result = od_eval.get_eval_result() precisions = eval_result.precisions[class_id - 1] recalls = eval_result.recalls[class_id - 1] # Get precision and recall assuming all predicted boxes are used. + precision = precisions[-1] + recall = recalls[-1] + f1 = (2 * precision * recall) / (precision + recall) + + gt_count = od_eval.num_gt_instances_per_class[class_id -1] + pred_count = len(recalls) + count_error = pred_count - gt_count + norm_count_error = count_error / gt_count + class_results = { 'name': class_name, - 'precision': precisions[-1], - 'recall': recalls[-1] + 'precision': precision, + 'recall': recall, + 'f1': f1, + 'norm_count_error': norm_count_error } results.append(class_results) with open(output_path, 'w') as output_file: - output_file.write(json.dumps(results, indent=4)) + output_file.write(json.dumps(results, indent=4, sort_keys=True)) def _eval_predictions(image_uris, label_map_uri, ground_truth_uri, @@ -80,11 +92,11 @@ predictions_path = download_if_needed(temp_dir, predictions_uri) label_map_path = download_if_needed(temp_dir, label_map_uri) - eval_result = get_eval_result( + od_eval = get_od_eval( ground_truth_path, predictions_path, image_dataset) output_path = get_local_path(temp_dir, output_uri) - write_results(output_path, label_map_path, eval_result) + write_results(output_path, label_map_path, od_eval) upload_if_needed(output_path, output_uri)
{"golden_diff": "diff --git a/src/rv/detection/commands/eval_predictions.py b/src/rv/detection/commands/eval_predictions.py\n--- a/src/rv/detection/commands/eval_predictions.py\n+++ b/src/rv/detection/commands/eval_predictions.py\n@@ -1,7 +1,6 @@\n import json\n-from os.path import join\n+from os.path import join, dirname\n \n-import numpy as np\n import rasterio\n import click\n \n@@ -13,7 +12,7 @@\n from rv.detection.commands.settings import max_num_classes, temp_root_dir\n \n \n-def get_eval_result(ground_truth_path, predictions_path, image_dataset):\n+def get_od_eval(ground_truth_path, predictions_path, image_dataset):\n gt_boxes, gt_classes, _ = \\\n get_boxes_from_geojson(ground_truth_path, image_dataset)\n # Subtract one because class id's start at 1, but evaluation api assumes\n@@ -35,10 +34,12 @@\n image_key, pred_boxes, pred_scores, pred_classes)\n \n od_eval.evaluate()\n- return od_eval.get_eval_result()\n+ return od_eval\n \n \n-def write_results(output_path, label_map_path, eval_result):\n+def write_results(output_path, label_map_path, od_eval):\n+ make_empty_dir(dirname(output_path), empty_dir=False)\n+\n label_map = label_map_util.load_labelmap(label_map_path)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=max_num_classes, use_display_name=True)\n@@ -53,19 +54,30 @@\n # the precision or recall assuming that all predicted boxes with that\n # score or above are used. So, the last element is the value assuming\n # that all predictions are used.\n-\n+ eval_result = od_eval.get_eval_result()\n precisions = eval_result.precisions[class_id - 1]\n recalls = eval_result.recalls[class_id - 1]\n # Get precision and recall assuming all predicted boxes are used.\n+ precision = precisions[-1]\n+ recall = recalls[-1]\n+ f1 = (2 * precision * recall) / (precision + recall)\n+\n+ gt_count = od_eval.num_gt_instances_per_class[class_id -1]\n+ pred_count = len(recalls)\n+ count_error = pred_count - gt_count\n+ norm_count_error = count_error / gt_count\n+\n class_results = {\n 'name': class_name,\n- 'precision': precisions[-1],\n- 'recall': recalls[-1]\n+ 'precision': precision,\n+ 'recall': recall,\n+ 'f1': f1,\n+ 'norm_count_error': norm_count_error\n }\n results.append(class_results)\n \n with open(output_path, 'w') as output_file:\n- output_file.write(json.dumps(results, indent=4))\n+ output_file.write(json.dumps(results, indent=4, sort_keys=True))\n \n \n def _eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n@@ -80,11 +92,11 @@\n predictions_path = download_if_needed(temp_dir, predictions_uri)\n label_map_path = download_if_needed(temp_dir, label_map_uri)\n \n- eval_result = get_eval_result(\n+ od_eval = get_od_eval(\n ground_truth_path, predictions_path, image_dataset)\n \n output_path = get_local_path(temp_dir, output_uri)\n- write_results(output_path, label_map_path, eval_result)\n+ write_results(output_path, label_map_path, od_eval)\n upload_if_needed(output_path, output_uri)\n", "issue": "Add metrics for counting to eval.py\nWe should add a new metric to the detection evaluation script so that it computes how close the counts are compared to ground truth.\n", "before_files": [{"content": "import json\nfrom os.path import join\n\nimport numpy as np\nimport rasterio\nimport click\n\nfrom object_detection.utils import object_detection_evaluation, label_map_util\n\nfrom rv.utils import (\n download_if_needed, make_empty_dir, get_local_path, upload_if_needed,\n get_boxes_from_geojson, download_and_build_vrt)\nfrom 
rv.detection.commands.settings import max_num_classes, temp_root_dir\n\n\ndef get_eval_result(ground_truth_path, predictions_path, image_dataset):\n gt_boxes, gt_classes, _ = \\\n get_boxes_from_geojson(ground_truth_path, image_dataset)\n # Subtract one because class id's start at 1, but evaluation api assumes\n # the start at 0. You might think we could just write the label_map.pbtxt\n # so the class ids start at 0, but that throws an exception.\n gt_classes -= 1\n\n pred_boxes, pred_classes, pred_scores = \\\n get_boxes_from_geojson(predictions_path, image_dataset)\n pred_classes -= 1\n\n nb_gt_classes = len(set(gt_classes))\n od_eval = object_detection_evaluation.ObjectDetectionEvaluation(\n nb_gt_classes, matching_iou_threshold=0.1)\n image_key = 'image'\n od_eval.add_single_ground_truth_image_info(\n image_key, gt_boxes, gt_classes)\n od_eval.add_single_detected_image_info(\n image_key, pred_boxes, pred_scores, pred_classes)\n\n od_eval.evaluate()\n return od_eval.get_eval_result()\n\n\ndef write_results(output_path, label_map_path, eval_result):\n label_map = label_map_util.load_labelmap(label_map_path)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=max_num_classes, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n\n results = []\n for class_id in range(1, len(category_index) + 1):\n class_name = category_index[class_id]['name']\n # Subtract one to account for fact that class id's start at 1.\n # precisions and recalls are lists with one element for each\n # predicted box, assuming they are sorted by score. Each element is\n # the precision or recall assuming that all predicted boxes with that\n # score or above are used. So, the last element is the value assuming\n # that all predictions are used.\n\n precisions = eval_result.precisions[class_id - 1]\n recalls = eval_result.recalls[class_id - 1]\n # Get precision and recall assuming all predicted boxes are used.\n class_results = {\n 'name': class_name,\n 'precision': precisions[-1],\n 'recall': recalls[-1]\n }\n results.append(class_results)\n\n with open(output_path, 'w') as output_file:\n output_file.write(json.dumps(results, indent=4))\n\n\ndef _eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri):\n temp_dir = join(temp_root_dir, 'eval_predictions')\n make_empty_dir(temp_dir)\n\n image_path = download_and_build_vrt(temp_dir, image_uris)\n image_dataset = rasterio.open(image_path)\n\n ground_truth_path = download_if_needed(temp_dir, ground_truth_uri)\n predictions_path = download_if_needed(temp_dir, predictions_uri)\n label_map_path = download_if_needed(temp_dir, label_map_uri)\n\n eval_result = get_eval_result(\n ground_truth_path, predictions_path, image_dataset)\n\n output_path = get_local_path(temp_dir, output_uri)\n write_results(output_path, label_map_path, eval_result)\n upload_if_needed(output_path, output_uri)\n\n\[email protected]()\[email protected]('image_uris', nargs=-1)\[email protected]('label_map_uri')\[email protected]('ground_truth_uri')\[email protected]('predictions_uri')\[email protected]('output_uri')\ndef eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri):\n \"\"\"Evaluate predictions against ground truth for a single predictions file.\n\n Args:\n ground_truth_uri: GeoJSON file with ground truth bounding boxes\n predictions_uri: GeoJSON file with predicted bounding boxes\n output_uri: JSON file with metrics\n \"\"\"\n 
_eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri)\n\n\nif __name__ == '__main__':\n eval_predictions()\n", "path": "src/rv/detection/commands/eval_predictions.py"}]}
num_tokens_prompt: 1,758
num_tokens_diff: 781
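The metrics added by the diff above reduce to two short formulas: F1 from precision and recall, and a count error normalized by the ground-truth count. A sketch of both as standalone functions; the real check pulls these inputs from the TensorFlow object-detection evaluation object.

```
def f1_score(precision: float, recall: float) -> float:
    return (2 * precision * recall) / (precision + recall)


def norm_count_error(pred_count: int, gt_count: int) -> float:
    # Positive means over-counting, negative means under-counting,
    # 0.0 means exactly as many predicted boxes as ground-truth boxes.
    return (pred_count - gt_count) / gt_count


print(f1_score(0.8, 0.5))        # 0.6153846...
print(norm_count_error(12, 10))  # 0.2
```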
problem_id: gh_patches_debug_524
source: rasdani/github-patches
task_type: git_diff
in_source_id: encode__uvicorn-660
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reload Behavior: Documentation != Code, Unnecessary Reloads Hey! I upgraded to 0.11.4 shortly after release today, and it has a couple of quirks. 1. The documentation [here](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-1819b1daaccb3d358620ade9c67e9118R22) says "python file changes" but the code says ["all non-dotfile changes"](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-b0da863c7164698a2ef0fa805e4a9197R40). 2. That behavior, while from the test cases seems to be intended to roll up things like `.graphql` files, also unfortunately rolls up `.pyc` files, meaning every restart is a double restart: ``` WARNING: Detected file change in 'app/main.py'. Reloading... INFO: Shutting down INFO: Waiting for application shutdown. INFO: Application shutdown complete. INFO: Finished server process [87024] INFO: Started server process [87080] INFO: Waiting for application startup. INFO: Application startup complete. WARNING: Detected file change in 'app/__pycache__/main.cpython-37.pyc'. Reloading... INFO: Shutting down INFO: Waiting for application shutdown. INFO: Application shutdown complete. INFO: Finished server process [87080] INFO: Started server process [87093] INFO: Waiting for application startup. INFO: Application startup complete. ``` It might be better to use [Path.glob](https://docs.python.org/3/library/pathlib.html#pathlib.Path.glob) so users can specify file extensions and paths more explicitly than with `os.walk`, but it's published already so maybe as another flag? 3. A minor point, but worth noting in the docs: `--reload_dir` on the CLI becomes `reload_dirs=['my_dir']` in code: `uvicorn.run('app.main:app', host="0.0.0.0", port=8000, reload=True, reload_dirs=['app'])` Thanks for making this great library! </issue> <code> [start of uvicorn/__init__.py] 1 from uvicorn.config import Config 2 from uvicorn.main import Server, main, run 3 4 __version__ = "0.11.4" 5 __all__ = ["main", "run", "Config", "Server"] 6 [end of uvicorn/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/uvicorn/__init__.py b/uvicorn/__init__.py --- a/uvicorn/__init__.py +++ b/uvicorn/__init__.py @@ -1,5 +1,5 @@ from uvicorn.config import Config from uvicorn.main import Server, main, run -__version__ = "0.11.4" +__version__ = "0.11.5" __all__ = ["main", "run", "Config", "Server"]
{"golden_diff": "diff --git a/uvicorn/__init__.py b/uvicorn/__init__.py\n--- a/uvicorn/__init__.py\n+++ b/uvicorn/__init__.py\n@@ -1,5 +1,5 @@\n from uvicorn.config import Config\n from uvicorn.main import Server, main, run\n \n-__version__ = \"0.11.4\"\n+__version__ = \"0.11.5\"\n __all__ = [\"main\", \"run\", \"Config\", \"Server\"]\n", "issue": "Reload Behavior: Documentation != Code, Unnecessary Reloads\nHey! I upgraded to 0.11.4 shortly after release today, and it has a couple of quirks.\r\n\r\n1. The documentation [here](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-1819b1daaccb3d358620ade9c67e9118R22) says \"python file changes\" but the code says [\"all non-dotfile changes\"](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-b0da863c7164698a2ef0fa805e4a9197R40).\r\n2. That behavior, while from the test cases seems to be intended to roll up things like `.graphql` files, also unfortunately rolls up `.pyc` files, meaning every restart is a double restart:\r\n\r\n```\r\nWARNING: Detected file change in 'app/main.py'. Reloading...\r\nINFO: Shutting down\r\nINFO: Waiting for application shutdown.\r\nINFO: Application shutdown complete.\r\nINFO: Finished server process [87024]\r\nINFO: Started server process [87080]\r\nINFO: Waiting for application startup.\r\nINFO: Application startup complete.\r\nWARNING: Detected file change in 'app/__pycache__/main.cpython-37.pyc'. Reloading...\r\nINFO: Shutting down\r\nINFO: Waiting for application shutdown.\r\nINFO: Application shutdown complete.\r\nINFO: Finished server process [87080]\r\nINFO: Started server process [87093]\r\nINFO: Waiting for application startup.\r\nINFO: Application startup complete.\r\n```\r\n\r\nIt might be better to use [Path.glob](https://docs.python.org/3/library/pathlib.html#pathlib.Path.glob) so users can specify file extensions and paths more explicitly than with `os.walk`, but it's published already so maybe as another flag?\r\n\r\n3. A minor point, but worth noting in the docs: `--reload_dir` on the CLI becomes `reload_dirs=['my_dir']` in code: `uvicorn.run('app.main:app', host=\"0.0.0.0\", port=8000, reload=True, reload_dirs=['app'])`\r\n\r\nThanks for making this great library!\n", "before_files": [{"content": "from uvicorn.config import Config\nfrom uvicorn.main import Server, main, run\n\n__version__ = \"0.11.4\"\n__all__ = [\"main\", \"run\", \"Config\", \"Server\"]\n", "path": "uvicorn/__init__.py"}]}
num_tokens_prompt: 1,118
num_tokens_diff: 111
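Point 2 of the issue above, the double restart, happens because "*.pyc" writes are treated as reloadable changes. A sketch of the kind of path filter that would avoid it (illustrative only, not uvicorn's actual reloader code):

```
from pathlib import Path


def is_reload_trigger(changed_file: str) -> bool:
    p = Path(changed_file)
    if p.name.startswith("."):  # keep the existing dot-file rule
        return False
    if p.suffix == ".pyc" or "__pycache__" in p.parts:
        return False  # byte-code churn, not a source edit
    return True


assert is_reload_trigger("app/main.py")
assert not is_reload_trigger("app/__pycache__/main.cpython-37.pyc")
```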
problem_id: gh_patches_debug_64390
source: rasdani/github-patches
task_type: git_diff
in_source_id: alltheplaces__alltheplaces-2150
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider tmobile_us is broken During the global build at 2021-06-30-14-42-26, spider **tmobile_us** failed with **7563 features** and **2 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/tmobile_us.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson)) </issue> <code> [start of locations/spiders/tmobile_us.py] 1 # -*- coding: utf-8 -*- 2 import json 3 from urllib.parse import urlencode 4 5 import scrapy 6 7 from locations.items import GeojsonPointItem 8 from locations.hours import OpeningHours 9 10 DAY_MAPPING = {'Monday': 'Mo', 11 'Tuesday': 'Tu', 12 'Wednesday': 'We', 13 'Thursday': 'Th', 14 'Friday': 'Fr', 15 'Saturday': 'Sa', 16 'Sunday': 'Su'} 17 18 BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?' 19 20 21 class TMobileUSSpider(scrapy.Spider): 22 name = "tmobile_us" 23 item_attributes = { 'brand': "T-Mobile" } 24 allowed_domains = ["www.t-mobile.com"] 25 download_delay = 0.2 26 27 def parse_hours(self, store_hours): 28 opening_hours = OpeningHours() 29 if store_hours is None: 30 return 31 32 for store_day in store_hours: 33 day = DAY_MAPPING[store_day.get("day")] 34 open_time = store_day.get("opens") 35 close_time = store_day.get("closes") 36 if open_time is None and close_time is None: 37 continue 38 opening_hours.add_range(day=day, 39 open_time=open_time, 40 close_time=close_time, 41 time_format='%H:%M' 42 ) 43 44 return opening_hours.as_opening_hours() 45 46 def start_requests(self): 47 url = BASE_URL 48 49 with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points: 50 51 next(points) # Ignore the header 52 for point in points: 53 _, lat, lon = point.strip().split(',') 54 55 params = { 56 'latitude': '{}'.format(lat), 57 'longitude': '{}'.format(lon), 58 'count': '1000', 59 'radius': '25', 60 'ignoreLoadingBar': 'false' 61 } 62 63 yield scrapy.http.Request(url + urlencode(params), callback=self.parse) 64 65 def parse(self, response): 66 data = json.loads(response.body_as_unicode()) 67 68 for store in data: 69 properties = { 70 'name': store["name"], 71 'ref': store["id"], 72 'addr_full': store["location"]["address"]["streetAddress"], 73 'city': store["location"]["address"]["addressLocality"], 74 'state': store["location"]["address"]["addressRegion"], 75 'postcode': store["location"]["address"]["postalCode"], 76 'phone': store.get("telephone"), 77 'website': store.get("url") or response.url, 78 'lat': float(store["location"]["latitude"]), 79 'lon': float(store["location"]["longitude"]), 80 } 81 82 hours = self.parse_hours(store.get("hours", [])) 83 if hours: 84 properties["opening_hours"] = hours 85 86 yield GeojsonPointItem(**properties) 87 [end of locations/spiders/tmobile_us.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py --- a/locations/spiders/tmobile_us.py +++ b/locations/spiders/tmobile_us.py @@ -15,7 +15,7 @@ 'Saturday': 'Sa', 'Sunday': 'Su'} -BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?' +BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?' class TMobileUSSpider(scrapy.Spider):
{"golden_diff": "diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py\n--- a/locations/spiders/tmobile_us.py\n+++ b/locations/spiders/tmobile_us.py\n@@ -15,7 +15,7 @@\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n \n-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n \n \n class TMobileUSSpider(scrapy.Spider):\n", "issue": "Spider tmobile_us is broken\nDuring the global build at 2021-06-30-14-42-26, spider **tmobile_us** failed with **7563 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/tmobile_us.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = { 'brand': \"T-Mobile\" }\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data:\n properties = {\n 'name': store[\"name\"],\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': float(store[\"location\"][\"latitude\"]),\n 'lon': float(store[\"location\"][\"longitude\"]),\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}]}
num_tokens_prompt: 1,506
num_tokens_diff: 151
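The one-line fix above pins the new API version inside BASE_URL. A small variation that keeps the version in its own constant, so the next bump stays a one-line change; the API_VERSION name is an assumption for illustration, not part of the accepted diff:

```
API_VERSION = "v2.1"  # the value the fix hard-codes into the URL
BASE_URL = (
    "https://onmyj41p3c.execute-api.us-west-2.amazonaws.com"
    f"/prod/{API_VERSION}/getStoresByCoordinates?"
)

assert "prod/v2.1/getStoresByCoordinates" in BASE_URL
```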
problem_id: gh_patches_debug_36368
source: rasdani/github-patches
task_type: git_diff
in_source_id: secdev__scapy-4017
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> scapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes when cryptography module is missing ### Brief description scapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes in multiple environments ### Scapy version 2.5.0 ### Python version 3.11.2 ### Operating system macOS Ventura 13.3.1 (with M1 chip) ### Additional environment information _No response_ ### How to reproduce ``` $ python --version Python 3.11.2 $ pip install scapy $ pip show scapy Name: scapy Version: 2.5.0 ... $ python Python 3.11.2 (main, Feb 16 2023, 02:55:59) [Clang 14.0.0 (clang-1400.0.29.202)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF >>> TLS13_HKDF("sha256") ``` We can also reproduce from the default python docker image: ``` $ docker run -it --entrypoint bash python:latest # pip install scapy # python >>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF >>> TLS13_HKDF("sha256") ``` ### Actual result Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/homebrew/lib/python3.11/site-packages/scapy/layers/tls/crypto/hkdf.py", line 23, in __init__ self.hash = _get_hash(hash_name) ^^^^^^^^^^^^^^^^^^^^ TypeError: 'NoneType' object is not callable ### Expected result <scapy.layers.tls.crypto.hkdf.TLS13_HKDF object at 0x...> ### Related resources _No response_ </issue> <code> [start of scapy/layers/tls/crypto/hkdf.py] 1 # SPDX-License-Identifier: GPL-2.0-only 2 # This file is part of Scapy 3 # See https://scapy.net/ for more information 4 # Copyright (C) 2017 Maxence Tury 5 6 """ 7 Stateless HKDF for TLS 1.3. 8 """ 9 10 import struct 11 12 from scapy.config import conf 13 from scapy.layers.tls.crypto.pkcs1 import _get_hash 14 15 if conf.crypto_valid: 16 from cryptography.hazmat.backends import default_backend 17 from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand 18 from cryptography.hazmat.primitives.hashes import Hash 19 from cryptography.hazmat.primitives.hmac import HMAC 20 21 22 class TLS13_HKDF(object): 23 def __init__(self, hash_name="sha256"): 24 self.hash = _get_hash(hash_name) 25 26 def extract(self, salt, ikm): 27 h = self.hash 28 hkdf = HKDF(h, h.digest_size, salt, None, default_backend()) 29 if ikm is None: 30 ikm = b"\x00" * h.digest_size 31 return hkdf._extract(ikm) 32 33 def expand(self, prk, info, L): 34 h = self.hash 35 hkdf = HKDFExpand(h, L, info, default_backend()) 36 return hkdf.derive(prk) 37 38 def expand_label(self, secret, label, hash_value, length): 39 hkdf_label = struct.pack("!H", length) 40 hkdf_label += struct.pack("B", 6 + len(label)) 41 hkdf_label += b"tls13 " 42 hkdf_label += label 43 hkdf_label += struct.pack("B", len(hash_value)) 44 hkdf_label += hash_value 45 return self.expand(secret, hkdf_label, length) 46 47 def derive_secret(self, secret, label, messages): 48 h = Hash(self.hash, backend=default_backend()) 49 h.update(messages) 50 hash_messages = h.finalize() 51 hash_len = self.hash.digest_size 52 return self.expand_label(secret, label, hash_messages, hash_len) 53 54 def compute_verify_data(self, basekey, handshake_context): 55 hash_len = self.hash.digest_size 56 finished_key = self.expand_label(basekey, b"finished", b"", hash_len) 57 58 h = Hash(self.hash, backend=default_backend()) 59 h.update(handshake_context) 60 hash_value = h.finalize() 61 62 hm = HMAC(finished_key, self.hash, default_backend()) 63 hm.update(hash_value) 64 return hm.finalize() 65 
[end of scapy/layers/tls/crypto/hkdf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scapy/layers/tls/crypto/hkdf.py b/scapy/layers/tls/crypto/hkdf.py --- a/scapy/layers/tls/crypto/hkdf.py +++ b/scapy/layers/tls/crypto/hkdf.py @@ -9,7 +9,7 @@ import struct -from scapy.config import conf +from scapy.config import conf, crypto_validator from scapy.layers.tls.crypto.pkcs1 import _get_hash if conf.crypto_valid: @@ -20,9 +20,11 @@ class TLS13_HKDF(object): + @crypto_validator def __init__(self, hash_name="sha256"): self.hash = _get_hash(hash_name) + @crypto_validator def extract(self, salt, ikm): h = self.hash hkdf = HKDF(h, h.digest_size, salt, None, default_backend()) @@ -30,11 +32,13 @@ ikm = b"\x00" * h.digest_size return hkdf._extract(ikm) + @crypto_validator def expand(self, prk, info, L): h = self.hash hkdf = HKDFExpand(h, L, info, default_backend()) return hkdf.derive(prk) + @crypto_validator def expand_label(self, secret, label, hash_value, length): hkdf_label = struct.pack("!H", length) hkdf_label += struct.pack("B", 6 + len(label)) @@ -44,6 +48,7 @@ hkdf_label += hash_value return self.expand(secret, hkdf_label, length) + @crypto_validator def derive_secret(self, secret, label, messages): h = Hash(self.hash, backend=default_backend()) h.update(messages) @@ -51,6 +56,7 @@ hash_len = self.hash.digest_size return self.expand_label(secret, label, hash_messages, hash_len) + @crypto_validator def compute_verify_data(self, basekey, handshake_context): hash_len = self.hash.digest_size finished_key = self.expand_label(basekey, b"finished", b"", hash_len)
{"golden_diff": "diff --git a/scapy/layers/tls/crypto/hkdf.py b/scapy/layers/tls/crypto/hkdf.py\n--- a/scapy/layers/tls/crypto/hkdf.py\n+++ b/scapy/layers/tls/crypto/hkdf.py\n@@ -9,7 +9,7 @@\n \n import struct\n \n-from scapy.config import conf\n+from scapy.config import conf, crypto_validator\n from scapy.layers.tls.crypto.pkcs1 import _get_hash\n \n if conf.crypto_valid:\n@@ -20,9 +20,11 @@\n \n \n class TLS13_HKDF(object):\n+ @crypto_validator\n def __init__(self, hash_name=\"sha256\"):\n self.hash = _get_hash(hash_name)\n \n+ @crypto_validator\n def extract(self, salt, ikm):\n h = self.hash\n hkdf = HKDF(h, h.digest_size, salt, None, default_backend())\n@@ -30,11 +32,13 @@\n ikm = b\"\\x00\" * h.digest_size\n return hkdf._extract(ikm)\n \n+ @crypto_validator\n def expand(self, prk, info, L):\n h = self.hash\n hkdf = HKDFExpand(h, L, info, default_backend())\n return hkdf.derive(prk)\n \n+ @crypto_validator\n def expand_label(self, secret, label, hash_value, length):\n hkdf_label = struct.pack(\"!H\", length)\n hkdf_label += struct.pack(\"B\", 6 + len(label))\n@@ -44,6 +48,7 @@\n hkdf_label += hash_value\n return self.expand(secret, hkdf_label, length)\n \n+ @crypto_validator\n def derive_secret(self, secret, label, messages):\n h = Hash(self.hash, backend=default_backend())\n h.update(messages)\n@@ -51,6 +56,7 @@\n hash_len = self.hash.digest_size\n return self.expand_label(secret, label, hash_messages, hash_len)\n \n+ @crypto_validator\n def compute_verify_data(self, basekey, handshake_context):\n hash_len = self.hash.digest_size\n finished_key = self.expand_label(basekey, b\"finished\", b\"\", hash_len)\n", "issue": "scapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes when cryptography module is missing\n### Brief description\n\nscapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes in multiple environments\n\n### Scapy version\n\n2.5.0\n\n### Python version\n\n3.11.2\n\n### Operating system\n\nmacOS Ventura 13.3.1 (with M1 chip)\n\n### Additional environment information\n\n_No response_\n\n### How to reproduce\n\n```\r\n$ python --version\r\nPython 3.11.2\r\n$ pip install scapy\r\n$ pip show scapy\r\nName: scapy\r\nVersion: 2.5.0\r\n...\r\n$ python\r\nPython 3.11.2 (main, Feb 16 2023, 02:55:59) [Clang 14.0.0 (clang-1400.0.29.202)] on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF\r\n>>> TLS13_HKDF(\"sha256\")\r\n```\r\n\r\nWe can also reproduce from the default python docker image:\r\n```\r\n$ docker run -it --entrypoint bash python:latest\r\n# pip install scapy\r\n# python\r\n>>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF\r\n>>> TLS13_HKDF(\"sha256\")\r\n```\n\n### Actual result\n\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/opt/homebrew/lib/python3.11/site-packages/scapy/layers/tls/crypto/hkdf.py\", line 23, in __init__\r\n self.hash = _get_hash(hash_name)\r\n ^^^^^^^^^^^^^^^^^^^^\r\nTypeError: 'NoneType' object is not callable\n\n### Expected result\n\n<scapy.layers.tls.crypto.hkdf.TLS13_HKDF object at 0x...>\n\n### Related resources\n\n_No response_\n", "before_files": [{"content": "# SPDX-License-Identifier: GPL-2.0-only\n# This file is part of Scapy\n# See https://scapy.net/ for more information\n# Copyright (C) 2017 Maxence Tury\n\n\"\"\"\nStateless HKDF for TLS 1.3.\n\"\"\"\n\nimport struct\n\nfrom scapy.config import conf\nfrom scapy.layers.tls.crypto.pkcs1 import _get_hash\n\nif conf.crypto_valid:\n from 
cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand\n from cryptography.hazmat.primitives.hashes import Hash\n from cryptography.hazmat.primitives.hmac import HMAC\n\n\nclass TLS13_HKDF(object):\n def __init__(self, hash_name=\"sha256\"):\n self.hash = _get_hash(hash_name)\n\n def extract(self, salt, ikm):\n h = self.hash\n hkdf = HKDF(h, h.digest_size, salt, None, default_backend())\n if ikm is None:\n ikm = b\"\\x00\" * h.digest_size\n return hkdf._extract(ikm)\n\n def expand(self, prk, info, L):\n h = self.hash\n hkdf = HKDFExpand(h, L, info, default_backend())\n return hkdf.derive(prk)\n\n def expand_label(self, secret, label, hash_value, length):\n hkdf_label = struct.pack(\"!H\", length)\n hkdf_label += struct.pack(\"B\", 6 + len(label))\n hkdf_label += b\"tls13 \"\n hkdf_label += label\n hkdf_label += struct.pack(\"B\", len(hash_value))\n hkdf_label += hash_value\n return self.expand(secret, hkdf_label, length)\n\n def derive_secret(self, secret, label, messages):\n h = Hash(self.hash, backend=default_backend())\n h.update(messages)\n hash_messages = h.finalize()\n hash_len = self.hash.digest_size\n return self.expand_label(secret, label, hash_messages, hash_len)\n\n def compute_verify_data(self, basekey, handshake_context):\n hash_len = self.hash.digest_size\n finished_key = self.expand_label(basekey, b\"finished\", b\"\", hash_len)\n\n h = Hash(self.hash, backend=default_backend())\n h.update(handshake_context)\n hash_value = h.finalize()\n\n hm = HMAC(finished_key, self.hash, default_backend())\n hm.update(hash_value)\n return hm.finalize()\n", "path": "scapy/layers/tls/crypto/hkdf.py"}]}
num_tokens_prompt: 1,679
num_tokens_diff: 495
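The fix above wraps every TLS13_HKDF method in scapy's crypto_validator decorator, imported from scapy.config in the diff. A self-contained sketch of what such a guard does, namely fail with a clear ImportError when the optional cryptography backend is absent; HAS_CRYPTO below stands in for scapy's real conf.crypto_valid flag:

```
import functools

HAS_CRYPTO = False  # stand-in for scapy's conf.crypto_valid flag


def crypto_validator(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not HAS_CRYPTO:
            raise ImportError(
                "python-cryptography is required for %s()" % func.__name__
            )
        return func(*args, **kwargs)
    return wrapper


@crypto_validator
def extract(salt, ikm):
    ...  # would call the cryptography HKDF primitives here


try:
    extract(None, None)
except ImportError as err:
    print(err)  # python-cryptography is required for extract()
```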
problem_id: gh_patches_debug_2919
source: rasdani/github-patches
task_type: git_diff
in_source_id: mesonbuild__meson-1538
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> VS 2017 backend emits bad WindowsTargetPlatformVersion value When I tried generating a VS 2017 solution, the generated app.vcxproj contained this: ``` <WindowsTargetPlatformVersion>10.0.14393.0\</WindowsTargetPlatformVersion> ``` Which then causes errors in other `.targets` files attempting to do a numeric comparison against that. This value is probably taken straight from one of these environment variables: ``` WindowsSDKLibVersion=10.0.14393.0\ WindowsSDKVersion=10.0.14393.0\ ``` The trailing backslash is a bit suspect, but may be there intentionally so it can be concatenated to ``` WindowsSdkDir=C:\Program Files (x86)\Windows Kits\10\ ``` directly. </issue> <code> [start of mesonbuild/backend/vs2017backend.py] 1 # Copyright 2014-2016 The Meson development team 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 17 from .vs2010backend import Vs2010Backend 18 19 20 class Vs2017Backend(Vs2010Backend): 21 def __init__(self, build): 22 super().__init__(build) 23 self.name = 'vs2017' 24 self.platform_toolset = 'v141' 25 self.vs_version = '2017' 26 # WindowsSDKVersion should be set by command prompt. 27 self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None) 28 [end of mesonbuild/backend/vs2017backend.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mesonbuild/backend/vs2017backend.py b/mesonbuild/backend/vs2017backend.py --- a/mesonbuild/backend/vs2017backend.py +++ b/mesonbuild/backend/vs2017backend.py @@ -24,4 +24,4 @@ self.platform_toolset = 'v141' self.vs_version = '2017' # WindowsSDKVersion should be set by command prompt. - self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None) + self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None).rstrip('\\')
{"golden_diff": "diff --git a/mesonbuild/backend/vs2017backend.py b/mesonbuild/backend/vs2017backend.py\n--- a/mesonbuild/backend/vs2017backend.py\n+++ b/mesonbuild/backend/vs2017backend.py\n@@ -24,4 +24,4 @@\n self.platform_toolset = 'v141'\n self.vs_version = '2017'\n # WindowsSDKVersion should be set by command prompt.\n- self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)\n+ self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None).rstrip('\\\\')\n", "issue": "VS 2017 backend emits bad WindowsTargetPlatformVersion value\nWhen I tried generating a VS 2017 solution, the generated app.vcxproj contained this:\r\n\r\n```\r\n<WindowsTargetPlatformVersion>10.0.14393.0\\</WindowsTargetPlatformVersion>\r\n```\r\n\r\nWhich then causes errors in other `.targets` files attempting to do a numeric comparison against that.\r\nThis value is probably taken straight from one of these environment variables:\r\n\r\n```\r\nWindowsSDKLibVersion=10.0.14393.0\\\r\nWindowsSDKVersion=10.0.14393.0\\\r\n```\r\n\r\nThe trailing backslash is a bit suspect, but may be there intentionally so it can be concatenated to \r\n```\r\nWindowsSdkDir=C:\\Program Files (x86)\\Windows Kits\\10\\\r\n```\r\ndirectly.\n", "before_files": [{"content": "# Copyright 2014-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .vs2010backend import Vs2010Backend\n\n\nclass Vs2017Backend(Vs2010Backend):\n def __init__(self, build):\n super().__init__(build)\n self.name = 'vs2017'\n self.platform_toolset = 'v141'\n self.vs_version = '2017'\n # WindowsSDKVersion should be set by command prompt.\n self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)\n", "path": "mesonbuild/backend/vs2017backend.py"}]}
1,034
145
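Editor's note on the mesonbuild record above: the VS developer prompt exports `WindowsSDKVersion` with a trailing backslash (e.g. `10.0.14393.0\`) so it can be concatenated directly onto `WindowsSdkDir`, and the one-line golden diff strips that backslash before the value reaches `WindowsTargetPlatformVersion`. Below is a minimal sketch of the normalization in isolation; the empty-string default is an assumption added here for safety, since the merged one-liner calls `.rstrip()` on a value that is `None` whenever the variable is unset.

```python
import os

# The VS command prompt sets e.g. WindowsSDKVersion=10.0.14393.0\ (trailing
# backslash included so it can be appended to WindowsSdkDir directly).
# MSBuild's numeric comparison needs the bare version string.
sdk_version = os.getenv('WindowsSDKVersion', '')  # '' default: assumption
windows_target_platform_version = sdk_version.rstrip('\\') or None
print(windows_target_platform_version)  # e.g. 10.0.14393.0
```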
gh_patches_debug_10204
rasdani/github-patches
git_diff
deepchecks__deepchecks-1098
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [FEAT][CV] Add a "per-class" option to property drift & heatmap comparison In this per class option, the drift would be shown per class for the top drifted classes. </issue> <code> [start of docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py] 1 # -*- coding: utf-8 -*- 2 """ 3 Image Property Drift Check 4 ************************** 5 This notebooks provides an overview for using and understanding the image property drift check. 6 7 **Structure:** 8 9 * `How Does the ImagePropertyDrift Check Work? <#how-does-the-imagepropertydrift-check-work>`__ 10 * `Which Image Properties Are Used? <#which-image-properties-are-used>`__ 11 * `Prepare data <#prepare-data>`__ 12 * `Run the check <#run-the-check>`__ 13 * `Define a condition <#define-a-condition>`__ 14 * `Check Parameters <#check-parameters>`__ 15 16 How Does the ImagePropertyDrift Check Work? 17 ================================= 18 Data drift is simply a change in the distribution of data over time. It is also one 19 of the top reasons that a machine learning model performance degrades over time. 20 21 In the context of machine learning, drift between the training set and the test set 22 will likely make the model prone to errors. In other words, if the model was trained 23 on data that is different from the current test data, it will probably make more mistakes 24 predicting the target variable. 25 26 The Image Property Drift check calculates a drift score for each image property in 27 the test dataset, by comparing its distribution to the train dataset. For this, we 28 use the Earth Movers Distance (Wasserstein distance). 29 30 Which Image Properties Are Used? 31 ================================= 32 ============================== ========== 33 Property name What is it 34 ============================== ========== 35 Aspect Ratio Ratio between height and width of image (height / width) 36 Area Area of image in pixels (height * width) 37 Brightness Average intensity of image pixels. Color channels have different weights according to 38 RGB-to-Grayscale formula 39 RMS Contrast Contrast of image, calculated by standard deviation of pixels 40 Mean Red Relative Intensity Mean over all pixels of the red channel, scaled to their relative intensity in 41 comparison to the other channels [r / (r + g + b)]. 42 Mean Green Relative Intensity Mean over all pixels of the green channel, scaled to their relative intensity in 43 comparison to the other channels [g / (r + g + b)]. 44 Mean Blue Relative Intensity Mean over all pixels of the blue channel, scaled to their relative intensity in 45 comparison to the other channels [b / (r + g + b)]. 46 ============================== ========== 47 48 Imports 49 ------- 50 """ 51 52 #%% 53 54 from deepchecks.vision.datasets.detection import coco 55 from deepchecks.vision.checks.distribution import ImagePropertyDrift 56 57 #%% 58 # Prepare data 59 # ------------ 60 from deepchecks.vision.utils import image_properties 61 62 train_dataset = coco.load_dataset(train=True, object_type='VisionData') 63 test_dataset = coco.load_dataset(train=False, object_type='VisionData') 64 65 #%% 66 # Run the check 67 # ------------- 68 69 check_result = ImagePropertyDrift().run(train_dataset, test_dataset) 70 check_result 71 72 #%% 73 # Observe the check’s output 74 # -------------------------- 75 # The result value is a pandas DataFrame that contains drift score for each image property. 
76 77 check_result.value 78 79 #%% 80 # Define a condition 81 # ================== 82 # We can define a condition that make sure that image properties drift scores do not 83 # exceed allowed threshold. 84 85 check_result = ( 86 ImagePropertyDrift() 87 .add_condition_drift_score_not_greater_than(0.001) 88 .run(train_dataset, test_dataset) 89 ) 90 check_result.show(show_additional_outputs=False) 91 92 #%% 93 # Check Parameters 94 # ---------------- 95 # Image Property Drift Check accepts two parameters that allows us to control the look of the output: 96 # 97 # * `image_properties` - list of image properties that we are interested in 98 # * `max_num_categories` - Maximal number of categories to use for the calculation of drift using PSI (Population Stability Index) 99 # 100 # Only next string values are allowed for the `image_properties` parameter: 101 # 102 # * `aspect_ratio` 103 # * `area` 104 # * `brightness` 105 # * `mean_red_relative_intensity` 106 # * `mean_green_relative_intensity` 107 # * `mean_blue_relative_intensity` 108 109 from typing import List 110 import numpy as np 111 112 113 def area(images: List[np.ndarray]) -> List[int]: 114 # Return list of integers of image areas (height multiplied by width) 115 return [img.shape[0] * img.shape[1] for img in images] 116 117 118 def aspect_ratio(images: List[np.ndarray]) -> List[float]: 119 # Return list of floats of image height to width ratio 120 return [img.shape[0] / img.shape[1] for img in images] 121 122 123 properties = [ 124 {'name': 'Area', 'method': area, 'output_type': 'continuous'}, 125 {'name': 'Aspect Ratio', 'method': aspect_ratio, 'output_type': 'continuous'} 126 ] 127 128 check_result = ImagePropertyDrift( 129 alternative_image_properties=properties, 130 max_num_categories=20 131 ).run(train_dataset, test_dataset) 132 133 check_result [end of docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py --- a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py +++ b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py @@ -76,6 +76,17 @@ check_result.value +#%% +# We can also pass the check a list of classes we wish to inspect, and the check will calculate the properties only +# for images either belonging to the classes or containing annotations belonging to the classes. (We'll lower the +# min_samples to 5 to tell the check to calculate drift despite having only a few images left after the class +# filtration) + +check_result = ImagePropertyDrift(classes_to_display=['person', 'cat', 'cell phone', 'car'], min_samples=5 + ).run(train_dataset, test_dataset) +check_result + + #%% # Define a condition # ==================
{"golden_diff": "diff --git a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py\n--- a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py\n+++ b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py\n@@ -76,6 +76,17 @@\n \n check_result.value\n \n+#%%\n+# We can also pass the check a list of classes we wish to inspect, and the check will calculate the properties only\n+# for images either belonging to the classes or containing annotations belonging to the classes. (We'll lower the\n+# min_samples to 5 to tell the check to calculate drift despite having only a few images left after the class\n+# filtration)\n+\n+check_result = ImagePropertyDrift(classes_to_display=['person', 'cat', 'cell phone', 'car'], min_samples=5\n+ ).run(train_dataset, test_dataset)\n+check_result\n+\n+\n #%%\n # Define a condition\n # ==================\n", "issue": "[FEAT][CV] Add a \"per-class\" option to property drift & heatmap comparison\nIn this per class option, the drift would be shown per class for the top drifted classes. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nImage Property Drift Check\n**************************\nThis notebooks provides an overview for using and understanding the image property drift check.\n\n**Structure:**\n\n* `How Does the ImagePropertyDrift Check Work? <#how-does-the-imagepropertydrift-check-work>`__\n* `Which Image Properties Are Used? <#which-image-properties-are-used>`__\n* `Prepare data <#prepare-data>`__\n* `Run the check <#run-the-check>`__\n* `Define a condition <#define-a-condition>`__\n* `Check Parameters <#check-parameters>`__\n\nHow Does the ImagePropertyDrift Check Work?\n=================================\nData drift is simply a change in the distribution of data over time. It is also one\nof the top reasons that a machine learning model performance degrades over time.\n\nIn the context of machine learning, drift between the training set and the test set\nwill likely make the model prone to errors. In other words, if the model was trained\non data that is different from the current test data, it will probably make more mistakes\npredicting the target variable.\n\nThe Image Property Drift check calculates a drift score for each image property in\nthe test dataset, by comparing its distribution to the train dataset. For this, we\nuse the Earth Movers Distance (Wasserstein distance).\n\nWhich Image Properties Are Used?\n=================================\n============================== ==========\nProperty name What is it\n============================== ==========\nAspect Ratio Ratio between height and width of image (height / width)\nArea Area of image in pixels (height * width)\nBrightness Average intensity of image pixels. 
Color channels have different weights according to\n RGB-to-Grayscale formula\nRMS Contrast Contrast of image, calculated by standard deviation of pixels\nMean Red Relative Intensity Mean over all pixels of the red channel, scaled to their relative intensity in\n comparison to the other channels [r / (r + g + b)].\nMean Green Relative Intensity Mean over all pixels of the green channel, scaled to their relative intensity in\n comparison to the other channels [g / (r + g + b)].\nMean Blue Relative Intensity Mean over all pixels of the blue channel, scaled to their relative intensity in\n comparison to the other channels [b / (r + g + b)].\n============================== ==========\n\nImports\n-------\n\"\"\"\n\n#%%\n\nfrom deepchecks.vision.datasets.detection import coco\nfrom deepchecks.vision.checks.distribution import ImagePropertyDrift\n\n#%%\n# Prepare data\n# ------------\nfrom deepchecks.vision.utils import image_properties\n\ntrain_dataset = coco.load_dataset(train=True, object_type='VisionData')\ntest_dataset = coco.load_dataset(train=False, object_type='VisionData')\n\n#%%\n# Run the check \n# -------------\n\ncheck_result = ImagePropertyDrift().run(train_dataset, test_dataset)\ncheck_result\n\n#%%\n# Observe the check\u2019s output \n# --------------------------\n# The result value is a pandas DataFrame that contains drift score for each image property.\n\ncheck_result.value\n\n#%%\n# Define a condition\n# ==================\n# We can define a condition that make sure that image properties drift scores do not\n# exceed allowed threshold.\n\ncheck_result = (\n ImagePropertyDrift()\n .add_condition_drift_score_not_greater_than(0.001)\n .run(train_dataset, test_dataset)\n)\ncheck_result.show(show_additional_outputs=False)\n\n#%%\n# Check Parameters\n# ----------------\n# Image Property Drift Check accepts two parameters that allows us to control the look of the output:\n#\n# * `image_properties` - list of image properties that we are interested in\n# * `max_num_categories` - Maximal number of categories to use for the calculation of drift using PSI (Population Stability Index)\n#\n# Only next string values are allowed for the `image_properties` parameter:\n#\n# * `aspect_ratio`\n# * `area`\n# * `brightness`\n# * `mean_red_relative_intensity`\n# * `mean_green_relative_intensity`\n# * `mean_blue_relative_intensity`\n\nfrom typing import List\nimport numpy as np\n\n\ndef area(images: List[np.ndarray]) -> List[int]:\n # Return list of integers of image areas (height multiplied by width)\n return [img.shape[0] * img.shape[1] for img in images]\n\n\ndef aspect_ratio(images: List[np.ndarray]) -> List[float]:\n # Return list of floats of image height to width ratio\n return [img.shape[0] / img.shape[1] for img in images]\n\n\nproperties = [\n {'name': 'Area', 'method': area, 'output_type': 'continuous'},\n {'name': 'Aspect Ratio', 'method': aspect_ratio, 'output_type': 'continuous'}\n]\n\ncheck_result = ImagePropertyDrift(\n alternative_image_properties=properties,\n max_num_categories=20\n).run(train_dataset, test_dataset)\n\ncheck_result", "path": "docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py"}]}
1,934
227
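The deepchecks record above is notable in that the golden diff answers the feature request purely in documentation: it demonstrates a `classes_to_display` parameter on `ImagePropertyDrift`, which implies the per-class filtering already existed in the check's API. A usage sketch assembled from the tutorial and the added snippet (the class names and the lowered `min_samples` are taken verbatim from the diff):

```python
from deepchecks.vision.datasets.detection import coco
from deepchecks.vision.checks.distribution import ImagePropertyDrift

train_dataset = coco.load_dataset(train=True, object_type='VisionData')
test_dataset = coco.load_dataset(train=False, object_type='VisionData')

# Drift is computed only for images belonging to (or annotated with) the
# listed classes; min_samples is lowered because class filtering can leave
# only a handful of images per split.
result = ImagePropertyDrift(
    classes_to_display=['person', 'cat', 'cell phone', 'car'],
    min_samples=5,
).run(train_dataset, test_dataset)
result
```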
gh_patches_debug_2854
rasdani/github-patches
git_diff
wger-project__wger-170
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BMI And Calorie Calculator Not Working Using this software in Linux Mint 13. When I enter my data into either the BMI calculator or the calorie estimator nothing happens. I have entered my height in cm and my weight in kgs. The BMI calculator says my BMI = 0. I'd be happy with 10. </issue> <code> [start of wger/nutrition/forms.py] 1 # -*- coding: utf-8 -*- 2 3 # This file is part of wger Workout Manager. 4 # 5 # wger Workout Manager is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU Affero General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # wger Workout Manager is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU Affero General Public License 16 17 import logging 18 19 from django import forms 20 from django.utils.translation import ugettext as _ 21 from wger.core.models import UserProfile 22 23 from wger.nutrition.models import IngredientWeightUnit, Ingredient, MealItem 24 from wger.utils.widgets import Html5NumberInput 25 26 27 logger = logging.getLogger(__name__) 28 29 30 class UnitChooserForm(forms.Form): 31 ''' 32 A small form to select an amount and a unit for an ingredient 33 ''' 34 amount = forms.DecimalField(decimal_places=2, 35 max_digits=5, 36 localize=True) 37 unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(), 38 empty_label="g", 39 required=False) 40 41 def __init__(self, *args, **kwargs): 42 super(UnitChooserForm, self).__init__(*args, **kwargs) 43 44 if len(args) and args[0].get('ingredient'): 45 ingredient_id = args[0]['ingredient'] 46 47 elif kwargs.get('data'): 48 ingredient_id = kwargs['data']['ingredient_id'] 49 50 else: 51 ingredient_id = -1 52 53 self.fields['unit'].queryset = IngredientWeightUnit.objects.filter( 54 ingredient_id=ingredient_id).select_related() 55 56 57 class BmiForm(forms.ModelForm): 58 weight = forms.DecimalField(widget=Html5NumberInput(), 59 max_value=999) 60 61 class Meta: 62 model = UserProfile 63 fields = ('height', ) 64 65 66 class BmrForm(forms.ModelForm): 67 ''' 68 Form for the basal metabolic rate 69 ''' 70 weight = forms.DecimalField(widget=Html5NumberInput()) 71 72 class Meta: 73 model = UserProfile 74 fields = ('age', 'height', 'gender') 75 76 77 class PhysicalActivitiesForm(forms.ModelForm): 78 ''' 79 Form for the additional physical activities 80 ''' 81 class Meta: 82 model = UserProfile 83 fields = ('sleep_hours', 84 'work_hours', 85 'work_intensity', 86 'sport_hours', 87 'sport_intensity', 88 'freetime_hours', 89 'freetime_intensity') 90 91 92 class DailyCaloriesForm(forms.ModelForm): 93 ''' 94 Form for the total daily calories needed 95 ''' 96 97 base_calories = forms.IntegerField(label=_('Basic caloric intake'), 98 help_text=_('Your basic caloric intake as calculated for ' 99 'your data'), 100 required=False, 101 widget=Html5NumberInput()) 102 additional_calories = forms.IntegerField(label=_('Additional calories'), 103 help_text=_('Additional calories to add to the base ' 104 'rate (to substract, enter a negative ' 105 'number)'), 106 initial=0, 107 required=False, 108 widget=Html5NumberInput()) 109 110 class Meta: 111 model = 
UserProfile 112 fields = ('calories',) 113 114 115 class MealItemForm(forms.ModelForm): 116 weight_unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(), 117 empty_label="g", 118 required=False) 119 ingredient = forms.ModelChoiceField(queryset=Ingredient.objects.all(), 120 widget=forms.HiddenInput) 121 122 class Meta: 123 model = MealItem 124 fields = '__all__' 125 126 def __init__(self, *args, **kwargs): 127 super(MealItemForm, self).__init__(*args, **kwargs) 128 129 # Get the ingredient_id 130 ingredient_id = None 131 132 if kwargs.get('instance'): 133 ingredient_id = kwargs['instance'].ingredient_id 134 135 if kwargs.get('data'): 136 ingredient_id = kwargs['data']['ingredient'] 137 138 # Filter the available ingredients 139 if ingredient_id: 140 self.fields['weight_unit'].queryset = \ 141 IngredientWeightUnit.objects.filter(ingredient_id=ingredient_id) 142 [end of wger/nutrition/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wger/nutrition/forms.py b/wger/nutrition/forms.py --- a/wger/nutrition/forms.py +++ b/wger/nutrition/forms.py @@ -55,6 +55,9 @@ class BmiForm(forms.ModelForm): + height = forms.DecimalField(widget=Html5NumberInput(), + max_value=999, + label=_('Height (cm)')) weight = forms.DecimalField(widget=Html5NumberInput(), max_value=999)
{"golden_diff": "diff --git a/wger/nutrition/forms.py b/wger/nutrition/forms.py\n--- a/wger/nutrition/forms.py\n+++ b/wger/nutrition/forms.py\n@@ -55,6 +55,9 @@\n \n \n class BmiForm(forms.ModelForm):\n+ height = forms.DecimalField(widget=Html5NumberInput(),\n+ max_value=999,\n+ label=_('Height (cm)'))\n weight = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999)\n", "issue": "BMI And Calorie Calculator Not Working\nUsing this software in Linux Mint 13.\nWhen I enter my data into either the BMI calculator or the calorie estimator nothing happens.\nI have entered my height in cm and my weight in kgs.\nThe BMI calculator says my BMI = 0.\nI'd be happy with 10.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\nfrom wger.core.models import UserProfile\n\nfrom wger.nutrition.models import IngredientWeightUnit, Ingredient, MealItem\nfrom wger.utils.widgets import Html5NumberInput\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnitChooserForm(forms.Form):\n '''\n A small form to select an amount and a unit for an ingredient\n '''\n amount = forms.DecimalField(decimal_places=2,\n max_digits=5,\n localize=True)\n unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n\n def __init__(self, *args, **kwargs):\n super(UnitChooserForm, self).__init__(*args, **kwargs)\n\n if len(args) and args[0].get('ingredient'):\n ingredient_id = args[0]['ingredient']\n\n elif kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient_id']\n\n else:\n ingredient_id = -1\n\n self.fields['unit'].queryset = IngredientWeightUnit.objects.filter(\n ingredient_id=ingredient_id).select_related()\n\n\nclass BmiForm(forms.ModelForm):\n weight = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999)\n\n class Meta:\n model = UserProfile\n fields = ('height', )\n\n\nclass BmrForm(forms.ModelForm):\n '''\n Form for the basal metabolic rate\n '''\n weight = forms.DecimalField(widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('age', 'height', 'gender')\n\n\nclass PhysicalActivitiesForm(forms.ModelForm):\n '''\n Form for the additional physical activities\n '''\n class Meta:\n model = UserProfile\n fields = ('sleep_hours',\n 'work_hours',\n 'work_intensity',\n 'sport_hours',\n 'sport_intensity',\n 'freetime_hours',\n 'freetime_intensity')\n\n\nclass DailyCaloriesForm(forms.ModelForm):\n '''\n Form for the total daily calories needed\n '''\n\n base_calories = forms.IntegerField(label=_('Basic caloric intake'),\n help_text=_('Your basic caloric intake as calculated for '\n 'your data'),\n required=False,\n widget=Html5NumberInput())\n additional_calories = forms.IntegerField(label=_('Additional calories'),\n help_text=_('Additional calories to add to the base '\n 'rate 
(to substract, enter a negative '\n 'number)'),\n initial=0,\n required=False,\n widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('calories',)\n\n\nclass MealItemForm(forms.ModelForm):\n weight_unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n ingredient = forms.ModelChoiceField(queryset=Ingredient.objects.all(),\n widget=forms.HiddenInput)\n\n class Meta:\n model = MealItem\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MealItemForm, self).__init__(*args, **kwargs)\n\n # Get the ingredient_id\n ingredient_id = None\n\n if kwargs.get('instance'):\n ingredient_id = kwargs['instance'].ingredient_id\n\n if kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient']\n\n # Filter the available ingredients\n if ingredient_id:\n self.fields['weight_unit'].queryset = \\\n IngredientWeightUnit.objects.filter(ingredient_id=ingredient_id)\n", "path": "wger/nutrition/forms.py"}]}
1,829
112
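The wger fix above works by declaring `height` explicitly on `BmiForm` rather than relying on the field `ModelForm` auto-generates from `UserProfile`, attaching an HTML5 number widget and a `Height (cm)` label so the calculator reliably receives a numeric value. The patched form, reassembled from the record and its golden diff:

```python
from django import forms
from django.utils.translation import ugettext as _

from wger.core.models import UserProfile
from wger.utils.widgets import Html5NumberInput


class BmiForm(forms.ModelForm):
    # Explicit declaration overrides the auto-generated model field and
    # forces a numeric HTML5 input with an unambiguous unit in the label.
    height = forms.DecimalField(widget=Html5NumberInput(),
                                max_value=999,
                                label=_('Height (cm)'))
    weight = forms.DecimalField(widget=Html5NumberInput(),
                                max_value=999)

    class Meta:
        model = UserProfile
        fields = ('height', )
```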
gh_patches_debug_13384
rasdani/github-patches
git_diff
great-expectations__great_expectations-5460
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` </issue> <code> [start of great_expectations/expectations/metrics/query_metrics/query_table.py] 1 from typing import Any, Dict, List, Optional, Union 2 3 from great_expectations.core.metric_domain_types import MetricDomainTypes 4 from great_expectations.execution_engine import ( 5 SparkDFExecutionEngine, 6 SqlAlchemyExecutionEngine, 7 ) 8 from great_expectations.expectations.metrics.import_manager import ( 9 pyspark_sql_DataFrame, 10 pyspark_sql_Row, 11 pyspark_sql_SparkSession, 12 sa, 13 sqlalchemy_engine_Engine, 14 sqlalchemy_engine_Row, 15 ) 16 from great_expectations.expectations.metrics.metric_provider import metric_value 17 from great_expectations.expectations.metrics.query_metric_provider import ( 18 QueryMetricProvider, 19 ) 20 21 22 class QueryTable(QueryMetricProvider): 23 metric_name = "query.table" 24 value_keys = ("query",) 25 26 @metric_value(engine=SqlAlchemyExecutionEngine) 27 def _sqlalchemy( 28 cls, 29 execution_engine: SqlAlchemyExecutionEngine, 30 metric_domain_kwargs: dict, 31 metric_value_kwargs: dict, 32 metrics: Dict[str, Any], 33 runtime_configuration: dict, 34 ) -> List[sqlalchemy_engine_Row]: 35 query: Optional[str] = metric_value_kwargs.get( 36 "query" 37 ) or cls.default_kwarg_values.get("query") 38 39 selectable: Union[sa.sql.Selectable, str] 40 selectable, _, _ = execution_engine.get_compute_domain( 41 metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE 42 ) 43 44 if isinstance(selectable, sa.Table): 45 query = query.format(active_batch=selectable) 46 elif isinstance( 47 selectable, sa.sql.Subquery 48 ): # Specifying a runtime query in a RuntimeBatchRequest returns the active bacth as a Subquery; sectioning the active batch off w/ parentheses ensures flow of operations doesn't break 49 query = query.format(active_batch=f"({selectable})") 50 elif isinstance( 51 selectable, sa.sql.Select 52 ): # Specifying a row_condition returns the active batch as a Select object, requiring compilation & aliasing when formatting the parameterized query 53 query = query.format( 54 active_batch=f'({selectable.compile(compile_kwargs={"literal_binds": True})}) AS subselect', 55 ) 56 else: 57 query = query.format(active_batch=f"({selectable})") 58 59 engine: sqlalchemy_engine_Engine = execution_engine.engine 60 result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall() 61 62 return result 63 64 @metric_value(engine=SparkDFExecutionEngine) 65 def _spark( 66 cls, 67 execution_engine: SparkDFExecutionEngine, 68 metric_domain_kwargs: dict, 69 metric_value_kwargs: dict, 70 metrics: Dict[str, Any], 71 runtime_configuration: dict, 72 ) -> List[pyspark_sql_Row]: 73 query: Optional[str] = metric_value_kwargs.get( 74 "query" 75 ) or cls.default_kwarg_values.get("query") 76 77 df: pyspark_sql_DataFrame 78 df, _, _ = execution_engine.get_compute_domain( 79 metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE 80 ) 81 82 df.createOrReplaceTempView("tmp_view") 83 query = query.format(active_batch="tmp_view") 84 85 engine: pyspark_sql_SparkSession = execution_engine.spark 86 result: List[pyspark_sql_Row] = engine.sql(query).collect() 87 88 return result 89 [end of great_expectations/expectations/metrics/query_metrics/query_table.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this 
repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/great_expectations/expectations/metrics/query_metrics/query_table.py b/great_expectations/expectations/metrics/query_metrics/query_table.py --- a/great_expectations/expectations/metrics/query_metrics/query_table.py +++ b/great_expectations/expectations/metrics/query_metrics/query_table.py @@ -23,6 +23,7 @@ metric_name = "query.table" value_keys = ("query",) + # <snippet> @metric_value(engine=SqlAlchemyExecutionEngine) def _sqlalchemy( cls, @@ -60,6 +61,7 @@ result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall() return result + # </snippet> @metric_value(engine=SparkDFExecutionEngine) def _spark(
{"golden_diff": "diff --git a/great_expectations/expectations/metrics/query_metrics/query_table.py b/great_expectations/expectations/metrics/query_metrics/query_table.py\n--- a/great_expectations/expectations/metrics/query_metrics/query_table.py\n+++ b/great_expectations/expectations/metrics/query_metrics/query_table.py\n@@ -23,6 +23,7 @@\n metric_name = \"query.table\"\n value_keys = (\"query\",)\n \n+ # <snippet>\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n@@ -60,6 +61,7 @@\n result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()\n \n return result\n+ # </snippet>\n \n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional, Union\n\nfrom great_expectations.core.metric_domain_types import MetricDomainTypes\nfrom great_expectations.execution_engine import (\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import (\n pyspark_sql_DataFrame,\n pyspark_sql_Row,\n pyspark_sql_SparkSession,\n sa,\n sqlalchemy_engine_Engine,\n sqlalchemy_engine_Row,\n)\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.expectations.metrics.query_metric_provider import (\n QueryMetricProvider,\n)\n\n\nclass QueryTable(QueryMetricProvider):\n metric_name = \"query.table\"\n value_keys = (\"query\",)\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: dict,\n metric_value_kwargs: dict,\n metrics: Dict[str, Any],\n runtime_configuration: dict,\n ) -> List[sqlalchemy_engine_Row]:\n query: Optional[str] = metric_value_kwargs.get(\n \"query\"\n ) or cls.default_kwarg_values.get(\"query\")\n\n selectable: Union[sa.sql.Selectable, str]\n selectable, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n\n if isinstance(selectable, sa.Table):\n query = query.format(active_batch=selectable)\n elif isinstance(\n selectable, sa.sql.Subquery\n ): # Specifying a runtime query in a RuntimeBatchRequest returns the active bacth as a Subquery; sectioning the active batch off w/ parentheses ensures flow of operations doesn't break\n query = query.format(active_batch=f\"({selectable})\")\n elif isinstance(\n selectable, sa.sql.Select\n ): # Specifying a row_condition returns the active batch as a Select object, requiring compilation & aliasing when formatting the parameterized query\n query = query.format(\n active_batch=f'({selectable.compile(compile_kwargs={\"literal_binds\": True})}) AS subselect',\n )\n else:\n query = query.format(active_batch=f\"({selectable})\")\n\n engine: sqlalchemy_engine_Engine = execution_engine.engine\n result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()\n\n return result\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: dict,\n metric_value_kwargs: dict,\n metrics: Dict[str, Any],\n runtime_configuration: dict,\n ) -> List[pyspark_sql_Row]:\n query: Optional[str] = metric_value_kwargs.get(\n \"query\"\n ) or cls.default_kwarg_values.get(\"query\")\n\n df: pyspark_sql_DataFrame\n df, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, 
domain_type=MetricDomainTypes.TABLE\n )\n\n df.createOrReplaceTempView(\"tmp_view\")\n query = query.format(active_batch=\"tmp_view\")\n\n engine: pyspark_sql_SparkSession = execution_engine.spark\n result: List[pyspark_sql_Row] = engine.sql(query).collect()\n\n return result\n", "path": "great_expectations/expectations/metrics/query_metrics/query_table.py"}]}
1,448
184
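Note the mismatch in the record above: the issue asks for `from __future__ import division` in Python 2 code, but the recorded golden diff only wraps the SQLAlchemy metric in `# <snippet>` / `# </snippet>` comment markers, presumably so the project's documentation tooling can extract that region. The extractor itself is not part of the record; the function below is a hypothetical sketch of what such tooling might do.

```python
import re
from typing import List

SNIPPET_RE = re.compile(r"#\s*<snippet>\n(.*?)\n\s*#\s*</snippet>", re.DOTALL)


def extract_snippets(source: str) -> List[str]:
    """Return the code regions delimited by '# <snippet>' markers."""
    return SNIPPET_RE.findall(source)


# Hypothetical usage:
# snippets = extract_snippets(open("query_table.py").read())
```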
gh_patches_debug_31612
rasdani/github-patches
git_diff
tough-dev-school__education-backend-855
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Send only one answer from each user into the lottery drum Right now, if a user has given three answers to a homework assignment, then during cross-checking all three answers will go out to different students. This is bad: we should either send only the first answer, or collect all the answers into a batch and send them to a single user. </issue> <code> [start of src/homework/services/answer_crosscheck_dispatcher.py] 1 from typing import Optional 2 3 from django.db import transaction 4 from django.db.models import Count, Q, QuerySet 5 6 from homework.models import Answer, AnswerCrossCheck 7 from users.models import User 8 9 10 class AnswerCrossCheckDispatcher: 11 """Given a bunch of answers and users, create a cross-check record 12 for each of them, making sure each answer has a user to check 13 and number of answers if equal for each user 14 """ 15 def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3): 16 self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers]) 17 self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?') 18 self.answers_per_user = answers_per_user 19 20 @transaction.atomic 21 def __call__(self) -> list[AnswerCrossCheck]: 22 crosschecks = list() 23 for user in self.users.iterator(): 24 for _ in range(self.answers_per_user): 25 answer = self.get_answer_to_check(user) 26 if answer is not None: 27 crosschecks.append( 28 self.give_answer_to_user(answer, user), 29 ) 30 31 return crosschecks 32 33 def get_answer_to_check(self, user: User) -> Optional[Answer]: 34 return self.get_answers_with_crosscheck_count() \ 35 .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \ 36 .exclude(already_checking__gte=1) \ 37 .exclude(author=user) \ 38 .exclude(do_not_crosscheck=True) \ 39 .order_by('crosscheck_count').first() 40 41 def give_answer_to_user(self, answer: Answer, user: User) -> AnswerCrossCheck: 42 return AnswerCrossCheck.objects.create(answer=answer, checker=user) 43 44 def get_answers_with_crosscheck_count(self) -> QuerySet[Answer]: 45 return self.answers.annotate( 46 crosscheck_count=Count('answercrosscheck', filter=Q(answercrosscheck__checker__in=self.users)), 47 ) 48 [end of src/homework/services/answer_crosscheck_dispatcher.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/homework/services/answer_crosscheck_dispatcher.py b/src/homework/services/answer_crosscheck_dispatcher.py --- a/src/homework/services/answer_crosscheck_dispatcher.py +++ b/src/homework/services/answer_crosscheck_dispatcher.py @@ -9,11 +9,12 @@ class AnswerCrossCheckDispatcher: """Given a bunch of answers and users, create a cross-check record - for each of them, making sure each answer has a user to check - and number of answers if equal for each user + for each of them, making sure the first answer of each user has a user to + check and number of answers if equal for each user """ def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3): self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers]) + self.unique_author_answers = self.answers.order_by('author_id', 'created').distinct('author_id') self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?') self.answers_per_user = answers_per_user @@ -27,11 +28,11 @@ crosschecks.append( self.give_answer_to_user(answer, user), ) - return crosschecks def get_answer_to_check(self, user: User) -> Optional[Answer]: return self.get_answers_with_crosscheck_count() \ + .filter(id__in=self.unique_author_answers) \ .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \ .exclude(already_checking__gte=1) \ .exclude(author=user) \
{"golden_diff": "diff --git a/src/homework/services/answer_crosscheck_dispatcher.py b/src/homework/services/answer_crosscheck_dispatcher.py\n--- a/src/homework/services/answer_crosscheck_dispatcher.py\n+++ b/src/homework/services/answer_crosscheck_dispatcher.py\n@@ -9,11 +9,12 @@\n \n class AnswerCrossCheckDispatcher:\n \"\"\"Given a bunch of answers and users, create a cross-check record\n- for each of them, making sure each answer has a user to check\n- and number of answers if equal for each user\n+ for each of them, making sure the first answer of each user has a user to\n+ check and number of answers if equal for each user\n \"\"\"\n def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):\n self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])\n+ self.unique_author_answers = self.answers.order_by('author_id', 'created').distinct('author_id')\n self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')\n self.answers_per_user = answers_per_user\n \n@@ -27,11 +28,11 @@\n crosschecks.append(\n self.give_answer_to_user(answer, user),\n )\n-\n return crosschecks\n \n def get_answer_to_check(self, user: User) -> Optional[Answer]:\n return self.get_answers_with_crosscheck_count() \\\n+ .filter(id__in=self.unique_author_answers) \\\n .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \\\n .exclude(already_checking__gte=1) \\\n .exclude(author=user) \\\n", "issue": "\u0421\u043b\u0430\u0442\u044c \u0432 \u043b\u043e\u0442\u043e\u0442\u0440\u043e\u043d \u0442\u043e\u043b\u044c\u043a\u043e \u043e\u0434\u0438\u043d \u043e\u0442\u0432\u0435\u0442 \u043e\u0442 \u043e\u0434\u043d\u043e\u0433\u043e \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f\n\u0421\u0435\u0439\u0447\u0430\u0441, \u0435\u0441\u043b\u0438 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u0434\u0430\u043b \u0442\u0440\u0438 \u043e\u0442\u0432\u0435\u0442\u0430 \u043d\u0430 \u0434\u043e\u043c\u0430\u0448\u043a\u0443, \u0442\u043e \u043f\u0440\u0438 \u043a\u0440\u043e\u0441\u0441-\u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0435 \u0432\u0441\u0435 \u0442\u0440\u0438 \u043e\u0442\u0432\u0435\u0442\u0430 \u0443\u0439\u0434\u0443\u0442 \u0440\u0430\u0437\u043d\u044b\u043c \u0441\u0442\u0443\u0434\u0435\u043d\u0442\u0430\u043c. 
\u042d\u0442\u043e \u2014 \u043f\u043b\u043e\u0445\u043e, \u043d\u0443\u0436\u043d\u043e \u043b\u0438\u0431\u043e \u0441\u043b\u0430\u0442\u044c \u0442\u043e\u043b\u044c\u043a\u043e \u043f\u0435\u0440\u0432\u044b\u0439 \u043e\u0442\u0432\u0435\u0442, \u043b\u0438\u0431\u043e \u0441\u043e\u0431\u0438\u0440\u0430\u0442\u044c \u0432\u0441\u0435 \u043e\u0442\u0432\u0435\u0442\u044b \u0432 \u043f\u0430\u0447\u043a\u0443 \u0438 \u0441\u043b\u0430\u0442\u044c \u0438\u0445 \u043e\u0434\u043d\u043e\u043c\u0443 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044e.\n", "before_files": [{"content": "from typing import Optional\n\nfrom django.db import transaction\nfrom django.db.models import Count, Q, QuerySet\n\nfrom homework.models import Answer, AnswerCrossCheck\nfrom users.models import User\n\n\nclass AnswerCrossCheckDispatcher:\n \"\"\"Given a bunch of answers and users, create a cross-check record\n for each of them, making sure each answer has a user to check\n and number of answers if equal for each user\n \"\"\"\n def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):\n self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])\n self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')\n self.answers_per_user = answers_per_user\n\n @transaction.atomic\n def __call__(self) -> list[AnswerCrossCheck]:\n crosschecks = list()\n for user in self.users.iterator():\n for _ in range(self.answers_per_user):\n answer = self.get_answer_to_check(user)\n if answer is not None:\n crosschecks.append(\n self.give_answer_to_user(answer, user),\n )\n\n return crosschecks\n\n def get_answer_to_check(self, user: User) -> Optional[Answer]:\n return self.get_answers_with_crosscheck_count() \\\n .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \\\n .exclude(already_checking__gte=1) \\\n .exclude(author=user) \\\n .exclude(do_not_crosscheck=True) \\\n .order_by('crosscheck_count').first()\n\n def give_answer_to_user(self, answer: Answer, user: User) -> AnswerCrossCheck:\n return AnswerCrossCheck.objects.create(answer=answer, checker=user)\n\n def get_answers_with_crosscheck_count(self) -> QuerySet[Answer]:\n return self.answers.annotate(\n crosscheck_count=Count('answercrosscheck', filter=Q(answercrosscheck__checker__in=self.users)),\n )\n", "path": "src/homework/services/answer_crosscheck_dispatcher.py"}]}
1,163
383
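The dispatcher fix above de-duplicates before distribution: ordering by `author_id, created` and applying `distinct('author_id')` keeps only each author's earliest answer, and `get_answer_to_check` is then restricted to that set, so a student who submitted three answers has only the first one enter the lottery. Note that `distinct(*fields)` compiles to SQL `DISTINCT ON`, a PostgreSQL-only feature in Django. A condensed sketch of the pattern, assuming the `Answer` model from the record:

```python
from homework.models import Answer  # model from the record above

# DISTINCT ON (author_id) keeps the first row per author after the ORDER BY,
# so sorting by 'created' second is what selects the *earliest* answer.
unique_author_answers = (
    Answer.objects.order_by('author_id', 'created').distinct('author_id')
)

# Candidate answers for a checker are then narrowed to that set:
candidates = Answer.objects.filter(id__in=unique_author_answers)
```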
gh_patches_debug_17240
rasdani/github-patches
git_diff
napari__napari-6139
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Removing comments from PR does not work ## 🐛 Bug After merging it looks like the action for removing comments does not work. I will be happy to fast merge potential bugfix without the standard 24 hours as it needs to be merged to test. </issue> <code> [start of tools/remove_html_comments_from_pr.py] 1 """ 2 Edit pull request description to remove HTML comments 3 4 We might want to remove section with markdown task lists that are completely empty 5 """ 6 7 import re 8 import sys 9 from os import environ 10 11 import requests 12 13 14 def remove_html_comments(text): 15 # Regular expression to remove HTML comments 16 # [^\S\r\n] is whitespace but not new line 17 html_comment_pattern = r"[^\S\r\n]*<!--(.*?)-->[^\S\r\n]*\n?" 18 return re.sub(html_comment_pattern, "", text, flags=re.DOTALL) 19 20 21 def edit_pull_request_description(repo, pull_request_number, access_token): 22 # GitHub API base URL 23 base_url = "https://api.github.com" 24 25 # Prepare the headers with the access token 26 headers = {"Authorization": f"token {access_token}"} 27 28 # Get the current pull request description 29 pr_url = f"{base_url}/repos/{repo}/pulls/{pull_request_number}" 30 response = requests.get(pr_url, headers=headers) 31 response.raise_for_status() 32 response_json = response.json() 33 current_description = response_json["body"] 34 35 # Remove HTML comments from the description 36 edited_description = remove_html_comments(current_description) 37 if edited_description == current_description: 38 print("No HTML comments found in the pull request description") 39 return 40 41 # Update the pull request description 42 update_pr_url = f"{base_url}/repos/{repo}/pulls/{pull_request_number}" 43 payload = {"body": edited_description} 44 response = requests.patch(update_pr_url, json=payload, headers=headers) 45 response.raise_for_status() 46 47 if response.status_code == 200: 48 print( 49 f"Pull request #{pull_request_number} description has been updated successfully!" 50 ) 51 else: 52 print( 53 f"Failed to update pull request description. Status code: {response.status_code}" 54 ) 55 56 57 if __name__ == "__main__": 58 # Replace with your repository and pull request number 59 # get cuurrent repository name from github actions 60 repository_name = environ.get("GITHUB_REPOSITORY") 61 if repository_name == "napari/napari": 62 sys.exit(0) 63 64 # get current PR number from github actions 65 github_ref = environ.get("GITHUB_REF") 66 refs, pull, number, merge = github_ref.split('/') 67 assert refs == 'refs' 68 assert pull == 'pull' 69 assert merge == 'merge' 70 71 # Replace with your GitHub access token 72 access_token = environ.get("GITHUB_TOKEN") 73 74 edit_pull_request_description(repository_name, number, access_token) 75 [end of tools/remove_html_comments_from_pr.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/remove_html_comments_from_pr.py b/tools/remove_html_comments_from_pr.py --- a/tools/remove_html_comments_from_pr.py +++ b/tools/remove_html_comments_from_pr.py @@ -10,6 +10,8 @@ import requests +REPO = 'napari/napari' + def remove_html_comments(text): # Regular expression to remove HTML comments @@ -55,10 +57,12 @@ if __name__ == "__main__": + print('Will inspect PR description to remove html comments.') # Replace with your repository and pull request number # get cuurrent repository name from github actions repository_name = environ.get("GITHUB_REPOSITORY") - if repository_name == "napari/napari": + if repository_name != REPO: + print('Not on main repo, aborting with success') sys.exit(0) # get current PR number from github actions
{"golden_diff": "diff --git a/tools/remove_html_comments_from_pr.py b/tools/remove_html_comments_from_pr.py\n--- a/tools/remove_html_comments_from_pr.py\n+++ b/tools/remove_html_comments_from_pr.py\n@@ -10,6 +10,8 @@\n \n import requests\n \n+REPO = 'napari/napari'\n+\n \n def remove_html_comments(text):\n # Regular expression to remove HTML comments\n@@ -55,10 +57,12 @@\n \n \n if __name__ == \"__main__\":\n+ print('Will inspect PR description to remove html comments.')\n # Replace with your repository and pull request number\n # get cuurrent repository name from github actions\n repository_name = environ.get(\"GITHUB_REPOSITORY\")\n- if repository_name == \"napari/napari\":\n+ if repository_name != REPO:\n+ print('Not on main repo, aborting with success')\n sys.exit(0)\n \n # get current PR number from github actions\n", "issue": "Removing comments from PR does not work\n## \ud83d\udc1b Bug\r\nAfter merging it looks like the action for removing comments does not work. \r\n\r\nI will be happy to fast merge potential bugfix without the standard 24 hours as it needs to be merged to test. \r\n\n", "before_files": [{"content": "\"\"\"\nEdit pull request description to remove HTML comments\n\nWe might want to remove section with markdown task lists that are completely empty\n\"\"\"\n\nimport re\nimport sys\nfrom os import environ\n\nimport requests\n\n\ndef remove_html_comments(text):\n # Regular expression to remove HTML comments\n # [^\\S\\r\\n] is whitespace but not new line\n html_comment_pattern = r\"[^\\S\\r\\n]*<!--(.*?)-->[^\\S\\r\\n]*\\n?\"\n return re.sub(html_comment_pattern, \"\", text, flags=re.DOTALL)\n\n\ndef edit_pull_request_description(repo, pull_request_number, access_token):\n # GitHub API base URL\n base_url = \"https://api.github.com\"\n\n # Prepare the headers with the access token\n headers = {\"Authorization\": f\"token {access_token}\"}\n\n # Get the current pull request description\n pr_url = f\"{base_url}/repos/{repo}/pulls/{pull_request_number}\"\n response = requests.get(pr_url, headers=headers)\n response.raise_for_status()\n response_json = response.json()\n current_description = response_json[\"body\"]\n\n # Remove HTML comments from the description\n edited_description = remove_html_comments(current_description)\n if edited_description == current_description:\n print(\"No HTML comments found in the pull request description\")\n return\n\n # Update the pull request description\n update_pr_url = f\"{base_url}/repos/{repo}/pulls/{pull_request_number}\"\n payload = {\"body\": edited_description}\n response = requests.patch(update_pr_url, json=payload, headers=headers)\n response.raise_for_status()\n\n if response.status_code == 200:\n print(\n f\"Pull request #{pull_request_number} description has been updated successfully!\"\n )\n else:\n print(\n f\"Failed to update pull request description. 
Status code: {response.status_code}\"\n )\n\n\nif __name__ == \"__main__\":\n # Replace with your repository and pull request number\n # get cuurrent repository name from github actions\n repository_name = environ.get(\"GITHUB_REPOSITORY\")\n if repository_name == \"napari/napari\":\n sys.exit(0)\n\n # get current PR number from github actions\n github_ref = environ.get(\"GITHUB_REF\")\n refs, pull, number, merge = github_ref.split('/')\n assert refs == 'refs'\n assert pull == 'pull'\n assert merge == 'merge'\n\n # Replace with your GitHub access token\n access_token = environ.get(\"GITHUB_TOKEN\")\n\n edit_pull_request_description(repository_name, number, access_token)\n", "path": "tools/remove_html_comments_from_pr.py"}]}
1,295
208
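The napari bug above boils down to an inverted guard: the original script exited early when `GITHUB_REPOSITORY` equalled `napari/napari`, i.e. in exactly the one repository where it was meant to run, so PR descriptions were never edited there. The patch flips the comparison and adds progress logging so future failures are visible in the action output. The guard in isolation:

```python
import sys
from os import environ

REPO = 'napari/napari'

print('Will inspect PR description to remove html comments.')
repository_name = environ.get("GITHUB_REPOSITORY")
if repository_name != REPO:  # was `== "napari/napari"`, which inverted the check
    print('Not on main repo, aborting with success')
    sys.exit(0)
```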
gh_patches_debug_14081
rasdani/github-patches
git_diff
pypi__warehouse-439
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Double check conditional HTTP implementation The conditional HTTP implementation doesn't check the status code of the response at all. Determine if it should, and if it should update it to do the right thing. </issue> <code> [start of warehouse/cache/http.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import collections.abc 14 import functools 15 16 from pyramid.tweens import EXCVIEW 17 18 19 def _add_vary_callback(*varies): 20 def inner(request, response): 21 vary = set(response.vary if response.vary is not None else []) 22 vary |= set(varies) 23 response.vary = vary 24 return inner 25 26 27 def add_vary(*varies): 28 def inner(view): 29 @functools.wraps(view) 30 def wrapped(context, request): 31 request.add_response_callback(_add_vary_callback(*varies)) 32 return view(context, request) 33 return wrapped 34 return inner 35 36 37 def cache_control(seconds, public=True): 38 def inner(view): 39 @functools.wraps(view) 40 def wrapped(context, request): 41 response = view(context, request) 42 43 if not request.registry.settings.get( 44 "pyramid.prevent_http_cache", False): 45 if seconds: 46 if public: 47 response.cache_control.public = True 48 else: 49 response.cache_control.private = True 50 51 response.cache_control.max_age = seconds 52 else: 53 response.cache_control.no_cache = True 54 response.cache_control.no_store = True 55 response.cache_control.must_revalidate = True 56 57 return response 58 return wrapped 59 return inner 60 61 62 def conditional_http_tween_factory(handler, registry): 63 def conditional_http_tween(request): 64 response = handler(request) 65 66 # If the Last-Modified header has been set, we want to enable the 67 # conditional response processing. 68 if response.last_modified is not None: 69 response.conditional_response = True 70 71 # We want to only enable the conditional machinery if either we 72 # were given an explicit ETag header by the view or we have a 73 # buffered response and can generate the ETag header ourself. 74 if response.etag is not None: 75 response.conditional_response = True 76 elif (isinstance(response.app_iter, collections.abc.Sequence) and 77 len(response.app_iter) == 1): 78 response.conditional_response = True 79 response.md5_etag() 80 81 return response 82 return conditional_http_tween 83 84 85 def includeme(config): 86 config.add_tween( 87 "warehouse.cache.http.conditional_http_tween_factory", 88 under=EXCVIEW, 89 ) 90 [end of warehouse/cache/http.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/cache/http.py b/warehouse/cache/http.py --- a/warehouse/cache/http.py +++ b/warehouse/cache/http.py @@ -75,8 +75,13 @@ response.conditional_response = True elif (isinstance(response.app_iter, collections.abc.Sequence) and len(response.app_iter) == 1): - response.conditional_response = True - response.md5_etag() + # We can only reasonably implement automatic ETags on 200 responses + # to GET or HEAD requests. The subtles of doing it in other cases + # are too hard to get right. + if (request.method in {"GET", "HEAD"} and + response.status_code == 200): + response.conditional_response = True + response.md5_etag() return response return conditional_http_tween
{"golden_diff": "diff --git a/warehouse/cache/http.py b/warehouse/cache/http.py\n--- a/warehouse/cache/http.py\n+++ b/warehouse/cache/http.py\n@@ -75,8 +75,13 @@\n response.conditional_response = True\n elif (isinstance(response.app_iter, collections.abc.Sequence) and\n len(response.app_iter) == 1):\n- response.conditional_response = True\n- response.md5_etag()\n+ # We can only reasonably implement automatic ETags on 200 responses\n+ # to GET or HEAD requests. The subtles of doing it in other cases\n+ # are too hard to get right.\n+ if (request.method in {\"GET\", \"HEAD\"} and\n+ response.status_code == 200):\n+ response.conditional_response = True\n+ response.md5_etag()\n \n return response\n return conditional_http_tween\n", "issue": "Double check conditional HTTP implementation\nThe conditional HTTP implementation doesn't check the status code of the response at all. Determine if it should, and if it should update it to do the right thing.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections.abc\nimport functools\n\nfrom pyramid.tweens import EXCVIEW\n\n\ndef _add_vary_callback(*varies):\n def inner(request, response):\n vary = set(response.vary if response.vary is not None else [])\n vary |= set(varies)\n response.vary = vary\n return inner\n\n\ndef add_vary(*varies):\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n request.add_response_callback(_add_vary_callback(*varies))\n return view(context, request)\n return wrapped\n return inner\n\n\ndef cache_control(seconds, public=True):\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n response = view(context, request)\n\n if not request.registry.settings.get(\n \"pyramid.prevent_http_cache\", False):\n if seconds:\n if public:\n response.cache_control.public = True\n else:\n response.cache_control.private = True\n\n response.cache_control.max_age = seconds\n else:\n response.cache_control.no_cache = True\n response.cache_control.no_store = True\n response.cache_control.must_revalidate = True\n\n return response\n return wrapped\n return inner\n\n\ndef conditional_http_tween_factory(handler, registry):\n def conditional_http_tween(request):\n response = handler(request)\n\n # If the Last-Modified header has been set, we want to enable the\n # conditional response processing.\n if response.last_modified is not None:\n response.conditional_response = True\n\n # We want to only enable the conditional machinery if either we\n # were given an explicit ETag header by the view or we have a\n # buffered response and can generate the ETag header ourself.\n if response.etag is not None:\n response.conditional_response = True\n elif (isinstance(response.app_iter, collections.abc.Sequence) and\n len(response.app_iter) == 1):\n response.conditional_response = True\n response.md5_etag()\n\n return response\n return conditional_http_tween\n\n\ndef includeme(config):\n config.add_tween(\n \"warehouse.cache.http.conditional_http_tween_factory\",\n 
under=EXCVIEW,\n )\n", "path": "warehouse/cache/http.py"}]}
1,352
199
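The guard added in the golden diff above only auto-generates ETags for buffered 200 responses to GET or HEAD. Below is a minimal, framework-free sketch of that same rule as a standalone predicate; `should_auto_etag` is an illustrative name, not part of Warehouse or Pyramid, and the sequence check is simplified from `collections.abc.Sequence` to `(list, tuple)`.

```python
def should_auto_etag(method, status_code, app_iter):
    # Same conditions the patched tween checks before calling md5_etag():
    # a safe, cacheable request (GET/HEAD), a 200 status, and a fully
    # buffered body (a one-element sequence) we can hash deterministically.
    return (
        method in {"GET", "HEAD"}
        and status_code == 200
        and isinstance(app_iter, (list, tuple))
        and len(app_iter) == 1
    )

assert should_auto_etag("GET", 200, [b"body"])
assert not should_auto_etag("POST", 200, [b"body"])    # unsafe method
assert not should_auto_etag("GET", 404, [b"missing"])  # non-200 status
```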
gh_patches_debug_35947
rasdani/github-patches
git_diff
opsdroid__opsdroid-1099
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Invert constraints I can imagine situations where it would be useful to be able to invert constraints so that they do the opposite of what they are designed to do. ```python from opsdroid.skill import Skill from opsdroid.matchers import match_regex from opsdroid.constraints import constrain_users class MySkill(Skill): @match_regex(r'hi') @constrain_users(['alice', 'bob'], invert=True) async def hello(self, message): """Says 'Hey' to anyone EXCEPT 'alice' and 'bob'.""" await message.respond('Hey') ``` </issue> <code> [start of opsdroid/constraints.py] 1 """Decorator functions to use when creating skill modules. 2 3 These decorators are for specifying when a skill should not be called despite 4 having a matcher which matches the current message. 5 """ 6 7 import logging 8 9 from opsdroid.helper import add_skill_attributes 10 11 12 _LOGGER = logging.getLogger(__name__) 13 14 15 def constrain_rooms(rooms): 16 """Return room constraint decorator.""" 17 18 def constraint_decorator(func): 19 """Add room constraint to skill.""" 20 21 def constraint_callback(message, rooms=rooms): 22 """Check if the room is correct.""" 23 return message.target in rooms 24 25 func = add_skill_attributes(func) 26 func.constraints.append(constraint_callback) 27 return func 28 29 return constraint_decorator 30 31 32 def constrain_users(users): 33 """Return user constraint decorator.""" 34 35 def constraint_decorator(func): 36 """Add user constraint to skill.""" 37 38 def constraint_callback(message, users=users): 39 """Check if the user is correct.""" 40 return message.user in users 41 42 func = add_skill_attributes(func) 43 func.constraints.append(constraint_callback) 44 return func 45 46 return constraint_decorator 47 48 49 def constrain_connectors(connectors): 50 """Return connector constraint decorator.""" 51 52 def constraint_decorator(func): 53 """Add connectors constraint to skill.""" 54 55 def constraint_callback(message, connectors=connectors): 56 """Check if the connectors is correct.""" 57 return message.connector and (message.connector.name in connectors) 58 59 func = add_skill_attributes(func) 60 func.constraints.append(constraint_callback) 61 return func 62 63 return constraint_decorator 64 [end of opsdroid/constraints.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opsdroid/constraints.py b/opsdroid/constraints.py --- a/opsdroid/constraints.py +++ b/opsdroid/constraints.py @@ -5,6 +5,7 @@ """ import logging +from functools import wraps from opsdroid.helper import add_skill_attributes @@ -12,7 +13,17 @@ _LOGGER = logging.getLogger(__name__) -def constrain_rooms(rooms): +def invert_wrapper(func): + """Inverts the result of a function.""" + + @wraps(func) + def inverted_func(*args, **kwargs): + return not func(*args, **kwargs) + + return inverted_func + + +def constrain_rooms(rooms, invert=False): """Return room constraint decorator.""" def constraint_decorator(func): @@ -23,13 +34,15 @@ return message.target in rooms func = add_skill_attributes(func) + if invert: + constraint_callback = invert_wrapper(constraint_callback) func.constraints.append(constraint_callback) return func return constraint_decorator -def constrain_users(users): +def constrain_users(users, invert=False): """Return user constraint decorator.""" def constraint_decorator(func): @@ -40,13 +53,15 @@ return message.user in users func = add_skill_attributes(func) + if invert: + constraint_callback = invert_wrapper(constraint_callback) func.constraints.append(constraint_callback) return func return constraint_decorator -def constrain_connectors(connectors): +def constrain_connectors(connectors, invert=False): """Return connector constraint decorator.""" def constraint_decorator(func): @@ -57,6 +72,8 @@ return message.connector and (message.connector.name in connectors) func = add_skill_attributes(func) + if invert: + constraint_callback = invert_wrapper(constraint_callback) func.constraints.append(constraint_callback) return func
{"golden_diff": "diff --git a/opsdroid/constraints.py b/opsdroid/constraints.py\n--- a/opsdroid/constraints.py\n+++ b/opsdroid/constraints.py\n@@ -5,6 +5,7 @@\n \"\"\"\n \n import logging\n+from functools import wraps\n \n from opsdroid.helper import add_skill_attributes\n \n@@ -12,7 +13,17 @@\n _LOGGER = logging.getLogger(__name__)\n \n \n-def constrain_rooms(rooms):\n+def invert_wrapper(func):\n+ \"\"\"Inverts the result of a function.\"\"\"\n+\n+ @wraps(func)\n+ def inverted_func(*args, **kwargs):\n+ return not func(*args, **kwargs)\n+\n+ return inverted_func\n+\n+\n+def constrain_rooms(rooms, invert=False):\n \"\"\"Return room constraint decorator.\"\"\"\n \n def constraint_decorator(func):\n@@ -23,13 +34,15 @@\n return message.target in rooms\n \n func = add_skill_attributes(func)\n+ if invert:\n+ constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n \n return constraint_decorator\n \n \n-def constrain_users(users):\n+def constrain_users(users, invert=False):\n \"\"\"Return user constraint decorator.\"\"\"\n \n def constraint_decorator(func):\n@@ -40,13 +53,15 @@\n return message.user in users\n \n func = add_skill_attributes(func)\n+ if invert:\n+ constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n \n return constraint_decorator\n \n \n-def constrain_connectors(connectors):\n+def constrain_connectors(connectors, invert=False):\n \"\"\"Return connector constraint decorator.\"\"\"\n \n def constraint_decorator(func):\n@@ -57,6 +72,8 @@\n return message.connector and (message.connector.name in connectors)\n \n func = add_skill_attributes(func)\n+ if invert:\n+ constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n", "issue": "Invert constraints\nI can imagine situations where it would be useful to be able to invert constraints so that they do the opposite of what they are designed to do.\r\n\r\n```python\r\nfrom opsdroid.skill import Skill\r\nfrom opsdroid.matchers import match_regex\r\nfrom opsdroid.constraints import constrain_users\r\n\r\nclass MySkill(Skill):\r\n\r\n @match_regex(r'hi')\r\n @constrain_users(['alice', 'bob'], invert=True)\r\n async def hello(self, message):\r\n \"\"\"Says 'Hey' to anyone EXCEPT 'alice' and 'bob'.\"\"\"\r\n await message.respond('Hey')\r\n```\n", "before_files": [{"content": "\"\"\"Decorator functions to use when creating skill modules.\n\nThese decorators are for specifying when a skill should not be called despite\nhaving a matcher which matches the current message.\n\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import add_skill_attributes\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef constrain_rooms(rooms):\n \"\"\"Return room constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add room constraint to skill.\"\"\"\n\n def constraint_callback(message, rooms=rooms):\n \"\"\"Check if the room is correct.\"\"\"\n return message.target in rooms\n\n func = add_skill_attributes(func)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n\n\ndef constrain_users(users):\n \"\"\"Return user constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add user constraint to skill.\"\"\"\n\n def constraint_callback(message, users=users):\n \"\"\"Check if the user is correct.\"\"\"\n return message.user in users\n\n func = add_skill_attributes(func)\n 
func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n\n\ndef constrain_connectors(connectors):\n \"\"\"Return connector constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add connectors constraint to skill.\"\"\"\n\n def constraint_callback(message, connectors=connectors):\n \"\"\"Check if the connectors is correct.\"\"\"\n return message.connector and (message.connector.name in connectors)\n\n func = add_skill_attributes(func)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n", "path": "opsdroid/constraints.py"}]}
1,103
427
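With `invert=True` wired through as in the golden diff, the decorator reads naturally at the call site. The sketch below is adapted directly from the issue's own example; `GreeterSkill` is an arbitrary class name.

```python
from opsdroid.skill import Skill
from opsdroid.matchers import match_regex
from opsdroid.constraints import constrain_users


class GreeterSkill(Skill):

    @match_regex(r"hi")
    @constrain_users(["alice", "bob"], invert=True)
    async def hello(self, message):
        # invert_wrapper negates the user check, so this skill now fires
        # for everyone EXCEPT 'alice' and 'bob'.
        await message.respond("Hey")
```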
gh_patches_debug_8106
rasdani/github-patches
git_diff
aws__aws-sam-cli-815
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Region from Env Vars or profile are not respected for ALL commands but package and deploy The region option in SAM CLI was changed between 0.7.0 and 0.8.0 to add the default explicitly on the [command line option](https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/cli/options.py#L44). This causes the region to always be set and not allow boto3 to do its resolving of credentials and regions, which is used to set the correct values into the docker container. Current workaround is to explicitly set the region when invoking a function or interacting with commands that interact with AWS Services. Fix is in #811 </issue> <code> [start of samcli/cli/options.py] 1 """ 2 This file contains common CLI options common to all commands. As we add more commands, this will 3 become a repository of options that other commands could use when needed. 4 """ 5 6 import click 7 8 from .context import Context 9 10 11 def debug_option(f): 12 """ 13 Configures --debug option for CLI 14 15 :param f: Callback Function to be passed to Click 16 """ 17 def callback(ctx, param, value): 18 state = ctx.ensure_object(Context) 19 state.debug = value 20 return value 21 22 return click.option('--debug', 23 expose_value=False, 24 is_flag=True, 25 envvar="SAM_DEBUG", 26 help='Turn on debug logging to print debug message generated by SAM CLI.', 27 callback=callback)(f) 28 29 30 def region_option(f): 31 """ 32 Configures --region option for CLI 33 34 :param f: Callback Function to be passed to Click 35 """ 36 def callback(ctx, param, value): 37 state = ctx.ensure_object(Context) 38 state.region = value 39 return value 40 41 return click.option('--region', 42 expose_value=False, 43 help='Set the AWS Region of the service (e.g. us-east-1).', 44 default='us-east-1', 45 callback=callback)(f) 46 47 48 def profile_option(f): 49 """ 50 Configures --profile option for CLI 51 52 :param f: Callback Function to be passed to Click 53 """ 54 def callback(ctx, param, value): 55 state = ctx.ensure_object(Context) 56 state.profile = value 57 return value 58 59 return click.option('--profile', 60 expose_value=False, 61 help='Select a specific profile from your credential file to get AWS credentials.', 62 callback=callback)(f) 63 [end of samcli/cli/options.py] [start of samcli/__init__.py] 1 """ 2 SAM CLI version 3 """ 4 5 __version__ = '0.8.0' 6 [end of samcli/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/samcli/__init__.py b/samcli/__init__.py --- a/samcli/__init__.py +++ b/samcli/__init__.py @@ -2,4 +2,4 @@ SAM CLI version """ -__version__ = '0.8.0' +__version__ = '0.8.1' diff --git a/samcli/cli/options.py b/samcli/cli/options.py --- a/samcli/cli/options.py +++ b/samcli/cli/options.py @@ -41,7 +41,6 @@ return click.option('--region', expose_value=False, help='Set the AWS Region of the service (e.g. us-east-1).', - default='us-east-1', callback=callback)(f)
{"golden_diff": "diff --git a/samcli/__init__.py b/samcli/__init__.py\n--- a/samcli/__init__.py\n+++ b/samcli/__init__.py\n@@ -2,4 +2,4 @@\n SAM CLI version\n \"\"\"\n \n-__version__ = '0.8.0'\n+__version__ = '0.8.1'\ndiff --git a/samcli/cli/options.py b/samcli/cli/options.py\n--- a/samcli/cli/options.py\n+++ b/samcli/cli/options.py\n@@ -41,7 +41,6 @@\n return click.option('--region',\n expose_value=False,\n help='Set the AWS Region of the service (e.g. us-east-1).',\n- default='us-east-1',\n callback=callback)(f)\n", "issue": "Region from Env Vars or profile are not respected for ALL commands but package and deploy\nThe region option in SAM CLI was changed between 0.7.0 and 0.8.0 to add the default explicitly on the [command line option](https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/cli/options.py#L44). This causes the region to always be set and not allow boto3 to do its resolving of credentials and regions, which is used to set the correct values into the docker container.\r\n\r\nCurrent workaround is to explicitly set the region when invoking a function or interacting with commands that interact with AWS Services.\r\n\r\nFix is in #811\n", "before_files": [{"content": "\"\"\"\nThis file contains common CLI options common to all commands. As we add more commands, this will\nbecome a repository of options that other commands could use when needed.\n\"\"\"\n\nimport click\n\nfrom .context import Context\n\n\ndef debug_option(f):\n \"\"\"\n Configures --debug option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.debug = value\n return value\n\n return click.option('--debug',\n expose_value=False,\n is_flag=True,\n envvar=\"SAM_DEBUG\",\n help='Turn on debug logging to print debug message generated by SAM CLI.',\n callback=callback)(f)\n\n\ndef region_option(f):\n \"\"\"\n Configures --region option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.region = value\n return value\n\n return click.option('--region',\n expose_value=False,\n help='Set the AWS Region of the service (e.g. us-east-1).',\n default='us-east-1',\n callback=callback)(f)\n\n\ndef profile_option(f):\n \"\"\"\n Configures --profile option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.profile = value\n return value\n\n return click.option('--profile',\n expose_value=False,\n help='Select a specific profile from your credential file to get AWS credentials.',\n callback=callback)(f)\n", "path": "samcli/cli/options.py"}, {"content": "\"\"\"\nSAM CLI version\n\"\"\"\n\n__version__ = '0.8.0'\n", "path": "samcli/__init__.py"}]}
1,200
177
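Dropping the hard-coded `default='us-east-1'` matters because it lets `state.region` stay unset, which in turn lets boto3 run its normal resolution chain (the `AWS_DEFAULT_REGION` environment variable, then the active profile's region in `~/.aws/config`). A small sketch of that fallback, assuming a profile named `dev` exists locally; the profile name is illustrative only.

```python
import boto3

# With no --region forced through from the CLI, boto3 resolves the region
# itself: environment variables first, then the selected profile.
session = boto3.session.Session(profile_name="dev")
print(session.region_name)  # e.g. "eu-west-1" if the dev profile sets it
```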
gh_patches_debug_29338
rasdani/github-patches
git_diff
enthought__chaco-498
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bar_plot_stacked example unfinished? **Problem Description** The example in https://github.com/enthought/chaco/blob/master/examples/demo/basic/bar_plot_stacked.py doesn't do any stacking. **Expected behavior:** If the bars were really stacked, I would expect the sum of all bars to reach (10+5+2) * array([1,2,3,4,5]) (the sum of all values) respectively. Instead, I am getting the following: ![Screen Shot 2019-05-31 at 10 19 28 AM](https://user-images.githubusercontent.com/593945/58716239-31772f80-838e-11e9-84a8-9a2588263a15.png) Looking at the code, it doesn't use the bar plot's `starting_value` as expected, so the demo doesn't even seem to try to do the right thing. </issue> <code> [start of examples/demo/basic/bar_plot_stacked.py] 1 """ 2 Simple example of a stacked bar chart 3 """ 4 5 # Major library imports 6 import numpy 7 8 # Enthought library imports 9 from enable.api import ComponentEditor 10 from traits.api import HasTraits, Instance 11 from traitsui.api import UItem, View 12 13 # Chaco imports 14 from chaco.api import LabelAxis, Plot, ArrayPlotData 15 16 class PlotExample(HasTraits): 17 plot = Instance(Plot) 18 traits_view = View(UItem('plot', editor=ComponentEditor()), 19 width=400, height=400, resizable=True, 20 ) 21 22 def __init__(self, index, series_a, series_b, series_c, **kw): 23 super(PlotExample, self).__init__(**kw) 24 25 plot_data = ArrayPlotData(index=index) 26 plot_data.set_data('series_a', series_a) 27 plot_data.set_data('series_b', series_b) 28 plot_data.set_data('series_c', series_c) 29 self.plot = Plot(plot_data) 30 self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto') 31 self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto') 32 self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto') 33 34 # set the plot's value range to 0, otherwise it may pad too much 35 self.plot.value_range.low = 0 36 37 # replace the index values with some nicer labels 38 label_axis = LabelAxis(self.plot, orientation='bottom', 39 title='Months', 40 positions = list(range(1, 10)), 41 labels = ['jan', 'feb', 'march', 'april', 'may'], 42 small_haxis_style=True) 43 44 self.plot.underlays.remove(self.plot.index_axis) 45 self.plot.index_axis = label_axis 46 self.plot.underlays.append(label_axis) 47 48 49 index = numpy.array([1,2,3,4,5]) 50 demo = PlotExample(index, index*10, index*5, index*2) 51 52 if __name__ == "__main__": 53 demo.configure_traits() 54 [end of examples/demo/basic/bar_plot_stacked.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/demo/basic/bar_plot_stacked.py b/examples/demo/basic/bar_plot_stacked.py --- a/examples/demo/basic/bar_plot_stacked.py +++ b/examples/demo/basic/bar_plot_stacked.py @@ -11,7 +11,7 @@ from traitsui.api import UItem, View # Chaco imports -from chaco.api import LabelAxis, Plot, ArrayPlotData +from chaco.api import LabelAxis, Plot, ArrayPlotData, ArrayDataSource class PlotExample(HasTraits): plot = Instance(Plot) @@ -22,14 +22,18 @@ def __init__(self, index, series_a, series_b, series_c, **kw): super(PlotExample, self).__init__(**kw) + # Stack them up + series_c = series_c + series_b + series_a + series_b = series_b + series_a + plot_data = ArrayPlotData(index=index) plot_data.set_data('series_a', series_a) plot_data.set_data('series_b', series_b) plot_data.set_data('series_c', series_c) self.plot = Plot(plot_data) self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto') - self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto') - self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto') + self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_a)) + self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_b)) # set the plot's value range to 0, otherwise it may pad too much self.plot.value_range.low = 0
{"golden_diff": "diff --git a/examples/demo/basic/bar_plot_stacked.py b/examples/demo/basic/bar_plot_stacked.py\n--- a/examples/demo/basic/bar_plot_stacked.py\n+++ b/examples/demo/basic/bar_plot_stacked.py\n@@ -11,7 +11,7 @@\n from traitsui.api import UItem, View\n \n # Chaco imports\n-from chaco.api import LabelAxis, Plot, ArrayPlotData\n+from chaco.api import LabelAxis, Plot, ArrayPlotData, ArrayDataSource\n \n class PlotExample(HasTraits):\n plot = Instance(Plot)\n@@ -22,14 +22,18 @@\n def __init__(self, index, series_a, series_b, series_c, **kw):\n super(PlotExample, self).__init__(**kw)\n \n+ # Stack them up\n+ series_c = series_c + series_b + series_a\n+ series_b = series_b + series_a\n+\n plot_data = ArrayPlotData(index=index)\n plot_data.set_data('series_a', series_a)\n plot_data.set_data('series_b', series_b)\n plot_data.set_data('series_c', series_c)\n self.plot = Plot(plot_data)\n self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')\n- self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto')\n- self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto')\n+ self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_a))\n+ self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_b))\n \n # set the plot's value range to 0, otherwise it may pad too much\n self.plot.value_range.low = 0\n", "issue": "bar_plot_stacked example unfinished?\n**Problem Description**\r\nThe example in https://github.com/enthought/chaco/blob/master/examples/demo/basic/bar_plot_stacked.py\r\ndoesn't do any stacking.\r\n\r\n**Expected behavior:**\r\nI the bars were really stacked, I would expect the sum of all bars to reach (10+5+2) * array([1,2,3,4,5]) (the sum of all values) respectively. 
Instead, I am getting the following:\r\n![Screen Shot 2019-05-31 at 10 19 28 AM](https://user-images.githubusercontent.com/593945/58716239-31772f80-838e-11e9-84a8-9a2588263a15.png)\r\n\r\nLooking at the code, it doesn't use the bar plot's `starting_value` as expected, so the demo doesn't even seem to try to do the right thing.\r\n\n", "before_files": [{"content": "\"\"\"\nSimple example of a stacked bar chart\n\"\"\"\n\n# Major library imports\nimport numpy\n\n# Enthought library imports\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance\nfrom traitsui.api import UItem, View\n\n# Chaco imports\nfrom chaco.api import LabelAxis, Plot, ArrayPlotData\n\nclass PlotExample(HasTraits):\n plot = Instance(Plot)\n traits_view = View(UItem('plot', editor=ComponentEditor()),\n width=400, height=400, resizable=True, \n )\n\n def __init__(self, index, series_a, series_b, series_c, **kw):\n super(PlotExample, self).__init__(**kw)\n\n plot_data = ArrayPlotData(index=index)\n plot_data.set_data('series_a', series_a)\n plot_data.set_data('series_b', series_b)\n plot_data.set_data('series_c', series_c)\n self.plot = Plot(plot_data)\n self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')\n self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto')\n self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto')\n\n # set the plot's value range to 0, otherwise it may pad too much\n self.plot.value_range.low = 0\n\n # replace the index values with some nicer labels\n label_axis = LabelAxis(self.plot, orientation='bottom',\n title='Months',\n positions = list(range(1, 10)),\n labels = ['jan', 'feb', 'march', 'april', 'may'],\n small_haxis_style=True)\n\n self.plot.underlays.remove(self.plot.index_axis)\n self.plot.index_axis = label_axis\n self.plot.underlays.append(label_axis)\n\n\nindex = numpy.array([1,2,3,4,5])\ndemo = PlotExample(index, index*10, index*5, index*2)\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/demo/basic/bar_plot_stacked.py"}]}
1,335
434
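The stacking in the diff works because a Chaco bar renderer draws each bar from its `starting_value` up to its value, so each series is pre-summed and the previous cumulative series is passed as the base. A condensed sketch of just that arithmetic, with the renderer calls shown as comments so the snippet stays self-contained:

```python
import numpy
from chaco.api import ArrayDataSource

index = numpy.array([1, 2, 3, 4, 5])
series_a = index * 10
series_b = index * 5 + series_a   # cumulative height after two layers
series_c = index * 2 + series_b   # cumulative height after three layers

# Each upper layer starts where the one below it ends, e.g.:
# plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto',
#           starting_value=ArrayDataSource(series_a))
base_for_b = ArrayDataSource(series_a)  # base of the second layer
base_for_c = ArrayDataSource(series_b)  # base of the third layer
```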
gh_patches_debug_44
rasdani/github-patches
git_diff
zestedesavoir__zds-site-6179
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove the last remnants of Travis **Bug description** It looks like a few crumbs of Travis are still left: * https://github.com/zestedesavoir/zds-site/blob/dev/zds/settings/travis_fixture.py * https://github.com/zestedesavoir/zds-site/blob/fe854d9b006e5ca500a911c48e3b25b11154d926/scripts/define_function.sh#L13-L66 **Expected behavior** As far as I can tell, Travis is no longer used, so all of this should go away. </issue> <code> [start of zds/settings/travis_fixture.py] 1 from .ci_test import * 2 3 LOGGING["loggers"]["zds.utils.templatetags.emarkdown"] = { 4 "level": "INFO", 5 "handlers": ["console"], 6 } [end of zds/settings/travis_fixture.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/settings/travis_fixture.py b/zds/settings/travis_fixture.py deleted file mode 100644 --- a/zds/settings/travis_fixture.py +++ /dev/null @@ -1,6 +0,0 @@ -from .ci_test import * - -LOGGING["loggers"]["zds.utils.templatetags.emarkdown"] = { - "level": "INFO", - "handlers": ["console"], -}
{"golden_diff": "diff --git a/zds/settings/travis_fixture.py b/zds/settings/travis_fixture.py\ndeleted file mode 100644\n--- a/zds/settings/travis_fixture.py\n+++ /dev/null\n@@ -1,6 +0,0 @@\n-from .ci_test import *\n-\n-LOGGING[\"loggers\"][\"zds.utils.templatetags.emarkdown\"] = {\n- \"level\": \"INFO\",\n- \"handlers\": [\"console\"],\n-}\n", "issue": "Retirer les dernier restes de Travis\n**Description du bug**\r\n\r\nJ'ai l'impression qu'il reste quelques miettes de Travis :\r\n\r\n* https://github.com/zestedesavoir/zds-site/blob/dev/zds/settings/travis_fixture.py\r\n* https://github.com/zestedesavoir/zds-site/blob/fe854d9b006e5ca500a911c48e3b25b11154d926/scripts/define_function.sh#L13-L66\r\n\r\n**Comportement attendu**\r\n\r\nA priori, on ne se sert plus de Travis, donc tout \u00e7a devrait dispara\u00eetre.\r\n\n", "before_files": [{"content": "from .ci_test import *\n\nLOGGING[\"loggers\"][\"zds.utils.templatetags.emarkdown\"] = {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n}\n", "path": "zds/settings/travis_fixture.py"}]}
742
103
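The golden diff simply deletes the fixture file. Before merging a cleanup like this, a throwaway sweep such as the one below is one way to confirm nothing else still references Travis; it is illustrative only and not part of the zds-site tooling.

```python
from pathlib import Path

# Walk the checkout and flag any remaining Travis references to clean up.
for path in Path(".").rglob("*"):
    if path.is_file() and path.suffix in {".py", ".sh", ".yml"}:
        text = path.read_text(encoding="utf-8", errors="ignore")
        if "travis" in text.lower():
            print(path)
```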
gh_patches_debug_5603
rasdani/github-patches
git_diff
netbox-community__netbox-15611
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Empty search entries are being created for device asset tags ### Deployment Type NetBox Cloud ### NetBox Version v3.7.4 ### Python Version 3.11 ### Steps to Reproduce 1. Create a new device and note its database ID 2. In the NetBox shell, inspect all search entries associated with it: ```python ct = ContentType.objects.get_for_model(Device) device_id = 107 entries = CachedValue.objects.filter(object_type=ct, object_id=device_id) for entry in entries: print(f'{entry.field}: {entry.value}') ``` ### Expected Behavior Only fields which have a meaningful value set should have search entries created. ### Observed Behavior After creating a device with a description, I see three entries for it: ``` asset_tag: None name: device1 description: asdasdasd ``` The value of `asset_tag` is null. </issue> <code> [start of netbox/netbox/search/__init__.py] 1 from collections import namedtuple 2 3 from django.db import models 4 5 from ipam.fields import IPAddressField, IPNetworkField 6 from netbox.registry import registry 7 8 ObjectFieldValue = namedtuple('ObjectFieldValue', ('name', 'type', 'weight', 'value')) 9 10 11 class FieldTypes: 12 FLOAT = 'float' 13 INTEGER = 'int' 14 STRING = 'str' 15 INET = 'inet' 16 CIDR = 'cidr' 17 18 19 class LookupTypes: 20 PARTIAL = 'icontains' 21 EXACT = 'iexact' 22 STARTSWITH = 'istartswith' 23 ENDSWITH = 'iendswith' 24 REGEX = 'iregex' 25 26 27 class SearchIndex: 28 """ 29 Base class for building search indexes. 30 31 Attributes: 32 model: The model class for which this index is used. 33 category: The label of the group under which this indexer is categorized (for form field display). If none, 34 the name of the model's app will be used. 35 fields: An iterable of two-tuples defining the model fields to be indexed and the weight associated with each. 36 display_attrs: An iterable of additional object attributes to include when displaying search results. 37 """ 38 model = None 39 category = None 40 fields = () 41 display_attrs = () 42 43 @staticmethod 44 def get_field_type(instance, field_name): 45 """ 46 Return the data type of the specified model field. 47 """ 48 field_cls = instance._meta.get_field(field_name).__class__ 49 if issubclass(field_cls, (models.FloatField, models.DecimalField)): 50 return FieldTypes.FLOAT 51 if issubclass(field_cls, IPAddressField): 52 return FieldTypes.INET 53 if issubclass(field_cls, IPNetworkField): 54 return FieldTypes.CIDR 55 if issubclass(field_cls, models.IntegerField): 56 return FieldTypes.INTEGER 57 return FieldTypes.STRING 58 59 @staticmethod 60 def get_field_value(instance, field_name): 61 """ 62 Return the value of the specified model field as a string. 63 """ 64 return str(getattr(instance, field_name)) 65 66 @classmethod 67 def get_category(cls): 68 return cls.category or cls.model._meta.app_config.verbose_name 69 70 @classmethod 71 def to_cache(cls, instance, custom_fields=None): 72 """ 73 Return a list of ObjectFieldValue representing the instance fields to be cached. 74 75 Args: 76 instance: The instance being cached. 77 custom_fields: An iterable of CustomFields to include when caching the instance. If None, all custom fields 78 defined for the model will be included. (This can also be provided during bulk caching to avoid looking 79 up the available custom fields for each instance.) 80 """ 81 values = [] 82 83 # Capture built-in fields 84 for name, weight in cls.fields: 85 type_ = cls.get_field_type(instance, name) 86 value = cls.get_field_value(instance, name) 87 if type_ and value: 88 values.append( 89 ObjectFieldValue(name, type_, weight, value) 90 ) 91 92 # Capture custom fields 93 if getattr(instance, 'custom_field_data', None): 94 if custom_fields is None: 95 custom_fields = instance.custom_fields 96 for cf in custom_fields: 97 type_ = cf.search_type 98 value = instance.custom_field_data.get(cf.name) 99 weight = cf.search_weight 100 if type_ and value and weight: 101 values.append( 102 ObjectFieldValue(f'cf_{cf.name}', type_, weight, value) 103 ) 104 105 return values 106 107 108 def get_indexer(model): 109 """ 110 Get the SearchIndex class for the given model. 111 """ 112 label = f'{model._meta.app_label}.{model._meta.model_name}' 113 114 return registry['search'][label] 115 116 117 def register_search(cls): 118 """ 119 Decorator for registering a SearchIndex class. 120 """ 121 model = cls.model 122 label = f'{model._meta.app_label}.{model._meta.model_name}' 123 registry['search'][label] = cls 124 125 return cls 126 [end of netbox/netbox/search/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/netbox/search/__init__.py b/netbox/netbox/search/__init__.py --- a/netbox/netbox/search/__init__.py +++ b/netbox/netbox/search/__init__.py @@ -59,9 +59,10 @@ @staticmethod def get_field_value(instance, field_name): """ - Return the value of the specified model field as a string. + Return the value of the specified model field as a string (or None). """ - return str(getattr(instance, field_name)) + if value := getattr(instance, field_name): + return str(value) @classmethod def get_category(cls):
{"golden_diff": "diff --git a/netbox/netbox/search/__init__.py b/netbox/netbox/search/__init__.py\n--- a/netbox/netbox/search/__init__.py\n+++ b/netbox/netbox/search/__init__.py\n@@ -59,9 +59,10 @@\n @staticmethod\n def get_field_value(instance, field_name):\n \"\"\"\n- Return the value of the specified model field as a string.\n+ Return the value of the specified model field as a string (or None).\n \"\"\"\n- return str(getattr(instance, field_name))\n+ if value := getattr(instance, field_name):\n+ return str(value)\n \n @classmethod\n def get_category(cls):\n", "issue": "Empty search entries are being created for device asset tags\n### Deployment Type\n\nNetBox Cloud\n\n### NetBox Version\n\nv3.7.4\n\n### Python Version\n\n3.11\n\n### Steps to Reproduce\n\n1. Create a new device and note its database ID\r\n2. In the NetBox shell, inspect all search entries associated with it:\r\n\r\n```python\r\nct = ContentType.objects.get_for_model(Device)\r\ndevice_id = 107\r\nentries = CachedValue.objects.filter(object_type=ct, object_id=device_id)\r\nfor entry in entries:\r\n print(f'{entry.field}: {entry.value}')\r\n```\n\n### Expected Behavior\n\nOnly fields which have a meaningful value set should have search entries created.\n\n### Observed Behavior\n\nAfter creating a device with a description, I see three entries for it:\r\n\r\n```\r\nasset_tag: None\r\nname: device1\r\ndescription: asdasdasd\r\n```\r\n\r\nThe value of `asset_tag` is null.\n", "before_files": [{"content": "from collections import namedtuple\n\nfrom django.db import models\n\nfrom ipam.fields import IPAddressField, IPNetworkField\nfrom netbox.registry import registry\n\nObjectFieldValue = namedtuple('ObjectFieldValue', ('name', 'type', 'weight', 'value'))\n\n\nclass FieldTypes:\n FLOAT = 'float'\n INTEGER = 'int'\n STRING = 'str'\n INET = 'inet'\n CIDR = 'cidr'\n\n\nclass LookupTypes:\n PARTIAL = 'icontains'\n EXACT = 'iexact'\n STARTSWITH = 'istartswith'\n ENDSWITH = 'iendswith'\n REGEX = 'iregex'\n\n\nclass SearchIndex:\n \"\"\"\n Base class for building search indexes.\n\n Attributes:\n model: The model class for which this index is used.\n category: The label of the group under which this indexer is categorized (for form field display). 
If none,\n the name of the model's app will be used.\n fields: An iterable of two-tuples defining the model fields to be indexed and the weight associated with each.\n display_attrs: An iterable of additional object attributes to include when displaying search results.\n \"\"\"\n model = None\n category = None\n fields = ()\n display_attrs = ()\n\n @staticmethod\n def get_field_type(instance, field_name):\n \"\"\"\n Return the data type of the specified model field.\n \"\"\"\n field_cls = instance._meta.get_field(field_name).__class__\n if issubclass(field_cls, (models.FloatField, models.DecimalField)):\n return FieldTypes.FLOAT\n if issubclass(field_cls, IPAddressField):\n return FieldTypes.INET\n if issubclass(field_cls, IPNetworkField):\n return FieldTypes.CIDR\n if issubclass(field_cls, models.IntegerField):\n return FieldTypes.INTEGER\n return FieldTypes.STRING\n\n @staticmethod\n def get_field_value(instance, field_name):\n \"\"\"\n Return the value of the specified model field as a string.\n \"\"\"\n return str(getattr(instance, field_name))\n\n @classmethod\n def get_category(cls):\n return cls.category or cls.model._meta.app_config.verbose_name\n\n @classmethod\n def to_cache(cls, instance, custom_fields=None):\n \"\"\"\n Return a list of ObjectFieldValue representing the instance fields to be cached.\n\n Args:\n instance: The instance being cached.\n custom_fields: An iterable of CustomFields to include when caching the instance. If None, all custom fields\n defined for the model will be included. (This can also be provided during bulk caching to avoid looking\n up the available custom fields for each instance.)\n \"\"\"\n values = []\n\n # Capture built-in fields\n for name, weight in cls.fields:\n type_ = cls.get_field_type(instance, name)\n value = cls.get_field_value(instance, name)\n if type_ and value:\n values.append(\n ObjectFieldValue(name, type_, weight, value)\n )\n\n # Capture custom fields\n if getattr(instance, 'custom_field_data', None):\n if custom_fields is None:\n custom_fields = instance.custom_fields\n for cf in custom_fields:\n type_ = cf.search_type\n value = instance.custom_field_data.get(cf.name)\n weight = cf.search_weight\n if type_ and value and weight:\n values.append(\n ObjectFieldValue(f'cf_{cf.name}', type_, weight, value)\n )\n\n return values\n\n\ndef get_indexer(model):\n \"\"\"\n Get the SearchIndex class for the given model.\n \"\"\"\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n\n return registry['search'][label]\n\n\ndef register_search(cls):\n \"\"\"\n Decorator for registering a SearchIndex class.\n \"\"\"\n model = cls.model\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n registry['search'][label] = cls\n\n return cls\n", "path": "netbox/netbox/search/__init__.py"}]}
1,854
150
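The one-line guard in the golden diff is what prevents the `asset_tag: None` rows: returning `None` for falsy attributes makes the `if type_ and value` check in `to_cache()` skip the field entirely. A self-contained sketch of that behavior with a stand-in object (`_Device` is a made-up class, not a NetBox model; requires Python 3.8+ for the walrus operator):

```python
def get_field_value(instance, field_name):
    # Mirrors the patched helper: falsy values (None, "") produce no
    # cached search entry instead of the literal string "None".
    if value := getattr(instance, field_name):
        return str(value)


class _Device:
    asset_tag = None
    name = "device1"


assert get_field_value(_Device(), "asset_tag") is None
assert get_field_value(_Device(), "name") == "device1"
```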
gh_patches_debug_11540
rasdani/github-patches
git_diff
plotly__dash-1493
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] dash doesn't get imported when a file named "org.py", "dash.py", or "test.py" with specific content is present in the current directory // "AttributeError: module 'dash' has no attribute 'Dash'" **Describe your context** ``` dash (1.9.1) dash-core-components (1.8.1) dash-html-components (1.0.2) dash-renderer (1.2.4) dash-table (4.6.1) ``` **Describe the bug** If a file named ``org.py`` is present in the current directory with the following content: ``` import dash_core_components as dcc ``` then dash doesn't import and I get the following message: ``` >>> import dash Dash was not successfully imported. Make sure you don't have a file named 'dash.py' in your current directory. ``` **Expected behavior** dash should import without any error. **Additional info** - The org.py is never imported - If I rename the file to a different name dash get imported without any problem. - The problem is shown also with ``import dash_html_components as html``` - The problem is shown either on Windows and in Linux - Tested with python3.4, python3.6, python3.8 **Steps to replicate the problem on Linux** ``` $ mkdir mytest $ cd mytest $ echo "import dash_core_components as dcc" > org.py $ python3 -m venv venv $ . venv/bin/activate (venv) $ pip install dash (venv) $ python Python 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import dash Dash was not successfully imported. Make sure you don't have a file named 'dash.py' in your current directory. (venv) $ ``` if I rename the file the import works: ``` (venv) $ mv org.py othername.py (venv) $ python Python 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import dash >>> ``` </issue> <code> [start of dash/__init__.py] 1 from .dash import Dash, no_update # noqa: F401 2 from . import dependencies # noqa: F401 3 from . import development # noqa: F401 4 from . import exceptions # noqa: F401 5 from . import resources # noqa: F401 6 from .version import __version__ # noqa: F401 7 from ._callback_context import callback_context # noqa: F401 8 [end of dash/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dash/__init__.py b/dash/__init__.py --- a/dash/__init__.py +++ b/dash/__init__.py @@ -1,7 +1,11 @@ -from .dash import Dash, no_update # noqa: F401 -from . import dependencies # noqa: F401 -from . import development # noqa: F401 -from . import exceptions # noqa: F401 -from . import resources # noqa: F401 -from .version import __version__ # noqa: F401 -from ._callback_context import callback_context # noqa: F401 +# pylint: disable=C0413 +# __plotly_dash is for the "make sure you don't have a dash.py" check +# must come before any other imports. +__plotly_dash = True +from .dash import Dash, no_update # noqa: F401,E402 +from . import dependencies # noqa: F401,E402 +from . import development # noqa: F401,E402 +from . import exceptions # noqa: F401,E402 +from . import resources # noqa: F401,E402 +from .version import __version__ # noqa: F401,E402 +from ._callback_context import callback_context # noqa: F401,E402
{"golden_diff": "diff --git a/dash/__init__.py b/dash/__init__.py\n--- a/dash/__init__.py\n+++ b/dash/__init__.py\n@@ -1,7 +1,11 @@\n-from .dash import Dash, no_update # noqa: F401\n-from . import dependencies # noqa: F401\n-from . import development # noqa: F401\n-from . import exceptions # noqa: F401\n-from . import resources # noqa: F401\n-from .version import __version__ # noqa: F401\n-from ._callback_context import callback_context # noqa: F401\n+# pylint: disable=C0413\n+# __plotly_dash is for the \"make sure you don't have a dash.py\" check\n+# must come before any other imports.\n+__plotly_dash = True\n+from .dash import Dash, no_update # noqa: F401,E402\n+from . import dependencies # noqa: F401,E402\n+from . import development # noqa: F401,E402\n+from . import exceptions # noqa: F401,E402\n+from . import resources # noqa: F401,E402\n+from .version import __version__ # noqa: F401,E402\n+from ._callback_context import callback_context # noqa: F401,E402\n", "issue": "[BUG] dash doesn't get imported when a file named \"org.py\", \"dash.py\", or \"test.py\" with specific content is present in the current directory // \"AttributeError: module 'dash' has no attribute 'Dash'\"\n**Describe your context**\r\n\r\n```\r\ndash (1.9.1)\r\ndash-core-components (1.8.1)\r\ndash-html-components (1.0.2)\r\ndash-renderer (1.2.4)\r\ndash-table (4.6.1)\r\n\r\n```\r\n\r\n**Describe the bug**\r\n\r\nIf a file named ``org.py`` is present in the current directory with the following content:\r\n\r\n```\r\nimport dash_core_components as dcc\r\n```\r\n\r\nthen dash doesn't import and I get the following message:\r\n```\r\n>>> import dash\r\nDash was not successfully imported. Make sure you don't have a file named\r\n'dash.py' in your current directory.\r\n```\r\n\r\n**Expected behavior**\r\ndash should import without any error.\r\n\r\n**Additional info**\r\n- The org.py is never imported\r\n- If I rename the file to a different name dash get imported without any problem.\r\n- The problem is shown also with ``import dash_html_components as html```\r\n- The problem is shown either on Windows and in Linux\r\n- Tested with python3.4, python3.6, python3.8\r\n\r\n**Steps to replicate the problem on Linux**\r\n```\r\n$ mkdir mytest\r\n$ cd mytest\r\n$ echo \"import dash_core_components as dcc\" > org.py\r\n$ python3 -m venv venv\r\n$ . venv/bin/activate\r\n(venv) $ pip install dash\r\n(venv) $ python\r\nPython 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import dash\r\nDash was not successfully imported. Make sure you don't have a file named\r\n'dash.py' in your current directory.\r\n(venv) $\r\n```\r\n\r\nif I rename the file the import works:\r\n```\r\n(venv) $ mv org.py othername.py\r\n(venv) $ python\r\nPython 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import dash\r\n>>>\r\n```\r\n\n", "before_files": [{"content": "from .dash import Dash, no_update # noqa: F401\nfrom . import dependencies # noqa: F401\nfrom . import development # noqa: F401\nfrom . import exceptions # noqa: F401\nfrom . import resources # noqa: F401\nfrom .version import __version__ # noqa: F401\nfrom ._callback_context import callback_context # noqa: F401\n", "path": "dash/__init__.py"}]}
1,170
332
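The `__plotly_dash` sentinel set at the very top of `dash/__init__.py` gives the "did a local dash.py shadow the package?" check something reliable to probe before any submodule import can fail. The snippet below is a loose reconstruction of how such a probe can work; the real check lives elsewhere in Dash's packages, so treat this purely as an illustration of the idea.

```python
import importlib

mod = importlib.import_module("dash")
# If a stray dash.py (or a module that shadows an import in the chain) was
# picked up instead of the real package, the sentinel will be missing.
if not getattr(mod, "__plotly_dash", False):
    raise ImportError(
        "Dash was not successfully imported. Make sure you don't have a "
        "file named 'dash.py' in your current directory."
    )
```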
gh_patches_debug_25606
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-521
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BUG: all_members_are_administrators fails I don't know if the telegram api changed, but the parameter to tell if all members in a group are administrators has changed to `all_members_are_administrators`. Chats fail to update with this parameter ### Steps to reproduce 1. Create a group with "all members are administrators" enabled 2. Add a bot to it 3. send the bot a message 4. ` assert print(update.message.chat.all_members_are_admins)==True` ### Expected behaviour It should pass the assert ### Actual behaviour This fails the assert ### Way to fix rename to `all_members_are_administrators` </issue> <code> [start of telegram/chat.py] 1 #!/usr/bin/env python 2 # pylint: disable=C0103,W0622 3 # 4 # A library that provides a Python interface to the Telegram Bot API 5 # Copyright (C) 2015-2016 6 # Leandro Toledo de Souza <[email protected]> 7 # 8 # This program is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU Lesser Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # This program is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU Lesser Public License for more details. 17 # 18 # You should have received a copy of the GNU Lesser Public License 19 # along with this program. If not, see [http://www.gnu.org/licenses/]. 20 """This module contains an object that represents a Telegram Chat.""" 21 22 from telegram import TelegramObject 23 24 25 class Chat(TelegramObject): 26 """This object represents a Telegram Chat. 27 28 Attributes: 29 id (int): 30 type (str): Can be 'private', 'group', 'supergroup' or 'channel' 31 title (str): Title, for channels and group chats 32 username (str): Username, for private chats and channels if available 33 first_name (str): First name of the other party in a private chat 34 last_name (str): Last name of the other party in a private chat 35 all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled. 36 37 Args: 38 id (int): 39 type (str): 40 title (Optional[str]): 41 username(Optional[str]): 42 first_name(Optional[str]): 43 last_name(Optional[str]): 44 bot (Optional[Bot]): The Bot to use for instance methods 45 **kwargs (dict): Arbitrary keyword arguments. 46 47 """ 48 PRIVATE = 'private' 49 GROUP = 'group' 50 SUPERGROUP = 'supergroup' 51 CHANNEL = 'channel' 52 53 def __init__(self, 54 id, 55 type, 56 title='', 57 username='', 58 first_name='', 59 last_name='', 60 all_members_are_admins=False, 61 bot=None, 62 **kwargs): 63 # Required 64 self.id = int(id) 65 self.type = type 66 # Optionals 67 self.title = title 68 self.username = username 69 self.first_name = first_name 70 self.last_name = last_name 71 self.all_members_are_admins = all_members_are_admins 72 73 self.bot = bot 74 75 @staticmethod 76 def de_json(data, bot): 77 """ 78 Args: 79 data (dict): 80 bot (telegram.Bot): 81 82 Returns: 83 telegram.Chat: 84 """ 85 if not data: 86 return None 87 88 return Chat(bot=bot, **data) 89 90 def send_action(self, *args, **kwargs): 91 """Shortcut for ``bot.sendChatAction(update.message.chat.id, *args, **kwargs)``""" 92 return self.bot.sendChatAction(self.id, *args, **kwargs) 93 94 def leave(self, *args, **kwargs): 95 """Shortcut for ``bot.leaveChat(update.message.chat.id, *args, **kwargs)``""" 96 return self.bot.leaveChat(self.id, *args, **kwargs) 97 98 def get_administrators(self, *args, **kwargs): 99 """Shortcut for ``bot.getChatAdministrators(update.message.chat.id, *args, **kwargs)``""" 100 return self.bot.getChatAdministrators(self.id, *args, **kwargs) 101 102 def get_members_count(self, *args, **kwargs): 103 """Shortcut for ``bot.getChatMembersCount(update.message.chat.id, *args, **kwargs)``""" 104 return self.bot.getChatMembersCount(self.id, *args, **kwargs) 105 106 def get_member(self, *args, **kwargs): 107 """Shortcut for ``bot.getChatMember(update.message.chat.id, *args, **kwargs)``""" 108 return self.bot.getChatMember(self.id, *args, **kwargs) 109 110 def kick_member(self, *args, **kwargs): 111 """Shortcut for ``bot.kickChatMember(update.message.chat.id, *args, **kwargs)``""" 112 return self.bot.kickChatMember(self.id, *args, **kwargs) 113 114 def unban_member(self, *args, **kwargs): 115 """Shortcut for ``bot.unbanChatMember(update.message.chat.id, *args, **kwargs)``""" 116 return self.bot.unbanChatMember(self.id, *args, **kwargs) 117 [end of telegram/chat.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/telegram/chat.py b/telegram/chat.py --- a/telegram/chat.py +++ b/telegram/chat.py @@ -32,7 +32,7 @@ username (str): Username, for private chats and channels if available first_name (str): First name of the other party in a private chat last_name (str): Last name of the other party in a private chat - all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled. + all_members_are_administrators (bool): True if group has 'All Members Are Administrators' Args: id (int): @@ -57,7 +57,7 @@ username='', first_name='', last_name='', - all_members_are_admins=False, + all_members_are_administrators=False, bot=None, **kwargs): # Required @@ -68,7 +68,7 @@ self.username = username self.first_name = first_name self.last_name = last_name - self.all_members_are_admins = all_members_are_admins + self.all_members_are_administrators = all_members_are_administrators self.bot = bot
{"golden_diff": "diff --git a/telegram/chat.py b/telegram/chat.py\n--- a/telegram/chat.py\n+++ b/telegram/chat.py\n@@ -32,7 +32,7 @@\n username (str): Username, for private chats and channels if available\n first_name (str): First name of the other party in a private chat\n last_name (str): Last name of the other party in a private chat\n- all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled.\n+ all_members_are_administrators (bool): True if group has 'All Members Are Administrators'\n \n Args:\n id (int):\n@@ -57,7 +57,7 @@\n username='',\n first_name='',\n last_name='',\n- all_members_are_admins=False,\n+ all_members_are_administrators=False,\n bot=None,\n **kwargs):\n # Required\n@@ -68,7 +68,7 @@\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n- self.all_members_are_admins = all_members_are_admins\n+ self.all_members_are_administrators = all_members_are_administrators\n \n self.bot = bot\n", "issue": "BUG: all_members_are_administrators fails\nI don;t know if telegram api changed but the parameter to tell if all administrators in a group are admin has changed: to `all_members_are_administrators` Chat's fail to update with this parameter\r\n\r\n### Steps to reproduce\r\n1. Create a group with \"all members are administrators enabled\r\n2. Add a bot to it\r\n3. send the bot a message\r\n4. ` assert print(update.message.chat.all_members_are_admins)==True`\r\n\r\n### Expected behaviour\r\nIt should pass the assert\r\n\r\n### Actual behaviour\r\nThis failes the assert\r\n\r\n### Way to fix\r\nrename to `all_members_are_administrators`\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2016\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram Chat.\"\"\"\n\nfrom telegram import TelegramObject\n\n\nclass Chat(TelegramObject):\n \"\"\"This object represents a Telegram Chat.\n\n Attributes:\n id (int):\n type (str): Can be 'private', 'group', 'supergroup' or 'channel'\n title (str): Title, for channels and group chats\n username (str): Username, for private chats and channels if available\n first_name (str): First name of the other party in a private chat\n last_name (str): Last name of the other party in a private chat\n all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled.\n\n Args:\n id (int):\n type (str):\n title (Optional[str]):\n username(Optional[str]):\n first_name(Optional[str]):\n last_name(Optional[str]):\n bot (Optional[Bot]): The Bot to use for instance methods\n **kwargs (dict): Arbitrary keyword arguments.\n\n \"\"\"\n PRIVATE = 'private'\n GROUP = 'group'\n SUPERGROUP = 'supergroup'\n CHANNEL = 'channel'\n\n def __init__(self,\n id,\n type,\n title='',\n username='',\n first_name='',\n last_name='',\n all_members_are_admins=False,\n bot=None,\n **kwargs):\n # Required\n self.id = int(id)\n self.type = type\n # Optionals\n self.title = title\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n self.all_members_are_admins = all_members_are_admins\n\n self.bot = bot\n\n @staticmethod\n def de_json(data, bot):\n \"\"\"\n Args:\n data (dict):\n bot (telegram.Bot):\n\n Returns:\n telegram.Chat:\n \"\"\"\n if not data:\n return None\n\n return Chat(bot=bot, **data)\n\n def send_action(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.sendChatAction(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.sendChatAction(self.id, *args, **kwargs)\n\n def leave(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.leaveChat(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.leaveChat(self.id, *args, **kwargs)\n\n def get_administrators(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatAdministrators(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatAdministrators(self.id, *args, **kwargs)\n\n def get_members_count(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatMembersCount(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatMembersCount(self.id, *args, **kwargs)\n\n def get_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatMember(self.id, *args, **kwargs)\n\n def kick_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.kickChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.kickChatMember(self.id, *args, **kwargs)\n\n def unban_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.unbanChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.unbanChatMember(self.id, *args, **kwargs)\n", "path": "telegram/chat.py"}]}
1,910
271
gh_patches_debug_14246
rasdani/github-patches
git_diff
pyca__cryptography-1554
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> raises_unsupported_algorithm gives very unhelpful errors When the error tag is wrong you get errors along the lines of `assert <object object at 0xf0000000> is not <object object as 0xb0000000>`. This is not very helpful, it's not even particularly obvious that the error tag is actually what's wrong until you go and read the code. Should probably generate a useful error message or somehow give the tag objects a more useful `repr` output. </issue> <code> [start of src/cryptography/exceptions.py] 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 8 class _Reasons(object): 9 BACKEND_MISSING_INTERFACE = object() 10 UNSUPPORTED_HASH = object() 11 UNSUPPORTED_CIPHER = object() 12 UNSUPPORTED_PADDING = object() 13 UNSUPPORTED_MGF = object() 14 UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object() 15 UNSUPPORTED_ELLIPTIC_CURVE = object() 16 UNSUPPORTED_SERIALIZATION = object() 17 UNSUPPORTED_X509 = object() 18 19 20 class UnsupportedAlgorithm(Exception): 21 def __init__(self, message, reason=None): 22 super(UnsupportedAlgorithm, self).__init__(message) 23 self._reason = reason 24 25 26 class AlreadyFinalized(Exception): 27 pass 28 29 30 class AlreadyUpdated(Exception): 31 pass 32 33 34 class NotYetFinalized(Exception): 35 pass 36 37 38 class InvalidTag(Exception): 39 pass 40 41 42 class InvalidSignature(Exception): 43 pass 44 45 46 class InternalError(Exception): 47 pass 48 49 50 class InvalidKey(Exception): 51 pass 52 53 54 class InvalidToken(Exception): 55 pass 56 [end of src/cryptography/exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cryptography/exceptions.py b/src/cryptography/exceptions.py --- a/src/cryptography/exceptions.py +++ b/src/cryptography/exceptions.py @@ -4,17 +4,19 @@ from __future__ import absolute_import, division, print_function - -class _Reasons(object): - BACKEND_MISSING_INTERFACE = object() - UNSUPPORTED_HASH = object() - UNSUPPORTED_CIPHER = object() - UNSUPPORTED_PADDING = object() - UNSUPPORTED_MGF = object() - UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object() - UNSUPPORTED_ELLIPTIC_CURVE = object() - UNSUPPORTED_SERIALIZATION = object() - UNSUPPORTED_X509 = object() +from enum import Enum + + +class _Reasons(Enum): + BACKEND_MISSING_INTERFACE = 0 + UNSUPPORTED_HASH = 1 + UNSUPPORTED_CIPHER = 2 + UNSUPPORTED_PADDING = 3 + UNSUPPORTED_MGF = 4 + UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5 + UNSUPPORTED_ELLIPTIC_CURVE = 6 + UNSUPPORTED_SERIALIZATION = 7 + UNSUPPORTED_X509 = 8 class UnsupportedAlgorithm(Exception):
{"golden_diff": "diff --git a/src/cryptography/exceptions.py b/src/cryptography/exceptions.py\n--- a/src/cryptography/exceptions.py\n+++ b/src/cryptography/exceptions.py\n@@ -4,17 +4,19 @@\n \n from __future__ import absolute_import, division, print_function\n \n-\n-class _Reasons(object):\n- BACKEND_MISSING_INTERFACE = object()\n- UNSUPPORTED_HASH = object()\n- UNSUPPORTED_CIPHER = object()\n- UNSUPPORTED_PADDING = object()\n- UNSUPPORTED_MGF = object()\n- UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()\n- UNSUPPORTED_ELLIPTIC_CURVE = object()\n- UNSUPPORTED_SERIALIZATION = object()\n- UNSUPPORTED_X509 = object()\n+from enum import Enum\n+\n+\n+class _Reasons(Enum):\n+ BACKEND_MISSING_INTERFACE = 0\n+ UNSUPPORTED_HASH = 1\n+ UNSUPPORTED_CIPHER = 2\n+ UNSUPPORTED_PADDING = 3\n+ UNSUPPORTED_MGF = 4\n+ UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5\n+ UNSUPPORTED_ELLIPTIC_CURVE = 6\n+ UNSUPPORTED_SERIALIZATION = 7\n+ UNSUPPORTED_X509 = 8\n \n \n class UnsupportedAlgorithm(Exception):\n", "issue": "raises_unsupported_algorithm gives very unhelpful errors\nWhen the error tag is wrong you get errors along the lines of `assert <object object at 0xf0000000> is not <object object as 0xb0000000>`. This is not very helpful, it's not even particularly obvious that the error tag is actually what's wrong until you go and read the code.\n\nShould probably generate a useful error message or somehow give the tag objects a more useful `repr` output.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n\nclass _Reasons(object):\n BACKEND_MISSING_INTERFACE = object()\n UNSUPPORTED_HASH = object()\n UNSUPPORTED_CIPHER = object()\n UNSUPPORTED_PADDING = object()\n UNSUPPORTED_MGF = object()\n UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()\n UNSUPPORTED_ELLIPTIC_CURVE = object()\n UNSUPPORTED_SERIALIZATION = object()\n UNSUPPORTED_X509 = object()\n\n\nclass UnsupportedAlgorithm(Exception):\n def __init__(self, message, reason=None):\n super(UnsupportedAlgorithm, self).__init__(message)\n self._reason = reason\n\n\nclass AlreadyFinalized(Exception):\n pass\n\n\nclass AlreadyUpdated(Exception):\n pass\n\n\nclass NotYetFinalized(Exception):\n pass\n\n\nclass InvalidTag(Exception):\n pass\n\n\nclass InvalidSignature(Exception):\n pass\n\n\nclass InternalError(Exception):\n pass\n\n\nclass InvalidKey(Exception):\n pass\n\n\nclass InvalidToken(Exception):\n pass\n", "path": "src/cryptography/exceptions.py"}]}
1,015
276
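The patch works because `enum.Enum` members carry their name in `repr()`, whereas a bare `object()` only shows a memory address. A short sketch of the difference (assuming the standard `enum` module, or the `enum34` backport on the Python 2 versions this library still supported at the time):

```python
from enum import Enum


class _Reasons(Enum):
    UNSUPPORTED_HASH = 1


tag = object()
print(repr(tag))                        # <object object at 0x7f...> (opaque)
print(repr(_Reasons.UNSUPPORTED_HASH))  # <_Reasons.UNSUPPORTED_HASH: 1>

# A failed identity comparison in a test now names the tag in the assertion
# message instead of printing two anonymous addresses.
assert _Reasons.UNSUPPORTED_HASH is _Reasons.UNSUPPORTED_HASH
```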
gh_patches_debug_3149
rasdani/github-patches
git_diff
huggingface__dataset-viewer-479
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use main instead of master to load the datasets The main branch in https://github.com/huggingface/datasets is now `main`, not `master` anymore. Note that it's backward compatible, so no need to hurry </issue> <code> [start of services/worker/src/worker/constants.py] 1 from typing import Optional 2 3 DEFAULT_ASSETS_BASE_URL: str = "assets" 4 DEFAULT_ASSETS_DIRECTORY: None = None 5 DEFAULT_DATASETS_REVISION: str = "master" 6 DEFAULT_HF_TOKEN: Optional[str] = None 7 DEFAULT_LOG_LEVEL: str = "INFO" 8 DEFAULT_MAX_JOB_RETRIES: int = 3 9 DEFAULT_MAX_JOBS_PER_DATASET: int = 1 10 DEFAULT_MAX_LOAD_PCT: int = 70 11 DEFAULT_MAX_MEMORY_PCT: int = 80 12 DEFAULT_MAX_SIZE_FALLBACK: int = 100_000_000 13 DEFAULT_MIN_CELL_BYTES: int = 100 14 DEFAULT_MONGO_CACHE_DATABASE: str = "datasets_server_cache" 15 DEFAULT_MONGO_QUEUE_DATABASE: str = "datasets_server_queue" 16 DEFAULT_MONGO_URL: str = "mongodb://localhost:27018" 17 DEFAULT_ROWS_MAX_BYTES: int = 1_000_000 18 DEFAULT_ROWS_MAX_NUMBER: int = 100 19 DEFAULT_ROWS_MIN_NUMBER: int = 10 20 DEFAULT_WORKER_SLEEP_SECONDS: int = 15 21 DEFAULT_WORKER_QUEUE: str = "datasets" 22 [end of services/worker/src/worker/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/services/worker/src/worker/constants.py b/services/worker/src/worker/constants.py --- a/services/worker/src/worker/constants.py +++ b/services/worker/src/worker/constants.py @@ -2,7 +2,7 @@ DEFAULT_ASSETS_BASE_URL: str = "assets" DEFAULT_ASSETS_DIRECTORY: None = None -DEFAULT_DATASETS_REVISION: str = "master" +DEFAULT_DATASETS_REVISION: str = "main" DEFAULT_HF_TOKEN: Optional[str] = None DEFAULT_LOG_LEVEL: str = "INFO" DEFAULT_MAX_JOB_RETRIES: int = 3
{"golden_diff": "diff --git a/services/worker/src/worker/constants.py b/services/worker/src/worker/constants.py\n--- a/services/worker/src/worker/constants.py\n+++ b/services/worker/src/worker/constants.py\n@@ -2,7 +2,7 @@\n \n DEFAULT_ASSETS_BASE_URL: str = \"assets\"\n DEFAULT_ASSETS_DIRECTORY: None = None\n-DEFAULT_DATASETS_REVISION: str = \"master\"\n+DEFAULT_DATASETS_REVISION: str = \"main\"\n DEFAULT_HF_TOKEN: Optional[str] = None\n DEFAULT_LOG_LEVEL: str = \"INFO\"\n DEFAULT_MAX_JOB_RETRIES: int = 3\n", "issue": "Use main instead of master to load the datasets\nThe main branch in https://github.com/huggingface/datasets is now `main`, not `master` anymore. Note that it's backward compatible, so no need to hurry\n", "before_files": [{"content": "from typing import Optional\n\nDEFAULT_ASSETS_BASE_URL: str = \"assets\"\nDEFAULT_ASSETS_DIRECTORY: None = None\nDEFAULT_DATASETS_REVISION: str = \"master\"\nDEFAULT_HF_TOKEN: Optional[str] = None\nDEFAULT_LOG_LEVEL: str = \"INFO\"\nDEFAULT_MAX_JOB_RETRIES: int = 3\nDEFAULT_MAX_JOBS_PER_DATASET: int = 1\nDEFAULT_MAX_LOAD_PCT: int = 70\nDEFAULT_MAX_MEMORY_PCT: int = 80\nDEFAULT_MAX_SIZE_FALLBACK: int = 100_000_000\nDEFAULT_MIN_CELL_BYTES: int = 100\nDEFAULT_MONGO_CACHE_DATABASE: str = \"datasets_server_cache\"\nDEFAULT_MONGO_QUEUE_DATABASE: str = \"datasets_server_queue\"\nDEFAULT_MONGO_URL: str = \"mongodb://localhost:27018\"\nDEFAULT_ROWS_MAX_BYTES: int = 1_000_000\nDEFAULT_ROWS_MAX_NUMBER: int = 100\nDEFAULT_ROWS_MIN_NUMBER: int = 10\nDEFAULT_WORKER_SLEEP_SECONDS: int = 15\nDEFAULT_WORKER_QUEUE: str = \"datasets\"\n", "path": "services/worker/src/worker/constants.py"}]}
870
133
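For context, the constant is only a default revision string; the sketch below shows the kind of call it presumably ends up in. The exact call site is not part of this record, so treat the `load_dataset` usage as an assumption, and note the issue itself states the rename is backward compatible.

```python
# Assumed consumer of DEFAULT_DATASETS_REVISION; the real worker code that
# reads the constant is outside this record, so this is illustrative only.
from datasets import load_dataset

DEFAULT_DATASETS_REVISION = "main"  # was "master"

ds = load_dataset("glue", "mrpc", revision=DEFAULT_DATASETS_REVISION,
                  split="train[:10]")
print(len(ds))
```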
gh_patches_debug_11689
rasdani/github-patches
git_diff
python-poetry__poetry-1140
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Poetry fails to install p4python due to read-only files <!-- Hi there! Thank you for discovering and submitting an issue. Before you submit this; let's make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill these first. --> <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate. - [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). <!-- Once those are done, if you're able to fill in the following list with your information, it'd be very helpful to whoever handles the issue. --> - **OS version and name**: Windows 10 - **Poetry version**: poetry 0.12.2 - **Link of a [pyproject.toml Gist](https://gist.github.com/epage/5f28e3b1e5eeb9a30697363e369a5fde) - **Link of a [backtrace Gist](https://gist.github.com/epage/2584ad981ff5d9f175d55212b0192987) ## Issue In digging into the problem, it seems that p4python's files are all marked read-only, causing windows to error when trying to delete them via `shutil.rmtree` which is invoked by poetry's custom temp directory handling. </issue> <code> [start of poetry/utils/helpers.py] 1 import os 2 import re 3 import shutil 4 import stat 5 import tempfile 6 7 from contextlib import contextmanager 8 from typing import List 9 from typing import Optional 10 11 from poetry.config.config import Config 12 from poetry.utils._compat import Path 13 from poetry.version import Version 14 15 16 try: 17 from collections.abc import Mapping 18 except ImportError: 19 from collections import Mapping 20 21 22 _canonicalize_regex = re.compile("[-_]+") 23 24 25 def canonicalize_name(name): # type: (str) -> str 26 return _canonicalize_regex.sub("-", name).lower() 27 28 29 def module_name(name): # type: (str) -> str 30 return canonicalize_name(name).replace(".", "_").replace("-", "_") 31 32 33 def normalize_version(version): # type: (str) -> str 34 return str(Version(version)) 35 36 37 @contextmanager 38 def temporary_directory(*args, **kwargs): 39 try: 40 from tempfile import TemporaryDirectory 41 42 with TemporaryDirectory(*args, **kwargs) as name: 43 yield name 44 except ImportError: 45 name = tempfile.mkdtemp(*args, **kwargs) 46 47 yield name 48 49 shutil.rmtree(name) 50 51 52 def parse_requires(requires): # type: (str) -> List[str] 53 lines = requires.split("\n") 54 55 requires_dist = [] 56 in_section = False 57 current_marker = None 58 for line in lines: 59 line = line.strip() 60 if not line: 61 if in_section: 62 in_section = False 63 64 continue 65 66 if line.startswith("["): 67 # extras or conditional dependencies 68 marker = line.lstrip("[").rstrip("]") 69 if ":" not in marker: 70 extra, marker = marker, None 71 else: 72 extra, marker = marker.split(":") 73 74 if extra: 75 if marker: 76 marker = '{} and extra == "{}"'.format(marker, extra) 77 else: 78 marker = 'extra == "{}"'.format(extra) 79 80 if marker: 81 current_marker = marker 82 83 continue 84 85 if current_marker: 86 line = "{} ; {}".format(line, current_marker) 87 88 requires_dist.append(line) 89 90 return requires_dist 91 92 93 def get_cert(config, repository_name): # type: (Config, str) -> Optional[Path] 94 cert = 
config.get("certificates.{}.cert".format(repository_name)) 95 if cert: 96 return Path(cert) 97 else: 98 return None 99 100 101 def get_client_cert(config, repository_name): # type: (Config, str) -> Optional[Path] 102 client_cert = config.get("certificates.{}.client-cert".format(repository_name)) 103 if client_cert: 104 return Path(client_cert) 105 else: 106 return None 107 108 109 def _on_rm_error(func, path, exc_info): 110 if not os.path.exists(path): 111 return 112 113 os.chmod(path, stat.S_IWRITE) 114 func(path) 115 116 117 def safe_rmtree(path): 118 if Path(path).is_symlink(): 119 return os.unlink(str(path)) 120 121 shutil.rmtree(path, onerror=_on_rm_error) 122 123 124 def merge_dicts(d1, d2): 125 for k, v in d2.items(): 126 if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping): 127 merge_dicts(d1[k], d2[k]) 128 else: 129 d1[k] = d2[k] 130 [end of poetry/utils/helpers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/poetry/utils/helpers.py b/poetry/utils/helpers.py --- a/poetry/utils/helpers.py +++ b/poetry/utils/helpers.py @@ -34,19 +34,18 @@ return str(Version(version)) +def _del_ro(action, name, exc): + os.chmod(name, stat.S_IWRITE) + os.remove(name) + + @contextmanager def temporary_directory(*args, **kwargs): - try: - from tempfile import TemporaryDirectory - - with TemporaryDirectory(*args, **kwargs) as name: - yield name - except ImportError: - name = tempfile.mkdtemp(*args, **kwargs) + name = tempfile.mkdtemp(*args, **kwargs) - yield name + yield name - shutil.rmtree(name) + shutil.rmtree(name, onerror=_del_ro) def parse_requires(requires): # type: (str) -> List[str]
{"golden_diff": "diff --git a/poetry/utils/helpers.py b/poetry/utils/helpers.py\n--- a/poetry/utils/helpers.py\n+++ b/poetry/utils/helpers.py\n@@ -34,19 +34,18 @@\n return str(Version(version))\n \n \n+def _del_ro(action, name, exc):\n+ os.chmod(name, stat.S_IWRITE)\n+ os.remove(name)\n+\n+\n @contextmanager\n def temporary_directory(*args, **kwargs):\n- try:\n- from tempfile import TemporaryDirectory\n-\n- with TemporaryDirectory(*args, **kwargs) as name:\n- yield name\n- except ImportError:\n- name = tempfile.mkdtemp(*args, **kwargs)\n+ name = tempfile.mkdtemp(*args, **kwargs)\n \n- yield name\n+ yield name\n \n- shutil.rmtree(name)\n+ shutil.rmtree(name, onerror=_del_ro)\n \n \n def parse_requires(requires): # type: (str) -> List[str]\n", "issue": "Poetry fails to install p4python due to read-only files\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: Windows 10\r\n- **Poetry version**: poetry 0.12.2\r\n- **Link of a [pyproject.toml Gist](https://gist.github.com/epage/5f28e3b1e5eeb9a30697363e369a5fde)\r\n- **Link of a [backtrace Gist](https://gist.github.com/epage/2584ad981ff5d9f175d55212b0192987)\r\n\r\n## Issue\r\n\r\nIn digging into the problem, it seems that p4python's files are all marked read-only, causing windows to error when trying to delete them via `shutil.rmtree` which is invoked by poetry's custom temp directory handling.\n", "before_files": [{"content": "import os\nimport re\nimport shutil\nimport stat\nimport tempfile\n\nfrom contextlib import contextmanager\nfrom typing import List\nfrom typing import Optional\n\nfrom poetry.config.config import Config\nfrom poetry.utils._compat import Path\nfrom poetry.version import Version\n\n\ntry:\n from collections.abc import Mapping\nexcept ImportError:\n from collections import Mapping\n\n\n_canonicalize_regex = re.compile(\"[-_]+\")\n\n\ndef canonicalize_name(name): # type: (str) -> str\n return _canonicalize_regex.sub(\"-\", name).lower()\n\n\ndef module_name(name): # type: (str) -> str\n return canonicalize_name(name).replace(\".\", \"_\").replace(\"-\", \"_\")\n\n\ndef normalize_version(version): # type: (str) -> str\n return str(Version(version))\n\n\n@contextmanager\ndef temporary_directory(*args, **kwargs):\n try:\n from tempfile import TemporaryDirectory\n\n with TemporaryDirectory(*args, **kwargs) as name:\n yield name\n except ImportError:\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name)\n\n\ndef parse_requires(requires): # type: (str) -> List[str]\n lines = requires.split(\"\\n\")\n\n requires_dist = []\n in_section = False\n current_marker = None\n for line in lines:\n line = line.strip()\n if not line:\n if in_section:\n in_section = False\n\n continue\n\n if 
line.startswith(\"[\"):\n # extras or conditional dependencies\n marker = line.lstrip(\"[\").rstrip(\"]\")\n if \":\" not in marker:\n extra, marker = marker, None\n else:\n extra, marker = marker.split(\":\")\n\n if extra:\n if marker:\n marker = '{} and extra == \"{}\"'.format(marker, extra)\n else:\n marker = 'extra == \"{}\"'.format(extra)\n\n if marker:\n current_marker = marker\n\n continue\n\n if current_marker:\n line = \"{} ; {}\".format(line, current_marker)\n\n requires_dist.append(line)\n\n return requires_dist\n\n\ndef get_cert(config, repository_name): # type: (Config, str) -> Optional[Path]\n cert = config.get(\"certificates.{}.cert\".format(repository_name))\n if cert:\n return Path(cert)\n else:\n return None\n\n\ndef get_client_cert(config, repository_name): # type: (Config, str) -> Optional[Path]\n client_cert = config.get(\"certificates.{}.client-cert\".format(repository_name))\n if client_cert:\n return Path(client_cert)\n else:\n return None\n\n\ndef _on_rm_error(func, path, exc_info):\n if not os.path.exists(path):\n return\n\n os.chmod(path, stat.S_IWRITE)\n func(path)\n\n\ndef safe_rmtree(path):\n if Path(path).is_symlink():\n return os.unlink(str(path))\n\n shutil.rmtree(path, onerror=_on_rm_error)\n\n\ndef merge_dicts(d1, d2):\n for k, v in d2.items():\n if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping):\n merge_dicts(d1[k], d2[k])\n else:\n d1[k] = d2[k]\n", "path": "poetry/utils/helpers.py"}]}
1,931
215
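The root cause generalizes beyond p4python: on Windows, `shutil.rmtree` raises a permission error on read-only files unless an `onerror` hook clears the read-only bit and retries. A self-contained reproduction of the pattern the patch adopts (the same `_del_ro` idea as the diff above):

```python
import os
import shutil
import stat
import tempfile


def _del_ro(action, name, exc):
    # Clear the read-only bit, then retry the delete that just failed.
    os.chmod(name, stat.S_IWRITE)
    os.remove(name)


root = tempfile.mkdtemp()
target = os.path.join(root, "p4python-like.pyd")
with open(target, "w") as handle:
    handle.write("payload")
os.chmod(target, stat.S_IREAD)  # mimic p4python's read-only install files

# Plain shutil.rmtree(root) fails on Windows here; the hook makes it succeed
# (on POSIX the hook is simply never triggered).
shutil.rmtree(root, onerror=_del_ro)
assert not os.path.exists(root)
```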
gh_patches_debug_23207
rasdani/github-patches
git_diff
getsentry__snuba-1794
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Snuba cleanup for sentry onpremise
### Environment

Sentry self-hosted 21.3.0 (based on the docker-compose file here: https://github.com/getsentry/onpremise/blob/21.3.0/docker-compose.yml)

### Steps to Reproduce

1) Set up all containers and start the snuba-cleanup container
2) Check the logs for snuba-cleanup: every 5 minutes the log shows `Dropped 0 partitions on None`
It looks like the CLICKHOUSE_HOST variable is ignored here:
https://github.com/getsentry/snuba/blob/41d7fe76aaf8a594e8f6e84015607dcde3f67ad4/snuba/cli/cleanup.py#L13
After manually running the command in the container - `snuba cleanup --clickhouse-host CLICKHOUSE_HOST_HERE --dry-run True` -
I got `Dropped 0 partitions on CLICKHOUSE_HOST_HERE`

### Expected Result

The variable https://github.com/getsentry/onpremise/blob/bdd2686021cfea07507bc07d2756ac34a775c680/docker-compose.yml#L44 is passed into the cleanup command

### Actual Result

The variable is `None` instead of the ClickHouse host.

I'm not sure whether this is a bug or not.
</issue>
<code>
[start of snuba/cli/cleanup.py]
1 from typing import Optional
2
3 import click
4
5 from snuba.clusters.cluster import ClickhouseClientSettings
6 from snuba.datasets.storages import StorageKey
7 from snuba.datasets.storages.factory import get_writable_storage
8 from snuba.environment import setup_logging
9
10
11 @click.command()
12 @click.option(
13     "--clickhouse-host", help="Clickhouse server to write to.",
14 )
15 @click.option(
16     "--clickhouse-port", type=int, help="Clickhouse native port to write to.",
17 )
18 @click.option(
19     "--dry-run",
20     type=bool,
21     default=True,
22     help="If true, only print which partitions would be dropped.",
23 )
24 @click.option(
25     "--storage",
26     "storage_name",
27     default="events",
28     type=click.Choice(["events", "errors", "transactions"]),
29     help="The storage to target",
30 )
31 @click.option("--log-level", help="Logging level to use.")
32 def cleanup(
33     *,
34     clickhouse_host: Optional[str],
35     clickhouse_port: Optional[int],
36     dry_run: bool,
37     storage_name: str,
38     log_level: Optional[str] = None,
39 ) -> None:
40     """
41     Deletes stale partitions for ClickHouse tables
42     """
43
44     setup_logging(log_level)
45
46     from snuba.cleanup import run_cleanup, logger
47     from snuba.clickhouse.native import ClickhousePool
48
49     storage = get_writable_storage(StorageKey(storage_name))
50
51     (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()
52
53     database = storage.get_cluster().get_database()
54
55     if clickhouse_host and clickhouse_port:
56         connection = ClickhousePool(
57             clickhouse_host,
58             clickhouse_port,
59             clickhouse_user,
60             clickhouse_password,
61             database,
62         )
63     elif not storage.get_cluster().is_single_node():
64         raise click.ClickException("Provide ClickHouse host and port for cleanup")
65     else:
66         connection = storage.get_cluster().get_query_connection(
67             ClickhouseClientSettings.CLEANUP
68         )
69
70     num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)
71     logger.info("Dropped %s partitions on %s" % (num_dropped, clickhouse_host))
[end of snuba/cli/cleanup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/snuba/cli/cleanup.py b/snuba/cli/cleanup.py --- a/snuba/cli/cleanup.py +++ b/snuba/cli/cleanup.py @@ -50,7 +50,8 @@ (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials() - database = storage.get_cluster().get_database() + cluster = storage.get_cluster() + database = cluster.get_database() if clickhouse_host and clickhouse_port: connection = ClickhousePool( @@ -60,12 +61,12 @@ clickhouse_password, database, ) - elif not storage.get_cluster().is_single_node(): + elif not cluster.is_single_node(): raise click.ClickException("Provide ClickHouse host and port for cleanup") else: - connection = storage.get_cluster().get_query_connection( + connection = cluster.get_query_connection( ClickhouseClientSettings.CLEANUP ) num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run) - logger.info("Dropped %s partitions on %s" % (num_dropped, clickhouse_host)) + logger.info("Dropped %s partitions on %s" % (num_dropped, cluster))
{"golden_diff": "diff --git a/snuba/cli/cleanup.py b/snuba/cli/cleanup.py\n--- a/snuba/cli/cleanup.py\n+++ b/snuba/cli/cleanup.py\n@@ -50,7 +50,8 @@\n \n (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()\n \n- database = storage.get_cluster().get_database()\n+ cluster = storage.get_cluster()\n+ database = cluster.get_database()\n \n if clickhouse_host and clickhouse_port:\n connection = ClickhousePool(\n@@ -60,12 +61,12 @@\n clickhouse_password,\n database,\n )\n- elif not storage.get_cluster().is_single_node():\n+ elif not cluster.is_single_node():\n raise click.ClickException(\"Provide ClickHouse host and port for cleanup\")\n else:\n- connection = storage.get_cluster().get_query_connection(\n+ connection = cluster.get_query_connection(\n ClickhouseClientSettings.CLEANUP\n )\n \n num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)\n- logger.info(\"Dropped %s partitions on %s\" % (num_dropped, clickhouse_host))\n+ logger.info(\"Dropped %s partitions on %s\" % (num_dropped, cluster))\n", "issue": "Snuba cleanup for sentry onpremise\n### Environment\r\n\r\nSentry self-hosted 21.3.0 (based on docker-compose from here https://github.com/getsentry/onpremise/blob/21.3.0/docker-compose.yml)\r\n\r\n### Steps to Reproduce\r\n\r\n1) Setup all containers and up snuba-cleanup container\r\n2) Check logs for snuba-cleanup: Every 5 minutes in log - `Dropped 0 partitions on None`\r\nIt looks like variable CLICKHOUSE_HOST is ignored here\r\nhttps://github.com/getsentry/snuba/blob/41d7fe76aaf8a594e8f6e84015607dcde3f67ad4/snuba/cli/cleanup.py#L13\r\nAfter manual run command in container - `snuba cleanup --clickhouse-host CLICKHOUSE_HOST_HERE --dry-run True`\r\ni got `Dropped 0 partitions on CLICKHOUSE_HOST_HERE`\r\n\r\n### Expected Result\r\n\r\nPass variable https://github.com/getsentry/onpremise/blob/bdd2686021cfea07507bc07d2756ac34a775c680/docker-compose.yml#L44 into cleanup command\r\n\r\n### Actual Result\r\n\r\nvariable is `None` instead of clickhouse host\r\n\r\nI'am not sure, bug this or not.\n", "before_files": [{"content": "from typing import Optional\n\nimport click\n\nfrom snuba.clusters.cluster import ClickhouseClientSettings\nfrom snuba.datasets.storages import StorageKey\nfrom snuba.datasets.storages.factory import get_writable_storage\nfrom snuba.environment import setup_logging\n\n\[email protected]()\[email protected](\n \"--clickhouse-host\", help=\"Clickhouse server to write to.\",\n)\[email protected](\n \"--clickhouse-port\", type=int, help=\"Clickhouse native port to write to.\",\n)\[email protected](\n \"--dry-run\",\n type=bool,\n default=True,\n help=\"If true, only print which partitions would be dropped.\",\n)\[email protected](\n \"--storage\",\n \"storage_name\",\n default=\"events\",\n type=click.Choice([\"events\", \"errors\", \"transactions\"]),\n help=\"The storage to target\",\n)\[email protected](\"--log-level\", help=\"Logging level to use.\")\ndef cleanup(\n *,\n clickhouse_host: Optional[str],\n clickhouse_port: Optional[int],\n dry_run: bool,\n storage_name: str,\n log_level: Optional[str] = None,\n) -> None:\n \"\"\"\n Deletes stale partitions for ClickHouse tables\n \"\"\"\n\n setup_logging(log_level)\n\n from snuba.cleanup import run_cleanup, logger\n from snuba.clickhouse.native import ClickhousePool\n\n storage = get_writable_storage(StorageKey(storage_name))\n\n (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()\n\n database = storage.get_cluster().get_database()\n\n if 
clickhouse_host and clickhouse_port:\n connection = ClickhousePool(\n clickhouse_host,\n clickhouse_port,\n clickhouse_user,\n clickhouse_password,\n database,\n )\n elif not storage.get_cluster().is_single_node():\n raise click.ClickException(\"Provide ClickHouse host and port for cleanup\")\n else:\n connection = storage.get_cluster().get_query_connection(\n ClickhouseClientSettings.CLEANUP\n )\n\n num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)\n logger.info(\"Dropped %s partitions on %s\" % (num_dropped, clickhouse_host))\n", "path": "snuba/cli/cleanup.py"}]}
1,432
275
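The log line was interpolating the optional CLI flag rather than the connection actually used, so single-node runs printed `None`. A stripped-down stand-in (not real snuba code; the `Cluster` repr here is invented for illustration) of the logging before and after the fix:

```python
from typing import Optional


class Cluster:
    """Stand-in for snuba's ClickHouse cluster object."""

    def __init__(self, host: str) -> None:
        self.host = host

    def __repr__(self) -> str:
        return "ClickhouseCluster(host=%r)" % self.host


def log_line(clickhouse_host: Optional[str], cluster: Cluster, fixed: bool) -> str:
    num_dropped = 0
    # before: interpolate the possibly-None CLI flag
    # after:  interpolate the cluster that was actually queried
    target = cluster if fixed else clickhouse_host
    return "Dropped %s partitions on %s" % (num_dropped, target)


cluster = Cluster("clickhouse:9000")
print(log_line(None, cluster, fixed=False))  # Dropped 0 partitions on None
print(log_line(None, cluster, fixed=True))   # names the real cluster instead
```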
gh_patches_debug_11354
rasdani/github-patches
git_diff
getnikola__nikola-2523
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Nikola generates invalid html5 when markdown footnote extension is used The [default output type](http://pythonhosted.org/Markdown/reference.html#markdown) of the python markdown library is xhtml1. The 4 templates that ship with Nikola have <!DOCTYPE html> which defines them as html5, so I'm assuming that we're intending to generate html5. When the footnote markdown extension is used, it generates invalid html5 according to the w3c validator. `<a class="footnote-ref" href="..." rev="footnote">...</a>` (rev="footnote" is valid html4, but not html5) The markdown library indicates that this is invalid html5 (https://github.com/waylan/Python-Markdown/blob/master/markdown/extensions/footnotes.py#L149) so we can trigger the correct behaviour by setting the output_format. Given the markdown library does not make much use of the output_format variable, I don't think this is likely to materially change the output for many people at all - https://github.com/waylan/Python-Markdown/search?utf8=%E2%9C%93&q=output_format) </issue> <code> [start of nikola/plugins/compile/markdown/__init__.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2016 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
26 27 """Implementation of compile_html based on markdown.""" 28 29 from __future__ import unicode_literals 30 31 import io 32 import os 33 34 try: 35 from markdown import markdown 36 except ImportError: 37 markdown = None # NOQA 38 nikola_extension = None 39 gist_extension = None 40 podcast_extension = None 41 42 from nikola.plugin_categories import PageCompiler 43 from nikola.utils import makedirs, req_missing, write_metadata 44 45 46 class CompileMarkdown(PageCompiler): 47 """Compile Markdown into HTML.""" 48 49 name = "markdown" 50 friendly_name = "Markdown" 51 demote_headers = True 52 extensions = [] 53 site = None 54 55 def set_site(self, site): 56 """Set Nikola site.""" 57 super(CompileMarkdown, self).set_site(site) 58 self.config_dependencies = [] 59 for plugin_info in self.get_compiler_extensions(): 60 self.config_dependencies.append(plugin_info.name) 61 self.extensions.append(plugin_info.plugin_object) 62 plugin_info.plugin_object.short_help = plugin_info.description 63 64 self.config_dependencies.append(str(sorted(site.config.get("MARKDOWN_EXTENSIONS")))) 65 66 def compile_html(self, source, dest, is_two_file=True): 67 """Compile source file into HTML and save as dest.""" 68 if markdown is None: 69 req_missing(['markdown'], 'build this site (compile Markdown)') 70 makedirs(os.path.dirname(dest)) 71 self.extensions += self.site.config.get("MARKDOWN_EXTENSIONS") 72 try: 73 post = self.site.post_per_input_file[source] 74 except KeyError: 75 post = None 76 with io.open(dest, "w+", encoding="utf8") as out_file: 77 with io.open(source, "r", encoding="utf8") as in_file: 78 data = in_file.read() 79 if not is_two_file: 80 _, data = self.split_metadata(data) 81 output = markdown(data, self.extensions) 82 output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post)) 83 out_file.write(output) 84 if post is None: 85 if shortcode_deps: 86 self.logger.error( 87 "Cannot save dependencies for post {0} due to unregistered source file name", 88 source) 89 else: 90 post._depfile[dest] += shortcode_deps 91 92 def create_post(self, path, **kw): 93 """Create a new post.""" 94 content = kw.pop('content', None) 95 onefile = kw.pop('onefile', False) 96 # is_page is not used by create_post as of now. 97 kw.pop('is_page', False) 98 99 metadata = {} 100 metadata.update(self.default_metadata) 101 metadata.update(kw) 102 makedirs(os.path.dirname(path)) 103 if not content.endswith('\n'): 104 content += '\n' 105 with io.open(path, "w+", encoding="utf8") as fd: 106 if onefile: 107 fd.write('<!-- \n') 108 fd.write(write_metadata(metadata)) 109 fd.write('-->\n\n') 110 fd.write(content) 111 [end of nikola/plugins/compile/markdown/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nikola/plugins/compile/markdown/__init__.py b/nikola/plugins/compile/markdown/__init__.py --- a/nikola/plugins/compile/markdown/__init__.py +++ b/nikola/plugins/compile/markdown/__init__.py @@ -78,7 +78,7 @@ data = in_file.read() if not is_two_file: _, data = self.split_metadata(data) - output = markdown(data, self.extensions) + output = markdown(data, self.extensions, output_format="html5") output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post)) out_file.write(output) if post is None:
{"golden_diff": "diff --git a/nikola/plugins/compile/markdown/__init__.py b/nikola/plugins/compile/markdown/__init__.py\n--- a/nikola/plugins/compile/markdown/__init__.py\n+++ b/nikola/plugins/compile/markdown/__init__.py\n@@ -78,7 +78,7 @@\n data = in_file.read()\n if not is_two_file:\n _, data = self.split_metadata(data)\n- output = markdown(data, self.extensions)\n+ output = markdown(data, self.extensions, output_format=\"html5\")\n output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))\n out_file.write(output)\n if post is None:\n", "issue": "Nikola generates invalid html5 when markdown footnote extension is used\nThe [default output type](http://pythonhosted.org/Markdown/reference.html#markdown) of the python markdown library is xhtml1. The 4 templates that ship with Nikola have <!DOCTYPE html> which defines them as html5, so I'm assuming that we're intending to generate html5.\n\nWhen the footnote markdown extension is used, it generates invalid html5 according to the w3c validator.\n\n`<a class=\"footnote-ref\" href=\"...\" rev=\"footnote\">...</a>`\n\n(rev=\"footnote\" is valid html4, but not html5)\n\nThe markdown library indicates that this is invalid html5 (https://github.com/waylan/Python-Markdown/blob/master/markdown/extensions/footnotes.py#L149) so we can trigger the correct behaviour by setting the output_format.\n\nGiven the markdown library does not make much use of the output_format variable, I don't think this is likely to materially change the output for many people at all - https://github.com/waylan/Python-Markdown/search?utf8=%E2%9C%93&q=output_format)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Implementation of compile_html based on markdown.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport io\nimport os\n\ntry:\n from markdown import markdown\nexcept ImportError:\n markdown = None # NOQA\n nikola_extension = None\n gist_extension = None\n podcast_extension = None\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import makedirs, req_missing, write_metadata\n\n\nclass CompileMarkdown(PageCompiler):\n \"\"\"Compile Markdown into HTML.\"\"\"\n\n name = \"markdown\"\n friendly_name = \"Markdown\"\n demote_headers = True\n extensions = []\n site = None\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n super(CompileMarkdown, self).set_site(site)\n self.config_dependencies = []\n for plugin_info in self.get_compiler_extensions():\n self.config_dependencies.append(plugin_info.name)\n self.extensions.append(plugin_info.plugin_object)\n plugin_info.plugin_object.short_help = plugin_info.description\n\n self.config_dependencies.append(str(sorted(site.config.get(\"MARKDOWN_EXTENSIONS\"))))\n\n def compile_html(self, source, dest, is_two_file=True):\n \"\"\"Compile source file into HTML and save as dest.\"\"\"\n if markdown is None:\n req_missing(['markdown'], 'build this site (compile Markdown)')\n makedirs(os.path.dirname(dest))\n self.extensions += self.site.config.get(\"MARKDOWN_EXTENSIONS\")\n try:\n post = self.site.post_per_input_file[source]\n except KeyError:\n post = None\n with io.open(dest, \"w+\", encoding=\"utf8\") as out_file:\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n data = in_file.read()\n if not is_two_file:\n _, data = self.split_metadata(data)\n output = markdown(data, self.extensions)\n output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))\n out_file.write(output)\n if post is None:\n if shortcode_deps:\n self.logger.error(\n \"Cannot save dependencies for post {0} due to unregistered source file name\",\n source)\n else:\n post._depfile[dest] += shortcode_deps\n\n def create_post(self, path, **kw):\n \"\"\"Create a new post.\"\"\"\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n # is_page is not used by create_post as of now.\n kw.pop('is_page', False)\n\n metadata = {}\n metadata.update(self.default_metadata)\n metadata.update(kw)\n makedirs(os.path.dirname(path))\n if not content.endswith('\\n'):\n content += '\\n'\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if onefile:\n fd.write('<!-- \\n')\n fd.write(write_metadata(metadata))\n fd.write('-->\\n\\n')\n fd.write(content)\n", "path": "nikola/plugins/compile/markdown/__init__.py"}]}
1,902
161
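To check the behavior directly, the snippet below renders a footnote with and without `output_format="html5"`. Note it targets the Python-Markdown 2.x line this report was written against; Markdown 3.x later dropped the `rev` attribute and changed the default output format, so results differ there.

```python
import markdown  # Python-Markdown 2.x assumed; behavior changed in 3.x

text = "Hello[^1]\n\n[^1]: a footnote\n"

# The default serializer (xhtml1 in 2.x) emits rev="footnote" on the
# footnote-ref link, which is valid HTML4/XHTML1 but invalid HTML5.
print(markdown.markdown(text, extensions=["markdown.extensions.footnotes"]))

# Forcing html5 makes the footnote extension skip the rev attribute.
print(markdown.markdown(
    text,
    extensions=["markdown.extensions.footnotes"],
    output_format="html5",
))
```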
gh_patches_debug_10501
rasdani/github-patches
git_diff
pypa__virtualenv-1964
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: 'NoneType' object has no attribute 'group' with virtualenv 20.0.32 on CygWin **Issue** We are also testing on CygWin (using appveyor), and since this morning, tox fails creating a virtualenv with an AttributeError. Unfortunately, tox does not display the entire traceback, but just the exception. Since virtualenv 20.0.32 was released just 4h ago, I suspect that to be the culprit. From https://ci.appveyor.com/project/andy-maier/pywbem/builds/35526352/job/l3k6a2vb39bweqsw#L936: ``` if "%UNIX_PATH%"=="C:\cygwin64\bin" ( bash -c "which tox && tox -vv -e %TOX_ENV% && echo appveyor.yml: tox rc=$?" ) /usr/bin/tox using tox.ini: /cygdrive/c/projects/pywbem/tox.ini (pid 1822) using tox-3.20.0 from /usr/lib/python3.8/site-packages/tox/__init__.py (pid 1822) skipping sdist step cygwin64_py38 uses /usr/bin/python3.8.exe cygwin64_py38 start: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38 cygwin64_py38 cannot reuse: no previous config /cygdrive/c/projects/pywbem/.tox/cygwin64_py38/.tox-config1 cygwin64_py38 create: /cygdrive/c/projects/pywbem/.tox/cygwin64_py38 setting PATH=/cygdrive/c/projects/pywbem/.tox/cygwin64_py38/bin:/usr/bin:/cygdrive/c/Windows/system32:/cygdrive/c/Windows:/cygdrive/c/ProgramData/chocolatey/bin [1825] /cygdrive/c/projects/pywbem/.tox$ /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38 AttributeError: 'NoneType' object has no attribute 'group' ERROR: invocation failed (exit code 1) ERROR: InvocationError for command /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38 (exited with code 1) cygwin64_py38 finish: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38 after 4.23 seconds ``` I am setting up a direct invocation of virtualenv in that environment, in order to get the full traceback, and will post that here. 
**Environment** Provide at least: - OS: CygWin64 - ``pip list`` of the host python where ``virtualenv`` is installed: ```console See next comment, below ``` **Output of the virtual environment creation** Make sure to run the creation with `-vvv --with-traceback`: ```console See next comment, below ``` </issue> <code> [start of src/virtualenv/activation/via_template.py] 1 from __future__ import absolute_import, unicode_literals 2 3 import os 4 import re 5 import sys 6 import sysconfig 7 from abc import ABCMeta, abstractmethod 8 9 from six import add_metaclass 10 11 from virtualenv.util.six import ensure_text 12 13 from .activator import Activator 14 15 if sys.version_info >= (3, 7): 16 from importlib.resources import read_binary 17 else: 18 from importlib_resources import read_binary 19 20 21 @add_metaclass(ABCMeta) 22 class ViaTemplateActivator(Activator): 23 @abstractmethod 24 def templates(self): 25 raise NotImplementedError 26 27 def generate(self, creator): 28 dest_folder = creator.bin_dir 29 replacements = self.replacements(creator, dest_folder) 30 generated = self._generate(replacements, self.templates(), dest_folder, creator) 31 if self.flag_prompt is not None: 32 creator.pyenv_cfg["prompt"] = self.flag_prompt 33 return generated 34 35 def replacements(self, creator, dest_folder): 36 current_platform = sysconfig.get_platform() 37 platforms = ["mingw", "cygwin", "msys"] 38 if any(platform in current_platform for platform in platforms): 39 pattern = re.compile("^([A-Za-z]):(.*)") 40 match = pattern.match(str(creator.dest)) 41 virtual_env = "/" + match.group(1).lower() + match.group(2) 42 else: 43 virtual_env = str(creator.dest) 44 return { 45 "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt, 46 "__VIRTUAL_ENV__": ensure_text(virtual_env), 47 "__VIRTUAL_NAME__": creator.env_name, 48 "__BIN_NAME__": ensure_text(str(creator.bin_dir.relative_to(creator.dest))), 49 "__PATH_SEP__": ensure_text(os.pathsep), 50 } 51 52 def _generate(self, replacements, templates, to_folder, creator): 53 generated = [] 54 for template in templates: 55 text = self.instantiate_template(replacements, template, creator) 56 dest = to_folder / self.as_name(template) 57 # use write_bytes to avoid platform specific line normalization (\n -> \r\n) 58 dest.write_bytes(text.encode("utf-8")) 59 generated.append(dest) 60 return generated 61 62 def as_name(self, template): 63 return template.name 64 65 def instantiate_template(self, replacements, template, creator): 66 # read content as binary to avoid platform specific line normalization (\n -> \r\n) 67 binary = read_binary(self.__module__, str(template)) 68 text = binary.decode("utf-8", errors="strict") 69 for key, value in replacements.items(): 70 value = self._repr_unicode(creator, value) 71 text = text.replace(key, value) 72 return text 73 74 @staticmethod 75 def _repr_unicode(creator, value): 76 # by default we just let it be unicode 77 return value 78 [end of src/virtualenv/activation/via_template.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py --- a/src/virtualenv/activation/via_template.py +++ b/src/virtualenv/activation/via_template.py @@ -38,7 +38,10 @@ if any(platform in current_platform for platform in platforms): pattern = re.compile("^([A-Za-z]):(.*)") match = pattern.match(str(creator.dest)) - virtual_env = "/" + match.group(1).lower() + match.group(2) + if match: + virtual_env = "/" + match.group(1).lower() + match.group(2) + else: + virtual_env = str(creator.dest) else: virtual_env = str(creator.dest) return {
{"golden_diff": "diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py\n--- a/src/virtualenv/activation/via_template.py\n+++ b/src/virtualenv/activation/via_template.py\n@@ -38,7 +38,10 @@\n if any(platform in current_platform for platform in platforms):\n pattern = re.compile(\"^([A-Za-z]):(.*)\")\n match = pattern.match(str(creator.dest))\n- virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n+ if match:\n+ virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n+ else:\n+ virtual_env = str(creator.dest)\n else:\n virtual_env = str(creator.dest)\n return {\n", "issue": "AttributeError: 'NoneType' object has no attribute 'group' with virtualenv 20.0.32 on CygWin\n**Issue**\r\nWe are also testing on CygWin (using appveyor), and since this morning, tox fails creating a virtualenv with an AttributeError. Unfortunately, tox does not display the entire traceback, but just the exception.\r\nSince virtualenv 20.0.32 was released just 4h ago, I suspect that to be the culprit.\r\n\r\nFrom https://ci.appveyor.com/project/andy-maier/pywbem/builds/35526352/job/l3k6a2vb39bweqsw#L936:\r\n```\r\nif \"%UNIX_PATH%\"==\"C:\\cygwin64\\bin\" ( bash -c \"which tox && tox -vv -e %TOX_ENV% && echo appveyor.yml: tox rc=$?\" )\r\n/usr/bin/tox\r\nusing tox.ini: /cygdrive/c/projects/pywbem/tox.ini (pid 1822)\r\nusing tox-3.20.0 from /usr/lib/python3.8/site-packages/tox/__init__.py (pid 1822)\r\nskipping sdist step\r\ncygwin64_py38 uses /usr/bin/python3.8.exe\r\ncygwin64_py38 start: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38\r\ncygwin64_py38 cannot reuse: no previous config /cygdrive/c/projects/pywbem/.tox/cygwin64_py38/.tox-config1\r\ncygwin64_py38 create: /cygdrive/c/projects/pywbem/.tox/cygwin64_py38\r\nsetting PATH=/cygdrive/c/projects/pywbem/.tox/cygwin64_py38/bin:/usr/bin:/cygdrive/c/Windows/system32:/cygdrive/c/Windows:/cygdrive/c/ProgramData/chocolatey/bin\r\n[1825] /cygdrive/c/projects/pywbem/.tox$ /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38\r\nAttributeError: 'NoneType' object has no attribute 'group'\r\nERROR: invocation failed (exit code 1)\r\nERROR: InvocationError for command /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38 (exited with code 1)\r\ncygwin64_py38 finish: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38 after 4.23 seconds\r\n```\r\n\r\nI am setting up a direct invocation of virtualenv in that environment, in order to get the full traceback, and will post that here.\r\n\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: CygWin64\r\n- ``pip list`` of the host python where ``virtualenv`` is installed:\r\n ```console\r\n See next comment, below\r\n ```\r\n\r\n**Output of the virtual environment creation**\r\n\r\nMake sure to run the creation with `-vvv --with-traceback`:\r\n\r\n```console\r\nSee next comment, below\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport re\nimport sys\nimport sysconfig\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\nfrom virtualenv.util.six import ensure_text\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_binary\nelse:\n from importlib_resources import read_binary\n\n\n@add_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, 
creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n generated = self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n return generated\n\n def replacements(self, creator, dest_folder):\n current_platform = sysconfig.get_platform()\n platforms = [\"mingw\", \"cygwin\", \"msys\"]\n if any(platform in current_platform for platform in platforms):\n pattern = re.compile(\"^([A-Za-z]):(.*)\")\n match = pattern.match(str(creator.dest))\n virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n else:\n virtual_env = str(creator.dest)\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": ensure_text(virtual_env),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n generated = []\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n # use write_bytes to avoid platform specific line normalization (\\n -> \\r\\n)\n dest.write_bytes(text.encode(\"utf-8\"))\n generated.append(dest)\n return generated\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read content as binary to avoid platform specific line normalization (\\n -> \\r\\n)\n binary = read_binary(self.__module__, str(template))\n text = binary.decode(\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}]}
2,016
176
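The guard added in the diff above is the whole fix: on MSYS2/Cygwin-style layouts the destination can lack a `C:`-style drive prefix, so `re.match` returns `None` and the unconditional `.group()` call raises the `AttributeError` seen in the appveyor log. A minimal standalone sketch of the same guard — the function name and sample paths are illustrative, not virtualenv's API:

```python
import re

_DRIVE = re.compile(r"^([A-Za-z]):(.*)")

def to_posix_style(dest: str) -> str:
    """Rewrite 'C:\\venv' as '/c\\venv'; pass non-drive paths through."""
    match = _DRIVE.match(dest)
    if match:  # only rewrite when a drive letter is actually present
        return "/" + match.group(1).lower() + match.group(2)
    return dest  # fallback: never call .group() on a failed match

print(to_posix_style(r"C:\projects\venv"))  # -> /c\projects\venv
print(to_posix_style("/usr/bin/venv"))      # -> /usr/bin/venv (unchanged)
```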
gh_patches_debug_22280
rasdani/github-patches
git_diff
airctic__icevision-878
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix download_mmdet_configs No need to download the zip file if it exists. This will solve the issue encountered in the Kaggle offline installation. </issue> <code> [start of icevision/models/mmdet/download_configs.py] 1 __all__ = ["download_mmdet_configs"] 2 3 from icevision.imports import * 4 from icevision.utils import * 5 6 VERSION = "v2.10.0" 7 BASE_URL = "https://codeload.github.com/airctic/mmdetection_configs/zip/refs/tags" 8 9 10 def download_mmdet_configs() -> Path: 11 save_dir = get_root_dir() / f"mmdetection_configs" 12 save_dir.mkdir(parents=True, exist_ok=True) 13 14 download_path = save_dir / f"{VERSION}.zip" 15 if not download_path.exists(): 16 logger.info("Downloading mmdet configs") 17 18 download_and_extract(f"{BASE_URL}/{VERSION}", download_path) 19 20 return save_dir / f"mmdetection_configs-{VERSION[1:]}/configs" 21 [end of icevision/models/mmdet/download_configs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/icevision/models/mmdet/download_configs.py b/icevision/models/mmdet/download_configs.py --- a/icevision/models/mmdet/download_configs.py +++ b/icevision/models/mmdet/download_configs.py @@ -9,12 +9,26 @@ def download_mmdet_configs() -> Path: save_dir = get_root_dir() / f"mmdetection_configs" - save_dir.mkdir(parents=True, exist_ok=True) + mmdet_config_path = save_dir / f"mmdetection_configs-{VERSION[1:]}/configs" download_path = save_dir / f"{VERSION}.zip" - if not download_path.exists(): - logger.info("Downloading mmdet configs") - download_and_extract(f"{BASE_URL}/{VERSION}", download_path) + if mmdet_config_path.exists(): + logger.info( + f"The mmdet config folder already exists. No need to downloaded it. Path : {mmdet_config_path}" + ) + elif download_path.exists(): + # The zip file was downloaded by not extracted yet + # Extract zip file + logger.info(f"Extracting the {VERSION}.zip file.") + save_dir = Path(download_path).parent + shutil.unpack_archive(filename=str(download_path), extract_dir=str(save_dir)) + else: + save_dir.mkdir(parents=True, exist_ok=True) - return save_dir / f"mmdetection_configs-{VERSION[1:]}/configs" + download_path = save_dir / f"{VERSION}.zip" + if not download_path.exists(): + logger.info("Downloading mmdet configs") + download_and_extract(f"{BASE_URL}/{VERSION}", download_path) + + return mmdet_config_path
{"golden_diff": "diff --git a/icevision/models/mmdet/download_configs.py b/icevision/models/mmdet/download_configs.py\n--- a/icevision/models/mmdet/download_configs.py\n+++ b/icevision/models/mmdet/download_configs.py\n@@ -9,12 +9,26 @@\n \n def download_mmdet_configs() -> Path:\n save_dir = get_root_dir() / f\"mmdetection_configs\"\n- save_dir.mkdir(parents=True, exist_ok=True)\n \n+ mmdet_config_path = save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n download_path = save_dir / f\"{VERSION}.zip\"\n- if not download_path.exists():\n- logger.info(\"Downloading mmdet configs\")\n \n- download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n+ if mmdet_config_path.exists():\n+ logger.info(\n+ f\"The mmdet config folder already exists. No need to downloaded it. Path : {mmdet_config_path}\"\n+ )\n+ elif download_path.exists():\n+ # The zip file was downloaded by not extracted yet\n+ # Extract zip file\n+ logger.info(f\"Extracting the {VERSION}.zip file.\")\n+ save_dir = Path(download_path).parent\n+ shutil.unpack_archive(filename=str(download_path), extract_dir=str(save_dir))\n+ else:\n+ save_dir.mkdir(parents=True, exist_ok=True)\n \n- return save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n+ download_path = save_dir / f\"{VERSION}.zip\"\n+ if not download_path.exists():\n+ logger.info(\"Downloading mmdet configs\")\n+ download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n+\n+ return mmdet_config_path\n", "issue": "Fix download_mmdet_configs\nNo need to download the zip file if it exists. This will solve the issue encountered in the Kaggle offline installation.\r\n\n", "before_files": [{"content": "__all__ = [\"download_mmdet_configs\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\n\nVERSION = \"v2.10.0\"\nBASE_URL = \"https://codeload.github.com/airctic/mmdetection_configs/zip/refs/tags\"\n\n\ndef download_mmdet_configs() -> Path:\n save_dir = get_root_dir() / f\"mmdetection_configs\"\n save_dir.mkdir(parents=True, exist_ok=True)\n\n download_path = save_dir / f\"{VERSION}.zip\"\n if not download_path.exists():\n logger.info(\"Downloading mmdet configs\")\n\n download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n\n return save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n", "path": "icevision/models/mmdet/download_configs.py"}]}
777
389
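The patch above makes the config download idempotent by distinguishing three states: configs already extracted, zip present but unextracted, and nothing downloaded yet. The same three-way check reduced to the standard library — `urllib.request.urlretrieve` stands in for icevision's own `download_and_extract` helper, so treat this as a sketch of the control flow rather than the project's code:

```python
import shutil
import urllib.request
from pathlib import Path

def ensure_extracted(url: str, zip_path: Path, extracted_dir: Path) -> Path:
    """Return extracted_dir, fetching/unpacking only when actually needed."""
    if extracted_dir.exists():
        return extracted_dir              # already unpacked: works offline
    if not zip_path.exists():
        zip_path.parent.mkdir(parents=True, exist_ok=True)
        urllib.request.urlretrieve(url, zip_path)  # fetch exactly once
    shutil.unpack_archive(str(zip_path), str(zip_path.parent))
    return extracted_dir
```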
gh_patches_debug_28282
rasdani/github-patches
git_diff
getpelican__pelican-1040
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Allow nice URLs on devserver When you publish the website is quite easy to implement nice URLs. For example an nginx configuraiton like this would provide you with nice URLs for your website: ``` index index.php index.html index.htm index.shtml; root /var/www/example.com/public_html; try_files $uri $uri.html $uri/index.html index.html; ``` Now the problem is tha the devserver does not have the save behaviour, and so far I am now aware of any way to configure it to do this. ``` /output/index.html http://localhost:8000 /output/contact.html http://localhost:8000/contact /output/sales/index.html http://localhost:8000/sales /output/sales/index.html http://localhost:8000/sales/ (yes the same) ``` </issue> <code> [start of pelican/server.py] 1 from __future__ import print_function 2 import sys 3 try: 4 import SimpleHTTPServer as srvmod 5 except ImportError: 6 import http.server as srvmod # NOQA 7 8 try: 9 import SocketServer as socketserver 10 except ImportError: 11 import socketserver # NOQA 12 13 PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000 14 15 Handler = srvmod.SimpleHTTPRequestHandler 16 17 try: 18 httpd = socketserver.TCPServer(("", PORT), Handler) 19 except OSError as e: 20 print("Could not listen on port", PORT) 21 sys.exit(getattr(e, 'exitcode', 1)) 22 23 24 print("serving at port", PORT) 25 try: 26 httpd.serve_forever() 27 except KeyboardInterrupt as e: 28 print("shutting down server") 29 httpd.socket.close() 30 [end of pelican/server.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pelican/server.py b/pelican/server.py --- a/pelican/server.py +++ b/pelican/server.py @@ -1,5 +1,7 @@ from __future__ import print_function +import os import sys +import logging try: import SimpleHTTPServer as srvmod except ImportError: @@ -11,19 +13,36 @@ import socketserver # NOQA PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000 +SUFFIXES = ['','.html','/index.html'] -Handler = srvmod.SimpleHTTPRequestHandler +class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler): + def do_GET(self): + # we are trying to detect the file by having a fallback mechanism + r = None + for suffix in SUFFIXES: + if not hasattr(self,'original_path'): + self.original_path = self.path + self.path = self.original_path + suffix + path = self.translate_path(self.path) + if os.path.exists(path): + r = srvmod.SimpleHTTPRequestHandler.do_GET(self) + if r is not None: + break + logging.warning("Unable to find %s file." % self.path) + return r + +Handler = ComplexHTTPRequestHandler try: httpd = socketserver.TCPServer(("", PORT), Handler) except OSError as e: - print("Could not listen on port", PORT) + logging.error("Could not listen on port %s" % PORT) sys.exit(getattr(e, 'exitcode', 1)) -print("serving at port", PORT) +logging.info("serving at port %s" % PORT) try: httpd.serve_forever() except KeyboardInterrupt as e: - print("shutting down server") - httpd.socket.close() + logging.info("shutting down server") + httpd.socket.close() \ No newline at end of file
{"golden_diff": "diff --git a/pelican/server.py b/pelican/server.py\n--- a/pelican/server.py\n+++ b/pelican/server.py\n@@ -1,5 +1,7 @@\n from __future__ import print_function\n+import os\n import sys\n+import logging\n try:\n import SimpleHTTPServer as srvmod\n except ImportError:\n@@ -11,19 +13,36 @@\n import socketserver # NOQA\n \n PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000\n+SUFFIXES = ['','.html','/index.html']\n \n-Handler = srvmod.SimpleHTTPRequestHandler\n+class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):\n+ def do_GET(self):\n+ # we are trying to detect the file by having a fallback mechanism\n+ r = None\n+ for suffix in SUFFIXES:\n+ if not hasattr(self,'original_path'):\n+ self.original_path = self.path\n+ self.path = self.original_path + suffix\n+ path = self.translate_path(self.path)\n+ if os.path.exists(path):\n+ r = srvmod.SimpleHTTPRequestHandler.do_GET(self)\n+ if r is not None:\n+ break\n+ logging.warning(\"Unable to find %s file.\" % self.path)\n+ return r\n+\n+Handler = ComplexHTTPRequestHandler\n \n try:\n httpd = socketserver.TCPServer((\"\", PORT), Handler)\n except OSError as e:\n- print(\"Could not listen on port\", PORT)\n+ logging.error(\"Could not listen on port %s\" % PORT)\n sys.exit(getattr(e, 'exitcode', 1))\n \n \n-print(\"serving at port\", PORT)\n+logging.info(\"serving at port %s\" % PORT)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt as e:\n- print(\"shutting down server\")\n- httpd.socket.close()\n+ logging.info(\"shutting down server\")\n+ httpd.socket.close()\n\\ No newline at end of file\n", "issue": "Allow nice URLs on devserver\nWhen you publish the website is quite easy to implement nice URLs. For example an nginx configuraiton like this would provide you with nice URLs for your website:\n\n```\nindex index.php index.html index.htm index.shtml;\nroot /var/www/example.com/public_html;\ntry_files $uri $uri.html $uri/index.html index.html;\n```\n\nNow the problem is tha the devserver does not have the save behaviour, and so far I am now aware of any way to configure it to do this.\n\n```\n/output/index.html http://localhost:8000\n/output/contact.html http://localhost:8000/contact\n/output/sales/index.html http://localhost:8000/sales\n/output/sales/index.html http://localhost:8000/sales/ (yes the same) \n```\n\n", "before_files": [{"content": "from __future__ import print_function\nimport sys\ntry:\n import SimpleHTTPServer as srvmod\nexcept ImportError:\n import http.server as srvmod # NOQA\n\ntry:\n import SocketServer as socketserver\nexcept ImportError:\n import socketserver # NOQA\n\nPORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000\n\nHandler = srvmod.SimpleHTTPRequestHandler\n\ntry:\n httpd = socketserver.TCPServer((\"\", PORT), Handler)\nexcept OSError as e:\n print(\"Could not listen on port\", PORT)\n sys.exit(getattr(e, 'exitcode', 1))\n\n\nprint(\"serving at port\", PORT)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt as e:\n print(\"shutting down server\")\n httpd.socket.close()\n", "path": "pelican/server.py"}]}
947
441
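The handler in the diff above reproduces nginx's `try_files` by retrying each GET with the suffixes `''`, `'.html'`, and `'/index.html'` until one maps to an existing file. A Python-3-only reduction of the same idea (the patch itself keeps the 2/3-compatible `srvmod` alias; query strings are ignored here for brevity):

```python
import http.server
import os
import socketserver

SUFFIXES = ["", ".html", "/index.html"]

class TryFilesHandler(http.server.SimpleHTTPRequestHandler):
    def do_GET(self):
        original = self.path
        for suffix in SUFFIXES:
            self.path = original + suffix
            if os.path.exists(self.translate_path(self.path)):
                return super().do_GET()  # first suffix that exists wins
        self.path = original
        return super().do_GET()          # fall through to the normal 404

if __name__ == "__main__":
    with socketserver.TCPServer(("", 8000), TryFilesHandler) as httpd:
        httpd.serve_forever()
```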
gh_patches_debug_25372
rasdani/github-patches
git_diff
facebookresearch__CompilerGym-739
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Logical Failure when combing TimeLimit Wrapper with IterateOverBenchmarks ## 🐛 Bug If an environment is first wrapper with TimeLimit before IterateOverBenchmarks, it will not return "done" as True. ## To Reproduce Steps to reproduce the behavior: ``` env = TimeLimit(env, step_limit) env = CycleOverBenchmarks(env, benchmarks) _, done, _, _ = env.reset() while not done: _, done, _, _ = env.step(0) ``` This will not finish. However, if the TimeLimit happens after the Cycle, it has normal behavior. ## Additional context Assign it to me, I will fix it when I got time. </issue> <code> [start of compiler_gym/wrappers/time_limit.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. 2 # 3 # This source code is licensed under the MIT license found in the 4 # LICENSE file in the root directory of this source tree. 5 from typing import Optional 6 7 from compiler_gym.envs import CompilerEnv 8 from compiler_gym.util.gym_type_hints import ActionType 9 from compiler_gym.wrappers.core import CompilerEnvWrapper 10 11 12 class TimeLimit(CompilerEnvWrapper): 13 """A step-limited wrapper that is compatible with CompilerGym. 14 15 Example usage: 16 17 >>> env = TimeLimit(env, max_episode_steps=3) 18 >>> env.reset() 19 >>> _, _, done, _ = env.step(0) 20 >>> _, _, done, _ = env.step(0) 21 >>> _, _, done, _ = env.step(0) 22 >>> done 23 True 24 """ 25 26 def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None): 27 super().__init__(env=env) 28 if max_episode_steps is None and self.env.spec is not None: 29 max_episode_steps = env.spec.max_episode_steps 30 if self.env.spec is not None: 31 self.env.spec.max_episode_steps = max_episode_steps 32 self._max_episode_steps = max_episode_steps 33 self._elapsed_steps = None 34 35 def step(self, action: ActionType, **kwargs): 36 assert ( 37 self._elapsed_steps is not None 38 ), "Cannot call env.step() before calling reset()" 39 observation, reward, done, info = self.env.step(action, **kwargs) 40 self._elapsed_steps += 1 41 if self._elapsed_steps >= self._max_episode_steps: 42 info["TimeLimit.truncated"] = not done 43 done = True 44 return observation, reward, done, info 45 46 def reset(self, **kwargs): 47 self._elapsed_steps = 0 48 return self.env.reset(**kwargs) 49 50 def fork(self) -> "TimeLimit": 51 """Fork the wrapped environment. 52 53 The time limit state of the forked environment is the same as the source 54 state. 55 """ 56 fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps) 57 fkd._elapsed_steps = self._elapsed_steps  # pylint: disable=protected-access 58 return fkd 59 [end of compiler_gym/wrappers/time_limit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/compiler_gym/wrappers/time_limit.py b/compiler_gym/wrappers/time_limit.py --- a/compiler_gym/wrappers/time_limit.py +++ b/compiler_gym/wrappers/time_limit.py @@ -2,7 +2,7 @@ # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import Optional +from typing import Iterable, Optional from compiler_gym.envs import CompilerEnv from compiler_gym.util.gym_type_hints import ActionType @@ -32,12 +32,13 @@ self._max_episode_steps = max_episode_steps self._elapsed_steps = None - def step(self, action: ActionType, **kwargs): + def multistep(self, actions: Iterable[ActionType], **kwargs): + actions = list(actions) assert ( self._elapsed_steps is not None ), "Cannot call env.step() before calling reset()" - observation, reward, done, info = self.env.step(action, **kwargs) - self._elapsed_steps += 1 + observation, reward, done, info = self.env.multistep(actions, **kwargs) + self._elapsed_steps += len(actions) if self._elapsed_steps >= self._max_episode_steps: info["TimeLimit.truncated"] = not done done = True
{"golden_diff": "diff --git a/compiler_gym/wrappers/time_limit.py b/compiler_gym/wrappers/time_limit.py\n--- a/compiler_gym/wrappers/time_limit.py\n+++ b/compiler_gym/wrappers/time_limit.py\n@@ -2,7 +2,7 @@\n #\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n-from typing import Optional\n+from typing import Iterable, Optional\n \n from compiler_gym.envs import CompilerEnv\n from compiler_gym.util.gym_type_hints import ActionType\n@@ -32,12 +32,13 @@\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = None\n \n- def step(self, action: ActionType, **kwargs):\n+ def multistep(self, actions: Iterable[ActionType], **kwargs):\n+ actions = list(actions)\n assert (\n self._elapsed_steps is not None\n ), \"Cannot call env.step() before calling reset()\"\n- observation, reward, done, info = self.env.step(action, **kwargs)\n- self._elapsed_steps += 1\n+ observation, reward, done, info = self.env.multistep(actions, **kwargs)\n+ self._elapsed_steps += len(actions)\n if self._elapsed_steps >= self._max_episode_steps:\n info[\"TimeLimit.truncated\"] = not done\n done = True\n", "issue": "Logical Failure when combing TimeLimit Wrapper with IterateOverBenchmarks\n## \ud83d\udc1b Bug\r\n\r\nIf an environment is first wrapper with TimeLimit before IterateOverBenchmarks, it will not return \"done\" as True. \r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n```\r\nenv = TimeLimit(env, step_limit) \r\nenv = CycleOverBenchmarks(env, benchmarks) \r\n_, done, _, _ = env.reset()\r\nwhile not done:\r\n _, done, _, _ = env.step(0) \r\n```\r\nThis will not finish. However, if the TimeLimit happens after the Cycle, it has normal behavior. \r\n\r\n\r\n## Additional context\r\n\r\nAssign it to me, I will fix it when I got time. \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom typing import Optional\n\nfrom compiler_gym.envs import CompilerEnv\nfrom compiler_gym.util.gym_type_hints import ActionType\nfrom compiler_gym.wrappers.core import CompilerEnvWrapper\n\n\nclass TimeLimit(CompilerEnvWrapper):\n \"\"\"A step-limited wrapper that is compatible with CompilerGym.\n\n Example usage:\n\n >>> env = TimeLimit(env, max_episode_steps=3)\n >>> env.reset()\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> done\n True\n \"\"\"\n\n def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):\n super().__init__(env=env)\n if max_episode_steps is None and self.env.spec is not None:\n max_episode_steps = env.spec.max_episode_steps\n if self.env.spec is not None:\n self.env.spec.max_episode_steps = max_episode_steps\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = None\n\n def step(self, action: ActionType, **kwargs):\n assert (\n self._elapsed_steps is not None\n ), \"Cannot call env.step() before calling reset()\"\n observation, reward, done, info = self.env.step(action, **kwargs)\n self._elapsed_steps += 1\n if self._elapsed_steps >= self._max_episode_steps:\n info[\"TimeLimit.truncated\"] = not done\n done = True\n return observation, reward, done, info\n\n def reset(self, **kwargs):\n self._elapsed_steps = 0\n return self.env.reset(**kwargs)\n\n def fork(self) -> \"TimeLimit\":\n \"\"\"Fork the wrapped environment.\n\n The time limit state of the forked environment is the same as the source\n state.\n \"\"\"\n fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)\n fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access\n return fkd\n", "path": "compiler_gym/wrappers/time_limit.py"}]}
1,301
305
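The diff resolves the ordering bug by moving the bookkeeping into `multistep`, the primitive that batched or benchmark-cycling wrappers call, and by counting `len(actions)` instead of a fixed 1. A toy reduction of that counting rule — these classes only mimic the wrapper shape and are not the real `compiler_gym` API:

```python
class _StubEnv:
    """Never-ending fake environment, just enough to exercise the wrapper."""
    def reset(self):
        return 0
    def multistep(self, actions):
        return 0, 0.0, False, {}

class TimeLimit:
    def __init__(self, env, max_steps):
        self.env, self.max_steps, self.elapsed = env, max_steps, 0
    def reset(self):
        self.elapsed = 0
        return self.env.reset()
    def multistep(self, actions):
        obs, reward, done, info = self.env.multistep(actions)
        self.elapsed += len(actions)       # count every action, not just 1
        if self.elapsed >= self.max_steps:
            info["TimeLimit.truncated"] = not done
            done = True
        return obs, reward, done, info
    def step(self, action):                # a single step is a batch of one
        return self.multistep([action])

env = TimeLimit(_StubEnv(), max_steps=3)
env.reset()
_, _, done, _ = env.multistep([0, 0, 0])   # batched steps still truncate
assert done
```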
gh_patches_debug_51262
rasdani/github-patches
git_diff
conda__conda-5426
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Launching navigator via prompt warnings appear _From @RidaZubair on May 24, 2017 9:47_ **OS:** Windows **Anaconda: 4.4.0** **Actual:** On launching navigator via prompt following warning appears on prompt ![2](https://cloud.githubusercontent.com/assets/27444898/26396930/175ef622-408e-11e7-8e75-e9c2218e15de.png) _Copied from original issue: ContinuumIO/navigator#1189_ </issue> <code> [start of conda/common/platform.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 from collections import OrderedDict 5 from genericpath import exists 6 from glob import glob 7 from logging import getLogger 8 import sys 9 10 from .compat import iteritems, on_win 11 from .._vendor.auxlib.decorators import memoize 12 13 log = getLogger(__name__) 14 15 16 def is_admin_on_windows():  # pragma: unix no cover 17 # http://stackoverflow.com/a/1026626/2127762 18 if not on_win:  # pragma: no cover 19 return False 20 try: 21 from ctypes import windll 22 return windll.shell32.IsUserAnAdmin()() != 0 23 except ImportError as e: 24 log.debug('%r', e) 25 return 'unknown' 26 except Exception as e: 27 log.warn('%r', e) 28 return 'unknown' 29 30 31 @memoize 32 def linux_get_libc_version(): 33 """ 34 If on linux, returns (libc_family, version), otherwise (None, None) 35 """ 36 37 if not sys.platform.startswith('linux'): 38 return None, None 39 40 from os import confstr, confstr_names, readlink 41 42 # Python 2.7 does not have either of these keys in confstr_names, so provide 43 # hard-coded defaults and assert if the key is in confstr_names but differs. 44 # These are defined by POSIX anyway so should never change. 45 confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2), 46 ('CS_GNU_LIBPTHREAD_VERSION', 3)]) 47 48 val = None 49 for k, v in iteritems(confstr_names_fallback): 50 assert k not in confstr_names or confstr_names[k] == v, ( 51 "confstr_names_fallback for %s is %s yet in confstr_names it is %s" 52 "" % (k, confstr_names_fallback[k], confstr_names[k]) 53 ) 54 try: 55 val = str(confstr(v)) 56 except: 57 pass 58 else: 59 if val: 60 break 61 62 if not val: 63 # Weird, play it safe and assume glibc 2.5 64 family, version = 'glibc', '2.5' 65 log.warning("Failed to detect libc family and version, assuming %s/%s", family, version) 66 return family, version 67 family, version = val.split(' ') 68 69 # NPTL is just the name of the threading library, even though the 70 # version refers to that of uClibc. readlink() can help to try to 71 # figure out a better name instead. 72 if family == 'NPTL': 73 clibs = glob('/lib/libc.so*') 74 for clib in clibs: 75 clib = readlink(clib) 76 if exists(clib): 77 if clib.startswith('libuClibc'): 78 if version.startswith('0.'): 79 family = 'uClibc' 80 else: 81 family = 'uClibc-ng' 82 return family, version 83 # This could be some other C library; it is unlikely though. 84 family = 'uClibc' 85 log.warning("Failed to detect non-glibc family, assuming %s (%s)", family, version) 86 return family, version 87 return family, version 88 [end of conda/common/platform.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conda/common/platform.py b/conda/common/platform.py --- a/conda/common/platform.py +++ b/conda/common/platform.py @@ -19,12 +19,12 @@ return False try: from ctypes import windll - return windll.shell32.IsUserAnAdmin()() != 0 + return windll.shell32.IsUserAnAdmin() != 0 except ImportError as e: log.debug('%r', e) return 'unknown' except Exception as e: - log.warn('%r', e) + log.info('%r', e) return 'unknown'
{"golden_diff": "diff --git a/conda/common/platform.py b/conda/common/platform.py\n--- a/conda/common/platform.py\n+++ b/conda/common/platform.py\n@@ -19,12 +19,12 @@\n return False\n try:\n from ctypes import windll\n- return windll.shell32.IsUserAnAdmin()() != 0\n+ return windll.shell32.IsUserAnAdmin() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n- log.warn('%r', e)\n+ log.info('%r', e)\n return 'unknown'\n", "issue": "Launching navigator via prompt warnings appear\n_From @RidaZubair on May 24, 2017 9:47_\n\n**OS:** Windows\r\n**Anaconda: 4.4.0**\r\n\r\n**Actual:**\r\nOn launching navigator via prompt following warning appears on prompt\r\n![2](https://cloud.githubusercontent.com/assets/27444898/26396930/175ef622-408e-11e7-8e75-e9c2218e15de.png)\r\n\r\n\n\n_Copied from original issue: ContinuumIO/navigator#1189_\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom genericpath import exists\nfrom glob import glob\nfrom logging import getLogger\nimport sys\n\nfrom .compat import iteritems, on_win\nfrom .._vendor.auxlib.decorators import memoize\n\nlog = getLogger(__name__)\n\n\ndef is_admin_on_windows(): # pragma: unix no cover\n # http://stackoverflow.com/a/1026626/2127762\n if not on_win: # pragma: no cover\n return False\n try:\n from ctypes import windll\n return windll.shell32.IsUserAnAdmin()() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n log.warn('%r', e)\n return 'unknown'\n\n\n@memoize\ndef linux_get_libc_version():\n \"\"\"\n If on linux, returns (libc_family, version), otherwise (None, None)\n \"\"\"\n\n if not sys.platform.startswith('linux'):\n return None, None\n\n from os import confstr, confstr_names, readlink\n\n # Python 2.7 does not have either of these keys in confstr_names, so provide\n # hard-coded defaults and assert if the key is in confstr_names but differs.\n # These are defined by POSIX anyway so should never change.\n confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),\n ('CS_GNU_LIBPTHREAD_VERSION', 3)])\n\n val = None\n for k, v in iteritems(confstr_names_fallback):\n assert k not in confstr_names or confstr_names[k] == v, (\n \"confstr_names_fallback for %s is %s yet in confstr_names it is %s\"\n \"\" % (k, confstr_names_fallback[k], confstr_names[k])\n )\n try:\n val = str(confstr(v))\n except:\n pass\n else:\n if val:\n break\n\n if not val:\n # Weird, play it safe and assume glibc 2.5\n family, version = 'glibc', '2.5'\n log.warning(\"Failed to detect libc family and version, assuming %s/%s\", family, version)\n return family, version\n family, version = val.split(' ')\n\n # NPTL is just the name of the threading library, even though the\n # version refers to that of uClibc. readlink() can help to try to\n # figure out a better name instead.\n if family == 'NPTL':\n clibs = glob('/lib/libc.so*')\n for clib in clibs:\n clib = readlink(clib)\n if exists(clib):\n if clib.startswith('libuClibc'):\n if version.startswith('0.'):\n family = 'uClibc'\n else:\n family = 'uClibc-ng'\n return family, version\n # This could be some other C library; it is unlikely though.\n family = 'uClibc'\n log.warning(\"Failed to detect non-glibc family, assuming %s (%s)\", family, version)\n return family, version\n return family, version\n", "path": "conda/common/platform.py"}]}
1,573
144
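The diff above deletes one stray pair of parentheses: `windll.shell32.IsUserAnAdmin` is already the callable, so `IsUserAnAdmin()()` invoked the returned integer, raised `TypeError`, and the `except`/`log.warn` branch printed the warning on every Navigator launch (the patch also downgrades the message to `log.info`). A guarded sketch of the corrected check, standalone rather than conda's module:

```python
import sys

def is_admin_on_windows():
    """True when running elevated on Windows; False/'unknown' otherwise."""
    if sys.platform != "win32":
        return False
    try:
        from ctypes import windll
        # IsUserAnAdmin is the function itself: call it exactly once.
        return windll.shell32.IsUserAnAdmin() != 0
    except Exception:
        return "unknown"
```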
gh_patches_debug_2668
rasdani/github-patches
git_diff
facebookresearch__ParlAI-1821
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Obselete download link for CLEVR Dataset Apparently, the current link to CLEVR in the source code is "https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip" that returns the message "All access to this object has been disabled" When I try to execute the following line of code `!python ~/ParlAI/examples/display_data.py -t clevr` I obtain ``` [creating task(s): clevr] [building data: /root/ParlAI/data/CLEVR] [ downloading: https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip to /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip ] Downloading CLEVR_v1.0.zip: 0.00B [00:00, ?B/s] unpacking CLEVR_v1.0.zip Traceback (most recent call last): File "/root/ParlAI/parlai/core/agents.py", line 819, in _create_task_agents task_agents = my_module.create_agent(opt) AttributeError: module 'parlai.tasks.clevr.agents' has no attribute 'create_agent' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/root/ParlAI/examples/display_data.py", line 22, in <module> display_data(opt) File "/root/ParlAI/parlai/scripts/display_data.py", line 42, in display_data world = create_task(opt, agent) File "/root/ParlAI/parlai/core/worlds.py", line 1151, in create_task world = create_task_world(opt, user_agents, default_world=default_world) File "/root/ParlAI/parlai/core/worlds.py", line 1108, in create_task_world opt, user_agents, default_world=default_world File "/root/ParlAI/parlai/core/worlds.py", line 1068, in _get_task_world task_agents = _create_task_agents(opt) File "/root/ParlAI/parlai/core/agents.py", line 822, in _create_task_agents return create_task_agent_from_taskname(opt) File "/root/ParlAI/parlai/core/agents.py", line 776, in create_task_agent_from_taskname task_agents = teacher_class(opt) File "/root/ParlAI/parlai/tasks/clevr/agents.py", line 45, in __init__ data_path, self.images_path = _path(opt) File "/root/ParlAI/parlai/tasks/clevr/agents.py", line 15, in _path build(opt) File "/root/ParlAI/parlai/tasks/clevr/build.py", line 28, in build build_data.untar(dpath, fname) File "/root/ParlAI/parlai/core/build_data.py", line 180, in untar shutil.unpack_archive(fullpath, path) File "/usr/lib/python3.6/shutil.py", line 983, in unpack_archive func(filename, extract_dir, **kwargs) File "/usr/lib/python3.6/shutil.py", line 883, in _unpack_zipfile raise ReadError("%s is not a zip file" % filename) shutil.ReadError: /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip is not a zip file ``` I found the following working link on CLEVR webpage (https://cs.stanford.edu/people/jcjohns/clevr/): https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip </issue> <code> [start of parlai/tasks/clevr/build.py] 1 #!/usr/bin/env python3 2 3 # Copyright (c) Facebook, Inc. and its affiliates. 4 # This source code is licensed under the MIT license found in the 5 # LICENSE file in the root directory of this source tree. 6 # Download and build the data if it does not exist. 7 8 import parlai.core.build_data as build_data 9 import os 10 11 12 def build(opt): 13 dpath = os.path.join(opt['datapath'], 'CLEVR') 14 version = 'v1.0' 15 16 if not build_data.built(dpath, version_string=version): 17 print('[building data: ' + dpath + ']') 18 # An older version exists, so remove these outdated files. 19 if build_data.built(dpath): 20 build_data.remove_dir(dpath) 21 build_data.make_dir(dpath) 22 23 # Download the data. 24 fname = 'CLEVR_v1.0.zip' 25 url = 'https://s3-us-west-1.amazonaws.com/clevr/' 26 27 build_data.download(url + fname, dpath, fname) 28 build_data.untar(dpath, fname) 29 30 # Mark the data as built. 31 build_data.mark_done(dpath, version_string=version) 32 [end of parlai/tasks/clevr/build.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/parlai/tasks/clevr/build.py b/parlai/tasks/clevr/build.py --- a/parlai/tasks/clevr/build.py +++ b/parlai/tasks/clevr/build.py @@ -22,7 +22,7 @@ # Download the data. fname = 'CLEVR_v1.0.zip' - url = 'https://s3-us-west-1.amazonaws.com/clevr/' + url = 'https://dl.fbaipublicfiles.com/clevr/' build_data.download(url + fname, dpath, fname) build_data.untar(dpath, fname)
{"golden_diff": "diff --git a/parlai/tasks/clevr/build.py b/parlai/tasks/clevr/build.py\n--- a/parlai/tasks/clevr/build.py\n+++ b/parlai/tasks/clevr/build.py\n@@ -22,7 +22,7 @@\n \n # Download the data.\n fname = 'CLEVR_v1.0.zip'\n- url = 'https://s3-us-west-1.amazonaws.com/clevr/'\n+ url = 'https://dl.fbaipublicfiles.com/clevr/'\n \n build_data.download(url + fname, dpath, fname)\n build_data.untar(dpath, fname)\n", "issue": "Obselete download link for CLEVR Dataset\nApparently, the current link to CLEVR in the source code is \"https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip\" that returns the message \"All access to this object has been disabled\"\r\n\r\nWhen I try to execute the following line of code\r\n\r\n`!python ~/ParlAI/examples/display_data.py -t clevr`\r\n\r\nI obtain\r\n\r\n```\r\n[creating task(s): clevr]\r\n[building data: /root/ParlAI/data/CLEVR]\r\n[ downloading: https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip to /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip ]\r\nDownloading CLEVR_v1.0.zip: 0.00B [00:00, ?B/s]\r\nunpacking CLEVR_v1.0.zip\r\nTraceback (most recent call last):\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 819, in _create_task_agents\r\n task_agents = my_module.create_agent(opt)\r\nAttributeError: module 'parlai.tasks.clevr.agents' has no attribute 'create_agent'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/root/ParlAI/examples/display_data.py\", line 22, in <module>\r\n display_data(opt)\r\n File \"/root/ParlAI/parlai/scripts/display_data.py\", line 42, in display_data\r\n world = create_task(opt, agent)\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 1151, in create_task\r\n world = create_task_world(opt, user_agents, default_world=default_world)\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 1108, in create_task_world\r\n opt, user_agents, default_world=default_world\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 1068, in _get_task_world\r\n task_agents = _create_task_agents(opt)\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 822, in _create_task_agents\r\n return create_task_agent_from_taskname(opt)\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 776, in create_task_agent_from_taskname\r\n task_agents = teacher_class(opt)\r\n File \"/root/ParlAI/parlai/tasks/clevr/agents.py\", line 45, in __init__\r\n data_path, self.images_path = _path(opt)\r\n File \"/root/ParlAI/parlai/tasks/clevr/agents.py\", line 15, in _path\r\n build(opt)\r\n File \"/root/ParlAI/parlai/tasks/clevr/build.py\", line 28, in build\r\n build_data.untar(dpath, fname)\r\n File \"/root/ParlAI/parlai/core/build_data.py\", line 180, in untar\r\n shutil.unpack_archive(fullpath, path)\r\n File \"/usr/lib/python3.6/shutil.py\", line 983, in unpack_archive\r\n func(filename, extract_dir, **kwargs)\r\n File \"/usr/lib/python3.6/shutil.py\", line 883, in _unpack_zipfile\r\n raise ReadError(\"%s is not a zip file\" % filename)\r\nshutil.ReadError: /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip is not a zip file\r\n```\r\n\r\nI found the following working link on CLEVR webpage (https://cs.stanford.edu/people/jcjohns/clevr/):\r\n\r\nhttps://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n# Download and build the data if it does not exist.\n\nimport parlai.core.build_data as build_data\nimport os\n\n\ndef build(opt):\n dpath = os.path.join(opt['datapath'], 'CLEVR')\n version = 'v1.0'\n\n if not build_data.built(dpath, version_string=version):\n print('[building data: ' + dpath + ']')\n # An older version exists, so remove these outdated files.\n if build_data.built(dpath):\n build_data.remove_dir(dpath)\n build_data.make_dir(dpath)\n\n # Download the data.\n fname = 'CLEVR_v1.0.zip'\n url = 'https://s3-us-west-1.amazonaws.com/clevr/'\n\n build_data.download(url + fname, dpath, fname)\n build_data.untar(dpath, fname)\n\n # Mark the data as built.\n build_data.mark_done(dpath, version_string=version)\n", "path": "parlai/tasks/clevr/build.py"}]}
1,708
146
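The traceback in the record shows the second-order symptom of a dead link: the S3 error body was still saved as `CLEVR_v1.0.zip`, so the failure only surfaced later as `shutil.ReadError`. Besides swapping in the `dl.fbaipublicfiles.com` URL as the diff does, validating the archive before unpacking would fail fast with a clearer message — an extra guard sketched here, not part of the actual patch:

```python
import shutil
import zipfile
from pathlib import Path

def safe_unpack(archive: Path, dest: Path) -> None:
    """Unpack only when the file really is a zip; otherwise fail loudly."""
    if not zipfile.is_zipfile(archive):
        # Dead links often yield an HTML/XML error page saved under .zip.
        raise RuntimeError(f"{archive} is not a zip file; re-download it")
    shutil.unpack_archive(str(archive), str(dest))
```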
gh_patches_debug_33339
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-2111
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> New numpy hook for intel mkl libraries Hello - anaconda and winpython build numpy against Intel's mkl libraries. Building someone that uses numpy will need to have those libraries. Here is a winpython hook: http://stackoverflow.com/a/35853001 and I adapted that for anaconda & python3: https://github.com/maqifrnswa/scimpy/blob/master/pyinstaller-hooks/hook-numpy.py </issue> <code> [start of PyInstaller/hooks/hook-numpy.core.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2013-2016, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 # On Windows, numpy depends on a set of dynamically-detemined DLLs, which means 10 # that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969 11 # for more information. The typical error message: ``Intel MKL FATAL ERROR: 12 # Cannot load mkl_intel_thread.dll.`` 13 # 14 # So, include them manually. 15 import os 16 import os.path 17 from PyInstaller.utils.hooks import get_package_paths 18 19 pkg_base, pkg_dir = get_package_paths('numpy.core') 20 # Walk through all files in ``numpy.core``, looking for DLLs. 21 datas = [] 22 for f in os.listdir(pkg_dir): 23 extension = os.path.splitext(f)[1] 24 if extension == '.dll': 25 # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '') 26 source = os.path.join(pkg_dir, f) 27 datas.append((source, '')) 28 [end of PyInstaller/hooks/hook-numpy.core.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/hook-numpy.core.py b/PyInstaller/hooks/hook-numpy.core.py --- a/PyInstaller/hooks/hook-numpy.core.py +++ b/PyInstaller/hooks/hook-numpy.core.py @@ -6,22 +6,41 @@ # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- -# On Windows, numpy depends on a set of dynamically-detemined DLLs, which means -# that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969 -# for more information. The typical error message: ``Intel MKL FATAL ERROR: -# Cannot load mkl_intel_thread.dll.`` +# If numpy is built with MKL support it depends on a set of libraries loaded +# at runtime. Since PyInstaller's static analysis can't find them they must be +# included manually. # -# So, include them manually. +# See +# https://github.com/pyinstaller/pyinstaller/issues/1881 +# https://github.com/pyinstaller/pyinstaller/issues/1969 +# for more information import os import os.path +import re from PyInstaller.utils.hooks import get_package_paths +from PyInstaller import log as logging +from PyInstaller import compat +binaries = [] + +# look for libraries in numpy package path pkg_base, pkg_dir = get_package_paths('numpy.core') -# Walk through all files in ``numpy.core``, looking for DLLs. -datas = [] -for f in os.listdir(pkg_dir): - extension = os.path.splitext(f)[1] - if extension == '.dll': - # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '') - source = os.path.join(pkg_dir, f) - datas.append((source, '')) +re_anylib = re.compile(r'\w+\.(?:dll|so)', re.IGNORECASE) +dlls_pkg = [f for f in os.listdir(pkg_dir) if re_anylib.match(f)] +binaries += [(os.path.join(pkg_dir, f), '') for f in dlls_pkg] + +# look for MKL libraries in pythons lib directory +# TODO: check numpy.__config__ if numpy is actually depending on MKL +# TODO: determine which directories are searched by the os linker +if compat.is_win: + lib_dir = os.path.join(compat.base_prefix, "Library", "bin") +else: + lib_dir = os.path.join(compat.base_prefix, "lib") +if os.path.isdir(lib_dir): + re_mkllib = re.compile(r'^(?:lib)?mkl\w+\.(?:dll|so)', re.IGNORECASE) + dlls_mkl = [f for f in os.listdir(lib_dir) if re_mkllib.match(f)] + if dlls_mkl: + logger = logging.getLogger(__name__) + logger.info("MKL libraries found when importing numpy. Adding MKL to binaries") + binaries += [(os.path.join(lib_dir, f), '') for f in dlls_mkl] +
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-numpy.core.py b/PyInstaller/hooks/hook-numpy.core.py\n--- a/PyInstaller/hooks/hook-numpy.core.py\n+++ b/PyInstaller/hooks/hook-numpy.core.py\n@@ -6,22 +6,41 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n-# On Windows, numpy depends on a set of dynamically-detemined DLLs, which means\n-# that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969\n-# for more information. The typical error message: ``Intel MKL FATAL ERROR:\n-# Cannot load mkl_intel_thread.dll.``\n+# If numpy is built with MKL support it depends on a set of libraries loaded\n+# at runtime. Since PyInstaller's static analysis can't find them they must be\n+# included manually.\n #\n-# So, include them manually.\n+# See\n+# https://github.com/pyinstaller/pyinstaller/issues/1881\n+# https://github.com/pyinstaller/pyinstaller/issues/1969\n+# for more information\n import os\n import os.path\n+import re\n from PyInstaller.utils.hooks import get_package_paths\n+from PyInstaller import log as logging \n+from PyInstaller import compat\n \n+binaries = []\n+\n+# look for libraries in numpy package path\n pkg_base, pkg_dir = get_package_paths('numpy.core')\n-# Walk through all files in ``numpy.core``, looking for DLLs.\n-datas = []\n-for f in os.listdir(pkg_dir):\n- extension = os.path.splitext(f)[1]\n- if extension == '.dll':\n- # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '')\n- source = os.path.join(pkg_dir, f)\n- datas.append((source, ''))\n+re_anylib = re.compile(r'\\w+\\.(?:dll|so)', re.IGNORECASE)\n+dlls_pkg = [f for f in os.listdir(pkg_dir) if re_anylib.match(f)]\n+binaries += [(os.path.join(pkg_dir, f), '') for f in dlls_pkg]\n+\n+# look for MKL libraries in pythons lib directory\n+# TODO: check numpy.__config__ if numpy is actually depending on MKL\n+# TODO: determine which directories are searched by the os linker\n+if compat.is_win:\n+ lib_dir = os.path.join(compat.base_prefix, \"Library\", \"bin\")\n+else:\n+ lib_dir = os.path.join(compat.base_prefix, \"lib\")\n+if os.path.isdir(lib_dir):\n+ re_mkllib = re.compile(r'^(?:lib)?mkl\\w+\\.(?:dll|so)', re.IGNORECASE)\n+ dlls_mkl = [f for f in os.listdir(lib_dir) if re_mkllib.match(f)]\n+ if dlls_mkl:\n+ logger = logging.getLogger(__name__)\n+ logger.info(\"MKL libraries found when importing numpy. Adding MKL to binaries\")\n+ binaries += [(os.path.join(lib_dir, f), '') for f in dlls_mkl]\n+\n", "issue": "New numpy hook for intel mkl libraries\nHello - anaconda and winpython build numpy against Intel's mkl libraries. 
Building someone that uses numpy will need to have those libraries.\n\nHere is a winpython hook:\nhttp://stackoverflow.com/a/35853001\n\nand I adapted that for anaconda & python3:\nhttps://github.com/maqifrnswa/scimpy/blob/master/pyinstaller-hooks/hook-numpy.py\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n# On Windows, numpy depends on a set of dynamically-detemined DLLs, which means\n# that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969\n# for more information. The typical error message: ``Intel MKL FATAL ERROR:\n# Cannot load mkl_intel_thread.dll.``\n#\n# So, include them manually.\nimport os\nimport os.path\nfrom PyInstaller.utils.hooks import get_package_paths\n\npkg_base, pkg_dir = get_package_paths('numpy.core')\n# Walk through all files in ``numpy.core``, looking for DLLs.\ndatas = []\nfor f in os.listdir(pkg_dir):\n extension = os.path.splitext(f)[1]\n if extension == '.dll':\n # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '')\n source = os.path.join(pkg_dir, f)\n datas.append((source, ''))\n", "path": "PyInstaller/hooks/hook-numpy.core.py"}]}
947
680
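The rewritten hook above does two things: keep collecting any `.dll`/`.so` shipped inside `numpy.core`, and additionally scan the interpreter's library directory for MKL runtimes. The MKL half in isolation, with `sys.base_prefix` standing in for PyInstaller's `compat.base_prefix` helper — an approximation, so treat the search paths as assumptions:

```python
import os
import re
import sys

def find_mkl_binaries():
    """Collect (source, dest) pairs for MKL runtime libraries, if any."""
    lib_dir = (os.path.join(sys.base_prefix, "Library", "bin")
               if sys.platform == "win32"
               else os.path.join(sys.base_prefix, "lib"))
    pattern = re.compile(r"^(?:lib)?mkl\w+\.(?:dll|so)", re.IGNORECASE)
    if not os.path.isdir(lib_dir):
        return []
    return [(os.path.join(lib_dir, name), "")
            for name in os.listdir(lib_dir) if pattern.match(name)]

print(find_mkl_binaries())  # [] on installs built without MKL
```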
gh_patches_debug_9770
rasdani/github-patches
git_diff
spyder-ide__spyder-14543
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DeprecationWarning: implicit conversion to integers in spyder/widgets/colors.py:78 When running the test suite, I received the following warning: ```python <<<PACKAGEDIR>>>/spyder/widgets/colors.py:78: DeprecationWarning: an integer is required (got type float). Implicit conversion to integers using __int__ is deprecated, and may be removed in a future version of Python. self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2) ``` Changing this to say ```python self.lineedit.setMinimumWidth(int(fm.width(color.name()) * 1.2)) ``` should be sufficient to fix this. </issue> <code> [start of spyder/widgets/colors.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright © Spyder Project Contributors 4 # Licensed under the terms of the MIT License 5 # (see spyder/__init__.py for details) 6 7 # Third party imports 8 from qtpy.QtCore import Property, QSize, Signal, Slot 9 from qtpy.QtGui import QColor, QIcon, QPixmap 10 from qtpy.QtWidgets import QColorDialog, QHBoxLayout, QLineEdit, QToolButton 11 12 # Local imports 13 from spyder.py3compat import is_text_string 14 15 16 class ColorButton(QToolButton): 17 """ 18 Color choosing push button 19 """ 20 colorChanged = Signal(QColor) 21 22 def __init__(self, parent=None): 23 QToolButton.__init__(self, parent) 24 self.setFixedSize(20, 20) 25 self.setIconSize(QSize(12, 12)) 26 self.clicked.connect(self.choose_color) 27 self._color = QColor() 28 29 def choose_color(self): 30 color = QColorDialog.getColor(self._color, self.parentWidget(), 31 'Select Color', 32 QColorDialog.ShowAlphaChannel) 33 if color.isValid(): 34 self.set_color(color) 35 36 def get_color(self): 37 return self._color 38 39 @Slot(QColor) 40 def set_color(self, color): 41 if color != self._color: 42 self._color = color 43 self.colorChanged.emit(self._color) 44 pixmap = QPixmap(self.iconSize()) 45 pixmap.fill(color) 46 self.setIcon(QIcon(pixmap)) 47 48 color = Property("QColor", get_color, set_color) 49 50 51 def text_to_qcolor(text): 52 """ 53 Create a QColor from specified string 54 Avoid warning from Qt when an invalid QColor is instantiated 55 """ 56 color = QColor() 57 text = str(text) 58 if not is_text_string(text): 59 return color 60 if text.startswith('#') and len(text)==7: 61 correct = '#0123456789abcdef' 62 for char in text: 63 if char.lower() not in correct: 64 return color 65 elif text not in list(QColor.colorNames()): 66 return color 67 color.setNamedColor(text) 68 return color 69 70 71 class ColorLayout(QHBoxLayout): 72 """Color-specialized QLineEdit layout""" 73 def __init__(self, color, parent=None): 74 QHBoxLayout.__init__(self) 75 assert isinstance(color, QColor) 76 self.lineedit = QLineEdit(color.name(), parent) 77 fm = self.lineedit.fontMetrics() 78 self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2) 79 self.lineedit.textChanged.connect(self.update_color) 80 self.addWidget(self.lineedit) 81 self.colorbtn = ColorButton(parent) 82 self.colorbtn.color = color 83 self.colorbtn.colorChanged.connect(self.update_text) 84 self.addWidget(self.colorbtn) 85 86 def update_color(self, text): 87 color = text_to_qcolor(text) 88 if color.isValid(): 89 self.colorbtn.color = color 90 91 def update_text(self, color): 92 self.lineedit.setText(color.name()) 93 94 def text(self): 95 return self.lineedit.text() 96 [end of spyder/widgets/colors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/spyder/widgets/colors.py b/spyder/widgets/colors.py --- a/spyder/widgets/colors.py +++ b/spyder/widgets/colors.py @@ -75,7 +75,7 @@ assert isinstance(color, QColor) self.lineedit = QLineEdit(color.name(), parent) fm = self.lineedit.fontMetrics() - self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2) + self.lineedit.setMinimumWidth(int(fm.width(color.name()) * 1.2)) self.lineedit.textChanged.connect(self.update_color) self.addWidget(self.lineedit) self.colorbtn = ColorButton(parent)
{"golden_diff": "diff --git a/spyder/widgets/colors.py b/spyder/widgets/colors.py\n--- a/spyder/widgets/colors.py\n+++ b/spyder/widgets/colors.py\n@@ -75,7 +75,7 @@\n assert isinstance(color, QColor)\r\n self.lineedit = QLineEdit(color.name(), parent)\r\n fm = self.lineedit.fontMetrics()\r\n- self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2)\r\n+ self.lineedit.setMinimumWidth(int(fm.width(color.name()) * 1.2))\r\n self.lineedit.textChanged.connect(self.update_color)\r\n self.addWidget(self.lineedit)\r\n self.colorbtn = ColorButton(parent)\n", "issue": "DeprecationWarning: implicit conversion to integers in spyder/widgets/colors.py:78\nWhen running the test suite, I received the following warning:\r\n\r\n```python\r\n <<<PACKAGEDIR>>>/spyder/widgets/colors.py:78: DeprecationWarning: an integer is required (got type float). Implicit conversion to integers using __int__ is deprecated, and may be removed in a future version of Python.\r\n self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2)\r\n```\r\n\r\nChanging this to say\r\n```python\r\n self.lineedit.setMinimumWidth(int(fm.width(color.name()) * 1.2))\r\n```\r\nshould be sufficient to fix this.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\r\n#\r\n# Copyright \u00a9 Spyder Project Contributors\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n\r\n# Third party imports\r\nfrom qtpy.QtCore import Property, QSize, Signal, Slot\r\nfrom qtpy.QtGui import QColor, QIcon, QPixmap\r\nfrom qtpy.QtWidgets import QColorDialog, QHBoxLayout, QLineEdit, QToolButton\r\n\r\n# Local imports\r\nfrom spyder.py3compat import is_text_string\r\n\r\n\r\nclass ColorButton(QToolButton):\r\n \"\"\"\r\n Color choosing push button\r\n \"\"\"\r\n colorChanged = Signal(QColor)\r\n\r\n def __init__(self, parent=None):\r\n QToolButton.__init__(self, parent)\r\n self.setFixedSize(20, 20)\r\n self.setIconSize(QSize(12, 12))\r\n self.clicked.connect(self.choose_color)\r\n self._color = QColor()\r\n\r\n def choose_color(self):\r\n color = QColorDialog.getColor(self._color, self.parentWidget(),\r\n 'Select Color',\r\n QColorDialog.ShowAlphaChannel)\r\n if color.isValid():\r\n self.set_color(color)\r\n\r\n def get_color(self):\r\n return self._color\r\n\r\n @Slot(QColor)\r\n def set_color(self, color):\r\n if color != self._color:\r\n self._color = color\r\n self.colorChanged.emit(self._color)\r\n pixmap = QPixmap(self.iconSize())\r\n pixmap.fill(color)\r\n self.setIcon(QIcon(pixmap))\r\n\r\n color = Property(\"QColor\", get_color, set_color)\r\n\r\n\r\ndef text_to_qcolor(text):\r\n \"\"\"\r\n Create a QColor from specified string\r\n Avoid warning from Qt when an invalid QColor is instantiated\r\n \"\"\"\r\n color = QColor()\r\n text = str(text)\r\n if not is_text_string(text):\r\n return color\r\n if text.startswith('#') and len(text)==7:\r\n correct = '#0123456789abcdef'\r\n for char in text:\r\n if char.lower() not in correct:\r\n return color\r\n elif text not in list(QColor.colorNames()):\r\n return color\r\n color.setNamedColor(text)\r\n return color\r\n\r\n\r\nclass ColorLayout(QHBoxLayout):\r\n \"\"\"Color-specialized QLineEdit layout\"\"\"\r\n def __init__(self, color, parent=None):\r\n QHBoxLayout.__init__(self)\r\n assert isinstance(color, QColor)\r\n self.lineedit = QLineEdit(color.name(), parent)\r\n fm = self.lineedit.fontMetrics()\r\n self.lineedit.setMinimumWidth(fm.width(color.name()) * 1.2)\r\n self.lineedit.textChanged.connect(self.update_color)\r\n self.addWidget(self.lineedit)\r\n 
self.colorbtn = ColorButton(parent)\r\n self.colorbtn.color = color\r\n self.colorbtn.colorChanged.connect(self.update_text)\r\n self.addWidget(self.colorbtn)\r\n\r\n def update_color(self, text):\r\n color = text_to_qcolor(text)\r\n if color.isValid():\r\n self.colorbtn.color = color\r\n\r\n def update_text(self, color):\r\n self.lineedit.setText(color.name())\r\n\r\n def text(self):\r\n return self.lineedit.text()\r\n", "path": "spyder/widgets/colors.py"}]}
1,511
140
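The patched call in the record above hinges on one contract: PyQt/PySide methods bound to a C++ `int` parameter accept Python floats only through a deprecated implicit `__int__` conversion, so any scaled pixel metric has to be truncated explicitly. A minimal sketch of that pattern, runnable without Qt; the function and the numbers are stand-ins, not spyder code:

```python
def set_minimum_width(width):
    # Stand-in for QLineEdit.setMinimumWidth, whose C++ signature takes an int.
    if not isinstance(width, int):
        raise TypeError(f"an integer is required (got type {type(width).__name__})")
    return width

text_width = 57   # stand-in for fontMetrics().width(color.name())
padding = 1.2     # the 20% slack used in colors.py

print(set_minimum_width(int(text_width * padding)))   # 68, the patched form
# set_minimum_width(text_width * padding)  # float, would raise like Qt's warning
```

As an aside, newer Qt also deprecates `QFontMetrics.width()` in favor of `horizontalAdvance()`, but that is outside the scope of this one-line fix.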
gh_patches_debug_123
rasdani/github-patches
git_diff
ResonantGeoData__ResonantGeoData-455
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve VTK.js 3D Viewer After #406 is merged, we should improve the 3D viewer. Basically, use [this example](https://kitware.github.io/vtk-js/examples/GeometryViewer.html) Things we should have: - [x] drop-down menu to change the scalar array - [x] Scalar bar - [x] Representation style - [x] Better background color choice (likely black) - [x] Point size slider - [x] Support RGB colors </issue> <code> [start of example_project/rgd_example/settings.py] 1 from rgd_testing_utils.settings import * # noqa 2 3 INSTALLED_APPS += [ # noqa 4 'rgd_3d', 5 'rgd_fmv', 6 'rgd_geometry', 7 'rgd_imagery', 8 # Swagger 9 'drf_yasg', 10 'django_extensions', 11 ] 12 13 ROOT_URLCONF = 'rgd_example.urls' 14 WSGI_APPLICATION = 'rgd_example.wsgi.application' 15 16 17 # Swagger 18 REFETCH_SCHEMA_WITH_AUTH = True 19 REFETCH_SCHEMA_ON_LOGOUT = True 20 OPERATIONS_SORTER = 'alpha' 21 DEEP_LINKING = True 22 [end of example_project/rgd_example/settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/example_project/rgd_example/settings.py b/example_project/rgd_example/settings.py --- a/example_project/rgd_example/settings.py +++ b/example_project/rgd_example/settings.py @@ -19,3 +19,5 @@ REFETCH_SCHEMA_ON_LOGOUT = True OPERATIONS_SORTER = 'alpha' DEEP_LINKING = True + +STATIC_URL = '/static/'
{"golden_diff": "diff --git a/example_project/rgd_example/settings.py b/example_project/rgd_example/settings.py\n--- a/example_project/rgd_example/settings.py\n+++ b/example_project/rgd_example/settings.py\n@@ -19,3 +19,5 @@\n REFETCH_SCHEMA_ON_LOGOUT = True\n OPERATIONS_SORTER = 'alpha'\n DEEP_LINKING = True\n+\n+STATIC_URL = '/static/'\n", "issue": "Improve VTK.js 3D Viewer\nAfter #406 is merged, we should improve the 3D viewer. Basically, use [this example](https://kitware.github.io/vtk-js/examples/GeometryViewer.html)\r\n\r\nThings we should have:\r\n\r\n- [x] drop-down menu to change the scalar array\r\n- [x] Scalar bar\r\n- [x] Representation style\r\n- [x] Better background color choice (likely black)\r\n- [x] Point size slider\r\n- [x] Support RGB colors\n", "before_files": [{"content": "from rgd_testing_utils.settings import * # noqa\n\nINSTALLED_APPS += [ # noqa\n 'rgd_3d',\n 'rgd_fmv',\n 'rgd_geometry',\n 'rgd_imagery',\n # Swagger\n 'drf_yasg',\n 'django_extensions',\n]\n\nROOT_URLCONF = 'rgd_example.urls'\nWSGI_APPLICATION = 'rgd_example.wsgi.application'\n\n\n# Swagger\nREFETCH_SCHEMA_WITH_AUTH = True\nREFETCH_SCHEMA_ON_LOGOUT = True\nOPERATIONS_SORTER = 'alpha'\nDEEP_LINKING = True\n", "path": "example_project/rgd_example/settings.py"}]}
815
89
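The single added line in the golden diff above works because Django's staticfiles machinery refuses to build asset URLs while `STATIC_URL` is unset, which matters once the example project serves the VTK.js viewer assets. A simplified stand-in for that behavior, assuming nothing else about the project's settings (the path and the error text are illustrative, not Django's):

```python
STATIC_URL = '/static/'  # the setting the golden diff adds

def static(path, static_url=STATIC_URL):
    # Simplified stand-in for django.templatetags.static.static(); the real
    # implementation resolves through the configured staticfiles storage.
    if not static_url:
        raise ValueError("staticfiles is enabled but STATIC_URL is empty")
    return static_url.rstrip('/') + '/' + path.lstrip('/')

print(static('js/geometry_viewer.js'))  # -> /static/js/geometry_viewer.js
```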
gh_patches_debug_6251
rasdani/github-patches
git_diff
searxng__searxng-83
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: engine "archive is" reports HTTP 404 / Not found **Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG** 1970d28a **Technical report** Error * Error: httpx.HTTPStatusError * Percentage: 100 * Parameters: `('404', 'Not Found', 'archive.is')` * File name: `searx/search/processors/online.py:99` * Function: `_send_http_request` * Code: `response = req(params['url'], **request_args)` </issue> <code> [start of searx/engines/xpath.py] 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 3 from lxml import html 4 from urllib.parse import urlencode 5 from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list 6 7 search_url = None 8 url_xpath = None 9 content_xpath = None 10 title_xpath = None 11 thumbnail_xpath = False 12 paging = False 13 suggestion_xpath = '' 14 results_xpath = '' 15 cached_xpath = '' 16 cached_url = '' 17 18 # parameters for engines with paging support 19 # 20 # number of results on each page 21 # (only needed if the site requires not a page number, but an offset) 22 page_size = 1 23 # number of the first page (usually 0 or 1) 24 first_page_num = 1 25 26 27 def request(query, params): 28 query = urlencode({'q': query})[2:] 29 30 fp = {'query': query} 31 if paging and search_url.find('{pageno}') >= 0: 32 fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num 33 34 params['url'] = search_url.format(**fp) 35 params['query'] = query 36 37 return params 38 39 40 def response(resp): 41 results = [] 42 dom = html.fromstring(resp.text) 43 is_onion = True if 'onions' in categories else False # pylint: disable=undefined-variable 44 45 if results_xpath: 46 for result in eval_xpath_list(dom, results_xpath): 47 url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url) 48 title = extract_text(eval_xpath_list(result, title_xpath, min_len=1)) 49 content = extract_text(eval_xpath_list(result, content_xpath, min_len=1)) 50 tmp_result = {'url': url, 'title': title, 'content': content} 51 52 # add thumbnail if available 53 if thumbnail_xpath: 54 thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath) 55 if len(thumbnail_xpath_result) > 0: 56 tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url) 57 58 # add alternative cached url if available 59 if cached_xpath: 60 tmp_result['cached_url'] = cached_url\ 61 + extract_text(eval_xpath_list(result, cached_xpath, min_len=1)) 62 63 if is_onion: 64 tmp_result['is_onion'] = True 65 66 results.append(tmp_result) 67 else: 68 if cached_xpath: 69 for url, title, content, cached in zip( 70 (extract_url(x, search_url) for 71 x in eval_xpath_list(dom, url_xpath)), 72 map(extract_text, eval_xpath_list(dom, title_xpath)), 73 map(extract_text, eval_xpath_list(dom, content_xpath)), 74 map(extract_text, eval_xpath_list(dom, cached_xpath)) 75 ): 76 results.append({'url': url, 'title': title, 'content': content, 77 'cached_url': cached_url + cached, 'is_onion': is_onion}) 78 else: 79 for url, title, content in zip( 80 (extract_url(x, search_url) for 81 x in eval_xpath_list(dom, url_xpath)), 82 map(extract_text, eval_xpath_list(dom, title_xpath)), 83 map(extract_text, eval_xpath_list(dom, content_xpath)) 84 ): 85 results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion}) 86 87 if not suggestion_xpath: 88 return results 89 for suggestion in eval_xpath(dom, suggestion_xpath): 90 results.append({'suggestion': 
extract_text(suggestion)}) 91 return results 92 [end of searx/engines/xpath.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py --- a/searx/engines/xpath.py +++ b/searx/engines/xpath.py @@ -14,6 +14,7 @@ results_xpath = '' cached_xpath = '' cached_url = '' +soft_max_redirects = 0 # parameters for engines with paging support # @@ -33,6 +34,7 @@ params['url'] = search_url.format(**fp) params['query'] = query + params['soft_max_redirects'] = soft_max_redirects return params
{"golden_diff": "diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py\n--- a/searx/engines/xpath.py\n+++ b/searx/engines/xpath.py\n@@ -14,6 +14,7 @@\n results_xpath = ''\n cached_xpath = ''\n cached_url = ''\n+soft_max_redirects = 0\n \n # parameters for engines with paging support\n #\n@@ -33,6 +34,7 @@\n \n params['url'] = search_url.format(**fp)\n params['query'] = query\n+ params['soft_max_redirects'] = soft_max_redirects\n \n return params\n", "issue": "Bug: engine \"archive is\" reports HTTP 404 / Not found\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n\r\n1970d28a\r\n\r\n**Technical report**\r\n\r\nError\r\n * Error: httpx.HTTPStatusError\r\n * Percentage: 100\r\n * Parameters: `('404', 'Not Found', 'archive.is')`\r\n * File name: `searx/search/processors/online.py:99`\r\n * Function: `_send_http_request`\r\n * Code: `response = req(params['url'], **request_args)`\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\nfrom lxml import html\nfrom urllib.parse import urlencode\nfrom searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list\n\nsearch_url = None\nurl_xpath = None\ncontent_xpath = None\ntitle_xpath = None\nthumbnail_xpath = False\npaging = False\nsuggestion_xpath = ''\nresults_xpath = ''\ncached_xpath = ''\ncached_url = ''\n\n# parameters for engines with paging support\n#\n# number of results on each page\n# (only needed if the site requires not a page number, but an offset)\npage_size = 1\n# number of the first page (usually 0 or 1)\nfirst_page_num = 1\n\n\ndef request(query, params):\n query = urlencode({'q': query})[2:]\n\n fp = {'query': query}\n if paging and search_url.find('{pageno}') >= 0:\n fp['pageno'] = (params['pageno'] - 1) * page_size + first_page_num\n\n params['url'] = search_url.format(**fp)\n params['query'] = query\n\n return params\n\n\ndef response(resp):\n results = []\n dom = html.fromstring(resp.text)\n is_onion = True if 'onions' in categories else False # pylint: disable=undefined-variable\n\n if results_xpath:\n for result in eval_xpath_list(dom, results_xpath):\n url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)\n title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))\n content = extract_text(eval_xpath_list(result, content_xpath, min_len=1))\n tmp_result = {'url': url, 'title': title, 'content': content}\n\n # add thumbnail if available\n if thumbnail_xpath:\n thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)\n if len(thumbnail_xpath_result) > 0:\n tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)\n\n # add alternative cached url if available\n if cached_xpath:\n tmp_result['cached_url'] = cached_url\\\n + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))\n\n if is_onion:\n tmp_result['is_onion'] = True\n\n results.append(tmp_result)\n else:\n if cached_xpath:\n for url, title, content, cached in zip(\n (extract_url(x, search_url) for\n x in eval_xpath_list(dom, url_xpath)),\n map(extract_text, eval_xpath_list(dom, title_xpath)),\n map(extract_text, eval_xpath_list(dom, content_xpath)),\n map(extract_text, eval_xpath_list(dom, cached_xpath))\n ):\n results.append({'url': url, 'title': title, 'content': content,\n 'cached_url': cached_url + cached, 'is_onion': is_onion})\n else:\n for url, title, content in zip(\n (extract_url(x, search_url) for\n x in eval_xpath_list(dom, url_xpath)),\n map(extract_text, eval_xpath_list(dom, 
title_xpath)),\n map(extract_text, eval_xpath_list(dom, content_xpath))\n ):\n results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})\n\n if not suggestion_xpath:\n return results\n for suggestion in eval_xpath(dom, suggestion_xpath):\n results.append({'suggestion': extract_text(suggestion)})\n return results\n", "path": "searx/engines/xpath.py"}]}
1,630
143
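The diff above wires a new per-engine attribute, `soft_max_redirects`, through to the request params; the likely reading (not stated in the record itself) is that archive.is answers its search URL with a redirect chain, and the HTTP layer was treating that as a hard failure. A reduced sketch of the plumbing only, with a hypothetical URL in place of the real engine template:

```python
soft_max_redirects = 0  # module-level default added by the patch; an engine
                        # such as archive.is would override it in settings

def request(query, params):
    params['url'] = 'https://example.org/search?q=' + query  # placeholder
    params['soft_max_redirects'] = soft_max_redirects
    return params

print(request('test', {}))
# {'url': 'https://example.org/search?q=test', 'soft_max_redirects': 0}
```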
gh_patches_debug_40813
rasdani/github-patches
git_diff
sublimelsp__LSP-1982
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> side_by_side should preview files in a side view **Describe the bug** When using `side_by_side: true` option for commands that support it, if the command opens a quick panel with multiple items, the items open in the main view on changing selection and only on pressing Enter to select an item it opens in the side view. I suppose that it should also show previews in side view like native ST functionality does (for example `shift+primary+f12`). **To Reproduce** Steps to reproduce the behavior: 1. Set up keybinding like: ``` { "command": "lsp_symbol_type_definition", "keys": ["f13"], "args": {"side_by_side": true}, "context": [ { "key": "lsp.session_with_capability", "operator": "equal", "operand": "typeDefinitionProvider" }, { "key": "auto_complete_visible", "operator": "equal", "operand": false } ] }, ``` 3. Press F13 on some symbol that is referenced from multiple places **Expected behavior** Changing selection in quick panel should preview the file in a side by side view. **Environment (please complete the following information):** - OS: macOS - Sublime Text version: 4134 </issue> <code> [start of plugin/locationpicker.py] 1 from .core.logging import debug 2 from .core.protocol import DocumentUri, Location, Position 3 from .core.protocol import LocationLink 4 from .core.sessions import Session 5 from .core.typing import Union, List, Optional, Tuple 6 from .core.views import get_uri_and_position_from_location 7 from .core.views import location_to_human_readable 8 from .core.views import to_encoded_filename 9 import functools 10 import sublime 11 import weakref 12 13 14 def open_location_async(session: Session, location: Union[Location, LocationLink], side_by_side: bool) -> None: 15 flags = sublime.ENCODED_POSITION 16 if side_by_side: 17 flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT 18 19 def check_success_async(view: Optional[sublime.View]) -> None: 20 if not view: 21 sublime.error_message("Unable to open URI") 22 23 session.open_location_async(location, flags).then(check_success_async) 24 25 26 def open_basic_file( 27 session: Session, 28 uri: str, 29 position: Position, 30 flags: int = 0, 31 group: Optional[int] = None 32 ) -> None: 33 filename = session.config.map_server_uri_to_client_path(uri) 34 if group is None: 35 group = session.window.active_group() 36 session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group) 37 38 39 class LocationPicker: 40 41 def __init__( 42 self, 43 view: sublime.View, 44 session: Session, 45 locations: Union[List[Location], List[LocationLink]], 46 side_by_side: bool 47 ) -> None: 48 self._view = view 49 window = view.window() 50 if not window: 51 raise ValueError("missing window") 52 self._window = window 53 self._weaksession = weakref.ref(session) 54 self._side_by_side = side_by_side 55 self._items = locations 56 manager = session.manager() 57 base_dir = manager.get_project_path(view.file_name() or "") if manager else None 58 self._window.show_quick_panel( 59 items=[location_to_human_readable(session.config, base_dir, location) for location in locations], 60 on_select=self._select_entry, 61 on_highlight=self._highlight_entry, 62 flags=sublime.KEEP_OPEN_ON_FOCUS_LOST 63 ) 64 65 def _unpack(self, index: int) -> Tuple[Optional[Session], Union[Location, LocationLink], DocumentUri, Position]: 66 location = self._items[index] 67 uri, position = get_uri_and_position_from_location(location) 68 return 
self._weaksession(), location, uri, position 69 70 def _select_entry(self, index: int) -> None: 71 if index >= 0 and self._view.is_valid(): 72 session, location, uri, position = self._unpack(index) 73 if not session: 74 return 75 # Note: this has to run on the main thread (and not via open_location_async) 76 # otherwise the bevior feels weird. It's the only reason why open_basic_file exists. 77 if uri.startswith("file:"): 78 flags = sublime.ENCODED_POSITION 79 if self._side_by_side: 80 flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT 81 open_basic_file(session, uri, position, flags) 82 else: 83 sublime.set_timeout_async(functools.partial(open_location_async, session, location, self._side_by_side)) 84 else: 85 self._window.focus_view(self._view) 86 87 def _highlight_entry(self, index: int) -> None: 88 session, _, uri, position = self._unpack(index) 89 if not session: 90 return 91 if uri.startswith("file:"): 92 open_basic_file(session, uri, position, sublime.TRANSIENT | sublime.ENCODED_POSITION) 93 else: 94 # TODO: Preview non-file uris? 95 debug("no preview for", uri) 96 [end of plugin/locationpicker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugin/locationpicker.py b/plugin/locationpicker.py --- a/plugin/locationpicker.py +++ b/plugin/locationpicker.py @@ -29,11 +29,11 @@ position: Position, flags: int = 0, group: Optional[int] = None -) -> None: +) -> sublime.View: filename = session.config.map_server_uri_to_client_path(uri) if group is None: group = session.window.active_group() - session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group) + return session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group) class LocationPicker: @@ -53,6 +53,7 @@ self._weaksession = weakref.ref(session) self._side_by_side = side_by_side self._items = locations + self._highlighted_view = None # type: Optional[sublime.View] manager = session.manager() base_dir = manager.get_project_path(view.file_name() or "") if manager else None self._window.show_quick_panel( @@ -76,20 +77,35 @@ # otherwise the bevior feels weird. It's the only reason why open_basic_file exists. if uri.startswith("file:"): flags = sublime.ENCODED_POSITION - if self._side_by_side: - flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT - open_basic_file(session, uri, position, flags) + if not self._side_by_side: + open_basic_file(session, uri, position, flags) else: sublime.set_timeout_async(functools.partial(open_location_async, session, location, self._side_by_side)) else: self._window.focus_view(self._view) + # When in side-by-side mode close the current highlighted + # sheet upon canceling if the sheet is semi-transient + if self._side_by_side and self._highlighted_view: + sheet = self._highlighted_view.sheet() + if sheet and sheet.is_semi_transient(): + self._highlighted_view.close() def _highlight_entry(self, index: int) -> None: session, _, uri, position = self._unpack(index) if not session: return if uri.startswith("file:"): - open_basic_file(session, uri, position, sublime.TRANSIENT | sublime.ENCODED_POSITION) + flags = sublime.ENCODED_POSITION | sublime.FORCE_GROUP + if self._side_by_side: + if self._highlighted_view and self._highlighted_view.is_valid(): + # Replacing the MRU is done relative to the current highlighted sheet + self._window.focus_view(self._highlighted_view) + flags |= sublime.REPLACE_MRU | sublime.SEMI_TRANSIENT + else: + flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT + else: + flags |= sublime.TRANSIENT + self._highlighted_view = open_basic_file(session, uri, position, flags, self._window.active_group()) else: # TODO: Preview non-file uris? debug("no preview for", uri)
{"golden_diff": "diff --git a/plugin/locationpicker.py b/plugin/locationpicker.py\n--- a/plugin/locationpicker.py\n+++ b/plugin/locationpicker.py\n@@ -29,11 +29,11 @@\n position: Position,\n flags: int = 0,\n group: Optional[int] = None\n-) -> None:\n+) -> sublime.View:\n filename = session.config.map_server_uri_to_client_path(uri)\n if group is None:\n group = session.window.active_group()\n- session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)\n+ return session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)\n \n \n class LocationPicker:\n@@ -53,6 +53,7 @@\n self._weaksession = weakref.ref(session)\n self._side_by_side = side_by_side\n self._items = locations\n+ self._highlighted_view = None # type: Optional[sublime.View]\n manager = session.manager()\n base_dir = manager.get_project_path(view.file_name() or \"\") if manager else None\n self._window.show_quick_panel(\n@@ -76,20 +77,35 @@\n # otherwise the bevior feels weird. It's the only reason why open_basic_file exists.\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION\n- if self._side_by_side:\n- flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n- open_basic_file(session, uri, position, flags)\n+ if not self._side_by_side:\n+ open_basic_file(session, uri, position, flags)\n else:\n sublime.set_timeout_async(functools.partial(open_location_async, session, location, self._side_by_side))\n else:\n self._window.focus_view(self._view)\n+ # When in side-by-side mode close the current highlighted\n+ # sheet upon canceling if the sheet is semi-transient\n+ if self._side_by_side and self._highlighted_view:\n+ sheet = self._highlighted_view.sheet()\n+ if sheet and sheet.is_semi_transient():\n+ self._highlighted_view.close()\n \n def _highlight_entry(self, index: int) -> None:\n session, _, uri, position = self._unpack(index)\n if not session:\n return\n if uri.startswith(\"file:\"):\n- open_basic_file(session, uri, position, sublime.TRANSIENT | sublime.ENCODED_POSITION)\n+ flags = sublime.ENCODED_POSITION | sublime.FORCE_GROUP\n+ if self._side_by_side:\n+ if self._highlighted_view and self._highlighted_view.is_valid():\n+ # Replacing the MRU is done relative to the current highlighted sheet\n+ self._window.focus_view(self._highlighted_view)\n+ flags |= sublime.REPLACE_MRU | sublime.SEMI_TRANSIENT\n+ else:\n+ flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n+ else:\n+ flags |= sublime.TRANSIENT\n+ self._highlighted_view = open_basic_file(session, uri, position, flags, self._window.active_group())\n else:\n # TODO: Preview non-file uris?\n debug(\"no preview for\", uri)\n", "issue": "side_by_side should preview files in a side view\n**Describe the bug**\r\nWhen using `side_by_side: true` option for commands that support it, if the command opens a quick panel with multiple items, the items open in the main view on changing selection and only on pressing Enter to select an item it opens in the side view. I suppose that it should also show previews in side view like native ST functionality does (for example `shift+primary+f12`).\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. 
Set up keybinding like:\r\n```\r\n {\r\n \"command\": \"lsp_symbol_type_definition\",\r\n \"keys\": [\"f13\"],\r\n \"args\": {\"side_by_side\": true},\r\n \"context\": [\r\n {\r\n \"key\": \"lsp.session_with_capability\",\r\n \"operator\": \"equal\",\r\n \"operand\": \"typeDefinitionProvider\"\r\n },\r\n {\r\n \"key\": \"auto_complete_visible\",\r\n \"operator\": \"equal\",\r\n \"operand\": false\r\n }\r\n ]\r\n },\r\n```\r\n3. Press F13 on some symbol that is referenced from multiple places\r\n\r\n**Expected behavior**\r\nChanging selection in quick panel should preview the file in a side by side view.\r\n\r\n**Environment (please complete the following information):**\r\n- OS: macOS\r\n- Sublime Text version: 4134\r\n\n", "before_files": [{"content": "from .core.logging import debug\nfrom .core.protocol import DocumentUri, Location, Position\nfrom .core.protocol import LocationLink\nfrom .core.sessions import Session\nfrom .core.typing import Union, List, Optional, Tuple\nfrom .core.views import get_uri_and_position_from_location\nfrom .core.views import location_to_human_readable\nfrom .core.views import to_encoded_filename\nimport functools\nimport sublime\nimport weakref\n\n\ndef open_location_async(session: Session, location: Union[Location, LocationLink], side_by_side: bool) -> None:\n flags = sublime.ENCODED_POSITION\n if side_by_side:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n\n def check_success_async(view: Optional[sublime.View]) -> None:\n if not view:\n sublime.error_message(\"Unable to open URI\")\n\n session.open_location_async(location, flags).then(check_success_async)\n\n\ndef open_basic_file(\n session: Session,\n uri: str,\n position: Position,\n flags: int = 0,\n group: Optional[int] = None\n) -> None:\n filename = session.config.map_server_uri_to_client_path(uri)\n if group is None:\n group = session.window.active_group()\n session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)\n\n\nclass LocationPicker:\n\n def __init__(\n self,\n view: sublime.View,\n session: Session,\n locations: Union[List[Location], List[LocationLink]],\n side_by_side: bool\n ) -> None:\n self._view = view\n window = view.window()\n if not window:\n raise ValueError(\"missing window\")\n self._window = window\n self._weaksession = weakref.ref(session)\n self._side_by_side = side_by_side\n self._items = locations\n manager = session.manager()\n base_dir = manager.get_project_path(view.file_name() or \"\") if manager else None\n self._window.show_quick_panel(\n items=[location_to_human_readable(session.config, base_dir, location) for location in locations],\n on_select=self._select_entry,\n on_highlight=self._highlight_entry,\n flags=sublime.KEEP_OPEN_ON_FOCUS_LOST\n )\n\n def _unpack(self, index: int) -> Tuple[Optional[Session], Union[Location, LocationLink], DocumentUri, Position]:\n location = self._items[index]\n uri, position = get_uri_and_position_from_location(location)\n return self._weaksession(), location, uri, position\n\n def _select_entry(self, index: int) -> None:\n if index >= 0 and self._view.is_valid():\n session, location, uri, position = self._unpack(index)\n if not session:\n return\n # Note: this has to run on the main thread (and not via open_location_async)\n # otherwise the bevior feels weird. 
It's the only reason why open_basic_file exists.\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION\n if self._side_by_side:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n open_basic_file(session, uri, position, flags)\n else:\n sublime.set_timeout_async(functools.partial(open_location_async, session, location, self._side_by_side))\n else:\n self._window.focus_view(self._view)\n\n def _highlight_entry(self, index: int) -> None:\n session, _, uri, position = self._unpack(index)\n if not session:\n return\n if uri.startswith(\"file:\"):\n open_basic_file(session, uri, position, sublime.TRANSIENT | sublime.ENCODED_POSITION)\n else:\n # TODO: Preview non-file uris?\n debug(\"no preview for\", uri)\n", "path": "plugin/locationpicker.py"}]}
1,810
702
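Most of the patch above is flag plumbing: in side-by-side mode the highlight-time preview becomes a SEMI_TRANSIENT sheet, reuses the previous preview via REPLACE_MRU when one exists, and otherwise opens next to the selection with ADD_TO_SELECTION. The helper below isolates that decision with placeholder bit values so it runs outside Sublime Text; the genuine constants come from the `sublime` module:

```python
# Placeholder bits; the real values live in the `sublime` module.
ENCODED_POSITION, FORCE_GROUP = 0x01, 0x02
TRANSIENT, SEMI_TRANSIENT = 0x04, 0x08
ADD_TO_SELECTION, REPLACE_MRU = 0x10, 0x20

def highlight_flags(side_by_side, highlighted_view_valid):
    flags = ENCODED_POSITION | FORCE_GROUP
    if side_by_side:
        if highlighted_view_valid:
            flags |= REPLACE_MRU | SEMI_TRANSIENT       # reuse the preview sheet
        else:
            flags |= ADD_TO_SELECTION | SEMI_TRANSIENT  # open the first preview
    else:
        flags |= TRANSIENT
    return flags

assert highlight_flags(False, False) == ENCODED_POSITION | FORCE_GROUP | TRANSIENT
assert highlight_flags(True, True) & REPLACE_MRU
```

The semi-transient close on cancel in `_select_entry` is the other half of the fix, keeping abandoned previews from piling up in the side group.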
gh_patches_debug_18206
rasdani/github-patches
git_diff
mindsdb__lightwood-698
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make `column_importances` optional If the column importance module gets automatically disabled (see #681) an issue may arise where the information required for a `ModelAnalysis` object is not available. Example stacktrace: ```python --------------------------------------------------------------------------- KeyError Traceback (most recent call last) <ipython-input-10-b3f165881113> in <module> 16 17 # Analyze the ensembles ---> 18 predictor.analyze_ensemble(enc_data) /tmp/b380bd15a0ec89f57a82f719f514e67f0cae00fe7d0085d816353697296992059.py in analyze_ensemble(self, enc_data) 1439 # --------------- # 1440 log.info("Analyzing the ensemble of mixers") -> 1441 self.model_analysis, self.runtime_analyzer = model_analyzer( 1442 data=encoded_test_data, 1443 train_data=encoded_train_data, ~/Documents/lightwood/lightwood/analysis/analyze.py in model_analyzer(predictor, data, train_data, stats_info, target, ts_cfg, dtype_dict, accuracy_functions, analysis_blocks) 91 test_sample_size=len(encoded_val_data), 92 confusion_matrix=runtime_analyzer['cm'], ---> 93 column_importances=runtime_analyzer['column_importances'], 94 histograms=stats_info.histograms, 95 dtypes=dtype_dict KeyError: 'column_importances' ``` </issue> <code> [start of lightwood/analysis/analyze.py] 1 from typing import Dict, List, Tuple, Optional 2 3 from lightwood.helpers.log import log 4 from lightwood.api import dtype 5 from lightwood.ensemble import BaseEnsemble 6 from lightwood.analysis.base import BaseAnalysisBlock 7 from lightwood.data.encoded_ds import EncodedDs 8 from lightwood.encoder.text.pretrained import PretrainedLangEncoder 9 from lightwood.api.types import ModelAnalysis, StatisticalAnalysis, TimeseriesSettings, PredictionArguments 10 11 12 def model_analyzer( 13 predictor: BaseEnsemble, 14 data: EncodedDs, 15 train_data: EncodedDs, 16 stats_info: StatisticalAnalysis, 17 target: str, 18 ts_cfg: TimeseriesSettings, 19 dtype_dict: Dict[str, str], 20 accuracy_functions, 21 analysis_blocks: Optional[List[BaseAnalysisBlock]] = [] 22 ) -> Tuple[ModelAnalysis, Dict[str, object]]: 23 """ 24 Analyses model on a validation subset to evaluate accuracy, estimate feature importance and generate a 25 calibration model to estimating confidence in future predictions. 26 27 Additionally, any user-specified analysis blocks (see class `BaseAnalysisBlock`) are also called here. 28 29 :return: 30 runtime_analyzer: This dictionary object gets populated in a sequential fashion with data generated from 31 any `.analyze()` block call. This dictionary object is stored in the predictor itself, and used when 32 calling the `.explain()` method of all analysis blocks when generating predictions. 33 34 model_analysis: `ModelAnalysis` object that contains core analysis metrics, not necessarily needed when predicting. 
35 """ 36 37 runtime_analyzer = {} 38 data_type = dtype_dict[target] 39 40 # retrieve encoded data representations 41 encoded_train_data = train_data 42 encoded_val_data = data 43 data = encoded_val_data.data_frame 44 input_cols = list([col for col in data.columns if col != target]) 45 46 # predictive task 47 is_numerical = data_type in (dtype.integer, dtype.float, dtype.array, dtype.tsarray, dtype.quantity) 48 is_classification = data_type in (dtype.categorical, dtype.binary) 49 is_multi_ts = ts_cfg.is_timeseries and ts_cfg.nr_predictions > 1 50 has_pretrained_text_enc = any([isinstance(enc, PretrainedLangEncoder) 51 for enc in encoded_train_data.encoders.values()]) 52 53 # raw predictions for validation dataset 54 args = {} if not is_classification else {"predict_proba": True} 55 normal_predictions = predictor(encoded_val_data, args=PredictionArguments.from_dict(args)) 56 normal_predictions = normal_predictions.set_index(data.index) 57 58 # ------------------------- # 59 # Run analysis blocks, both core and user-defined 60 # ------------------------- # 61 kwargs = { 62 'predictor': predictor, 63 'target': target, 64 'input_cols': input_cols, 65 'dtype_dict': dtype_dict, 66 'normal_predictions': normal_predictions, 67 'data': data, 68 'train_data': train_data, 69 'encoded_val_data': encoded_val_data, 70 'is_classification': is_classification, 71 'is_numerical': is_numerical, 72 'is_multi_ts': is_multi_ts, 73 'stats_info': stats_info, 74 'ts_cfg': ts_cfg, 75 'accuracy_functions': accuracy_functions, 76 'has_pretrained_text_enc': has_pretrained_text_enc 77 } 78 79 for block in analysis_blocks: 80 log.info("The block %s is now running its analyze() method", block.__class__.__name__) 81 runtime_analyzer = block.analyze(runtime_analyzer, **kwargs) 82 83 # ------------------------- # 84 # Populate ModelAnalysis object 85 # ------------------------- # 86 model_analysis = ModelAnalysis( 87 accuracies=runtime_analyzer['score_dict'], 88 accuracy_histogram=runtime_analyzer['acc_histogram'], 89 accuracy_samples=runtime_analyzer['acc_samples'], 90 train_sample_size=len(encoded_train_data), 91 test_sample_size=len(encoded_val_data), 92 confusion_matrix=runtime_analyzer['cm'], 93 column_importances=runtime_analyzer['column_importances'], 94 histograms=stats_info.histograms, 95 dtypes=dtype_dict 96 ) 97 98 return model_analysis, runtime_analyzer 99 [end of lightwood/analysis/analyze.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lightwood/analysis/analyze.py b/lightwood/analysis/analyze.py --- a/lightwood/analysis/analyze.py +++ b/lightwood/analysis/analyze.py @@ -84,13 +84,13 @@ # Populate ModelAnalysis object # ------------------------- # model_analysis = ModelAnalysis( - accuracies=runtime_analyzer['score_dict'], - accuracy_histogram=runtime_analyzer['acc_histogram'], - accuracy_samples=runtime_analyzer['acc_samples'], + accuracies=runtime_analyzer.get('score_dict', {}), + accuracy_histogram=runtime_analyzer.get('acc_histogram', {}), + accuracy_samples=runtime_analyzer.get('acc_samples', {}), train_sample_size=len(encoded_train_data), test_sample_size=len(encoded_val_data), confusion_matrix=runtime_analyzer['cm'], - column_importances=runtime_analyzer['column_importances'], + column_importances=runtime_analyzer.get('column_importances', {}), histograms=stats_info.histograms, dtypes=dtype_dict )
{"golden_diff": "diff --git a/lightwood/analysis/analyze.py b/lightwood/analysis/analyze.py\n--- a/lightwood/analysis/analyze.py\n+++ b/lightwood/analysis/analyze.py\n@@ -84,13 +84,13 @@\n # Populate ModelAnalysis object\n # ------------------------- #\n model_analysis = ModelAnalysis(\n- accuracies=runtime_analyzer['score_dict'],\n- accuracy_histogram=runtime_analyzer['acc_histogram'],\n- accuracy_samples=runtime_analyzer['acc_samples'],\n+ accuracies=runtime_analyzer.get('score_dict', {}),\n+ accuracy_histogram=runtime_analyzer.get('acc_histogram', {}),\n+ accuracy_samples=runtime_analyzer.get('acc_samples', {}),\n train_sample_size=len(encoded_train_data),\n test_sample_size=len(encoded_val_data),\n confusion_matrix=runtime_analyzer['cm'],\n- column_importances=runtime_analyzer['column_importances'],\n+ column_importances=runtime_analyzer.get('column_importances', {}),\n histograms=stats_info.histograms,\n dtypes=dtype_dict\n )\n", "issue": "Make `column_importances` optional\nIf the column importance module gets automatically disabled (see #681) an issue may arise where the information required for a `ModelAnalysis` object is not available. Example stacktrace:\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-10-b3f165881113> in <module>\r\n 16 \r\n 17 # Analyze the ensembles\r\n---> 18 predictor.analyze_ensemble(enc_data)\r\n\r\n/tmp/b380bd15a0ec89f57a82f719f514e67f0cae00fe7d0085d816353697296992059.py in analyze_ensemble(self, enc_data)\r\n 1439 # --------------- #\r\n 1440 log.info(\"Analyzing the ensemble of mixers\")\r\n-> 1441 self.model_analysis, self.runtime_analyzer = model_analyzer(\r\n 1442 data=encoded_test_data,\r\n 1443 train_data=encoded_train_data,\r\n\r\n~/Documents/lightwood/lightwood/analysis/analyze.py in model_analyzer(predictor, data, train_data, stats_info, target, ts_cfg, dtype_dict, accuracy_functions, analysis_blocks)\r\n 91 test_sample_size=len(encoded_val_data),\r\n 92 confusion_matrix=runtime_analyzer['cm'],\r\n---> 93 column_importances=runtime_analyzer['column_importances'],\r\n 94 histograms=stats_info.histograms,\r\n 95 dtypes=dtype_dict\r\n\r\nKeyError: 'column_importances'\r\n```\r\n\n", "before_files": [{"content": "from typing import Dict, List, Tuple, Optional\n\nfrom lightwood.helpers.log import log\nfrom lightwood.api import dtype\nfrom lightwood.ensemble import BaseEnsemble\nfrom lightwood.analysis.base import BaseAnalysisBlock\nfrom lightwood.data.encoded_ds import EncodedDs\nfrom lightwood.encoder.text.pretrained import PretrainedLangEncoder\nfrom lightwood.api.types import ModelAnalysis, StatisticalAnalysis, TimeseriesSettings, PredictionArguments\n\n\ndef model_analyzer(\n predictor: BaseEnsemble,\n data: EncodedDs,\n train_data: EncodedDs,\n stats_info: StatisticalAnalysis,\n target: str,\n ts_cfg: TimeseriesSettings,\n dtype_dict: Dict[str, str],\n accuracy_functions,\n analysis_blocks: Optional[List[BaseAnalysisBlock]] = []\n) -> Tuple[ModelAnalysis, Dict[str, object]]:\n \"\"\"\n Analyses model on a validation subset to evaluate accuracy, estimate feature importance and generate a\n calibration model to estimating confidence in future predictions.\n\n Additionally, any user-specified analysis blocks (see class `BaseAnalysisBlock`) are also called here.\n\n :return:\n runtime_analyzer: This dictionary object gets populated in a sequential fashion with data generated from\n any `.analyze()` block call. 
This dictionary object is stored in the predictor itself, and used when\n calling the `.explain()` method of all analysis blocks when generating predictions.\n\n model_analysis: `ModelAnalysis` object that contains core analysis metrics, not necessarily needed when predicting.\n \"\"\"\n\n runtime_analyzer = {}\n data_type = dtype_dict[target]\n\n # retrieve encoded data representations\n encoded_train_data = train_data\n encoded_val_data = data\n data = encoded_val_data.data_frame\n input_cols = list([col for col in data.columns if col != target])\n\n # predictive task\n is_numerical = data_type in (dtype.integer, dtype.float, dtype.array, dtype.tsarray, dtype.quantity)\n is_classification = data_type in (dtype.categorical, dtype.binary)\n is_multi_ts = ts_cfg.is_timeseries and ts_cfg.nr_predictions > 1\n has_pretrained_text_enc = any([isinstance(enc, PretrainedLangEncoder)\n for enc in encoded_train_data.encoders.values()])\n\n # raw predictions for validation dataset\n args = {} if not is_classification else {\"predict_proba\": True}\n normal_predictions = predictor(encoded_val_data, args=PredictionArguments.from_dict(args))\n normal_predictions = normal_predictions.set_index(data.index)\n\n # ------------------------- #\n # Run analysis blocks, both core and user-defined\n # ------------------------- #\n kwargs = {\n 'predictor': predictor,\n 'target': target,\n 'input_cols': input_cols,\n 'dtype_dict': dtype_dict,\n 'normal_predictions': normal_predictions,\n 'data': data,\n 'train_data': train_data,\n 'encoded_val_data': encoded_val_data,\n 'is_classification': is_classification,\n 'is_numerical': is_numerical,\n 'is_multi_ts': is_multi_ts,\n 'stats_info': stats_info,\n 'ts_cfg': ts_cfg,\n 'accuracy_functions': accuracy_functions,\n 'has_pretrained_text_enc': has_pretrained_text_enc\n }\n\n for block in analysis_blocks:\n log.info(\"The block %s is now running its analyze() method\", block.__class__.__name__)\n runtime_analyzer = block.analyze(runtime_analyzer, **kwargs)\n\n # ------------------------- #\n # Populate ModelAnalysis object\n # ------------------------- #\n model_analysis = ModelAnalysis(\n accuracies=runtime_analyzer['score_dict'],\n accuracy_histogram=runtime_analyzer['acc_histogram'],\n accuracy_samples=runtime_analyzer['acc_samples'],\n train_sample_size=len(encoded_train_data),\n test_sample_size=len(encoded_val_data),\n confusion_matrix=runtime_analyzer['cm'],\n column_importances=runtime_analyzer['column_importances'],\n histograms=stats_info.histograms,\n dtypes=dtype_dict\n )\n\n return model_analysis, runtime_analyzer\n", "path": "lightwood/analysis/analyze.py"}]}
1,964
227
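The entire fix above is swapping hard key lookups for `dict.get` with a default, so a disabled analysis block degrades to empty metadata instead of a `KeyError`. A toy reduction; the field names follow the diff, the values are invented:

```python
runtime_analyzer = {'cm': [[5, 1], [0, 4]]}  # importance block was skipped

model_analysis = {
    'confusion_matrix': runtime_analyzer['cm'],  # still a required key
    'column_importances': runtime_analyzer.get('column_importances', {}),
    'accuracies': runtime_analyzer.get('score_dict', {}),
    'accuracy_histogram': runtime_analyzer.get('acc_histogram', {}),
}
print(model_analysis['column_importances'])  # {} instead of a KeyError
```

Defaulting to `{}` rather than `None` keeps downstream consumers iterating over nothing instead of branching on a sentinel, which matches the diff's choice.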
gh_patches_debug_14927
rasdani/github-patches
git_diff
googleapis__python-bigquery-30
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Refactor logic in synth.py for excluding noxfile As explained in a [comment](https://github.com/googleapis/python-bigquery/pull/1#discussion_r375560206), preventing the synthtool for overriding the customized `noxfile.py` can be achieved in a more straightforward way than currently used. </issue> <code> [start of synth.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """This script is used to synthesize generated parts of this library.""" 16 17 import os 18 19 import synthtool as s 20 from synthtool import gcp 21 22 gapic = gcp.GAPICGenerator() 23 common = gcp.CommonTemplates() 24 version = 'v2' 25 26 library = gapic.py_library( 27 'bigquery', 28 version, 29 config_path='/google/cloud/bigquery/' 30 'artman_bigquery_v2.yaml', 31 artman_output_name='bigquery-v2', 32 include_protos=True, 33 ) 34 35 s.move( 36 [ 37 library / "google/cloud/bigquery_v2/gapic/enums.py", 38 library / "google/cloud/bigquery_v2/types.py", 39 library / "google/cloud/bigquery_v2/proto/location*", 40 library / "google/cloud/bigquery_v2/proto/encryption_config*", 41 library / "google/cloud/bigquery_v2/proto/model*", 42 library / "google/cloud/bigquery_v2/proto/standard_sql*", 43 ], 44 ) 45 46 # Fix up proto docs that are missing summary line. 47 s.replace( 48 "google/cloud/bigquery_v2/proto/model_pb2.py", 49 '"""Attributes:', 50 '"""Protocol buffer.\n\n Attributes:', 51 ) 52 s.replace( 53 "google/cloud/bigquery_v2/proto/encryption_config_pb2.py", 54 '"""Attributes:', 55 '"""Encryption configuration.\n\n Attributes:', 56 ) 57 58 # Remove non-ascii characters from docstrings for Python 2.7. 59 # Format quoted strings as plain text. 60 s.replace("google/cloud/bigquery_v2/proto/*.py", "[“”]", '``') 61 62 # ---------------------------------------------------------------------------- 63 # Add templated files 64 # ---------------------------------------------------------------------------- 65 templated_files = common.py_library(cov_level=100) 66 # we do not want to override the custom noxfile with the generated one 67 os.remove(os.path.join(templated_files, "noxfile.py")) 68 s.move(templated_files) 69 70 s.shell.run(["nox", "-s", "blacken"], hide_output=False) 71 [end of synth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/synth.py b/synth.py --- a/synth.py +++ b/synth.py @@ -14,8 +14,6 @@ """This script is used to synthesize generated parts of this library.""" -import os - import synthtool as s from synthtool import gcp @@ -63,8 +61,6 @@ # Add templated files # ---------------------------------------------------------------------------- templated_files = common.py_library(cov_level=100) -# we do not want to override the custom noxfile with the generated one -os.remove(os.path.join(templated_files, "noxfile.py")) -s.move(templated_files) +s.move(templated_files, excludes=["noxfile.py"]) s.shell.run(["nox", "-s", "blacken"], hide_output=False)
{"golden_diff": "diff --git a/synth.py b/synth.py\n--- a/synth.py\n+++ b/synth.py\n@@ -14,8 +14,6 @@\n \n \"\"\"This script is used to synthesize generated parts of this library.\"\"\"\n \n-import os\n-\n import synthtool as s\n from synthtool import gcp\n \n@@ -63,8 +61,6 @@\n # Add templated files\n # ----------------------------------------------------------------------------\n templated_files = common.py_library(cov_level=100)\n-# we do not want to override the custom noxfile with the generated one\n-os.remove(os.path.join(templated_files, \"noxfile.py\"))\n-s.move(templated_files)\n+s.move(templated_files, excludes=[\"noxfile.py\"])\n \n s.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False)\n", "issue": "Refactor logic in synth.py for excluding noxfile\nAs explained in a [comment](https://github.com/googleapis/python-bigquery/pull/1#discussion_r375560206), preventing the synthtool for overriding the customized `noxfile.py` can be achieved in a more straightforward way than currently used.\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\n\nimport os\n\nimport synthtool as s\nfrom synthtool import gcp\n\ngapic = gcp.GAPICGenerator()\ncommon = gcp.CommonTemplates()\nversion = 'v2'\n\nlibrary = gapic.py_library(\n 'bigquery',\n version,\n config_path='/google/cloud/bigquery/'\n 'artman_bigquery_v2.yaml',\n artman_output_name='bigquery-v2',\n include_protos=True,\n)\n\ns.move(\n [\n library / \"google/cloud/bigquery_v2/gapic/enums.py\",\n library / \"google/cloud/bigquery_v2/types.py\",\n library / \"google/cloud/bigquery_v2/proto/location*\",\n library / \"google/cloud/bigquery_v2/proto/encryption_config*\",\n library / \"google/cloud/bigquery_v2/proto/model*\",\n library / \"google/cloud/bigquery_v2/proto/standard_sql*\",\n ],\n)\n\n# Fix up proto docs that are missing summary line.\ns.replace(\n \"google/cloud/bigquery_v2/proto/model_pb2.py\",\n '\"\"\"Attributes:',\n '\"\"\"Protocol buffer.\\n\\n Attributes:',\n)\ns.replace(\n \"google/cloud/bigquery_v2/proto/encryption_config_pb2.py\",\n '\"\"\"Attributes:',\n '\"\"\"Encryption configuration.\\n\\n Attributes:',\n)\n\n# Remove non-ascii characters from docstrings for Python 2.7.\n# Format quoted strings as plain text.\ns.replace(\"google/cloud/bigquery_v2/proto/*.py\", \"[\u201c\u201d]\", '``')\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(cov_level=100)\n# we do not want to override the custom noxfile with the generated one\nos.remove(os.path.join(templated_files, \"noxfile.py\"))\ns.move(templated_files)\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False)\n", "path": "synth.py"}]}
1,284
176
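Conceptually, `s.move(templated_files, excludes=["noxfile.py"])` folds the delete-after-copy workaround into the copy itself: an excluded template is simply never written over the repository's customized file. The toy below models that shape with `fnmatch`; it is not synthtool's actual implementation:

```python
import fnmatch
from pathlib import Path

def move(source: Path, dest: Path, excludes=()):
    """Copy files under source into dest, skipping excluded patterns."""
    for path in source.rglob('*'):
        rel = path.relative_to(source)
        if any(fnmatch.fnmatch(str(rel), pat) for pat in excludes):
            continue  # e.g. the custom noxfile.py is never touched
        if path.is_file():
            target = dest / rel
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_bytes(path.read_bytes())

# move(Path('templates'), Path('.'), excludes=['noxfile.py'])
```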
gh_patches_debug_17991
rasdani/github-patches
git_diff
searx__searx-1464
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Seems that startpage and ixquick enignes are not working anymore on Searx Hello and thanks for the works. I tried on my searx and on other instance like searx.me... but It's not working. How could we fix that? Thanks. </issue> <code> [start of searx/engines/startpage.py] 1 # Startpage (Web) 2 # 3 # @website https://startpage.com 4 # @provide-api no (nothing found) 5 # 6 # @using-api no 7 # @results HTML 8 # @stable no (HTML can change) 9 # @parse url, title, content 10 # 11 # @todo paging 12 13 from lxml import html 14 from dateutil import parser 15 from datetime import datetime, timedelta 16 import re 17 from searx.engines.xpath import extract_text 18 19 # engine dependent config 20 categories = ['general'] 21 # there is a mechanism to block "bot" search 22 # (probably the parameter qid), require 23 # storing of qid's between mulitble search-calls 24 25 # paging = False 26 language_support = True 27 28 # search-url 29 base_url = 'https://startpage.com/' 30 search_url = base_url + 'do/search' 31 32 # specific xpath variables 33 # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"] 34 # not ads: div[@class="result"] are the direct childs of div[@id="results"] 35 results_xpath = '//div[@class="result"]' 36 link_xpath = './/h3/a' 37 38 39 # do search-request 40 def request(query, params): 41 offset = (params['pageno'] - 1) * 10 42 43 params['url'] = search_url 44 params['method'] = 'POST' 45 params['data'] = {'query': query, 46 'startat': offset} 47 48 # set language 49 params['data']['with_language'] = ('lang_' + params['language'].split('-')[0]) 50 51 return params 52 53 54 # get response from search-request 55 def response(resp): 56 results = [] 57 58 dom = html.fromstring(resp.text) 59 60 # parse results 61 for result in dom.xpath(results_xpath): 62 links = result.xpath(link_xpath) 63 if not links: 64 continue 65 link = links[0] 66 url = link.attrib.get('href') 67 68 # block google-ad url's 69 if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url): 70 continue 71 72 # block startpage search url's 73 if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url): 74 continue 75 76 # block ixquick search url's 77 if re.match(r"^http(s|)://(www\.)?ixquick\.com/do/search\?.*$", url): 78 continue 79 80 title = extract_text(link) 81 82 if result.xpath('./p[@class="desc clk"]'): 83 content = extract_text(result.xpath('./p[@class="desc clk"]')) 84 else: 85 content = '' 86 87 published_date = None 88 89 # check if search result starts with something like: "2 Sep 2014 ... " 90 if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content): 91 date_pos = content.find('...') + 4 92 date_string = content[0:date_pos - 5] 93 published_date = parser.parse(date_string, dayfirst=True) 94 95 # fix content string 96 content = content[date_pos:] 97 98 # check if search result starts with something like: "5 days ago ... " 99 elif re.match(r"^[0-9]+ days? ago \.\.\. 
", content): 100 date_pos = content.find('...') + 4 101 date_string = content[0:date_pos - 5] 102 103 # calculate datetime 104 published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group())) 105 106 # fix content string 107 content = content[date_pos:] 108 109 if published_date: 110 # append result 111 results.append({'url': url, 112 'title': title, 113 'content': content, 114 'publishedDate': published_date}) 115 else: 116 # append result 117 results.append({'url': url, 118 'title': title, 119 'content': content}) 120 121 # return results 122 return results 123 [end of searx/engines/startpage.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py --- a/searx/engines/startpage.py +++ b/searx/engines/startpage.py @@ -32,8 +32,9 @@ # specific xpath variables # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"] # not ads: div[@class="result"] are the direct childs of div[@id="results"] -results_xpath = '//div[@class="result"]' +results_xpath = '//li[contains(@class, "search-result") and contains(@class, "search-item")]' link_xpath = './/h3/a' +content_xpath = './p[@class="search-item__body"]' # do search-request @@ -79,8 +80,8 @@ title = extract_text(link) - if result.xpath('./p[@class="desc clk"]'): - content = extract_text(result.xpath('./p[@class="desc clk"]')) + if result.xpath(content_xpath): + content = extract_text(result.xpath(content_xpath)) else: content = ''
{"golden_diff": "diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py\n--- a/searx/engines/startpage.py\n+++ b/searx/engines/startpage.py\n@@ -32,8 +32,9 @@\n # specific xpath variables\n # ads xpath //div[@id=\"results\"]/div[@id=\"sponsored\"]//div[@class=\"result\"]\n # not ads: div[@class=\"result\"] are the direct childs of div[@id=\"results\"]\n-results_xpath = '//div[@class=\"result\"]'\n+results_xpath = '//li[contains(@class, \"search-result\") and contains(@class, \"search-item\")]'\n link_xpath = './/h3/a'\n+content_xpath = './p[@class=\"search-item__body\"]'\n \n \n # do search-request\n@@ -79,8 +80,8 @@\n \n title = extract_text(link)\n \n- if result.xpath('./p[@class=\"desc clk\"]'):\n- content = extract_text(result.xpath('./p[@class=\"desc clk\"]'))\n+ if result.xpath(content_xpath):\n+ content = extract_text(result.xpath(content_xpath))\n else:\n content = ''\n", "issue": "Seems that startpage and ixquick enignes are not working anymore on Searx\nHello and thanks for the works.\r\n\r\nI tried on my searx and on other instance like searx.me... but It's not working.\r\n\r\nHow could we fix that?\r\n\r\nThanks.\r\n\r\n\n", "before_files": [{"content": "# Startpage (Web)\n#\n# @website https://startpage.com\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML\n# @stable no (HTML can change)\n# @parse url, title, content\n#\n# @todo paging\n\nfrom lxml import html\nfrom dateutil import parser\nfrom datetime import datetime, timedelta\nimport re\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['general']\n# there is a mechanism to block \"bot\" search\n# (probably the parameter qid), require\n# storing of qid's between mulitble search-calls\n\n# paging = False\nlanguage_support = True\n\n# search-url\nbase_url = 'https://startpage.com/'\nsearch_url = base_url + 'do/search'\n\n# specific xpath variables\n# ads xpath //div[@id=\"results\"]/div[@id=\"sponsored\"]//div[@class=\"result\"]\n# not ads: div[@class=\"result\"] are the direct childs of div[@id=\"results\"]\nresults_xpath = '//div[@class=\"result\"]'\nlink_xpath = './/h3/a'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * 10\n\n params['url'] = search_url\n params['method'] = 'POST'\n params['data'] = {'query': query,\n 'startat': offset}\n\n # set language\n params['data']['with_language'] = ('lang_' + params['language'].split('-')[0])\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in dom.xpath(results_xpath):\n links = result.xpath(link_xpath)\n if not links:\n continue\n link = links[0]\n url = link.attrib.get('href')\n\n # block google-ad url's\n if re.match(r\"^http(s|)://(www\\.)?google\\.[a-z]+/aclk.*$\", url):\n continue\n\n # block startpage search url's\n if re.match(r\"^http(s|)://(www\\.)?startpage\\.com/do/search\\?.*$\", url):\n continue\n\n # block ixquick search url's\n if re.match(r\"^http(s|)://(www\\.)?ixquick\\.com/do/search\\?.*$\", url):\n continue\n\n title = extract_text(link)\n\n if result.xpath('./p[@class=\"desc clk\"]'):\n content = extract_text(result.xpath('./p[@class=\"desc clk\"]'))\n else:\n content = ''\n\n published_date = None\n\n # check if search result starts with something like: \"2 Sep 2014 ... \"\n if re.match(r\"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \\.\\.\\. 
\", content):\n date_pos = content.find('...') + 4\n date_string = content[0:date_pos - 5]\n published_date = parser.parse(date_string, dayfirst=True)\n\n # fix content string\n content = content[date_pos:]\n\n # check if search result starts with something like: \"5 days ago ... \"\n elif re.match(r\"^[0-9]+ days? ago \\.\\.\\. \", content):\n date_pos = content.find('...') + 4\n date_string = content[0:date_pos - 5]\n\n # calculate datetime\n published_date = datetime.now() - timedelta(days=int(re.match(r'\\d+', date_string).group()))\n\n # fix content string\n content = content[date_pos:]\n\n if published_date:\n # append result\n results.append({'url': url,\n 'title': title,\n 'content': content,\n 'publishedDate': published_date})\n else:\n # append result\n results.append({'url': url,\n 'title': title,\n 'content': content})\n\n # return results\n return results\n", "path": "searx/engines/startpage.py"}]}
1779
254
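The golden diff in the searx record above retargets the scraper at Startpage's newer markup: `li` elements carrying both `search-result` and `search-item` classes, with the snippet held in `p.search-item__body`. A self-contained lxml check of those selectors; the HTML fragment is reconstructed from the diff, not captured from the live site:

```python
from lxml import html

# Hand-written fragment mirroring the markup the new XPaths expect.
PAGE = """
<ul>
  <li class="search-result search-item">
    <h3><a href="https://example.org">Example result</a></h3>
    <p class="search-item__body">Example snippet text.</p>
  </li>
</ul>
"""

results_xpath = '//li[contains(@class, "search-result") and contains(@class, "search-item")]'
link_xpath = './/h3/a'
content_xpath = './p[@class="search-item__body"]'

dom = html.fromstring(PAGE)
for result in dom.xpath(results_xpath):
    link = result.xpath(link_xpath)[0]
    content = result.xpath(content_xpath)
    print(link.get('href'),
          link.text_content().strip(),
          content[0].text_content().strip() if content else '')
```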
gh_patches_debug_22529
rasdani/github-patches
git_diff
lutris__lutris-1227
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Vulkan check will not work on certain distributions Note that this probably won't work on certain distributions, where this file, for instance, is actually called `/usr/lib/x86_64-linux-gnu/libvulkan.so.1` or similar. Confirmed not working on Linux Mint 19. _Originally posted by @Vexatos in https://github.com/lutris/lutris/pull/1186_ </issue> <code> [start of lutris/util/vulkan.py] 1 """Vulkan helper module""" 2 import os 3 from enum import Enum 4 5 class vulkan_available(Enum): 6 NONE = 0 7 THIRTY_TWO = 1 8 SIXTY_FOUR = 2 9 ALL = 3 10 11 def vulkan_check(): 12 vulkan_lib = os.path.isfile("/usr/lib/libvulkan.so") 13 vulkan_lib32 = os.path.isfile("/usr/lib32/libvulkan.so") 14 vulkan_lib_multi = os.path.isfile("/usr/lib/x86_64-linux-gnu/libvulkan.so") 15 vulkan_lib32_multi = os.path.isfile("/usr/lib32/i386-linux-gnu/libvulkan.so") 16 has_32_bit = vulkan_lib32 or vulkan_lib32_multi 17 has_64_bit = vulkan_lib or vulkan_lib_multi 18 19 if not (has_64_bit or has_32_bit): 20 return vulkan_available.NONE 21 if has_64_bit and not has_32_bit: 22 return vulkan_available.SIXTY_FOUR 23 if not has_64_bit and has_32_bit: 24 return vulkan_available.THIRTY_TWO 25 return vulkan_available.ALL 26 [end of lutris/util/vulkan.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lutris/util/vulkan.py b/lutris/util/vulkan.py --- a/lutris/util/vulkan.py +++ b/lutris/util/vulkan.py @@ -1,5 +1,6 @@ """Vulkan helper module""" import os +import re from enum import Enum class vulkan_available(Enum): @@ -8,11 +9,20 @@ SIXTY_FOUR = 2 ALL = 3 +def search_for_file(directory): + if os.path.isdir(directory): + pattern = re.compile(r'^libvulkan\.so') + files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))] + files = [os.path.join(directory, f) for f in files if pattern.search(f)] + if files: + return True + return False + def vulkan_check(): - vulkan_lib = os.path.isfile("/usr/lib/libvulkan.so") - vulkan_lib32 = os.path.isfile("/usr/lib32/libvulkan.so") - vulkan_lib_multi = os.path.isfile("/usr/lib/x86_64-linux-gnu/libvulkan.so") - vulkan_lib32_multi = os.path.isfile("/usr/lib32/i386-linux-gnu/libvulkan.so") + vulkan_lib = search_for_file("/usr/lib") + vulkan_lib32 = search_for_file("/usr/lib32") + vulkan_lib_multi = search_for_file("/usr/lib/x86_64-linux-gnu") + vulkan_lib32_multi = search_for_file("/usr/lib32/i386-linux-gnu") has_32_bit = vulkan_lib32 or vulkan_lib32_multi has_64_bit = vulkan_lib or vulkan_lib_multi
{"golden_diff": "diff --git a/lutris/util/vulkan.py b/lutris/util/vulkan.py\n--- a/lutris/util/vulkan.py\n+++ b/lutris/util/vulkan.py\n@@ -1,5 +1,6 @@\n \"\"\"Vulkan helper module\"\"\"\n import os\n+import re\n from enum import Enum\n \n class vulkan_available(Enum):\n@@ -8,11 +9,20 @@\n SIXTY_FOUR = 2\n ALL = 3\n \n+def search_for_file(directory):\n+ if os.path.isdir(directory):\n+ pattern = re.compile(r'^libvulkan\\.so')\n+ files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]\n+ files = [os.path.join(directory, f) for f in files if pattern.search(f)]\n+ if files:\n+ return True\n+ return False\n+\n def vulkan_check():\n- vulkan_lib = os.path.isfile(\"/usr/lib/libvulkan.so\")\n- vulkan_lib32 = os.path.isfile(\"/usr/lib32/libvulkan.so\")\n- vulkan_lib_multi = os.path.isfile(\"/usr/lib/x86_64-linux-gnu/libvulkan.so\")\n- vulkan_lib32_multi = os.path.isfile(\"/usr/lib32/i386-linux-gnu/libvulkan.so\")\n+ vulkan_lib = search_for_file(\"/usr/lib\")\n+ vulkan_lib32 = search_for_file(\"/usr/lib32\")\n+ vulkan_lib_multi = search_for_file(\"/usr/lib/x86_64-linux-gnu\")\n+ vulkan_lib32_multi = search_for_file(\"/usr/lib32/i386-linux-gnu\")\n has_32_bit = vulkan_lib32 or vulkan_lib32_multi\n has_64_bit = vulkan_lib or vulkan_lib_multi\n", "issue": "Vulkan check will not work on certain distributions\nNote that this probably won't work on certain distributions, where this file, for instance, is actually called `/usr/lib/x86_64-linux-gnu/libvulkan.so.1` or similar. Confirmed not working on Linux Mint 19.\r\n\r\n_Originally posted by @Vexatos in https://github.com/lutris/lutris/pull/1186_\n", "before_files": [{"content": "\"\"\"Vulkan helper module\"\"\"\nimport os\nfrom enum import Enum\n\nclass vulkan_available(Enum):\n NONE = 0\n THIRTY_TWO = 1\n SIXTY_FOUR = 2\n ALL = 3\n\ndef vulkan_check():\n vulkan_lib = os.path.isfile(\"/usr/lib/libvulkan.so\")\n vulkan_lib32 = os.path.isfile(\"/usr/lib32/libvulkan.so\")\n vulkan_lib_multi = os.path.isfile(\"/usr/lib/x86_64-linux-gnu/libvulkan.so\")\n vulkan_lib32_multi = os.path.isfile(\"/usr/lib32/i386-linux-gnu/libvulkan.so\")\n has_32_bit = vulkan_lib32 or vulkan_lib32_multi\n has_64_bit = vulkan_lib or vulkan_lib_multi\n\n if not (has_64_bit or has_32_bit):\n return vulkan_available.NONE\n if has_64_bit and not has_32_bit:\n return vulkan_available.SIXTY_FOUR\n if not has_64_bit and has_32_bit:\n return vulkan_available.THIRTY_TWO\n return vulkan_available.ALL\n", "path": "lutris/util/vulkan.py"}]}
934
402
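The lutris fix above stops probing fixed paths with `os.path.isfile` and instead scans each library directory for any file whose name begins with `libvulkan.so`, which also catches versioned sonames such as `/usr/lib/x86_64-linux-gnu/libvulkan.so.1` from the Mint 19 report. A condensed, runnable restatement of that helper:

```python
import os
import re

LIBVULKAN = re.compile(r'^libvulkan\.so')  # same pattern as the golden diff

def search_for_file(directory):
    """Return True if `directory` contains any libvulkan.so* file."""
    if not os.path.isdir(directory):
        return False
    return any(
        LIBVULKAN.search(name)
        for name in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, name))
    )

# The pattern is anchored at the start only, so versioned sonames match too:
for name in ("libvulkan.so", "libvulkan.so.1", "libvulkan.so.1.1.70"):
    assert LIBVULKAN.search(name)
```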
gh_patches_debug_16786
rasdani/github-patches
git_diff
blaze__blaze-431
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Individual columns should be able to repr if not passed in CSV This issue title is possibly the worst ever, so here's an example: ``` python import tempfile import pandas as pd from blaze import * ``` This works: ``` python with tempfile.NamedTemporaryFile(delete=False) as f: df = pd.DataFrame(np.random.randn(10, 2)) df.to_csv(f.name, index=False, header=False) csv = CSV(f.name, columns=list('ab')) # passing columns to CSV t = Table(csv) assert t.a.isidentical(t['a']) ``` But this: ``` python with tempfile.NamedTemporaryFile(delete=False) as f: df = pd.DataFrame(np.random.randn(10, 2)) df.to_csv(f.name, index=False, header=False) csv = CSV(f.name) t = Table(csv, columns=list('ab')) # passing columns to Table assert t.a.isidentical(t['a']) ``` yield a `t` where `t.a` throws an error in the interpreter when I try to `repr` it. The difference is that the first propagates the columns to the `Table` (or maybe it doesn't propagate, but it uses the correct names), while in the second the columns are still represented by their "anonymous" names `_0`, `_1`, etc. </issue> <code> [start of blaze/api/table.py] 1 2 from datashape import discover, Tuple, Record, dshape, Fixed 3 import itertools 4 5 from ..expr.core import Expr 6 from ..expr.table import TableSymbol, TableExpr 7 from ..data.python import Python 8 from ..dispatch import dispatch 9 from ..data.core import DataDescriptor, discover 10 from ..data.pandas import into, DataFrame 11 from .into import into 12 13 names = ('_%d' % i for i in itertools.count(1)) 14 15 class Table(TableSymbol): 16 """ Interactive Table 17 18 Parameters 19 ---------- 20 21 data: DataDescriptor, tuple, DataFrame, RDD, SQL Table, ... 22 Anything that ``compute`` knows how to work with 23 24 Optional 25 -------- 26 27 name: string 28 A name for the table 29 columns: iterable of strings 30 Column names, will be inferred from datasource if possible 31 schema: string or DataShape 32 Explitit Record containing datatypes and column names 33 """ 34 __slots__ = 'data', 'schema', '_name', 'iscolumn' 35 36 def __init__(self, data, name=None, columns=None, schema=None, 37 iscolumn=False): 38 if not schema: 39 schema = discover(data).subshape[0] 40 types = None 41 if isinstance(schema[0], Tuple): 42 columns = columns or list(range(len(schema[0].dshapes))) 43 types = schema[0].dshapes 44 if isinstance(schema[0], Record): 45 columns = columns or schema[0].names 46 types = schema[0].types 47 if isinstance(schema[0], Fixed): 48 types = (schema[1],) * int(schema[0]) 49 if not columns: 50 raise TypeError("Could not infer column names from data. " 51 "Please specify column names with `column=` " 52 "keyword") 53 if not types: 54 raise TypeError("Could not infer data types from data. 
" 55 "Please specify schema with `schema=` keyword") 56 57 schema = dshape(Record(list(zip(columns, types)))) 58 self.schema = dshape(schema) 59 60 self.data = data 61 self._name = name or next(names) 62 self.iscolumn = iscolumn 63 64 def resources(self): 65 return {self: self.data} 66 67 @property 68 def args(self): 69 return (id(self.data), self.schema, self._name, self.iscolumn) 70 71 72 @dispatch(Table, dict) 73 def _subs(o, d): 74 return o 75 76 77 @dispatch(Expr) 78 def compute(expr): 79 resources = expr.resources() 80 if not resources: 81 raise ValueError("No data resources found") 82 else: 83 return compute(expr, resources) 84 85 86 def table_repr(expr, n=10): 87 if not expr.resources(): 88 return str(expr) 89 if isinstance(expr, TableExpr): 90 head = expr.head(n + 1) 91 result = compute(head) 92 93 if expr.columns: 94 df = into(DataFrame(columns=expr.columns), result) 95 else: 96 df = into(DataFrame, result) 97 s = repr(df) 98 if len(df) > 10: 99 df = df[:10] 100 s = '\n'.join(s.split('\n')[:-1]) + '\n...' 101 return s 102 103 else: 104 return repr(compute(expr)) 105 106 107 @dispatch((type, object), TableExpr) 108 def into(a, b): 109 return into(a, compute(b)) 110 111 112 @dispatch(DataFrame, TableExpr) 113 def into(a, b): 114 columns = b.columns 115 return into(DataFrame(columns=columns), compute(b)) 116 117 118 Expr.__repr__ = table_repr 119 [end of blaze/api/table.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/blaze/api/table.py b/blaze/api/table.py --- a/blaze/api/table.py +++ b/blaze/api/table.py @@ -4,9 +4,7 @@ from ..expr.core import Expr from ..expr.table import TableSymbol, TableExpr -from ..data.python import Python from ..dispatch import dispatch -from ..data.core import DataDescriptor, discover from ..data.pandas import into, DataFrame from .into import into @@ -58,6 +56,12 @@ self.schema = dshape(schema) self.data = data + + if hasattr(data, 'schema') and self.schema != data.schema: + raise TypeError('%s schema %s does not match %s schema %s' % + (type(data).__name__, data.schema, + type(self).__name__, self.schema)) + self._name = name or next(names) self.iscolumn = iscolumn
{"golden_diff": "diff --git a/blaze/api/table.py b/blaze/api/table.py\n--- a/blaze/api/table.py\n+++ b/blaze/api/table.py\n@@ -4,9 +4,7 @@\n \n from ..expr.core import Expr\n from ..expr.table import TableSymbol, TableExpr\n-from ..data.python import Python\n from ..dispatch import dispatch\n-from ..data.core import DataDescriptor, discover\n from ..data.pandas import into, DataFrame\n from .into import into\n \n@@ -58,6 +56,12 @@\n self.schema = dshape(schema)\n \n self.data = data\n+\n+ if hasattr(data, 'schema') and self.schema != data.schema:\n+ raise TypeError('%s schema %s does not match %s schema %s' %\n+ (type(data).__name__, data.schema,\n+ type(self).__name__, self.schema))\n+\n self._name = name or next(names)\n self.iscolumn = iscolumn\n", "issue": "Individual columns should be able to repr if not passed in CSV\nThis issue title is possibly the worst ever, so here's an example:\n\n``` python\nimport tempfile\nimport pandas as pd\nfrom blaze import *\n```\n\nThis works:\n\n``` python\nwith tempfile.NamedTemporaryFile(delete=False) as f:\n df = pd.DataFrame(np.random.randn(10, 2))\n df.to_csv(f.name, index=False, header=False)\n csv = CSV(f.name, columns=list('ab')) # passing columns to CSV\n t = Table(csv)\n assert t.a.isidentical(t['a'])\n```\n\nBut this:\n\n``` python\nwith tempfile.NamedTemporaryFile(delete=False) as f:\n df = pd.DataFrame(np.random.randn(10, 2))\n df.to_csv(f.name, index=False, header=False)\n csv = CSV(f.name)\n t = Table(csv, columns=list('ab')) # passing columns to Table\n assert t.a.isidentical(t['a'])\n```\n\nyield a `t` where `t.a` throws an error in the interpreter when I try to `repr` it.\n\nThe difference is that the first propagates the columns to the `Table` (or maybe it doesn't propagate, but it uses the correct names), while in the second the columns are still represented by their \"anonymous\" names `_0`, `_1`, etc.\n\n", "before_files": [{"content": "\nfrom datashape import discover, Tuple, Record, dshape, Fixed\nimport itertools\n\nfrom ..expr.core import Expr\nfrom ..expr.table import TableSymbol, TableExpr\nfrom ..data.python import Python\nfrom ..dispatch import dispatch\nfrom ..data.core import DataDescriptor, discover\nfrom ..data.pandas import into, DataFrame\nfrom .into import into\n\nnames = ('_%d' % i for i in itertools.count(1))\n\nclass Table(TableSymbol):\n \"\"\" Interactive Table\n\n Parameters\n ----------\n\n data: DataDescriptor, tuple, DataFrame, RDD, SQL Table, ...\n Anything that ``compute`` knows how to work with\n\n Optional\n --------\n\n name: string\n A name for the table\n columns: iterable of strings\n Column names, will be inferred from datasource if possible\n schema: string or DataShape\n Explitit Record containing datatypes and column names\n \"\"\"\n __slots__ = 'data', 'schema', '_name', 'iscolumn'\n\n def __init__(self, data, name=None, columns=None, schema=None,\n iscolumn=False):\n if not schema:\n schema = discover(data).subshape[0]\n types = None\n if isinstance(schema[0], Tuple):\n columns = columns or list(range(len(schema[0].dshapes)))\n types = schema[0].dshapes\n if isinstance(schema[0], Record):\n columns = columns or schema[0].names\n types = schema[0].types\n if isinstance(schema[0], Fixed):\n types = (schema[1],) * int(schema[0])\n if not columns:\n raise TypeError(\"Could not infer column names from data. \"\n \"Please specify column names with `column=` \"\n \"keyword\")\n if not types:\n raise TypeError(\"Could not infer data types from data. 
\"\n \"Please specify schema with `schema=` keyword\")\n\n schema = dshape(Record(list(zip(columns, types))))\n self.schema = dshape(schema)\n\n self.data = data\n self._name = name or next(names)\n self.iscolumn = iscolumn\n\n def resources(self):\n return {self: self.data}\n\n @property\n def args(self):\n return (id(self.data), self.schema, self._name, self.iscolumn)\n\n\n@dispatch(Table, dict)\ndef _subs(o, d):\n return o\n\n\n@dispatch(Expr)\ndef compute(expr):\n resources = expr.resources()\n if not resources:\n raise ValueError(\"No data resources found\")\n else:\n return compute(expr, resources)\n\n\ndef table_repr(expr, n=10):\n if not expr.resources():\n return str(expr)\n if isinstance(expr, TableExpr):\n head = expr.head(n + 1)\n result = compute(head)\n\n if expr.columns:\n df = into(DataFrame(columns=expr.columns), result)\n else:\n df = into(DataFrame, result)\n s = repr(df)\n if len(df) > 10:\n df = df[:10]\n s = '\\n'.join(s.split('\\n')[:-1]) + '\\n...'\n return s\n\n else:\n return repr(compute(expr))\n\n\n@dispatch((type, object), TableExpr)\ndef into(a, b):\n return into(a, compute(b))\n\n\n@dispatch(DataFrame, TableExpr)\ndef into(a, b):\n columns = b.columns\n return into(DataFrame(columns=columns), compute(b))\n\n\nExpr.__repr__ = table_repr\n", "path": "blaze/api/table.py"}]}
1840
205
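The blaze diff above makes `Table.__init__` raise as soon as user-supplied columns disagree with a schema the data source already carries, instead of deferring to a confusing repr-time failure. A small demonstration of that guard, assuming datashape is installed; `FakeCSV` is a hypothetical stand-in for a CSV descriptor whose schema was inferred with anonymous `_0`, `_1` names:

```python
from datashape import dshape

class FakeCSV(object):
    # Mimics a data descriptor whose schema was inferred without headers.
    schema = dshape('{_0: float64, _1: float64}')

def check_schema(data, table_schema):
    # The guard the golden diff adds to Table.__init__.
    if hasattr(data, 'schema') and table_schema != data.schema:
        raise TypeError('%s schema %s does not match Table schema %s' %
                        (type(data).__name__, data.schema, table_schema))

try:
    check_schema(FakeCSV(), dshape('{a: float64, b: float64}'))
except TypeError as exc:
    print(exc)  # the mismatch now surfaces at construction time, not at repr
```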
gh_patches_debug_28013
rasdani/github-patches
git_diff
CTFd__CTFd-2344
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Challenge Preview Improvements Challenge Preview should probably preview in the context of a full page. Primarily because it's easier to theme this. </issue> <code> [start of CTFd/admin/challenges.py] 1 from flask import abort, render_template, request, url_for 2 3 from CTFd.admin import admin 4 from CTFd.models import Challenges, Flags, Solves 5 from CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class 6 from CTFd.utils.decorators import admins_only 7 8 9 @admin.route("/admin/challenges") 10 @admins_only 11 def challenges_listing(): 12 q = request.args.get("q") 13 field = request.args.get("field") 14 filters = [] 15 16 if q: 17 # The field exists as an exposed column 18 if Challenges.__mapper__.has_property(field): 19 filters.append(getattr(Challenges, field).like("%{}%".format(q))) 20 21 query = Challenges.query.filter(*filters).order_by(Challenges.id.asc()) 22 challenges = query.all() 23 total = query.count() 24 25 return render_template( 26 "admin/challenges/challenges.html", 27 challenges=challenges, 28 total=total, 29 q=q, 30 field=field, 31 ) 32 33 34 @admin.route("/admin/challenges/<int:challenge_id>") 35 @admins_only 36 def challenges_detail(challenge_id): 37 challenges = dict( 38 Challenges.query.with_entities(Challenges.id, Challenges.name).all() 39 ) 40 challenge = Challenges.query.filter_by(id=challenge_id).first_or_404() 41 solves = ( 42 Solves.query.filter_by(challenge_id=challenge.id) 43 .order_by(Solves.date.asc()) 44 .all() 45 ) 46 flags = Flags.query.filter_by(challenge_id=challenge.id).all() 47 48 try: 49 challenge_class = get_chal_class(challenge.type) 50 except KeyError: 51 abort( 52 500, 53 f"The underlying challenge type ({challenge.type}) is not installed. This challenge can not be loaded.", 54 ) 55 56 update_j2 = render_template( 57 challenge_class.templates["update"].lstrip("/"), challenge=challenge 58 ) 59 60 update_script = url_for( 61 "views.static_html", route=challenge_class.scripts["update"].lstrip("/") 62 ) 63 return render_template( 64 "admin/challenges/challenge.html", 65 update_template=update_j2, 66 update_script=update_script, 67 challenge=challenge, 68 challenges=challenges, 69 solves=solves, 70 flags=flags, 71 ) 72 73 74 @admin.route("/admin/challenges/new") 75 @admins_only 76 def challenges_new(): 77 types = CHALLENGE_CLASSES.keys() 78 return render_template("admin/challenges/new.html", types=types) 79 [end of CTFd/admin/challenges.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/admin/challenges.py b/CTFd/admin/challenges.py --- a/CTFd/admin/challenges.py +++ b/CTFd/admin/challenges.py @@ -3,7 +3,10 @@ from CTFd.admin import admin from CTFd.models import Challenges, Flags, Solves from CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class +from CTFd.schemas.tags import TagSchema from CTFd.utils.decorators import admins_only +from CTFd.utils.security.signing import serialize +from CTFd.utils.user import get_current_team, get_current_user @admin.route("/admin/challenges") @@ -71,6 +74,43 @@ ) [email protected]("/admin/challenges/preview/<int:challenge_id>") +@admins_only +def challenges_preview(challenge_id): + challenge = Challenges.query.filter_by(id=challenge_id).first_or_404() + chal_class = get_chal_class(challenge.type) + user = get_current_user() + team = get_current_team() + + files = [] + for f in challenge.files: + token = { + "user_id": user.id, + "team_id": team.id if team else None, + "file_id": f.id, + } + files.append(url_for("views.files", path=f.location, token=serialize(token))) + + tags = [ + tag["value"] for tag in TagSchema("user", many=True).dump(challenge.tags).data + ] + + content = render_template( + chal_class.templates["view"].lstrip("/"), + solves=None, + solved_by_me=False, + files=files, + tags=tags, + hints=challenge.hints, + max_attempts=challenge.max_attempts, + attempts=0, + challenge=challenge, + ) + return render_template( + "admin/challenges/preview.html", content=content, challenge=challenge + ) + + @admin.route("/admin/challenges/new") @admins_only def challenges_new():
{"golden_diff": "diff --git a/CTFd/admin/challenges.py b/CTFd/admin/challenges.py\n--- a/CTFd/admin/challenges.py\n+++ b/CTFd/admin/challenges.py\n@@ -3,7 +3,10 @@\n from CTFd.admin import admin\n from CTFd.models import Challenges, Flags, Solves\n from CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class\n+from CTFd.schemas.tags import TagSchema\n from CTFd.utils.decorators import admins_only\n+from CTFd.utils.security.signing import serialize\n+from CTFd.utils.user import get_current_team, get_current_user\n \n \n @admin.route(\"/admin/challenges\")\n@@ -71,6 +74,43 @@\n )\n \n \[email protected](\"/admin/challenges/preview/<int:challenge_id>\")\n+@admins_only\n+def challenges_preview(challenge_id):\n+ challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n+ chal_class = get_chal_class(challenge.type)\n+ user = get_current_user()\n+ team = get_current_team()\n+\n+ files = []\n+ for f in challenge.files:\n+ token = {\n+ \"user_id\": user.id,\n+ \"team_id\": team.id if team else None,\n+ \"file_id\": f.id,\n+ }\n+ files.append(url_for(\"views.files\", path=f.location, token=serialize(token)))\n+\n+ tags = [\n+ tag[\"value\"] for tag in TagSchema(\"user\", many=True).dump(challenge.tags).data\n+ ]\n+\n+ content = render_template(\n+ chal_class.templates[\"view\"].lstrip(\"/\"),\n+ solves=None,\n+ solved_by_me=False,\n+ files=files,\n+ tags=tags,\n+ hints=challenge.hints,\n+ max_attempts=challenge.max_attempts,\n+ attempts=0,\n+ challenge=challenge,\n+ )\n+ return render_template(\n+ \"admin/challenges/preview.html\", content=content, challenge=challenge\n+ )\n+\n+\n @admin.route(\"/admin/challenges/new\")\n @admins_only\n def challenges_new():\n", "issue": "Challenge Preview Improvements\nChallenge Preview should probably preview in the context of a full page. Primarily because it's easier to theme this. \n", "before_files": [{"content": "from flask import abort, render_template, request, url_for\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Flags, Solves\nfrom CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class\nfrom CTFd.utils.decorators import admins_only\n\n\[email protected](\"/admin/challenges\")\n@admins_only\ndef challenges_listing():\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n filters = []\n\n if q:\n # The field exists as an exposed column\n if Challenges.__mapper__.has_property(field):\n filters.append(getattr(Challenges, field).like(\"%{}%\".format(q)))\n\n query = Challenges.query.filter(*filters).order_by(Challenges.id.asc())\n challenges = query.all()\n total = query.count()\n\n return render_template(\n \"admin/challenges/challenges.html\",\n challenges=challenges,\n total=total,\n q=q,\n field=field,\n )\n\n\[email protected](\"/admin/challenges/<int:challenge_id>\")\n@admins_only\ndef challenges_detail(challenge_id):\n challenges = dict(\n Challenges.query.with_entities(Challenges.id, Challenges.name).all()\n )\n challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()\n solves = (\n Solves.query.filter_by(challenge_id=challenge.id)\n .order_by(Solves.date.asc())\n .all()\n )\n flags = Flags.query.filter_by(challenge_id=challenge.id).all()\n\n try:\n challenge_class = get_chal_class(challenge.type)\n except KeyError:\n abort(\n 500,\n f\"The underlying challenge type ({challenge.type}) is not installed. 
This challenge can not be loaded.\",\n )\n\n update_j2 = render_template(\n challenge_class.templates[\"update\"].lstrip(\"/\"), challenge=challenge\n )\n\n update_script = url_for(\n \"views.static_html\", route=challenge_class.scripts[\"update\"].lstrip(\"/\")\n )\n return render_template(\n \"admin/challenges/challenge.html\",\n update_template=update_j2,\n update_script=update_script,\n challenge=challenge,\n challenges=challenges,\n solves=solves,\n flags=flags,\n )\n\n\[email protected](\"/admin/challenges/new\")\n@admins_only\ndef challenges_new():\n types = CHALLENGE_CLASSES.keys()\n return render_template(\"admin/challenges/new.html\", types=types)\n", "path": "CTFd/admin/challenges.py"}]}
1248
473
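The CTFd diff above adds an `/admin/challenges/preview/<id>` route that renders the challenge's own view template first and then embeds the result in a full-page wrapper (`admin/challenges/preview.html`), which is what lets themes style previews like real challenge pages. A minimal Flask sketch of that two-stage render; the inline template strings are placeholders, not CTFd's theme files:

```python
from flask import Flask, render_template_string

app = Flask(__name__)

CHALLENGE_BODY = "<h2>{{ name }}</h2><p>{{ description }}</p>"
FULL_PAGE = "<html><body><main>{{ content | safe }}</main></body></html>"

@app.route("/admin/challenges/preview/<int:challenge_id>")
def challenges_preview(challenge_id):
    # Stage 1: render the challenge body on its own, as the plugin would.
    content = render_template_string(
        CHALLENGE_BODY,
        name="Challenge %d" % challenge_id,
        description="Example body",
    )
    # Stage 2: hand the rendered body to the full-page preview template.
    return render_template_string(FULL_PAGE, content=content)
```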
gh_patches_debug_19819
rasdani/github-patches
git_diff
chainer__chainer-3327
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Document about initializers Criteria for initializer arguments of links is complicated (https://github.com/chainer/chainer/pull/3259#issuecomment-325562538). We should write some dedicated documentation about that, and let each link documentation point to it. Maybe we can write on [this page](https://docs.chainer.org/en/v2.0.2/reference/initializers.html). Also we should describe *the default initializer* (which is `LeCunNormal`). </issue> <code> [start of chainer/links/connection/linear.py] 1 from chainer.functions.connection import linear 2 from chainer import initializers 3 from chainer import link 4 from chainer import variable 5 6 7 class Linear(link.Link): 8 9 """Linear layer (a.k.a.\\ fully-connected layer). 10 11 This is a link that wraps the :func:`~chainer.functions.linear` function, 12 and holds a weight matrix ``W`` and optionally a bias vector ``b`` as 13 parameters. 14 15 The weight matrix ``W`` is initialized with i.i.d. Gaussian samples, each 16 of which has zero mean and deviation :math:`\\sqrt{1/\\text{in_size}}`. The 17 bias vector ``b`` is of size ``out_size``. Each element is initialized with 18 the ``bias`` value. If ``nobias`` argument is set to ``True``, then this 19 link does not hold a bias vector. 20 21 Args: 22 in_size (int or None): Dimension of input vectors. If ``None``, 23 parameter initialization will be deferred until the first forward 24 data pass at which time the size will be determined. 25 out_size (int): Dimension of output vectors. 26 nobias (bool): If ``True``, then this function does not use the bias. 27 initialW (2-D array): Initial weight value. If ``None``, then the 28 default initializer is used. 29 May also be a callable that takes ``numpy.ndarray`` or 30 ``cupy.ndarray`` and edits its value. 31 initial_bias (1-D array): Initial bias value. If ``None``, the bias 32 vector is initialized to zero. 33 May also be a callable that takes ``numpy.ndarray`` or 34 ``cupy.ndarray`` and edits its value. 35 .. seealso:: :func:`~chainer.functions.linear` 36 37 Attributes: 38 W (~chainer.Variable): Weight parameter. 39 b (~chainer.Variable): Bias parameter. 40 41 .. admonition:: Example 42 43 There are several ways to make a Linear link. 44 45 Define an input vector ``x`` as: 46 47 >>> x = np.array([[0, 1, 2, 3, 4]], 'f') 48 49 1. Give the first two arguments explicitly: 50 51 Those numbers are considered as the input size and the output size. 52 53 >>> l = L.Linear(5, 10) 54 >>> y = l(x) 55 >>> y.shape 56 (1, 10) 57 58 2. Omit ``in_size`` (give the output size only as the first argument) 59 or fill it with ``None``: 60 61 In this case, the size of second axis of ``x`` is used as the 62 input size. So the below two cases are the same. 63 64 >>> l = L.Linear(10) 65 >>> y = l(x) 66 >>> y.shape 67 (1, 10) 68 69 >>> l = L.Linear(None, 10) 70 >>> y = l(x) 71 >>> y.shape 72 (1, 10) 73 74 When you omit the first argument, you need to specify the other 75 subsequent arguments from ``nobias`` as keyword arguments. So the 76 below two cases are the same. 
77 78 >>> l = L.Linear(None, 10, False, None, 0) 79 >>> y = l(x) 80 >>> y.shape 81 (1, 10) 82 83 >>> l = L.Linear(10, nobias=False, initialW=None, initial_bias=0) 84 >>> y = l(x) 85 >>> y.shape 86 (1, 10) 87 88 """ 89 90 def __init__(self, in_size, out_size=None, nobias=False, 91 initialW=None, initial_bias=None): 92 super(Linear, self).__init__() 93 94 if out_size is None: 95 in_size, out_size = None, in_size 96 self.out_size = out_size 97 98 with self.init_scope(): 99 W_initializer = initializers._get_initializer(initialW) 100 self.W = variable.Parameter(W_initializer) 101 if in_size is not None: 102 self._initialize_params(in_size) 103 104 if nobias: 105 self.b = None 106 else: 107 if initial_bias is None: 108 initial_bias = 0 109 bias_initializer = initializers._get_initializer(initial_bias) 110 self.b = variable.Parameter(bias_initializer, out_size) 111 112 def _initialize_params(self, in_size): 113 self.W.initialize((self.out_size, in_size)) 114 115 def __call__(self, x): 116 """Applies the linear layer. 117 118 Args: 119 x (~chainer.Variable): Batch of input vectors. 120 121 Returns: 122 ~chainer.Variable: Output of the linear layer. 123 124 """ 125 if self.W.data is None: 126 self._initialize_params(x.size // x.shape[0]) 127 return linear.linear(x, self.W, self.b) 128 [end of chainer/links/connection/linear.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/links/connection/linear.py b/chainer/links/connection/linear.py --- a/chainer/links/connection/linear.py +++ b/chainer/links/connection/linear.py @@ -24,14 +24,11 @@ data pass at which time the size will be determined. out_size (int): Dimension of output vectors. nobias (bool): If ``True``, then this function does not use the bias. - initialW (2-D array): Initial weight value. If ``None``, then the - default initializer is used. - May also be a callable that takes ``numpy.ndarray`` or - ``cupy.ndarray`` and edits its value. - initial_bias (1-D array): Initial bias value. If ``None``, the bias - vector is initialized to zero. - May also be a callable that takes ``numpy.ndarray`` or - ``cupy.ndarray`` and edits its value. + initialW (:ref:`initializer <initializer>`): Initializer to initialize + the weight. + initial_bias (:ref:`initializer <initializer>`): Initializer to + initialize the bias. If ``None``, the bias will be initialized to + zero. .. seealso:: :func:`~chainer.functions.linear` Attributes:
{"golden_diff": "diff --git a/chainer/links/connection/linear.py b/chainer/links/connection/linear.py\n--- a/chainer/links/connection/linear.py\n+++ b/chainer/links/connection/linear.py\n@@ -24,14 +24,11 @@\n data pass at which time the size will be determined.\n out_size (int): Dimension of output vectors.\n nobias (bool): If ``True``, then this function does not use the bias.\n- initialW (2-D array): Initial weight value. If ``None``, then the\n- default initializer is used.\n- May also be a callable that takes ``numpy.ndarray`` or\n- ``cupy.ndarray`` and edits its value.\n- initial_bias (1-D array): Initial bias value. If ``None``, the bias\n- vector is initialized to zero.\n- May also be a callable that takes ``numpy.ndarray`` or\n- ``cupy.ndarray`` and edits its value.\n+ initialW (:ref:`initializer <initializer>`): Initializer to initialize\n+ the weight.\n+ initial_bias (:ref:`initializer <initializer>`): Initializer to\n+ initialize the bias. If ``None``, the bias will be initialized to\n+ zero.\n .. seealso:: :func:`~chainer.functions.linear`\n \n Attributes:\n", "issue": "Document about initializers\nCriteria for initializer arguments of links is complicated (https://github.com/chainer/chainer/pull/3259#issuecomment-325562538). We should write some dedicated documentation about that, and let each link documentation point to it. Maybe we can write on [this page](https://docs.chainer.org/en/v2.0.2/reference/initializers.html).\r\n\r\nAlso we should describe *the default initializer* (which is `LeCunNormal`).\n", "before_files": [{"content": "from chainer.functions.connection import linear\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer import variable\n\n\nclass Linear(link.Link):\n\n \"\"\"Linear layer (a.k.a.\\\\ fully-connected layer).\n\n This is a link that wraps the :func:`~chainer.functions.linear` function,\n and holds a weight matrix ``W`` and optionally a bias vector ``b`` as\n parameters.\n\n The weight matrix ``W`` is initialized with i.i.d. Gaussian samples, each\n of which has zero mean and deviation :math:`\\\\sqrt{1/\\\\text{in_size}}`. The\n bias vector ``b`` is of size ``out_size``. Each element is initialized with\n the ``bias`` value. If ``nobias`` argument is set to ``True``, then this\n link does not hold a bias vector.\n\n Args:\n in_size (int or None): Dimension of input vectors. If ``None``,\n parameter initialization will be deferred until the first forward\n data pass at which time the size will be determined.\n out_size (int): Dimension of output vectors.\n nobias (bool): If ``True``, then this function does not use the bias.\n initialW (2-D array): Initial weight value. If ``None``, then the\n default initializer is used.\n May also be a callable that takes ``numpy.ndarray`` or\n ``cupy.ndarray`` and edits its value.\n initial_bias (1-D array): Initial bias value. If ``None``, the bias\n vector is initialized to zero.\n May also be a callable that takes ``numpy.ndarray`` or\n ``cupy.ndarray`` and edits its value.\n .. seealso:: :func:`~chainer.functions.linear`\n\n Attributes:\n W (~chainer.Variable): Weight parameter.\n b (~chainer.Variable): Bias parameter.\n\n .. admonition:: Example\n\n There are several ways to make a Linear link.\n\n Define an input vector ``x`` as:\n\n >>> x = np.array([[0, 1, 2, 3, 4]], 'f')\n\n 1. Give the first two arguments explicitly:\n\n Those numbers are considered as the input size and the output size.\n\n >>> l = L.Linear(5, 10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n 2. 
Omit ``in_size`` (give the output size only as the first argument)\n or fill it with ``None``:\n\n In this case, the size of second axis of ``x`` is used as the\n input size. So the below two cases are the same.\n\n >>> l = L.Linear(10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n >>> l = L.Linear(None, 10)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n When you omit the first argument, you need to specify the other\n subsequent arguments from ``nobias`` as keyword arguments. So the\n below two cases are the same.\n\n >>> l = L.Linear(None, 10, False, None, 0)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n >>> l = L.Linear(10, nobias=False, initialW=None, initial_bias=0)\n >>> y = l(x)\n >>> y.shape\n (1, 10)\n\n \"\"\"\n\n def __init__(self, in_size, out_size=None, nobias=False,\n initialW=None, initial_bias=None):\n super(Linear, self).__init__()\n\n if out_size is None:\n in_size, out_size = None, in_size\n self.out_size = out_size\n\n with self.init_scope():\n W_initializer = initializers._get_initializer(initialW)\n self.W = variable.Parameter(W_initializer)\n if in_size is not None:\n self._initialize_params(in_size)\n\n if nobias:\n self.b = None\n else:\n if initial_bias is None:\n initial_bias = 0\n bias_initializer = initializers._get_initializer(initial_bias)\n self.b = variable.Parameter(bias_initializer, out_size)\n\n def _initialize_params(self, in_size):\n self.W.initialize((self.out_size, in_size))\n\n def __call__(self, x):\n \"\"\"Applies the linear layer.\n\n Args:\n x (~chainer.Variable): Batch of input vectors.\n\n Returns:\n ~chainer.Variable: Output of the linear layer.\n\n \"\"\"\n if self.W.data is None:\n self._initialize_params(x.size // x.shape[0])\n return linear.linear(x, self.W, self.b)\n", "path": "chainer/links/connection/linear.py"}]}
1970
286
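The chainer diff above is documentation-only: it points `initialW` and `initial_bias` at a shared initializer reference instead of restating the accepted forms in every link's docstring. Assuming chainer is installed, those forms look like this for `L.Linear`; passing `None` falls back to the default LeCun-normal-style initializer named in the issue:

```python
import numpy as np
import chainer.links as L
from chainer import initializers

l_default = L.Linear(5, 10)                                   # None -> default init
l_scalar = L.Linear(5, 10, initialW=0.0, initial_bias=0.0)    # constant fill
l_array = L.Linear(5, 10, initialW=np.ones((10, 5), 'f'))     # explicit ndarray
l_object = L.Linear(5, 10, initialW=initializers.HeNormal())  # initializer object
```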
gh_patches_debug_8039
rasdani/github-patches
git_diff
docker__docker-py-1399
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> docker-py installation breaks docker-compose im not quite sure if this is correct, but trying to install `docker-py` through pip after i've installed `docker-compose` breaks `docker-compose` with ``` Traceback (most recent call last): File "/usr/local/bin/docker-compose", line 7, in <module> from compose.cli.main import main File "/usr/local/lib/python2.7/site-packages/compose/cli/main.py", line 20, in <module> from ..bundle import get_image_digests File "/usr/local/lib/python2.7/site-packages/compose/bundle.py", line 13, in <module> from .network import get_network_defs_for_service File "/usr/local/lib/python2.7/site-packages/compose/network.py", line 7, in <module> from docker.types import IPAMConfig ImportError: cannot import name IPAMConfig ``` To fix that error, i just need to do the installations in this order: ``` pip install docker-py pip install docker-compose ``` gist: https://gist.github.com/serialdoom/3a443c420aa29f9422f8c5fc73f46602 python/pip versions tried: ``` docker run -it python:2.7.13 bash -c 'pip --version' pip 9.0.1 from /usr/local/lib/python2.7/site-packages (python 2.7) docker run -it python:2.7.12 bash -c 'pip --version' pip 8.1.2 from /usr/local/lib/python2.7/site-packages (python 2.7) ``` </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import codecs 3 import os 4 import sys 5 6 from setuptools import setup, find_packages 7 8 9 ROOT_DIR = os.path.dirname(__file__) 10 SOURCE_DIR = os.path.join(ROOT_DIR) 11 12 requirements = [ 13 'requests >= 2.5.2, != 2.11.0, != 2.12.2', 14 'six >= 1.4.0', 15 'websocket-client >= 0.32.0', 16 'docker-pycreds >= 0.2.1' 17 ] 18 19 if sys.platform == 'win32': 20 requirements.append('pypiwin32 >= 219') 21 22 extras_require = { 23 ':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5', 24 # While not imported explicitly, the ipaddress module is required for 25 # ssl_match_hostname to verify hosts match with certificates via 26 # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname 27 ':python_version < "3.3"': 'ipaddress >= 1.0.16', 28 } 29 30 version = None 31 exec(open('docker/version.py').read()) 32 33 with open('./test-requirements.txt') as test_reqs_txt: 34 test_requirements = [line for line in test_reqs_txt] 35 36 37 long_description = '' 38 try: 39 with codecs.open('./README.rst', encoding='utf-8') as readme_rst: 40 long_description = readme_rst.read() 41 except IOError: 42 # README.rst is only generated on release. Its absence should not prevent 43 # setup.py from working properly. 
44 pass 45 46 setup( 47 name="docker", 48 version=version, 49 description="A Python library for the Docker Engine API.", 50 long_description=long_description, 51 url='https://github.com/docker/docker-py', 52 packages=find_packages(exclude=["tests.*", "tests"]), 53 install_requires=requirements, 54 tests_require=test_requirements, 55 extras_require=extras_require, 56 zip_safe=False, 57 test_suite='tests', 58 classifiers=[ 59 'Development Status :: 5 - Production/Stable', 60 'Environment :: Other Environment', 61 'Intended Audience :: Developers', 62 'Operating System :: OS Independent', 63 'Programming Language :: Python', 64 'Programming Language :: Python :: 2', 65 'Programming Language :: Python :: 2.7', 66 'Programming Language :: Python :: 3', 67 'Programming Language :: Python :: 3.3', 68 'Programming Language :: Python :: 3.4', 69 'Programming Language :: Python :: 3.5', 70 'Topic :: Utilities', 71 'License :: OSI Approved :: Apache Software License', 72 ], 73 maintainer='Joffrey F', 74 maintainer_email='[email protected]', 75 ) 76 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,10 +1,20 @@ #!/usr/bin/env python +from __future__ import print_function + import codecs import os import sys +import pip + from setuptools import setup, find_packages +if 'docker-py' in [x.project_name for x in pip.get_installed_distributions()]: + print( + 'ERROR: "docker-py" needs to be uninstalled before installing this' + ' package:\npip uninstall docker-py', file=sys.stderr + ) + sys.exit(1) ROOT_DIR = os.path.dirname(__file__) SOURCE_DIR = os.path.join(ROOT_DIR)
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,10 +1,20 @@\n #!/usr/bin/env python\n+from __future__ import print_function\n+\n import codecs\n import os\n import sys\n \n+import pip\n+\n from setuptools import setup, find_packages\n \n+if 'docker-py' in [x.project_name for x in pip.get_installed_distributions()]:\n+ print(\n+ 'ERROR: \"docker-py\" needs to be uninstalled before installing this'\n+ ' package:\\npip uninstall docker-py', file=sys.stderr\n+ )\n+ sys.exit(1)\n \n ROOT_DIR = os.path.dirname(__file__)\n SOURCE_DIR = os.path.join(ROOT_DIR)\n", "issue": "docker-py installation breaks docker-compose\nim not quite sure if this is correct, but trying to install `docker-py` through pip after i've installed `docker-compose` breaks `docker-compose` with\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/docker-compose\", line 7, in <module>\r\n from compose.cli.main import main\r\n File \"/usr/local/lib/python2.7/site-packages/compose/cli/main.py\", line 20, in <module>\r\n from ..bundle import get_image_digests\r\n File \"/usr/local/lib/python2.7/site-packages/compose/bundle.py\", line 13, in <module>\r\n from .network import get_network_defs_for_service\r\n File \"/usr/local/lib/python2.7/site-packages/compose/network.py\", line 7, in <module>\r\n from docker.types import IPAMConfig\r\nImportError: cannot import name IPAMConfig\r\n```\r\n\r\nTo fix that error, i just need to do the installations in this order:\r\n```\r\npip install docker-py\r\npip install docker-compose\r\n```\r\n\r\n\r\ngist:\r\nhttps://gist.github.com/serialdoom/3a443c420aa29f9422f8c5fc73f46602\r\n\r\npython/pip versions tried:\r\n```\r\ndocker run -it python:2.7.13 bash -c 'pip --version'\r\npip 9.0.1 from /usr/local/lib/python2.7/site-packages (python 2.7)\r\ndocker run -it python:2.7.12 bash -c 'pip --version'\r\npip 8.1.2 from /usr/local/lib/python2.7/site-packages (python 2.7)\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2, != 2.11.0, != 2.12.2',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n 'docker-pycreds >= 0.2.1'\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n # While not imported explicitly, the ipaddress module is required for\n # ssl_match_hostname to verify hosts match with certificates via\n # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nlong_description = ''\ntry:\n with codecs.open('./README.rst', encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\nexcept IOError:\n # README.rst is only generated on release. 
Its absence should not prevent\n # setup.py from working properly.\n pass\n\nsetup(\n name=\"docker\",\n version=version,\n description=\"A Python library for the Docker Engine API.\",\n long_description=long_description,\n url='https://github.com/docker/docker-py',\n packages=find_packages(exclude=[\"tests.*\", \"tests\"]),\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n maintainer='Joffrey F',\n maintainer_email='[email protected]',\n)\n", "path": "setup.py"}]}
1640
162
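The docker-py diff above aborts `setup.py` when the legacy `docker-py` distribution is still installed, via pip's then-current `get_installed_distributions()`. Later pip releases removed that internal API, so an equivalent guard through `pkg_resources` is sketched here; this is an editor's variant, not the shipped code:

```python
from __future__ import print_function

import sys

import pkg_resources

# Same conflict check as the golden diff, but using the stable
# pkg_resources working set instead of a pip-internal helper.
installed = {dist.project_name for dist in pkg_resources.working_set}
if 'docker-py' in installed:
    print(
        'ERROR: "docker-py" needs to be uninstalled before installing this'
        ' package:\npip uninstall docker-py',
        file=sys.stderr,
    )
    sys.exit(1)
```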
gh_patches_debug_54050
rasdani/github-patches
git_diff
python-discord__bot-1404
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add an `attachments` rule # Abstract We should have an antispam rule filtering small burst of images. # Rationale Currently, when a user posts 4 images in less than 10 seconds without any comment, the `duplicates` rule will trigger. While we still want to be informed when many images are posted, having the `duplicates` rule trigger doesn't make much sense. Besides, if different message content is given for each image, it will only trigger `burst` if more than 9 messages are sent in 10 seconds. # Specification - [ ] Make sure that the `duplicates` filter won't be triggered by messages with images. We can safely skip empty messages with attachments. - [ ] Create an `images` filter based on `duplicates` that will trigger when more than 3 images are posted in less than 10 seconds. It should ignore the message content. </issue> <code> [start of bot/rules/duplicates.py] 1 from typing import Dict, Iterable, List, Optional, Tuple 2 3 from discord import Member, Message 4 5 6 async def apply( 7 last_message: Message, recent_messages: List[Message], config: Dict[str, int] 8 ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]: 9 """Detects duplicated messages sent by a single user.""" 10 relevant_messages = tuple( 11 msg 12 for msg in recent_messages 13 if ( 14 msg.author == last_message.author 15 and msg.content == last_message.content 16 ) 17 ) 18 19 total_duplicated = len(relevant_messages) 20 21 if total_duplicated > config['max']: 22 return ( 23 f"sent {total_duplicated} duplicated messages in {config['interval']}s", 24 (last_message.author,), 25 relevant_messages 26 ) 27 return None 28 [end of bot/rules/duplicates.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bot/rules/duplicates.py b/bot/rules/duplicates.py
--- a/bot/rules/duplicates.py
+++ b/bot/rules/duplicates.py
@@ -13,6 +13,7 @@
         if (
             msg.author == last_message.author
             and msg.content == last_message.content
+            and msg.content
         )
     )
{"golden_diff": "diff --git a/bot/rules/duplicates.py b/bot/rules/duplicates.py\n--- a/bot/rules/duplicates.py\n+++ b/bot/rules/duplicates.py\n@@ -13,6 +13,7 @@\n if (\n msg.author == last_message.author\n and msg.content == last_message.content\n+ and msg.content\n )\n )\n", "issue": "Add an `attachments` rule\n# Abstract \r\n\r\nWe should have an antispam rule filtering small burst of images. \r\n\r\n# Rationale\r\n\r\nCurrently, when a user posts 4 images in less than 10 seconds without any comment, the `duplicates` rule will trigger. While we still want to be informed when many images are posted, having the `duplicates` rule trigger doesn't make much sense. Besides, if different message content is given for each image, it will only trigger `burst` if more than 9 messages are sent in 10 seconds. \r\n\r\n# Specification\r\n\r\n- [ ] Make sure that the `duplicates` filter won't be triggered by messages with images. We can safely skip empty messages with attachments.\r\n- [ ] Create an `images` filter based on `duplicates` that will trigger when more than 3 images are posted in less than 10 seconds. It should ignore the message content. \n", "before_files": [{"content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects duplicated messages sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and msg.content == last_message.content\n )\n )\n\n total_duplicated = len(relevant_messages)\n\n if total_duplicated > config['max']:\n return (\n f\"sent {total_duplicated} duplicated messages in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/duplicates.py"}]}
952
79
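A minimal sketch of the patched `duplicates` predicate from the record above, using namedtuple stand-ins for `discord.Message`; the stand-in type and sample values are assumptions for illustration, not code from the repository:

```python
from collections import namedtuple

# Stand-in for discord.Message (assumed shape: author + content only).
Message = namedtuple("Message", "author content")

def is_duplicate(msg, last_message):
    # Mirrors the patched condition: the trailing `and msg.content`
    # rejects empty-content (attachment-only) messages.
    return bool(
        msg.author == last_message.author
        and msg.content == last_message.content
        and msg.content
    )

last = Message("alice", "")                       # attachment-only post
burst = [Message("alice", "") for _ in range(4)]  # 4 images, no text
assert not any(is_duplicate(m, last) for m in burst)  # no longer flagged
assert is_duplicate(Message("alice", "hi"), Message("alice", "hi"))
```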
gh_patches_debug_7083
rasdani/github-patches
git_diff
bridgecrewio__checkov-5222
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_GIT_4 always fail with terraform_plan **Describe the issue** Once a GitHub org/repo secret is created and stored in the terraform state, the check CKV_GIT_4 will always fail when scanning terraform plans even if the value was created using an encrypted value. It seems like the check consider `"plaintext_text": ""` to be a hard-coded secret because if I remove that line from the plan or change it to `"plaintext_text": null`, the check passes. ``` "resources": [ { "address": "github_actions_organization_secret.my_github_secret", "mode": "managed", "type": "github_actions_organization_secret", "name": "my_github_secret", "provider_name": "registry.terraform.io/integrations/github", "schema_version": 0, "values": { "created_at": "2023-05-17 13:54:59 +0000 UTC", "encrypted_value": "MIr5c6eSzTJeGW/uyB0u...", "id": "MY_GITHUB_SECRET", "plaintext_value": "", "secret_name": "MY_GITHUB_SECRET", "selected_repository_ids": [], "updated_at": "2023-05-17 13:54:59 +0000 UTC", "visibility": "all" }, "sensitive_values": { "selected_repository_ids": [] } } ``` **Examples** **Version (please complete the following information):** - Checkov Version 2.3.223 **Additional context** Add any other context about the problem here. </issue> <code> [start of checkov/terraform/checks/resource/github/SecretsEncrypted.py] 1 from typing import List, Any, Dict 2 3 from checkov.common.models.enums import CheckCategories, CheckResult 4 from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck 5 from checkov.common.models.consts import ANY_VALUE 6 7 8 class SecretsEncrypted(BaseResourceNegativeValueCheck): 9 def __init__(self) -> None: 10 # -from github docs "It is also advised that you do not store plaintext values in your code but rather populate 11 # the encrypted_value using fields from a resource, data source or variable as, 12 # while encrypted in state, these will be easily accessible in your code" 13 name = "Ensure GitHub Actions secrets are encrypted" 14 id = "CKV_GIT_4" 15 supported_resources = ( 16 "github_actions_environment_secret", 17 "github_actions_organization_secret", 18 "github_actions_secret", 19 ) 20 categories = (CheckCategories.ENCRYPTION,) 21 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 22 23 def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult: 24 plaintext = conf.get("plaintext_value") 25 if plaintext and self._is_variable_dependant(plaintext[0]): 26 return CheckResult.UNKNOWN 27 28 return super().scan_resource_conf(conf) 29 30 def get_inspected_key(self) -> str: 31 return "plaintext_value" 32 33 def get_forbidden_values(self) -> List[Any]: 34 return [ANY_VALUE] 35 36 37 check = SecretsEncrypted() 38 [end of checkov/terraform/checks/resource/github/SecretsEncrypted.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
    dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/checkov/terraform/checks/resource/github/SecretsEncrypted.py b/checkov/terraform/checks/resource/github/SecretsEncrypted.py
--- a/checkov/terraform/checks/resource/github/SecretsEncrypted.py
+++ b/checkov/terraform/checks/resource/github/SecretsEncrypted.py
@@ -25,6 +25,10 @@
         if plaintext and self._is_variable_dependant(plaintext[0]):
            return CheckResult.UNKNOWN
 
+        if isinstance(plaintext, list) and not plaintext[0]:
+            # this happens mainly in TF plan files, because the value is just an empty string
+            return CheckResult.PASSED
+
         return super().scan_resource_conf(conf)
 
     def get_inspected_key(self) -> str:
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/github/SecretsEncrypted.py b/checkov/terraform/checks/resource/github/SecretsEncrypted.py\n--- a/checkov/terraform/checks/resource/github/SecretsEncrypted.py\n+++ b/checkov/terraform/checks/resource/github/SecretsEncrypted.py\n@@ -25,6 +25,10 @@\n if plaintext and self._is_variable_dependant(plaintext[0]):\n return CheckResult.UNKNOWN\n \n+ if isinstance(plaintext, list) and not plaintext[0]:\n+ # this happens mainly in TF plan files, because the value is just an empty string\n+ return CheckResult.PASSED\n+\n return super().scan_resource_conf(conf)\n \n def get_inspected_key(self) -> str:\n", "issue": "CKV_GIT_4 always fail with terraform_plan\n**Describe the issue**\r\nOnce a GitHub org/repo secret is created and stored in the terraform state, the check CKV_GIT_4 will always fail when scanning terraform plans even if the value was created using an encrypted value.\r\n\r\nIt seems like the check consider `\"plaintext_text\": \"\"` to be a hard-coded secret because if I remove that line from the plan or change it to `\"plaintext_text\": null`, the check passes.\r\n\r\n```\r\n \"resources\": [\r\n {\r\n \"address\": \"github_actions_organization_secret.my_github_secret\",\r\n \"mode\": \"managed\",\r\n \"type\": \"github_actions_organization_secret\",\r\n \"name\": \"my_github_secret\",\r\n \"provider_name\": \"registry.terraform.io/integrations/github\",\r\n \"schema_version\": 0,\r\n \"values\": {\r\n \"created_at\": \"2023-05-17 13:54:59 +0000 UTC\",\r\n \"encrypted_value\": \"MIr5c6eSzTJeGW/uyB0u...\",\r\n \"id\": \"MY_GITHUB_SECRET\",\r\n \"plaintext_value\": \"\",\r\n \"secret_name\": \"MY_GITHUB_SECRET\",\r\n \"selected_repository_ids\": [],\r\n \"updated_at\": \"2023-05-17 13:54:59 +0000 UTC\",\r\n \"visibility\": \"all\"\r\n },\r\n \"sensitive_values\": {\r\n \"selected_repository_ids\": []\r\n }\r\n }\r\n```\r\n\r\n**Examples**\r\n\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.223\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from typing import List, Any, Dict\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck\nfrom checkov.common.models.consts import ANY_VALUE\n\n\nclass SecretsEncrypted(BaseResourceNegativeValueCheck):\n def __init__(self) -> None:\n # -from github docs \"It is also advised that you do not store plaintext values in your code but rather populate\n # the encrypted_value using fields from a resource, data source or variable as,\n # while encrypted in state, these will be easily accessible in your code\"\n name = \"Ensure GitHub Actions secrets are encrypted\"\n id = \"CKV_GIT_4\"\n supported_resources = (\n \"github_actions_environment_secret\",\n \"github_actions_organization_secret\",\n \"github_actions_secret\",\n )\n categories = (CheckCategories.ENCRYPTION,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n plaintext = conf.get(\"plaintext_value\")\n if plaintext and self._is_variable_dependant(plaintext[0]):\n return CheckResult.UNKNOWN\n\n return super().scan_resource_conf(conf)\n\n def get_inspected_key(self) -> str:\n return \"plaintext_value\"\n\n def get_forbidden_values(self) -> List[Any]:\n return 
[ANY_VALUE]\n\n\ncheck = SecretsEncrypted()\n", "path": "checkov/terraform/checks/resource/github/SecretsEncrypted.py"}]}
1,316
169
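The behaviour change above, condensed into a self-contained sketch; the conf shape (`{"plaintext_value": [""]}`) matches what the issue describes for plan files, and the fallback branch is a simplification of the real base-class `ANY_VALUE` check:

```python
from enum import Enum

class CheckResult(Enum):
    PASSED = 1
    FAILED = 2

def scan_resource_conf(conf):
    plaintext = conf.get("plaintext_value")
    if isinstance(plaintext, list) and not plaintext[0]:
        # TF plan files serialise the unset secret as an empty string.
        return CheckResult.PASSED
    # Simplified stand-in for the forbidden-value check in the base class.
    return CheckResult.FAILED if plaintext else CheckResult.PASSED

assert scan_resource_conf({"plaintext_value": [""]}) is CheckResult.PASSED
assert scan_resource_conf({"plaintext_value": ["hunter2"]}) is CheckResult.FAILED
```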
gh_patches_debug_2251
rasdani/github-patches
git_diff
mirumee__ariadne-232
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update GraphQL Core Next & Starlette Issue for me to remember to update our core dependencies to latest versions before release. </issue> <code> [start of setup.py] 1 #! /usr/bin/env python 2 import os 3 from setuptools import setup 4 5 CLASSIFIERS = [ 6 "Development Status :: 4 - Beta", 7 "Intended Audience :: Developers", 8 "License :: OSI Approved :: BSD License", 9 "Operating System :: OS Independent", 10 "Programming Language :: Python", 11 "Programming Language :: Python :: 3.6", 12 "Programming Language :: Python :: 3.7", 13 "Topic :: Software Development :: Libraries :: Python Modules", 14 ] 15 16 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md") 17 with open(README_PATH, "r") as f: 18 README = f.read() 19 20 setup( 21 name="ariadne", 22 author="Mirumee Software", 23 author_email="[email protected]", 24 description="Ariadne is a Python library for implementing GraphQL servers.", 25 long_description=README, 26 long_description_content_type="text/markdown", 27 license="BSD", 28 version="0.5.0", 29 url="https://github.com/mirumee/ariadne", 30 packages=["ariadne"], 31 include_package_data=True, 32 install_requires=[ 33 "graphql-core-next>=1.0.4", 34 "python-multipart>=0.0.5", 35 "starlette<0.13", 36 "typing_extensions>=3.6.0", 37 ], 38 classifiers=CLASSIFIERS, 39 platforms=["any"], 40 zip_safe=False, 41 ) 42 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@
     packages=["ariadne"],
     include_package_data=True,
     install_requires=[
-        "graphql-core-next>=1.0.4",
+        "graphql-core-next<3.0.0",
         "python-multipart>=0.0.5",
         "starlette<0.13",
         "typing_extensions>=3.6.0",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,7 @@\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n- \"graphql-core-next>=1.0.4\",\n+ \"graphql-core-next<3.0.0\",\n \"python-multipart>=0.0.5\",\n \"starlette<0.13\",\n \"typing_extensions>=3.6.0\",\n", "issue": "Update GraphQL Core Next & Starlette\nIssue for me to remember to update our core dependencies to latest versions before release.\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.5.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core-next>=1.0.4\",\n \"python-multipart>=0.0.5\",\n \"starlette<0.13\",\n \"typing_extensions>=3.6.0\",\n ],\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]}
942
109
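What the new pin above accepts, checked with `packaging` (an illustrative aside; ariadne does not run this at install time):

```python
from packaging.specifiers import SpecifierSet

old = SpecifierSet(">=1.0.4")   # previous pin: no upper bound
new = SpecifierSet("<3.0.0")    # patched pin: caps the 3.x line

assert "3.0.0" in old and "3.0.0" not in new  # breaking release now excluded
assert "1.0.4" in new                         # but note: the floor is gone,
assert "0.1" in new                           # so very old versions pass too
```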
gh_patches_debug_56857
rasdani/github-patches
git_diff
spesmilo__electrum-1112
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Hardcoded icon path in launcher for GNU/Linux systems Hi, currently the icon path specified in the created _.desktop_ launcher file is set to: _Icon=/usr/share/app-install/icons/electrum.png_ (see https://github.com/spesmilo/electrum/blob/bc3013caf0d3d6a35290e9cc9e51125b7d03d14c/electrum.desktop) This prevents icon theming without editing the launcher file. I'd like to ask you to change the icon location as well as the icon path line in the launcher according to the freedesktop.org standards. Please find more info here: http://standards.freedesktop.org/icon-theme-spec/icon-theme-spec-latest.html https://github.com/Foggalong/hardcode-fixer/wiki/What,-Why-&-How --- According to the above resources standard icon locations are: _/usr/share/icons/hicolor/[size]/apps/[icon name]_ _~/.local/share/icons/hicolor/[size]/apps/[icon name]_ _/usr/share/pixmaps/[icon name]_ The standard icon line in the .desktop launcher is: _Icon=[icon name]_ (without full path, without file extension) for instance **Icon=electrum** (for an icon _electrum.svg_ or _electrum.png_ stored in standard location) Could you please move the icon to one of those locations and adjust the icon path in the .desktop file accordingly? </issue> <code> [start of setup.py] 1 #!/usr/bin/python 2 3 # python setup.py sdist --format=zip,gztar 4 5 from setuptools import setup 6 import os 7 import sys 8 import platform 9 import imp 10 11 12 version = imp.load_source('version', 'lib/version.py') 13 14 if sys.version_info[:3] < (2, 7, 0): 15 sys.exit("Error: Electrum requires Python version >= 2.7.0...") 16 17 18 19 data_files = [] 20 if platform.system() == 'Linux' or platform.system() == 'FreeBSD': 21 usr_share = os.path.join(sys.prefix, "share") 22 data_files += [ 23 (os.path.join(usr_share, 'applications/'), ['electrum.desktop']), 24 (os.path.join(usr_share, 'app-install', 'icons/'), ['icons/electrum.png']) 25 ] 26 27 28 setup( 29 name="Electrum", 30 version=version.ELECTRUM_VERSION, 31 install_requires=[ 32 'slowaes>=0.1a1', 33 'ecdsa>=0.9', 34 'pbkdf2', 35 'requests', 36 'pyasn1-modules', 37 'pyasn1', 38 'qrcode', 39 'protobuf', 40 'tlslite', 41 'dnspython', 42 ], 43 package_dir={ 44 'electrum': 'lib', 45 'electrum_gui': 'gui', 46 'electrum_plugins': 'plugins', 47 }, 48 packages=['electrum','electrum_gui','electrum_gui.qt','electrum_plugins'], 49 package_data={ 50 'electrum': [ 51 'wordlist/*.txt', 52 'locale/*/LC_MESSAGES/electrum.mo', 53 ], 54 'electrum_gui': [ 55 "qt/themes/cleanlook/name.cfg", 56 "qt/themes/cleanlook/style.css", 57 "qt/themes/sahara/name.cfg", 58 "qt/themes/sahara/style.css", 59 "qt/themes/dark/name.cfg", 60 "qt/themes/dark/style.css", 61 ] 62 }, 63 scripts=['electrum'], 64 data_files=data_files, 65 description="Lightweight Bitcoin Wallet", 66 author="Thomas Voegtlin", 67 author_email="[email protected]", 68 license="GNU GPLv3", 69 url="https://electrum.org", 70 long_description="""Lightweight Bitcoin Wallet""" 71 ) 72 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
     usr_share = os.path.join(sys.prefix, "share")
     data_files += [
         (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
-        (os.path.join(usr_share, 'app-install', 'icons/'), ['icons/electrum.png'])
+        (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
     ]
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,7 @@\n usr_share = os.path.join(sys.prefix, \"share\")\n data_files += [\n (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),\n- (os.path.join(usr_share, 'app-install', 'icons/'), ['icons/electrum.png'])\n+ (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])\n ]\n", "issue": "Hardcoded icon path in launcher for GNU/Linux systems\nHi,\n\ncurrently the icon path specified in the created _.desktop_ launcher file is set to: \n_Icon=/usr/share/app-install/icons/electrum.png_ \n\n(see https://github.com/spesmilo/electrum/blob/bc3013caf0d3d6a35290e9cc9e51125b7d03d14c/electrum.desktop)\n\nThis prevents icon theming without editing the launcher file.\n\nI'd like to ask you to change the icon location as well as the icon path line in the launcher according to the freedesktop.org standards.\n\nPlease find more info here:\nhttp://standards.freedesktop.org/icon-theme-spec/icon-theme-spec-latest.html\nhttps://github.com/Foggalong/hardcode-fixer/wiki/What,-Why-&-How\n\n---\n\nAccording to the above resources standard icon locations are:\n\n_/usr/share/icons/hicolor/[size]/apps/[icon name]_\n_~/.local/share/icons/hicolor/[size]/apps/[icon name]_\n_/usr/share/pixmaps/[icon name]_\n\nThe standard icon line in the .desktop launcher is:\n_Icon=[icon name]_ (without full path, without file extension)\n\nfor instance\n**Icon=electrum** \n(for an icon _electrum.svg_ or _electrum.png_ stored in standard location)\n\nCould you please move the icon to one of those locations and adjust the icon path in the .desktop file accordingly?\n\n", "before_files": [{"content": "#!/usr/bin/python\n\n# python setup.py sdist --format=zip,gztar\n\nfrom setuptools import setup\nimport os\nimport sys\nimport platform\nimport imp\n\n\nversion = imp.load_source('version', 'lib/version.py')\n\nif sys.version_info[:3] < (2, 7, 0):\n sys.exit(\"Error: Electrum requires Python version >= 2.7.0...\")\n\n\n\ndata_files = []\nif platform.system() == 'Linux' or platform.system() == 'FreeBSD':\n usr_share = os.path.join(sys.prefix, \"share\")\n data_files += [\n (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),\n (os.path.join(usr_share, 'app-install', 'icons/'), ['icons/electrum.png'])\n ]\n\n\nsetup(\n name=\"Electrum\",\n version=version.ELECTRUM_VERSION,\n install_requires=[\n 'slowaes>=0.1a1',\n 'ecdsa>=0.9',\n 'pbkdf2',\n 'requests',\n 'pyasn1-modules',\n 'pyasn1',\n 'qrcode',\n 'protobuf',\n 'tlslite',\n 'dnspython',\n ],\n package_dir={\n 'electrum': 'lib',\n 'electrum_gui': 'gui',\n 'electrum_plugins': 'plugins',\n },\n packages=['electrum','electrum_gui','electrum_gui.qt','electrum_plugins'],\n package_data={\n 'electrum': [\n 'wordlist/*.txt',\n 'locale/*/LC_MESSAGES/electrum.mo',\n ],\n 'electrum_gui': [\n \"qt/themes/cleanlook/name.cfg\",\n \"qt/themes/cleanlook/style.css\",\n \"qt/themes/sahara/name.cfg\",\n \"qt/themes/sahara/style.css\",\n \"qt/themes/dark/name.cfg\",\n \"qt/themes/dark/style.css\",\n ]\n },\n scripts=['electrum'],\n data_files=data_files,\n description=\"Lightweight Bitcoin Wallet\",\n author=\"Thomas Voegtlin\",\n author_email=\"[email protected]\",\n license=\"GNU GPLv3\",\n url=\"https://electrum.org\",\n long_description=\"\"\"Lightweight Bitcoin Wallet\"\"\"\n)\n", "path": "setup.py"}]}
1,470
118
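The patched `data_files` mapping in isolation, plus the launcher line the issue recommends. Note the golden diff above only touches setup.py; the `Icon=` line here is the issue's freedesktop suggestion, not part of the patch:

```python
import os
import sys

usr_share = os.path.join(sys.prefix, "share")
data_files = [
    (os.path.join(usr_share, "applications/"), ["electrum.desktop"]),
    # freedesktop-standard pixmaps dir instead of app-install/icons/:
    (os.path.join(usr_share, "pixmaps/"), ["icons/electrum.png"]),
]
desktop_icon_line = "Icon=electrum"  # bare, themable name; no path, no extension
print(data_files)
print(desktop_icon_line)
```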
gh_patches_debug_2600
rasdani/github-patches
git_diff
dmlc__dgl-2505
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> jtnn example error NOCUDA=1 python3 vaetrain_dgl.py it shows NameError: name 'tensor' is not defined in dgl/examples/pytorch/jtnn/jtnn/nnutils.py", line 11, in cuda return tensor env: dgl 0.5.3 torch 1.7.1 mac os </issue> <code> [start of examples/pytorch/jtnn/jtnn/nnutils.py] 1 import torch 2 import torch.nn as nn 3 import os 4 import dgl 5 6 7 def cuda(x): 8 if torch.cuda.is_available() and not os.getenv('NOCUDA', None): 9 return x.to(torch.device('cuda')) # works for both DGLGraph and tensor 10 else: 11 return tensor 12 13 14 class GRUUpdate(nn.Module): 15 def __init__(self, hidden_size): 16 nn.Module.__init__(self) 17 self.hidden_size = hidden_size 18 19 self.W_z = nn.Linear(2 * hidden_size, hidden_size) 20 self.W_r = nn.Linear(hidden_size, hidden_size, bias=False) 21 self.U_r = nn.Linear(hidden_size, hidden_size) 22 self.W_h = nn.Linear(2 * hidden_size, hidden_size) 23 24 def update_zm(self, node): 25 src_x = node.data['src_x'] 26 s = node.data['s'] 27 rm = node.data['accum_rm'] 28 z = torch.sigmoid(self.W_z(torch.cat([src_x, s], 1))) 29 m = torch.tanh(self.W_h(torch.cat([src_x, rm], 1))) 30 m = (1 - z) * s + z * m 31 return {'m': m, 'z': z} 32 33 def update_r(self, node, zm=None): 34 dst_x = node.data['dst_x'] 35 m = node.data['m'] if zm is None else zm['m'] 36 r_1 = self.W_r(dst_x) 37 r_2 = self.U_r(m) 38 r = torch.sigmoid(r_1 + r_2) 39 return {'r': r, 'rm': r * m} 40 41 def forward(self, node): 42 dic = self.update_zm(node) 43 dic.update(self.update_r(node, zm=dic)) 44 return dic 45 46 def tocpu(g): 47 src, dst = g.edges() 48 src = src.cpu() 49 dst = dst.cpu() 50 return dgl.graph((src, dst), num_nodes=g.number_of_nodes()) 51 [end of examples/pytorch/jtnn/jtnn/nnutils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/pytorch/jtnn/jtnn/nnutils.py b/examples/pytorch/jtnn/jtnn/nnutils.py
--- a/examples/pytorch/jtnn/jtnn/nnutils.py
+++ b/examples/pytorch/jtnn/jtnn/nnutils.py
@@ -8,7 +8,7 @@
     if torch.cuda.is_available() and not os.getenv('NOCUDA', None):
         return x.to(torch.device('cuda'))  # works for both DGLGraph and tensor
     else:
-        return tensor
+        return x
 
 
 class GRUUpdate(nn.Module):
{"golden_diff": "diff --git a/examples/pytorch/jtnn/jtnn/nnutils.py b/examples/pytorch/jtnn/jtnn/nnutils.py\n--- a/examples/pytorch/jtnn/jtnn/nnutils.py\n+++ b/examples/pytorch/jtnn/jtnn/nnutils.py\n@@ -8,7 +8,7 @@\n if torch.cuda.is_available() and not os.getenv('NOCUDA', None):\n return x.to(torch.device('cuda')) # works for both DGLGraph and tensor\n else:\n- return tensor\n+ return x\n \n \n class GRUUpdate(nn.Module):\n", "issue": "jtnn example error\nNOCUDA=1 python3 vaetrain_dgl.py\r\nit shows NameError: name 'tensor' is not defined in dgl/examples/pytorch/jtnn/jtnn/nnutils.py\", line 11, in cuda\r\n return tensor\r\n\r\nenv: \r\ndgl 0.5.3\r\ntorch 1.7.1\r\nmac os\r\n\r\n\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn as nn\nimport os\nimport dgl\n\n\ndef cuda(x):\n if torch.cuda.is_available() and not os.getenv('NOCUDA', None):\n return x.to(torch.device('cuda')) # works for both DGLGraph and tensor\n else:\n return tensor\n\n\nclass GRUUpdate(nn.Module):\n def __init__(self, hidden_size):\n nn.Module.__init__(self)\n self.hidden_size = hidden_size\n\n self.W_z = nn.Linear(2 * hidden_size, hidden_size)\n self.W_r = nn.Linear(hidden_size, hidden_size, bias=False)\n self.U_r = nn.Linear(hidden_size, hidden_size)\n self.W_h = nn.Linear(2 * hidden_size, hidden_size)\n\n def update_zm(self, node):\n src_x = node.data['src_x']\n s = node.data['s']\n rm = node.data['accum_rm']\n z = torch.sigmoid(self.W_z(torch.cat([src_x, s], 1)))\n m = torch.tanh(self.W_h(torch.cat([src_x, rm], 1)))\n m = (1 - z) * s + z * m\n return {'m': m, 'z': z}\n\n def update_r(self, node, zm=None):\n dst_x = node.data['dst_x']\n m = node.data['m'] if zm is None else zm['m']\n r_1 = self.W_r(dst_x)\n r_2 = self.U_r(m)\n r = torch.sigmoid(r_1 + r_2)\n return {'r': r, 'rm': r * m}\n\n def forward(self, node):\n dic = self.update_zm(node)\n dic.update(self.update_r(node, zm=dic))\n return dic\n\ndef tocpu(g):\n src, dst = g.edges()\n src = src.cpu()\n dst = dst.cpu()\n return dgl.graph((src, dst), num_nodes=g.number_of_nodes())\n", "path": "examples/pytorch/jtnn/jtnn/nnutils.py"}]}
1,160
134
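The one-line fix above, exercised in isolation; requires torch, and sets `NOCUDA` the way the issue's command line does:

```python
import os

import torch

def cuda(x):
    if torch.cuda.is_available() and not os.getenv("NOCUDA", None):
        return x.to(torch.device("cuda"))
    return x  # was `return tensor`, an undefined name

os.environ["NOCUDA"] = "1"
t = torch.zeros(3)
assert cuda(t) is t  # passes through unchanged instead of raising NameError
```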
gh_patches_debug_178
rasdani/github-patches
git_diff
napalm-automation__napalm-904
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `get_lldp_neighbors_detail()` fails on Arista 7150S ```python In [1]: from napalm.eos import EOSDriver In [2]: from getpass import getpass In [3]: with EOSDriver("arista", "bewing", getpass()) as d: ...: print(d.get_lldp_neighbors_detail()) ...: Password: --------------------------------------------------------------------------- KeyError Traceback (most recent call last) <ipython-input-3-85f875e30fe3> in <module> 1 with EOSDriver("arista", "bewing", getpass()) as d: ----> 2 print(d.get_lldp_neighbors_detail()) 3 /mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in get_lldp_neighbors_detail(self, interface) 647 lldp_neighbors_out[interface] = [] 648 capabilities = neighbor.get("systemCapabilities", {}) --> 649 available_capabilities = self._transform_lldp_capab(capabilities.keys()) 650 enabled_capabilities = self._transform_lldp_capab( 651 [capab for capab, enabled in capabilities.items() if enabled] /mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in _transform_lldp_capab(self, capabilities) 616 617 def _transform_lldp_capab(self, capabilities): --> 618 return sorted([LLDP_CAPAB_TRANFORM_TABLE[c.lower()] for c in capabilities]) 619 620 def get_lldp_neighbors_detail(self, interface=""): /mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in <listcomp>(.0) 616 617 def _transform_lldp_capab(self, capabilities): --> 618 return sorted([LLDP_CAPAB_TRANFORM_TABLE[c.lower()] for c in capabilities]) 619 620 def get_lldp_neighbors_detail(self, interface=""): KeyError: 'stationonly' ``` </issue> <code> [start of napalm/eos/constants.py] 1 # Based on: 2 # https://code.getnoc.com/noc/noc/blob/6f3db2a6e4b1ece77aaf4c4c98413e35ff64643a/sa/profiles/Arista/EOS/get_lldp_neighbors.py#L76-79 3 LLDP_CAPAB_TRANFORM_TABLE = { 4 "other": "other", 5 "repeater": "repeater", 6 "bridge": "bridge", 7 "wlanaccesspoint": "wlan-access-point", 8 "router": "router", 9 "telephone": "telephone", 10 "docsis": "docsis-cable-device", 11 "station": "station", 12 } 13 [end of napalm/eos/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/napalm/eos/constants.py b/napalm/eos/constants.py
--- a/napalm/eos/constants.py
+++ b/napalm/eos/constants.py
@@ -9,4 +9,5 @@
     "telephone": "telephone",
     "docsis": "docsis-cable-device",
     "station": "station",
+    "stationonly": "station",
 }
{"golden_diff": "diff --git a/napalm/eos/constants.py b/napalm/eos/constants.py\n--- a/napalm/eos/constants.py\n+++ b/napalm/eos/constants.py\n@@ -9,4 +9,5 @@\n \"telephone\": \"telephone\",\n \"docsis\": \"docsis-cable-device\",\n \"station\": \"station\",\n+ \"stationonly\": \"station\",\n }\n", "issue": "`get_lldp_neighbors_detail()` fails on Arista 7150S\n```python\r\nIn [1]: from napalm.eos import EOSDriver\r\n\r\nIn [2]: from getpass import getpass\r\n\r\nIn [3]: with EOSDriver(\"arista\", \"bewing\", getpass()) as d:\r\n ...: print(d.get_lldp_neighbors_detail())\r\n ...:\r\nPassword:\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-3-85f875e30fe3> in <module>\r\n 1 with EOSDriver(\"arista\", \"bewing\", getpass()) as d:\r\n----> 2 print(d.get_lldp_neighbors_detail())\r\n 3\r\n\r\n/mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in get_lldp_neighbors_detail(self, interface)\r\n 647 lldp_neighbors_out[interface] = []\r\n 648 capabilities = neighbor.get(\"systemCapabilities\", {})\r\n--> 649 available_capabilities = self._transform_lldp_capab(capabilities.keys())\r\n 650 enabled_capabilities = self._transform_lldp_capab(\r\n 651 [capab for capab, enabled in capabilities.items() if enabled]\r\n\r\n/mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in _transform_lldp_capab(self, capabilities)\r\n 616\r\n 617 def _transform_lldp_capab(self, capabilities):\r\n--> 618 return sorted([LLDP_CAPAB_TRANFORM_TABLE[c.lower()] for c in capabilities])\r\n 619\r\n 620 def get_lldp_neighbors_detail(self, interface=\"\"):\r\n\r\n/mnt/c/Users/bewing/PycharmProjects/napalm/napalm/eos/eos.py in <listcomp>(.0)\r\n 616\r\n 617 def _transform_lldp_capab(self, capabilities):\r\n--> 618 return sorted([LLDP_CAPAB_TRANFORM_TABLE[c.lower()] for c in capabilities])\r\n 619\r\n 620 def get_lldp_neighbors_detail(self, interface=\"\"):\r\n\r\nKeyError: 'stationonly'\r\n```\n", "before_files": [{"content": "# Based on:\n# https://code.getnoc.com/noc/noc/blob/6f3db2a6e4b1ece77aaf4c4c98413e35ff64643a/sa/profiles/Arista/EOS/get_lldp_neighbors.py#L76-79\nLLDP_CAPAB_TRANFORM_TABLE = {\n \"other\": \"other\",\n \"repeater\": \"repeater\",\n \"bridge\": \"bridge\",\n \"wlanaccesspoint\": \"wlan-access-point\",\n \"router\": \"router\",\n \"telephone\": \"telephone\",\n \"docsis\": \"docsis-cable-device\",\n \"station\": \"station\",\n}\n", "path": "napalm/eos/constants.py"}]}
1,209
87
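Why the lookup stopped raising, shown with a trimmed copy of the table; the capability strings below are assumed examples of what a 7150S reports, inferred from the traceback:

```python
# Trimmed copy of LLDP_CAPAB_TRANFORM_TABLE with the new entry.
LLDP_CAPAB_TRANFORM_TABLE = {
    "router": "router",
    "station": "station",
    "stationonly": "station",  # added by the patch
}

capabilities = ["Router", "StationOnly"]  # assumed device output
print(sorted(LLDP_CAPAB_TRANFORM_TABLE[c.lower()] for c in capabilities))
# ['router', 'station']  -- previously: KeyError: 'stationonly'
```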
gh_patches_debug_34642
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-1571
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tracer and Meter provider lack environment variables Now that `Configuration` has been removed, both providers should use consistent environment variables. </issue> <code> [start of opentelemetry-api/src/opentelemetry/environment_variables/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 OTEL_PROPAGATORS = "OTEL_PROPAGATORS" 16 OTEL_PYTHON_CONTEXT = "OTEL_PYTHON_CONTEXT" 17 OTEL_PYTHON_DISABLED_INSTRUMENTATIONS = "OTEL_PYTHON_DISABLED_INSTRUMENTATIONS" 18 OTEL_PYTHON_IDS_GENERATOR = "OTEL_PYTHON_IDS_GENERATOR" 19 OTEL_PYTHON_SERVICE_NAME = "OTEL_PYTHON_SERVICE_NAME" 20 OTEL_TRACE_EXPORTER = "OTEL_TRACE_EXPORTER" 21 OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER" 22 [end of opentelemetry-api/src/opentelemetry/environment_variables/__init__.py] [start of opentelemetry-api/src/opentelemetry/util/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import time 16 from logging import getLogger 17 from os import environ 18 from typing import TYPE_CHECKING, Union, cast 19 20 from pkg_resources import iter_entry_points 21 22 if TYPE_CHECKING: 23 from opentelemetry.metrics import MeterProvider 24 from opentelemetry.trace import TracerProvider 25 26 Provider = Union["TracerProvider", "MeterProvider"] 27 28 logger = getLogger(__name__) 29 30 # Since we want API users to be able to provide timestamps, 31 # this needs to be in the API. 
32 33 try: 34 time_ns = time.time_ns 35 # Python versions < 3.7 36 except AttributeError: 37 38 def time_ns() -> int: 39 return int(time.time() * 1e9) 40 41 42 def _load_provider(provider: str) -> Provider: 43 try: 44 entry_point = next( 45 iter_entry_points( 46 "opentelemetry_{}".format(provider), 47 name=cast( 48 str, 49 environ.get( 50 provider.upper(), "default_{}".format(provider), 51 ), 52 ), 53 ) 54 ) 55 return cast(Provider, entry_point.load()(),) 56 except Exception: # pylint: disable=broad-except 57 logger.error("Failed to load configured provider %s", provider) 58 raise 59 60 61 def _load_meter_provider(provider: str) -> "MeterProvider": 62 return cast("MeterProvider", _load_provider(provider)) 63 64 65 def _load_trace_provider(provider: str) -> "TracerProvider": 66 return cast("TracerProvider", _load_provider(provider)) 67 [end of opentelemetry-api/src/opentelemetry/util/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py b/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py
--- a/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py
@@ -19,3 +19,5 @@
 OTEL_PYTHON_SERVICE_NAME = "OTEL_PYTHON_SERVICE_NAME"
 OTEL_TRACE_EXPORTER = "OTEL_TRACE_EXPORTER"
 OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"
+OTEL_PYTHON_TRACER_PROVIDER = "OTEL_PYTHON_TRACER_PROVIDER"
+OTEL_PYTHON_METER_PROVIDER = "OTEL_PYTHON_METER_PROVIDER"
diff --git a/opentelemetry-api/src/opentelemetry/util/__init__.py b/opentelemetry-api/src/opentelemetry/util/__init__.py
--- a/opentelemetry-api/src/opentelemetry/util/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/util/__init__.py
@@ -19,6 +19,11 @@
 
 from pkg_resources import iter_entry_points
 
+from opentelemetry.environment_variables import (
+    OTEL_PYTHON_METER_PROVIDER,
+    OTEL_PYTHON_TRACER_PROVIDER,
+)
+
 if TYPE_CHECKING:
     from opentelemetry.metrics import MeterProvider
     from opentelemetry.trace import TracerProvider
@@ -39,7 +44,9 @@
     return int(time.time() * 1e9)
 
 
-def _load_provider(provider: str) -> Provider:
+def _load_provider(
+    provider_environment_variable: str, provider: str
+) -> Provider:
     try:
         entry_point = next(
             iter_entry_points(
@@ -47,7 +54,8 @@
             name=cast(
                 str,
                 environ.get(
-                    provider.upper(), "default_{}".format(provider),
+                    provider_environment_variable,
+                    "default_{}".format(provider),
                 ),
             ),
         )
@@ -59,8 +67,13 @@
 
 
 def _load_meter_provider(provider: str) -> "MeterProvider":
-    return cast("MeterProvider", _load_provider(provider))
+    return cast(
+        "MeterProvider", _load_provider(OTEL_PYTHON_METER_PROVIDER, provider),
+    )
 
 
 def _load_trace_provider(provider: str) -> "TracerProvider":
-    return cast("TracerProvider", _load_provider(provider))
+    return cast(
+        "TracerProvider",
+        _load_provider(OTEL_PYTHON_TRACER_PROVIDER, provider),
+    )
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py b/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/environment_variables/__init__.py\n@@ -19,3 +19,5 @@\n OTEL_PYTHON_SERVICE_NAME = \"OTEL_PYTHON_SERVICE_NAME\"\n OTEL_TRACE_EXPORTER = \"OTEL_TRACE_EXPORTER\"\n OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n+OTEL_PYTHON_TRACER_PROVIDER = \"OTEL_PYTHON_TRACER_PROVIDER\"\n+OTEL_PYTHON_METER_PROVIDER = \"OTEL_PYTHON_METER_PROVIDER\"\ndiff --git a/opentelemetry-api/src/opentelemetry/util/__init__.py b/opentelemetry-api/src/opentelemetry/util/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/util/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/util/__init__.py\n@@ -19,6 +19,11 @@\n \n from pkg_resources import iter_entry_points\n \n+from opentelemetry.environment_variables import (\n+ OTEL_PYTHON_METER_PROVIDER,\n+ OTEL_PYTHON_TRACER_PROVIDER,\n+)\n+\n if TYPE_CHECKING:\n from opentelemetry.metrics import MeterProvider\n from opentelemetry.trace import TracerProvider\n@@ -39,7 +44,9 @@\n return int(time.time() * 1e9)\n \n \n-def _load_provider(provider: str) -> Provider:\n+def _load_provider(\n+ provider_environment_variable: str, provider: str\n+) -> Provider:\n try:\n entry_point = next(\n iter_entry_points(\n@@ -47,7 +54,8 @@\n name=cast(\n str,\n environ.get(\n- provider.upper(), \"default_{}\".format(provider),\n+ provider_environment_variable,\n+ \"default_{}\".format(provider),\n ),\n ),\n )\n@@ -59,8 +67,13 @@\n \n \n def _load_meter_provider(provider: str) -> \"MeterProvider\":\n- return cast(\"MeterProvider\", _load_provider(provider))\n+ return cast(\n+ \"MeterProvider\", _load_provider(OTEL_PYTHON_METER_PROVIDER, provider),\n+ )\n \n \n def _load_trace_provider(provider: str) -> \"TracerProvider\":\n- return cast(\"TracerProvider\", _load_provider(provider))\n+ return cast(\n+ \"TracerProvider\",\n+ _load_provider(OTEL_PYTHON_TRACER_PROVIDER, provider),\n+ )\n", "issue": "Tracer and Meter provider lack environment variables\nNow that `Configuration` has been removed, both providers should use consistent environment variables.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nOTEL_PROPAGATORS = \"OTEL_PROPAGATORS\"\nOTEL_PYTHON_CONTEXT = \"OTEL_PYTHON_CONTEXT\"\nOTEL_PYTHON_DISABLED_INSTRUMENTATIONS = \"OTEL_PYTHON_DISABLED_INSTRUMENTATIONS\"\nOTEL_PYTHON_IDS_GENERATOR = \"OTEL_PYTHON_IDS_GENERATOR\"\nOTEL_PYTHON_SERVICE_NAME = \"OTEL_PYTHON_SERVICE_NAME\"\nOTEL_TRACE_EXPORTER = \"OTEL_TRACE_EXPORTER\"\nOTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n", "path": "opentelemetry-api/src/opentelemetry/environment_variables/__init__.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance 
with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nfrom logging import getLogger\nfrom os import environ\nfrom typing import TYPE_CHECKING, Union, cast\n\nfrom pkg_resources import iter_entry_points\n\nif TYPE_CHECKING:\n from opentelemetry.metrics import MeterProvider\n from opentelemetry.trace import TracerProvider\n\nProvider = Union[\"TracerProvider\", \"MeterProvider\"]\n\nlogger = getLogger(__name__)\n\n# Since we want API users to be able to provide timestamps,\n# this needs to be in the API.\n\ntry:\n time_ns = time.time_ns\n# Python versions < 3.7\nexcept AttributeError:\n\n def time_ns() -> int:\n return int(time.time() * 1e9)\n\n\ndef _load_provider(provider: str) -> Provider:\n try:\n entry_point = next(\n iter_entry_points(\n \"opentelemetry_{}\".format(provider),\n name=cast(\n str,\n environ.get(\n provider.upper(), \"default_{}\".format(provider),\n ),\n ),\n )\n )\n return cast(Provider, entry_point.load()(),)\n except Exception: # pylint: disable=broad-except\n logger.error(\"Failed to load configured provider %s\", provider)\n raise\n\n\ndef _load_meter_provider(provider: str) -> \"MeterProvider\":\n return cast(\"MeterProvider\", _load_provider(provider))\n\n\ndef _load_trace_provider(provider: str) -> \"TracerProvider\":\n return cast(\"TracerProvider\", _load_provider(provider))\n", "path": "opentelemetry-api/src/opentelemetry/util/__init__.py"}]}
1,454
567
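The core of the change above: the entry-point name is resolved from a dedicated variable rather than `provider.upper()`. A sketch of just that lookup — the configured value `"my_tracer_provider"` is a made-up example, not a real entry point:

```python
from os import environ

OTEL_PYTHON_TRACER_PROVIDER = "OTEL_PYTHON_TRACER_PROVIDER"

def _entry_point_name(provider_environment_variable, provider):
    # Patched lookup: a consistent OTEL_PYTHON_* variable instead of the
    # old implicit "TRACER_PROVIDER" / "METER_PROVIDER" names.
    return environ.get(
        provider_environment_variable, "default_{}".format(provider)
    )

environ[OTEL_PYTHON_TRACER_PROVIDER] = "my_tracer_provider"
print(_entry_point_name(OTEL_PYTHON_TRACER_PROVIDER, "tracer_provider"))
# my_tracer_provider  (when unset: "default_tracer_provider")
```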
gh_patches_debug_3448
rasdani/github-patches
git_diff
SciTools__cartopy-1245
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SlippyImageArtist cannot be composited For example, take the WMTS example and add a second layer. Then attempt to save as a PDF. ``` python import cartopy.crs as ccrs import matplotlib.pyplot as plt url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi' layer1 = 'VIIRS_CityLights_2012' layer2 = 'ASTER_GDEM_Color_Index' ax = plt.axes(projection=ccrs.PlateCarree()) ax.add_wmts(url, layer1) ax.add_wmts(url, layer2) ax.set_extent((-15, 25, 35, 60)) plt.title('Suomi NPP Earth at night April/October 2012') plt.savefig('test.pdf') plt.show() ``` which results in: ``` python Traceback (most recent call last): File "wmts.py", line 33, in main plt.savefig('test.pdf') File "/usr/lib64/python2.7/site-packages/matplotlib/pyplot.py", line 577, in savefig res = fig.savefig(*args, **kwargs) File "/usr/lib64/python2.7/site-packages/matplotlib/figure.py", line 1476, in savefig self.canvas.print_figure(*args, **kwargs) File "/usr/lib64/python2.7/site-packages/matplotlib/backends/backend_qt5agg.py", line 161, in print_figure FigureCanvasAgg.print_figure(self, *args, **kwargs) File "/usr/lib64/python2.7/site-packages/matplotlib/backend_bases.py", line 2211, in print_figure **kwargs) File "/usr/lib64/python2.7/site-packages/matplotlib/backends/backend_pdf.py", line 2485, in print_pdf self.figure.draw(renderer) File "/usr/lib64/python2.7/site-packages/matplotlib/artist.py", line 59, in draw_wrapper draw(artist, renderer, *args, **kwargs) File "/usr/lib64/python2.7/site-packages/matplotlib/figure.py", line 1085, in draw func(*args) File "/usr/lib64/python2.7/site-packages/matplotlib/artist.py", line 59, in draw_wrapper draw(artist, renderer, *args, **kwargs) File "/usr/lib64/python2.7/site-packages/cartopy/mpl/geoaxes.py", line 359, in draw inframe=inframe) File "/usr/lib64/python2.7/site-packages/matplotlib/artist.py", line 59, in draw_wrapper draw(artist, renderer, *args, **kwargs) File "/usr/lib64/python2.7/site-packages/matplotlib/axes/_base.py", line 2081, in draw for z, im in zorder_images] File "/usr/lib64/python2.7/site-packages/matplotlib/image.py", line 580, in make_image raise RuntimeError('You must first set the image' RuntimeError: You must first set the image array or the image attribute ``` I think maybe `SlippyImageArtist` should be overriding `make_image`, too. </issue> <code> [start of lib/cartopy/mpl/slippy_image_artist.py] 1 # (C) British Crown Copyright 2014 - 2018, Met Office 2 # 3 # This file is part of cartopy. 4 # 5 # cartopy is free software: you can redistribute it and/or modify it under 6 # the terms of the GNU Lesser General Public License as published by the 7 # Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # cartopy is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU Lesser General Public License for more details. 14 # 15 # You should have received a copy of the GNU Lesser General Public License 16 # along with cartopy. If not, see <https://www.gnu.org/licenses/>. 17 """ 18 Define the SlippyImageArtist class, which interfaces with 19 :class:`cartopy.io.RasterSource` instances at draw time, for interactive 20 dragging and zooming of raster data. 
21 22 """ 23 24 from __future__ import (absolute_import, division, print_function) 25 26 from matplotlib.image import AxesImage 27 import matplotlib.artist 28 29 30 class SlippyImageArtist(AxesImage): 31 32 """ 33 A subclass of :class:`~matplotlib.image.AxesImage` which provides an 34 interface for getting a raster from the given object with interactive 35 slippy map type functionality. 36 37 Kwargs are passed to the AxesImage constructor. 38 39 """ 40 def __init__(self, ax, raster_source, **kwargs): 41 self.raster_source = raster_source 42 super(SlippyImageArtist, self).__init__(ax, **kwargs) 43 self.set_clip_path(ax.background_patch) 44 self.cache = [] 45 46 ax.figure.canvas.mpl_connect('button_press_event', self.on_press) 47 ax.figure.canvas.mpl_connect('button_release_event', self.on_release) 48 49 self.on_release() 50 51 def on_press(self, event=None): 52 self.user_is_interacting = True 53 54 def on_release(self, event=None): 55 self.user_is_interacting = False 56 self.stale = True 57 58 @matplotlib.artist.allow_rasterization 59 def draw(self, renderer, *args, **kwargs): 60 if not self.get_visible(): 61 return 62 63 ax = self.axes 64 window_extent = ax.get_window_extent() 65 [x1, y1], [x2, y2] = ax.viewLim.get_points() 66 if not self.user_is_interacting: 67 located_images = self.raster_source.fetch_raster( 68 ax.projection, extent=[x1, x2, y1, y2], 69 target_resolution=(window_extent.width, window_extent.height)) 70 self.cache = located_images 71 72 for img, extent in self.cache: 73 self.set_array(img) 74 with ax.hold_limits(): 75 self.set_extent(extent) 76 super(SlippyImageArtist, self).draw(renderer, *args, **kwargs) 77 [end of lib/cartopy/mpl/slippy_image_artist.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/cartopy/mpl/slippy_image_artist.py b/lib/cartopy/mpl/slippy_image_artist.py
--- a/lib/cartopy/mpl/slippy_image_artist.py
+++ b/lib/cartopy/mpl/slippy_image_artist.py
@@ -74,3 +74,8 @@
             with ax.hold_limits():
                 self.set_extent(extent)
         super(SlippyImageArtist, self).draw(renderer, *args, **kwargs)
+
+    def can_composite(self):
+        # As per https://github.com/SciTools/cartopy/issues/689, disable
+        # compositing multiple raster sources.
+        return False
{"golden_diff": "diff --git a/lib/cartopy/mpl/slippy_image_artist.py b/lib/cartopy/mpl/slippy_image_artist.py\n--- a/lib/cartopy/mpl/slippy_image_artist.py\n+++ b/lib/cartopy/mpl/slippy_image_artist.py\n@@ -74,3 +74,8 @@\n with ax.hold_limits():\n self.set_extent(extent)\n super(SlippyImageArtist, self).draw(renderer, *args, **kwargs)\n+\n+ def can_composite(self):\n+ # As per https://github.com/SciTools/cartopy/issues/689, disable\n+ # compositing multiple raster sources.\n+ return False\n", "issue": "SlippyImageArtist cannot be composited\nFor example, take the WMTS example and add a second layer. Then attempt to save as a PDF.\n\n``` python\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\nurl = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'\nlayer1 = 'VIIRS_CityLights_2012'\nlayer2 = 'ASTER_GDEM_Color_Index'\n\nax = plt.axes(projection=ccrs.PlateCarree())\nax.add_wmts(url, layer1)\nax.add_wmts(url, layer2)\nax.set_extent((-15, 25, 35, 60))\n\nplt.title('Suomi NPP Earth at night April/October 2012')\nplt.savefig('test.pdf')\nplt.show()\n```\n\nwhich results in:\n\n``` python\nTraceback (most recent call last):\n File \"wmts.py\", line 33, in main\n plt.savefig('test.pdf')\n File \"/usr/lib64/python2.7/site-packages/matplotlib/pyplot.py\", line 577, in savefig\n res = fig.savefig(*args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/figure.py\", line 1476, in savefig\n self.canvas.print_figure(*args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/backends/backend_qt5agg.py\", line 161, in print_figure\n FigureCanvasAgg.print_figure(self, *args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/backend_bases.py\", line 2211, in print_figure\n **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/backends/backend_pdf.py\", line 2485, in print_pdf\n self.figure.draw(renderer)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/artist.py\", line 59, in draw_wrapper\n draw(artist, renderer, *args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/figure.py\", line 1085, in draw\n func(*args)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/artist.py\", line 59, in draw_wrapper\n draw(artist, renderer, *args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/cartopy/mpl/geoaxes.py\", line 359, in draw\n inframe=inframe)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/artist.py\", line 59, in draw_wrapper\n draw(artist, renderer, *args, **kwargs)\n File \"/usr/lib64/python2.7/site-packages/matplotlib/axes/_base.py\", line 2081, in draw\n for z, im in zorder_images]\n File \"/usr/lib64/python2.7/site-packages/matplotlib/image.py\", line 580, in make_image\n raise RuntimeError('You must first set the image'\nRuntimeError: You must first set the image array or the image attribute\n```\n\nI think maybe `SlippyImageArtist` should be overriding `make_image`, too.\n\n", "before_files": [{"content": "# (C) British Crown Copyright 2014 - 2018, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <https://www.gnu.org/licenses/>.\n\"\"\"\nDefine the SlippyImageArtist class, which interfaces with\n:class:`cartopy.io.RasterSource` instances at draw time, for interactive\ndragging and zooming of raster data.\n\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\n\nfrom matplotlib.image import AxesImage\nimport matplotlib.artist\n\n\nclass SlippyImageArtist(AxesImage):\n\n \"\"\"\n A subclass of :class:`~matplotlib.image.AxesImage` which provides an\n interface for getting a raster from the given object with interactive\n slippy map type functionality.\n\n Kwargs are passed to the AxesImage constructor.\n\n \"\"\"\n def __init__(self, ax, raster_source, **kwargs):\n self.raster_source = raster_source\n super(SlippyImageArtist, self).__init__(ax, **kwargs)\n self.set_clip_path(ax.background_patch)\n self.cache = []\n\n ax.figure.canvas.mpl_connect('button_press_event', self.on_press)\n ax.figure.canvas.mpl_connect('button_release_event', self.on_release)\n\n self.on_release()\n\n def on_press(self, event=None):\n self.user_is_interacting = True\n\n def on_release(self, event=None):\n self.user_is_interacting = False\n self.stale = True\n\n @matplotlib.artist.allow_rasterization\n def draw(self, renderer, *args, **kwargs):\n if not self.get_visible():\n return\n\n ax = self.axes\n window_extent = ax.get_window_extent()\n [x1, y1], [x2, y2] = ax.viewLim.get_points()\n if not self.user_is_interacting:\n located_images = self.raster_source.fetch_raster(\n ax.projection, extent=[x1, x2, y1, y2],\n target_resolution=(window_extent.width, window_extent.height))\n self.cache = located_images\n\n for img, extent in self.cache:\n self.set_array(img)\n with ax.hold_limits():\n self.set_extent(extent)\n super(SlippyImageArtist, self).draw(renderer, *args, **kwargs)\n", "path": "lib/cartopy/mpl/slippy_image_artist.py"}]}
2,044
144
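The cartopy fix above works because matplotlib only merges a batch of images into a single RGBA buffer when every artist in the batch agrees to it. The toy below is a rough sketch of that decision, not the cartopy/matplotlib source; all class and function names are made up for illustration.

```python
# Toy model of the compositing decision behind the cartopy fix.
class Image:
    def can_composite(self):
        return True

class SlippyImage(Image):
    def can_composite(self):
        # Mirrors the patch: slippy layers opt out of being merged.
        return False

def draw_images(images):
    # One veto is enough to fall back to drawing artists one by one,
    # which sidesteps the "must first set the image array" failure
    # from the issue's traceback.
    if images and all(im.can_composite() for im in images):
        return 'composited into one buffer'
    return 'drawn one artist at a time'

print(draw_images([Image(), Image()]))        # composited into one buffer
print(draw_images([Image(), SlippyImage()]))  # drawn one artist at a time
```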
gh_patches_debug_19761
rasdani/github-patches
git_diff
sopel-irc__sopel-1381
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ctrl-C doesn't run shutdown routines When pressing <kbd>Ctrl</kbd>-<kbd>C</kbd> to interrupt a Sopel instance running in the foreground of an active shell, it simply prints `KeyboardInterrupt` and exits seemingly without calling any of the shutdown routines. Pressing <kbd>Ctrl</kbd>-<kbd>C</kbd> should behave more or less the same as `sopel --quit`. Discovered while testing for #1369. </issue> <code> [start of sopel/__init__.py] 1 # coding=utf-8 2 # ASCII ONLY IN THIS FILE THOUGH!!!!!!! 3 # Python does some stupid bullshit of respecting LC_ALL over the encoding on the 4 # file, so in order to undo Python's ridiculous fucking idiocy, we have to have 5 # our own check. 6 7 # Copyright 2008, Sean B. Palmer, inamidst.com 8 # Copyright 2012, Elsie Powell, http://embolalia.com 9 # Copyright 2012, Elad Alfassa <[email protected]> 10 # 11 # Licensed under the Eiffel Forum License 2. 12 13 from __future__ import unicode_literals, absolute_import, print_function, division 14 15 import locale 16 import sys 17 loc = locale.getlocale() 18 if sys.version_info.major > 2: 19 if not loc[1] or 'UTF-8' not in loc[1]: 20 print('WARNING!!! You are running with a non-UTF8 locale environment ' 21 'variables (e.g. LC_ALL is set to "C"), which makes Python 3 do ' 22 'stupid things. If you get strange errors, please set it to ' 23 'something like "en_US.UTF-8".', file=sys.stderr) 24 25 26 from collections import namedtuple 27 import os 28 import re 29 import time 30 import traceback 31 import signal 32 33 __version__ = '6.5.3' 34 35 36 def _version_info(version=__version__): 37 regex = re.compile(r'(\d+)\.(\d+)\.(\d+)(?:(a|b|rc)(\d+))?.*') 38 version_groups = regex.match(__version__).groups() 39 major, minor, micro = (int(piece) for piece in version_groups[0:3]) 40 level = version_groups[3] 41 serial = int(version_groups[4] or 0) 42 if level == 'a': 43 level = 'alpha' 44 elif level == 'b': 45 level = 'beta' 46 elif level == 'rc': 47 level = 'candidate' 48 elif not level and version_groups[4] is None: 49 level = 'final' 50 else: 51 level = 'alpha' 52 version_type = namedtuple('version_info', 53 'major, minor, micro, releaselevel, serial') 54 return version_type(major, minor, micro, level, serial) 55 56 57 version_info = _version_info() 58 59 60 def run(config, pid_file, daemon=False): 61 import sopel.bot as bot 62 import sopel.logger 63 from sopel.tools import stderr 64 delay = 20 65 # Inject ca_certs from config to web for SSL validation of web requests 66 if not config.core.ca_certs: 67 stderr('Could not open CA certificates file. 
SSL will not ' 68 'work properly.') 69 70 def signal_handler(sig, frame): 71 if sig == signal.SIGUSR1 or sig == signal.SIGTERM: 72 stderr('Got quit signal, shutting down.') 73 p.quit('Closing') 74 while True: 75 try: 76 p = bot.Sopel(config, daemon=daemon) 77 if hasattr(signal, 'SIGUSR1'): 78 signal.signal(signal.SIGUSR1, signal_handler) 79 if hasattr(signal, 'SIGTERM'): 80 signal.signal(signal.SIGTERM, signal_handler) 81 sopel.logger.setup_logging(p) 82 p.run(config.core.host, int(config.core.port)) 83 except KeyboardInterrupt: 84 break 85 except Exception: # TODO: Be specific 86 trace = traceback.format_exc() 87 try: 88 stderr(trace) 89 except Exception: # TODO: Be specific 90 pass 91 logfile = open(os.path.join(config.core.logdir, 'exceptions.log'), 'a') 92 logfile.write('Critical exception in core') 93 logfile.write(trace) 94 logfile.write('----------------------------------------\n\n') 95 logfile.close() 96 os.unlink(pid_file) 97 os._exit(1) 98 99 if not isinstance(delay, int): 100 break 101 if p.hasquit: 102 break 103 stderr('Warning: Disconnected. Reconnecting in %s seconds...' % delay) 104 time.sleep(delay) 105 os.unlink(pid_file) 106 os._exit(0) 107 [end of sopel/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sopel/__init__.py b/sopel/__init__.py --- a/sopel/__init__.py +++ b/sopel/__init__.py @@ -68,7 +68,7 @@ 'work properly.') def signal_handler(sig, frame): - if sig == signal.SIGUSR1 or sig == signal.SIGTERM: + if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT: stderr('Got quit signal, shutting down.') p.quit('Closing') while True: @@ -78,6 +78,8 @@ signal.signal(signal.SIGUSR1, signal_handler) if hasattr(signal, 'SIGTERM'): signal.signal(signal.SIGTERM, signal_handler) + if hasattr(signal, 'SIGINT'): + signal.signal(signal.SIGINT, signal_handler) sopel.logger.setup_logging(p) p.run(config.core.host, int(config.core.port)) except KeyboardInterrupt:
{"golden_diff": "diff --git a/sopel/__init__.py b/sopel/__init__.py\n--- a/sopel/__init__.py\n+++ b/sopel/__init__.py\n@@ -68,7 +68,7 @@\n 'work properly.')\n \n def signal_handler(sig, frame):\n- if sig == signal.SIGUSR1 or sig == signal.SIGTERM:\n+ if sig == signal.SIGUSR1 or sig == signal.SIGTERM or sig == signal.SIGINT:\n stderr('Got quit signal, shutting down.')\n p.quit('Closing')\n while True:\n@@ -78,6 +78,8 @@\n signal.signal(signal.SIGUSR1, signal_handler)\n if hasattr(signal, 'SIGTERM'):\n signal.signal(signal.SIGTERM, signal_handler)\n+ if hasattr(signal, 'SIGINT'):\n+ signal.signal(signal.SIGINT, signal_handler)\n sopel.logger.setup_logging(p)\n p.run(config.core.host, int(config.core.port))\n except KeyboardInterrupt:\n", "issue": "Ctrl-C doesn't run shutdown routines\nWhen pressing <kbd>Ctrl</kbd>-<kbd>C</kbd> to interrupt a Sopel instance running in the foreground of an active shell, it simply prints `KeyboardInterrupt` and exits seemingly without calling any of the shutdown routines.\r\n\r\nPressing <kbd>Ctrl</kbd>-<kbd>C</kbd> should behave more or less the same as `sopel --quit`.\r\n\r\nDiscovered while testing for #1369.\n", "before_files": [{"content": "# coding=utf-8\n# ASCII ONLY IN THIS FILE THOUGH!!!!!!!\n# Python does some stupid bullshit of respecting LC_ALL over the encoding on the\n# file, so in order to undo Python's ridiculous fucking idiocy, we have to have\n# our own check.\n\n# Copyright 2008, Sean B. Palmer, inamidst.com\n# Copyright 2012, Elsie Powell, http://embolalia.com\n# Copyright 2012, Elad Alfassa <[email protected]>\n#\n# Licensed under the Eiffel Forum License 2.\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport locale\nimport sys\nloc = locale.getlocale()\nif sys.version_info.major > 2:\n if not loc[1] or 'UTF-8' not in loc[1]:\n print('WARNING!!! You are running with a non-UTF8 locale environment '\n 'variables (e.g. LC_ALL is set to \"C\"), which makes Python 3 do '\n 'stupid things. If you get strange errors, please set it to '\n 'something like \"en_US.UTF-8\".', file=sys.stderr)\n\n\nfrom collections import namedtuple\nimport os\nimport re\nimport time\nimport traceback\nimport signal\n\n__version__ = '6.5.3'\n\n\ndef _version_info(version=__version__):\n regex = re.compile(r'(\\d+)\\.(\\d+)\\.(\\d+)(?:(a|b|rc)(\\d+))?.*')\n version_groups = regex.match(__version__).groups()\n major, minor, micro = (int(piece) for piece in version_groups[0:3])\n level = version_groups[3]\n serial = int(version_groups[4] or 0)\n if level == 'a':\n level = 'alpha'\n elif level == 'b':\n level = 'beta'\n elif level == 'rc':\n level = 'candidate'\n elif not level and version_groups[4] is None:\n level = 'final'\n else:\n level = 'alpha'\n version_type = namedtuple('version_info',\n 'major, minor, micro, releaselevel, serial')\n return version_type(major, minor, micro, level, serial)\n\n\nversion_info = _version_info()\n\n\ndef run(config, pid_file, daemon=False):\n import sopel.bot as bot\n import sopel.logger\n from sopel.tools import stderr\n delay = 20\n # Inject ca_certs from config to web for SSL validation of web requests\n if not config.core.ca_certs:\n stderr('Could not open CA certificates file. 
SSL will not '\n 'work properly.')\n\n def signal_handler(sig, frame):\n if sig == signal.SIGUSR1 or sig == signal.SIGTERM:\n stderr('Got quit signal, shutting down.')\n p.quit('Closing')\n while True:\n try:\n p = bot.Sopel(config, daemon=daemon)\n if hasattr(signal, 'SIGUSR1'):\n signal.signal(signal.SIGUSR1, signal_handler)\n if hasattr(signal, 'SIGTERM'):\n signal.signal(signal.SIGTERM, signal_handler)\n sopel.logger.setup_logging(p)\n p.run(config.core.host, int(config.core.port))\n except KeyboardInterrupt:\n break\n except Exception: # TODO: Be specific\n trace = traceback.format_exc()\n try:\n stderr(trace)\n except Exception: # TODO: Be specific\n pass\n logfile = open(os.path.join(config.core.logdir, 'exceptions.log'), 'a')\n logfile.write('Critical exception in core')\n logfile.write(trace)\n logfile.write('----------------------------------------\\n\\n')\n logfile.close()\n os.unlink(pid_file)\n os._exit(1)\n\n if not isinstance(delay, int):\n break\n if p.hasquit:\n break\n stderr('Warning: Disconnected. Reconnecting in %s seconds...' % delay)\n time.sleep(delay)\n os.unlink(pid_file)\n os._exit(0)\n", "path": "sopel/__init__.py"}]}
1,725
212
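The sopel fix registers the same quit handler for `SIGINT` that was already wired up for `SIGUSR1` and `SIGTERM`, so Ctrl-C now takes the shutdown path instead of raising a bare `KeyboardInterrupt`. A minimal sketch of the pattern follows; the handler body is a placeholder, the `hasattr` guard covers platforms without `SIGUSR1`, and the self-signal at the end is only reliable on POSIX.

```python
import os
import signal

def signal_handler(sig, frame):
    # Placeholder for Sopel's real shutdown routine (p.quit('Closing')).
    print('Got quit signal %d, shutting down.' % sig)

# Installing a SIGINT handler replaces Python's default behaviour of
# raising KeyboardInterrupt, which is what made Ctrl-C skip cleanup.
for name in ('SIGUSR1', 'SIGTERM', 'SIGINT'):
    if hasattr(signal, name):
        signal.signal(getattr(signal, name), signal_handler)

os.kill(os.getpid(), signal.SIGINT)  # simulate Ctrl-C
```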
gh_patches_debug_6195
rasdani/github-patches
git_diff
pystiche__pystiche-132
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create encoding preprocessors not until runtime Right now the preprocessors are created at import https://github.com/pmeier/pystiche/blob/cad5ab6e9485680f2543cf4397d0d21e72a88b9e/pystiche/enc/preprocessing.py#L1-L4 We should only create them if they are needed to speed up the import. </issue> <code> [start of pystiche/enc/preprocessing.py] 1 from torch import nn 2 from pystiche.image import TorchPreprocessing, CaffePreprocessing 3 4 PREPROCESSORS = {"torch": TorchPreprocessing(), "caffe": CaffePreprocessing()} 5 6 __all__ = ["get_preprocessor"] 7 8 9 def get_preprocessor(framework: str) -> nn.Module: 10 return PREPROCESSORS[framework] 11 [end of pystiche/enc/preprocessing.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pystiche/enc/preprocessing.py b/pystiche/enc/preprocessing.py --- a/pystiche/enc/preprocessing.py +++ b/pystiche/enc/preprocessing.py @@ -1,10 +1,10 @@ from torch import nn from pystiche.image import TorchPreprocessing, CaffePreprocessing -PREPROCESSORS = {"torch": TorchPreprocessing(), "caffe": CaffePreprocessing()} +PREPROCESSORS = {"torch": TorchPreprocessing, "caffe": CaffePreprocessing} __all__ = ["get_preprocessor"] def get_preprocessor(framework: str) -> nn.Module: - return PREPROCESSORS[framework] + return PREPROCESSORS[framework]()
{"golden_diff": "diff --git a/pystiche/enc/preprocessing.py b/pystiche/enc/preprocessing.py\n--- a/pystiche/enc/preprocessing.py\n+++ b/pystiche/enc/preprocessing.py\n@@ -1,10 +1,10 @@\n from torch import nn\n from pystiche.image import TorchPreprocessing, CaffePreprocessing\n \n-PREPROCESSORS = {\"torch\": TorchPreprocessing(), \"caffe\": CaffePreprocessing()}\n+PREPROCESSORS = {\"torch\": TorchPreprocessing, \"caffe\": CaffePreprocessing}\n \n __all__ = [\"get_preprocessor\"]\n \n \n def get_preprocessor(framework: str) -> nn.Module:\n- return PREPROCESSORS[framework]\n+ return PREPROCESSORS[framework]()\n", "issue": "Create encoding preprocessors not until runtime\nRight now the preprocessors are created at import\r\n\r\nhttps://github.com/pmeier/pystiche/blob/cad5ab6e9485680f2543cf4397d0d21e72a88b9e/pystiche/enc/preprocessing.py#L1-L4\r\n\r\nWe should only create them if they are needed to speed up the import.\n", "before_files": [{"content": "from torch import nn\nfrom pystiche.image import TorchPreprocessing, CaffePreprocessing\n\nPREPROCESSORS = {\"torch\": TorchPreprocessing(), \"caffe\": CaffePreprocessing()}\n\n__all__ = [\"get_preprocessor\"]\n\n\ndef get_preprocessor(framework: str) -> nn.Module:\n return PREPROCESSORS[framework]\n", "path": "pystiche/enc/preprocessing.py"}]}
723
159
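The pystiche patch stores classes in the registry and instantiates on lookup, which is the standard lazy-construction trick for keeping import time cheap. A self-contained sketch, with a made-up class standing in for the real preprocessors:

```python
class ExpensivePreprocessor:
    def __init__(self):
        print('building preprocessor')  # imagine weight loading here

# Map names to classes (or any zero-argument factory), not instances.
PREPROCESSORS = {'torch': ExpensivePreprocessor}

def get_preprocessor(framework):
    # Construction is deferred to the first actual request.
    return PREPROCESSORS[framework]()

print('import finished, nothing built yet')
get_preprocessor('torch')
```

One side effect of the patched version: every call now builds a fresh object, so callers that want to share a preprocessor would have to cache the result themselves.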
gh_patches_debug_1042
rasdani/github-patches
git_diff
fossasia__open-event-server-395
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> list_events url is inconsistent in API v2 The url is `/events/` whereas it should be `/events` to be consistent with other urls. </issue> <code> [start of open_event/api/events.py] 1 from flask.ext.restplus import Resource, Namespace, fields 2 3 from open_event.models.event import Event as EventModel 4 from .helpers import get_object_list, get_object_or_404 5 6 api = Namespace('events', description='Events') 7 8 EVENT = api.model('Event', { 9 'id': fields.Integer(required=True), 10 'name': fields.String, 11 'email': fields.String, 12 'color': fields.String, 13 'logo': fields.String, 14 'start_time': fields.DateTime, 15 'end_time': fields.DateTime, 16 'latitude': fields.Float, 17 'longitude': fields.Float, 18 'slogan': fields.String, 19 'url': fields.String, 20 'location_name': fields.String, 21 }) 22 23 24 @api.route('/<int:event_id>') 25 @api.param('event_id') 26 @api.response(404, 'Event not found') 27 class Event(Resource): 28 @api.doc('get_event') 29 @api.marshal_with(EVENT) 30 def get(self, event_id): 31 """Fetch an event given its id""" 32 return get_object_or_404(EventModel, event_id) 33 34 35 @api.route('/') 36 class EventList(Resource): 37 @api.doc('list_events') 38 @api.marshal_list_with(EVENT) 39 def get(self): 40 """List all events""" 41 return get_object_list(EventModel) 42 [end of open_event/api/events.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/open_event/api/events.py b/open_event/api/events.py --- a/open_event/api/events.py +++ b/open_event/api/events.py @@ -32,7 +32,7 @@ return get_object_or_404(EventModel, event_id) [email protected]('/') [email protected]('') class EventList(Resource): @api.doc('list_events') @api.marshal_list_with(EVENT)
{"golden_diff": "diff --git a/open_event/api/events.py b/open_event/api/events.py\n--- a/open_event/api/events.py\n+++ b/open_event/api/events.py\n@@ -32,7 +32,7 @@\n return get_object_or_404(EventModel, event_id)\n \n \[email protected]('/')\[email protected]('')\n class EventList(Resource):\n @api.doc('list_events')\n @api.marshal_list_with(EVENT)\n", "issue": "list_events url is inconsistent in API v2\nThe url is `/events/` whereas it should be `/events` to be consistent with other urls. \n\n", "before_files": [{"content": "from flask.ext.restplus import Resource, Namespace, fields\n\nfrom open_event.models.event import Event as EventModel\nfrom .helpers import get_object_list, get_object_or_404\n\napi = Namespace('events', description='Events')\n\nEVENT = api.model('Event', {\n 'id': fields.Integer(required=True),\n 'name': fields.String,\n 'email': fields.String,\n 'color': fields.String,\n 'logo': fields.String,\n 'start_time': fields.DateTime,\n 'end_time': fields.DateTime,\n 'latitude': fields.Float,\n 'longitude': fields.Float,\n 'slogan': fields.String,\n 'url': fields.String,\n 'location_name': fields.String,\n})\n\n\[email protected]('/<int:event_id>')\[email protected]('event_id')\[email protected](404, 'Event not found')\nclass Event(Resource):\n @api.doc('get_event')\n @api.marshal_with(EVENT)\n def get(self, event_id):\n \"\"\"Fetch an event given its id\"\"\"\n return get_object_or_404(EventModel, event_id)\n\n\[email protected]('/')\nclass EventList(Resource):\n @api.doc('list_events')\n @api.marshal_list_with(EVENT)\n def get(self):\n \"\"\"List all events\"\"\"\n return get_object_list(EventModel)\n", "path": "open_event/api/events.py"}]}
924
93
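The open-event change is a single character, but it rests on how the route string is appended to the namespace prefix. The sketch below only models that trailing-slash effect; real Flask-RESTPlus routing does more, and the function here is invented for illustration.

```python
def join_url(namespace_prefix, route):
    # Simplified: the framework effectively concatenates the two parts.
    return namespace_prefix + route

print(join_url('/events', '/'))  # /events/  (the inconsistent form)
print(join_url('/events', ''))   # /events   (matches the other URLs)
```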
gh_patches_debug_6894
rasdani/github-patches
git_diff
ray-project__ray-1662
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [DataFrame] Error checking on Pandas version We need better reporting for issues with the Pandas version on a user's system. </issue> <code> [start of python/ray/dataframe/__init__.py] 1 from __future__ import absolute_import 2 from __future__ import division 3 from __future__ import print_function 4 import threading 5 6 DEFAULT_NPARTITIONS = 4 7 8 9 def set_npartition_default(n): 10 global DEFAULT_NPARTITIONS 11 DEFAULT_NPARTITIONS = n 12 13 14 def get_npartitions(): 15 return DEFAULT_NPARTITIONS 16 17 18 # We import these file after above two function 19 # because they depend on npartitions. 20 from .dataframe import DataFrame # noqa: 402 21 from .dataframe import from_pandas # noqa: 402 22 from .dataframe import to_pandas # noqa: 402 23 from .series import Series # noqa: 402 24 from .io import (read_csv, read_parquet) # noqa: 402 25 26 __all__ = [ 27 "DataFrame", "from_pandas", "to_pandas", "Series", "read_csv", 28 "read_parquet" 29 ] 30 31 try: 32 if threading.current_thread().name == "MainThread": 33 import ray 34 ray.init() 35 except AssertionError: 36 pass 37 [end of python/ray/dataframe/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/ray/dataframe/__init__.py b/python/ray/dataframe/__init__.py --- a/python/ray/dataframe/__init__.py +++ b/python/ray/dataframe/__init__.py @@ -1,8 +1,18 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function + +import pandas as pd import threading +pd_version = pd.__version__ +pd_major = int(pd_version.split(".")[0]) +pd_minor = int(pd_version.split(".")[1]) + +if pd_major == 0 and pd_minor < 22: + raise Exception("In order to use Pandas on Ray, please upgrade your Pandas" + " version to >= 0.22.") + DEFAULT_NPARTITIONS = 4
{"golden_diff": "diff --git a/python/ray/dataframe/__init__.py b/python/ray/dataframe/__init__.py\n--- a/python/ray/dataframe/__init__.py\n+++ b/python/ray/dataframe/__init__.py\n@@ -1,8 +1,18 @@\n from __future__ import absolute_import\n from __future__ import division\n from __future__ import print_function\n+\n+import pandas as pd\n import threading\n \n+pd_version = pd.__version__\n+pd_major = int(pd_version.split(\".\")[0])\n+pd_minor = int(pd_version.split(\".\")[1])\n+\n+if pd_major == 0 and pd_minor < 22:\n+ raise Exception(\"In order to use Pandas on Ray, please upgrade your Pandas\"\n+ \" version to >= 0.22.\")\n+\n DEFAULT_NPARTITIONS = 4\n", "issue": "[DataFrame] Error checking on Pandas version\nWe need better reporting for issues with the Pandas version on a user's system. \n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport threading\n\nDEFAULT_NPARTITIONS = 4\n\n\ndef set_npartition_default(n):\n global DEFAULT_NPARTITIONS\n DEFAULT_NPARTITIONS = n\n\n\ndef get_npartitions():\n return DEFAULT_NPARTITIONS\n\n\n# We import these file after above two function\n# because they depend on npartitions.\nfrom .dataframe import DataFrame # noqa: 402\nfrom .dataframe import from_pandas # noqa: 402\nfrom .dataframe import to_pandas # noqa: 402\nfrom .series import Series # noqa: 402\nfrom .io import (read_csv, read_parquet) # noqa: 402\n\n__all__ = [\n \"DataFrame\", \"from_pandas\", \"to_pandas\", \"Series\", \"read_csv\",\n \"read_parquet\"\n]\n\ntry:\n if threading.current_thread().name == \"MainThread\":\n import ray\n ray.init()\nexcept AssertionError:\n pass\n", "path": "python/ray/dataframe/__init__.py"}]}
878
182
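The ray patch adds an import-time version gate for pandas. Below is a standalone version of the same check; it assumes pandas is importable, and like the patch it trusts the first two version components to be plain integers, so unusual version strings would be better handled with `packaging.version`.

```python
import pandas as pd

major, minor = (int(part) for part in pd.__version__.split('.')[:2])
if (major, minor) < (0, 22):
    raise Exception('In order to use Pandas on Ray, please upgrade your '
                    'Pandas version to >= 0.22.')
print('pandas %s is new enough' % pd.__version__)
```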
gh_patches_debug_10727
rasdani/github-patches
git_diff
akvo__akvo-rsr-2712
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> react-clickoutside doesn't close the date-picker on clicking outside Created via Reamaze: Link: https://akvoo.reamaze.com/admin/conversations/bug-10 Assignee: Anthony Gonzalez React-clickoutside needs to load before the date-picker loads </issue> <code> [start of akvo/rsr/context_processors.py] 1 # -*- coding: utf-8 -*- 2 """ 3 Akvo RSR is covered by the GNU Affero General Public License. 4 5 See more details in the license.txt file located at the root folder of the 6 Akvo RSR module. For additional details on the GNU license please see 7 < http://www.gnu.org/licenses/agpl.html >. 8 """ 9 10 import django 11 12 from django.conf import settings 13 from django.core.exceptions import DisallowedHost 14 from django.contrib.sites.models import get_current_site 15 16 17 def extra_context(request, protocol="http"): 18 """Add information to the request context.""" 19 try: 20 current_site = get_current_site(request) 21 except DisallowedHost: 22 current_site = None 23 24 django_version = django.get_version() 25 debug = getattr(settings, 'DEBUG', False) 26 deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown') 27 deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown') 28 deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown') 29 deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown') 30 31 return dict( 32 current_site=current_site, 33 django_version=django_version, 34 debug=debug, 35 deploy_tag=deploy_tag, 36 deploy_branch=deploy_branch, 37 deploy_commit_id=deploy_commit_id, 38 deploy_commit_full_id=deploy_commit_full_id 39 ) 40 41 42 def get_current_path_without_lang(request): 43 """Return current path without lang.""" 44 path = request.get_full_path() 45 path_bits = path.split('/') 46 path = '/'.join(path_bits[2:]) 47 return {'current_path_without_lang': path} 48 49 50 def extra_pages_context(request): 51 """Add context information of an RSR Page.""" 52 if request.rsr_page: 53 page = request.rsr_page 54 return { 55 'rsr_page': page, 56 'favicon': page.favicon, 57 'logo': page.logo, 58 'organisation': page.organisation, 59 'return_url': page.return_url, 60 'return_url_text': page.custom_return_url_text, 61 'stylesheet': page.stylesheet, 62 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN), 63 'domain_url': '//{}'.format(settings.RSR_DOMAIN), 64 'no_facebook': not page.facebook_button, 65 'facebook_app_id': page.facebook_app_id, 66 'no_twitter': not page.twitter_button, 67 } 68 69 return {} 70 [end of akvo/rsr/context_processors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py --- a/akvo/rsr/context_processors.py +++ b/akvo/rsr/context_processors.py @@ -58,7 +58,7 @@ 'organisation': page.organisation, 'return_url': page.return_url, 'return_url_text': page.custom_return_url_text, - 'stylesheet': page.stylesheet, + 'page_stylesheet': page.stylesheet, 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN), 'domain_url': '//{}'.format(settings.RSR_DOMAIN), 'no_facebook': not page.facebook_button,
{"golden_diff": "diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py\n--- a/akvo/rsr/context_processors.py\n+++ b/akvo/rsr/context_processors.py\n@@ -58,7 +58,7 @@\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n- 'stylesheet': page.stylesheet,\n+ 'page_stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n", "issue": "react-clickoutside doesn't close the date-picker on clicking outside\nCreated via Reamaze:\r\n\r\nLink: https://akvoo.reamaze.com/admin/conversations/bug-10\r\nAssignee: Anthony Gonzalez\r\n\r\nReact-clickoutside needs to load before the date-picker loads\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please see\n< http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport django\n\nfrom django.conf import settings\nfrom django.core.exceptions import DisallowedHost\nfrom django.contrib.sites.models import get_current_site\n\n\ndef extra_context(request, protocol=\"http\"):\n \"\"\"Add information to the request context.\"\"\"\n try:\n current_site = get_current_site(request)\n except DisallowedHost:\n current_site = None\n\n django_version = django.get_version()\n debug = getattr(settings, 'DEBUG', False)\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n\n return dict(\n current_site=current_site,\n django_version=django_version,\n debug=debug,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id\n )\n\n\ndef get_current_path_without_lang(request):\n \"\"\"Return current path without lang.\"\"\"\n path = request.get_full_path()\n path_bits = path.split('/')\n path = '/'.join(path_bits[2:])\n return {'current_path_without_lang': path}\n\n\ndef extra_pages_context(request):\n \"\"\"Add context information of an RSR Page.\"\"\"\n if request.rsr_page:\n page = request.rsr_page\n return {\n 'rsr_page': page,\n 'favicon': page.favicon,\n 'logo': page.logo,\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n 'stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n 'facebook_app_id': page.facebook_app_id,\n 'no_twitter': not page.twitter_button,\n }\n\n return {}\n", "path": "akvo/rsr/context_processors.py"}]}
1,237
146
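The akvo diff is a plain rename of one template-context key. The sketch below shows the resulting shape of the context processor; `_Page` and its attribute are placeholders, and the name collision hinted at in the comment is an assumption, since the record itself only shows the rename.

```python
class _Page:
    stylesheet = '/media/custom.css'  # placeholder attribute

def extra_pages_context(page):
    return {
        'rsr_page': page,
        # 'stylesheet' became 'page_stylesheet'; presumably the old key
        # clashed with another template variable of the same name.
        'page_stylesheet': page.stylesheet,
    }

print(extra_pages_context(_Page()))
```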
gh_patches_debug_9837
rasdani/github-patches
git_diff
StackStorm__st2-2489
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> action alias regex fails to evaluate null optional arguments when type is not string For example, something like: `update {{hostname}}( {{count}} times)?` Works for `!update myhost 2 times` Fails for `!update myhost` Error: `(invalid literal for int() with base 10: '')` So the workaround is to change the type of `count` from integer to string. Or @emedvedev provides another workaround: ``` - update {{ hostname }} {{ count }} times - update {{ hostname }} ``` Start from the most explicit. </issue> <code> [start of st2common/st2common/models/utils/action_alias_utils.py] 1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more 2 # contributor license agreements. See the NOTICE file distributed with 3 # this work for additional information regarding copyright ownership. 4 # The ASF licenses this file to You under the Apache License, Version 2.0 5 # (the "License"); you may not use this file except in compliance with 6 # the License. You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 import re 17 from st2common.exceptions import content 18 19 __all__ = [ 20 'ActionAliasFormatParser' 21 ] 22 23 24 class ActionAliasFormatParser(object): 25 26 def __init__(self, alias_format=None, param_stream=None): 27 self._format = alias_format or '' 28 self._param_stream = param_stream or '' 29 30 def get_extracted_param_value(self): 31 32 result = {} 33 34 # As there's a lot of questions about using regular expressions, 35 # I'll try to be thorough when documenting this code. 36 37 # We're parsing the arbitrary key-value pairs at the end of the stream 38 # to support passing of parameters not specified in the format string, 39 # and cutting them from the stream as they're no longer needed. 40 # Possible values are quoted strings, a word, or anything inside "{}". 41 pairs_match = r'(?:^|\s+)(\S+)=("(.*?)"|\'(.*?)\'|({.*?})|(\S+))' 42 extra = re.match(r'.*?((' + pairs_match + r'\s*)*)$', 43 self._param_stream, re.DOTALL) 44 if extra: 45 kv_pairs = re.findall(pairs_match, 46 extra.group(1), re.DOTALL) 47 self._param_stream = self._param_stream.replace(extra.group(1), '') 48 self._param_stream = " %s " % self._param_stream 49 50 # Now we'll match parameters with default values in form of 51 # {{ value = parameter }} (and all possible permutations of spaces), 52 # compiling them into a list. 53 # "test {{ url = http://google.com }} {{ extra = Test }}" will become 54 # [ ["url", "http://google.com"], ["extra", "Test"] ] 55 params = re.findall(r'{{\s*(.+?)\s*(?:=\s*[\'"]?({.+?}|.+?)[\'"]?)?\s*}}', 56 self._format, re.DOTALL) 57 58 # Now we're transforming our format string into a regular expression, 59 # substituting {{ ... }} with regex named groups, so that param_stream 60 # matched against this expression yields a dict of params with values. 61 param_match = r'["\']?(?P<\2>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']?' 
62 reg = re.sub(r'(\s*){{\s*([^=}]+?)\s*}}(?![\'"]?\s+}})', 63 r'\1' + param_match, 64 self._format) 65 reg = re.sub(r'(\s*){{\s*(\S+)\s*=\s*(?:{.+?}|.+?)\s*}}', 66 r'(?:\1' + param_match + r')?', 67 reg) 68 reg = re.sub(r'(\s*){{\s*(.+?)\s*}}', 69 r'\1' + param_match, 70 reg) 71 reg = '^\s*' + reg + r'\s*$' 72 73 # Now we're matching param_stream against our format string regex, 74 # getting a dict of values. We'll also get default values from 75 # "params" list if something is not present. 76 # Priority, from lowest to highest: 77 # 1. Default parameters 78 # 2. Matched parameters 79 # 3. Extra parameters 80 matched_stream = re.match(reg, self._param_stream, re.DOTALL) 81 if matched_stream: 82 values = matched_stream.groupdict() 83 for param in params: 84 matched_value = values[param[0]] if matched_stream else None 85 result[param[0]] = matched_value or param[1] 86 if extra: 87 for pair in kv_pairs: 88 result[pair[0]] = ''.join(pair[2:]) 89 90 if self._format and not (self._param_stream.strip() or any(result.values())): 91 raise content.ParseException('No value supplied and no default value found.') 92 93 return result 94 [end of st2common/st2common/models/utils/action_alias_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/st2common/st2common/models/utils/action_alias_utils.py b/st2common/st2common/models/utils/action_alias_utils.py --- a/st2common/st2common/models/utils/action_alias_utils.py +++ b/st2common/st2common/models/utils/action_alias_utils.py @@ -82,7 +82,9 @@ values = matched_stream.groupdict() for param in params: matched_value = values[param[0]] if matched_stream else None - result[param[0]] = matched_value or param[1] + matched_result = matched_value or param[1] + if matched_result: + result[param[0]] = matched_result if extra: for pair in kv_pairs: result[pair[0]] = ''.join(pair[2:])
{"golden_diff": "diff --git a/st2common/st2common/models/utils/action_alias_utils.py b/st2common/st2common/models/utils/action_alias_utils.py\n--- a/st2common/st2common/models/utils/action_alias_utils.py\n+++ b/st2common/st2common/models/utils/action_alias_utils.py\n@@ -82,7 +82,9 @@\n values = matched_stream.groupdict()\n for param in params:\n matched_value = values[param[0]] if matched_stream else None\n- result[param[0]] = matched_value or param[1]\n+ matched_result = matched_value or param[1]\n+ if matched_result:\n+ result[param[0]] = matched_result\n if extra:\n for pair in kv_pairs:\n result[pair[0]] = ''.join(pair[2:])\n", "issue": "action alias regex fails to evaluate null optional arguments when type is not string\nFor example, something like: `update {{hostname}}( {{count}} times)?` \nWorks for `!update myhost 2 times`\nFails for `!update myhost`\nError: `(invalid literal for int() with base 10: '')`\nSo the workaround is to change the type of `count` from integer to string.\nOr @emedvedev provides another workaround:\n\n```\n- update {{ hostname }} {{ count }} times\n- update {{ hostname }}\n```\n\nStart from the most explicit.\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nfrom st2common.exceptions import content\n\n__all__ = [\n 'ActionAliasFormatParser'\n]\n\n\nclass ActionAliasFormatParser(object):\n\n def __init__(self, alias_format=None, param_stream=None):\n self._format = alias_format or ''\n self._param_stream = param_stream or ''\n\n def get_extracted_param_value(self):\n\n result = {}\n\n # As there's a lot of questions about using regular expressions,\n # I'll try to be thorough when documenting this code.\n\n # We're parsing the arbitrary key-value pairs at the end of the stream\n # to support passing of parameters not specified in the format string,\n # and cutting them from the stream as they're no longer needed.\n # Possible values are quoted strings, a word, or anything inside \"{}\".\n pairs_match = r'(?:^|\\s+)(\\S+)=(\"(.*?)\"|\\'(.*?)\\'|({.*?})|(\\S+))'\n extra = re.match(r'.*?((' + pairs_match + r'\\s*)*)$',\n self._param_stream, re.DOTALL)\n if extra:\n kv_pairs = re.findall(pairs_match,\n extra.group(1), re.DOTALL)\n self._param_stream = self._param_stream.replace(extra.group(1), '')\n self._param_stream = \" %s \" % self._param_stream\n\n # Now we'll match parameters with default values in form of\n # {{ value = parameter }} (and all possible permutations of spaces),\n # compiling them into a list.\n # \"test {{ url = http://google.com }} {{ extra = Test }}\" will become\n # [ [\"url\", \"http://google.com\"], [\"extra\", \"Test\"] ]\n params = re.findall(r'{{\\s*(.+?)\\s*(?:=\\s*[\\'\"]?({.+?}|.+?)[\\'\"]?)?\\s*}}',\n self._format, re.DOTALL)\n\n # Now we're transforming 
our format string into a regular expression,\n # substituting {{ ... }} with regex named groups, so that param_stream\n # matched against this expression yields a dict of params with values.\n param_match = r'[\"\\']?(?P<\\2>(?:(?<=\\').+?(?=\\')|(?<=\").+?(?=\")|{.+?}|.+?))[\"\\']?'\n reg = re.sub(r'(\\s*){{\\s*([^=}]+?)\\s*}}(?![\\'\"]?\\s+}})',\n r'\\1' + param_match,\n self._format)\n reg = re.sub(r'(\\s*){{\\s*(\\S+)\\s*=\\s*(?:{.+?}|.+?)\\s*}}',\n r'(?:\\1' + param_match + r')?',\n reg)\n reg = re.sub(r'(\\s*){{\\s*(.+?)\\s*}}',\n r'\\1' + param_match,\n reg)\n reg = '^\\s*' + reg + r'\\s*$'\n\n # Now we're matching param_stream against our format string regex,\n # getting a dict of values. We'll also get default values from\n # \"params\" list if something is not present.\n # Priority, from lowest to highest:\n # 1. Default parameters\n # 2. Matched parameters\n # 3. Extra parameters\n matched_stream = re.match(reg, self._param_stream, re.DOTALL)\n if matched_stream:\n values = matched_stream.groupdict()\n for param in params:\n matched_value = values[param[0]] if matched_stream else None\n result[param[0]] = matched_value or param[1]\n if extra:\n for pair in kv_pairs:\n result[pair[0]] = ''.join(pair[2:])\n\n if self._format and not (self._param_stream.strip() or any(result.values())):\n raise content.ParseException('No value supplied and no default value found.')\n\n return result\n", "path": "st2common/st2common/models/utils/action_alias_utils.py"}]}
1,892
171
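The st2 guard stops empty matches from being written into the result dict, so an omitted optional group never reaches a later integer cast as `''`. Here is a cut-down, runnable version built around the `update {{hostname}}( {{count}} times)?` alias from the issue; the regex is hand-written for the example rather than generated by the parser.

```python
import re

FORMAT = re.compile(r'^update (?P<hostname>\S+)(?: (?P<count>\d+) times)?$')
DEFAULTS = {'hostname': None, 'count': None}

def parse(stream):
    result = {}
    match = FORMAT.match(stream)
    values = match.groupdict() if match else {}
    for name, default in DEFAULTS.items():
        matched_result = values.get(name) or default
        if matched_result:  # the added guard: drop empty/None values
            result[name] = matched_result
    return result

print(parse('update myhost 2 times'))  # {'hostname': 'myhost', 'count': '2'}
print(parse('update myhost'))          # {'hostname': 'myhost'}
```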
gh_patches_debug_31347
rasdani/github-patches
git_diff
pre-commit__pre-commit-2641
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dotnet install fails for prefixed packages ### search you tried in the issue tracker dotnet tool ### describe your issue A bit of an oversight when constructing `tool_name` here: https://github.com/pre-commit/pre-commit/blob/cb0bcfd67fc35e91f7b2eca7e33bceda459dca77/pre_commit/languages/dotnet.py#L60-L63 E.g. ```console $ pre-commit try-repo https://github.com/rkm/sample-dotnet-tool [INFO] Initializing environment for https://github.com/rkm/sample-dotnet-tool. =============================================================================== Using config: =============================================================================== repos: - repo: https://github.com/rkm/sample-dotnet-tool rev: e53a3601bc06bb038dac30da813572291dd8d58f hooks: - id: sample-dotnet-tool =============================================================================== [INFO] Installing environment for https://github.com/rkm/sample-dotnet-tool. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... An unexpected error has occurred: CalledProcessError: command: ('/home/rkm/bin/dotnet', 'tool', 'install', '--tool-path', '/tmp/tmp6bk4v26x/repotefhurdg/dotnetenv-default/bin', '--add-source', 'pre-commit-build', 'Rkm') return code: 1 expected return code: 0 stdout: /tmp/1873db78-d0a7-48ba-bbff-10a7ef85a2a6/restore.csproj : error NU1101: Unable to find package rkm. No packages exist with this id in source(s): /tmp/tmp6bk4v26x/repotefhurdg/pre-commit-build, nuget.org stderr: The tool package could not be restored. Tool 'rkm' failed to install. This failure may have been caused by: * You are attempting to install a preview release and did not use the --version option to specify the version. * A package by this name was found, but it was not a .NET tool. * The required NuGet feed cannot be accessed, perhaps because of an Internet connection problem. * You mistyped the name of the tool. 
For more reasons, including package naming enforcement, visit https://aka.ms/failure-installing-tool Check the log at /home/rkm/.cache/pre-commit/pre-commit.log ``` ### pre-commit --version pre-commit 2.20.0 ### .pre-commit-config.yaml ```yaml repos: - repo: https://github.com/rkm/sample-dotnet-tool rev: e53a3601bc06bb038dac30da813572291dd8d58f hooks: - id: sample-dotnet-tool ``` ### ~/.cache/pre-commit/pre-commit.log (if present) _No response_ </issue> <code> [start of pre_commit/languages/dotnet.py] 1 from __future__ import annotations 2 3 import contextlib 4 import os.path 5 from typing import Generator 6 from typing import Sequence 7 8 import pre_commit.constants as C 9 from pre_commit.envcontext import envcontext 10 from pre_commit.envcontext import PatchesT 11 from pre_commit.envcontext import Var 12 from pre_commit.hook import Hook 13 from pre_commit.languages import helpers 14 from pre_commit.prefix import Prefix 15 from pre_commit.util import clean_path_on_failure 16 17 ENVIRONMENT_DIR = 'dotnetenv' 18 BIN_DIR = 'bin' 19 20 get_default_version = helpers.basic_get_default_version 21 health_check = helpers.basic_health_check 22 23 24 def get_env_patch(venv: str) -> PatchesT: 25 return ( 26 ('PATH', (os.path.join(venv, BIN_DIR), os.pathsep, Var('PATH'))), 27 ) 28 29 30 @contextlib.contextmanager 31 def in_env(prefix: Prefix) -> Generator[None, None, None]: 32 directory = helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT) 33 envdir = prefix.path(directory) 34 with envcontext(get_env_patch(envdir)): 35 yield 36 37 38 def install_environment( 39 prefix: Prefix, 40 version: str, 41 additional_dependencies: Sequence[str], 42 ) -> None: 43 helpers.assert_version_default('dotnet', version) 44 helpers.assert_no_additional_deps('dotnet', additional_dependencies) 45 46 envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version)) 47 with clean_path_on_failure(envdir): 48 build_dir = 'pre-commit-build' 49 50 # Build & pack nupkg file 51 helpers.run_setup_cmd( 52 prefix, 53 ( 54 'dotnet', 'pack', 55 '--configuration', 'Release', 56 '--output', build_dir, 57 ), 58 ) 59 60 # Determine tool from the packaged file <tool_name>.<version>.nupkg 61 build_outputs = os.listdir(os.path.join(prefix.prefix_dir, build_dir)) 62 for output in build_outputs: 63 tool_name = output.split('.')[0] 64 65 # Install to bin dir 66 helpers.run_setup_cmd( 67 prefix, 68 ( 69 'dotnet', 'tool', 'install', 70 '--tool-path', os.path.join(envdir, BIN_DIR), 71 '--add-source', build_dir, 72 tool_name, 73 ), 74 ) 75 76 # Clean the git dir, ignoring the environment dir 77 clean_cmd = ('git', 'clean', '-ffxd', '-e', f'{ENVIRONMENT_DIR}-*') 78 helpers.run_setup_cmd(prefix, clean_cmd) 79 80 81 def run_hook( 82 hook: Hook, 83 file_args: Sequence[str], 84 color: bool, 85 ) -> tuple[int, bytes]: 86 with in_env(hook.prefix): 87 return helpers.run_xargs(hook, hook.cmd, file_args, color=color) 88 [end of pre_commit/languages/dotnet.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/dotnet.py b/pre_commit/languages/dotnet.py --- a/pre_commit/languages/dotnet.py +++ b/pre_commit/languages/dotnet.py @@ -2,6 +2,9 @@ import contextlib import os.path +import re +import xml.etree.ElementTree +import zipfile from typing import Generator from typing import Sequence @@ -57,10 +60,29 @@ ), ) - # Determine tool from the packaged file <tool_name>.<version>.nupkg - build_outputs = os.listdir(os.path.join(prefix.prefix_dir, build_dir)) - for output in build_outputs: - tool_name = output.split('.')[0] + nupkg_dir = prefix.path(build_dir) + nupkgs = [x for x in os.listdir(nupkg_dir) if x.endswith('.nupkg')] + + if not nupkgs: + raise AssertionError('could not find any build outputs to install') + + for nupkg in nupkgs: + with zipfile.ZipFile(os.path.join(nupkg_dir, nupkg)) as f: + nuspec, = (x for x in f.namelist() if x.endswith('.nuspec')) + with f.open(nuspec) as spec: + tree = xml.etree.ElementTree.parse(spec) + + namespace = re.match(r'{.*}', tree.getroot().tag) + if not namespace: + raise AssertionError('could not parse namespace from nuspec') + + tool_id_element = tree.find(f'.//{namespace[0]}id') + if tool_id_element is None: + raise AssertionError('expected to find an "id" element') + + tool_id = tool_id_element.text + if not tool_id: + raise AssertionError('"id" element missing tool name') # Install to bin dir helpers.run_setup_cmd( @@ -69,7 +91,7 @@ 'dotnet', 'tool', 'install', '--tool-path', os.path.join(envdir, BIN_DIR), '--add-source', build_dir, - tool_name, + tool_id, ), )
{"golden_diff": "diff --git a/pre_commit/languages/dotnet.py b/pre_commit/languages/dotnet.py\n--- a/pre_commit/languages/dotnet.py\n+++ b/pre_commit/languages/dotnet.py\n@@ -2,6 +2,9 @@\n \n import contextlib\n import os.path\n+import re\n+import xml.etree.ElementTree\n+import zipfile\n from typing import Generator\n from typing import Sequence\n \n@@ -57,10 +60,29 @@\n ),\n )\n \n- # Determine tool from the packaged file <tool_name>.<version>.nupkg\n- build_outputs = os.listdir(os.path.join(prefix.prefix_dir, build_dir))\n- for output in build_outputs:\n- tool_name = output.split('.')[0]\n+ nupkg_dir = prefix.path(build_dir)\n+ nupkgs = [x for x in os.listdir(nupkg_dir) if x.endswith('.nupkg')]\n+\n+ if not nupkgs:\n+ raise AssertionError('could not find any build outputs to install')\n+\n+ for nupkg in nupkgs:\n+ with zipfile.ZipFile(os.path.join(nupkg_dir, nupkg)) as f:\n+ nuspec, = (x for x in f.namelist() if x.endswith('.nuspec'))\n+ with f.open(nuspec) as spec:\n+ tree = xml.etree.ElementTree.parse(spec)\n+\n+ namespace = re.match(r'{.*}', tree.getroot().tag)\n+ if not namespace:\n+ raise AssertionError('could not parse namespace from nuspec')\n+\n+ tool_id_element = tree.find(f'.//{namespace[0]}id')\n+ if tool_id_element is None:\n+ raise AssertionError('expected to find an \"id\" element')\n+\n+ tool_id = tool_id_element.text\n+ if not tool_id:\n+ raise AssertionError('\"id\" element missing tool name')\n \n # Install to bin dir\n helpers.run_setup_cmd(\n@@ -69,7 +91,7 @@\n 'dotnet', 'tool', 'install',\n '--tool-path', os.path.join(envdir, BIN_DIR),\n '--add-source', build_dir,\n- tool_name,\n+ tool_id,\n ),\n )\n", "issue": "dotnet install fails for prefixed packages\n### search you tried in the issue tracker\n\ndotnet tool\n\n### describe your issue\n\nA bit of an oversight when constructing `tool_name` here:\r\n\r\nhttps://github.com/pre-commit/pre-commit/blob/cb0bcfd67fc35e91f7b2eca7e33bceda459dca77/pre_commit/languages/dotnet.py#L60-L63\r\n\r\nE.g.\r\n\r\n```console\r\n$ pre-commit try-repo https://github.com/rkm/sample-dotnet-tool\r\n[INFO] Initializing environment for https://github.com/rkm/sample-dotnet-tool.\r\n===============================================================================\r\nUsing config:\r\n===============================================================================\r\nrepos:\r\n- repo: https://github.com/rkm/sample-dotnet-tool\r\n rev: e53a3601bc06bb038dac30da813572291dd8d58f\r\n hooks:\r\n - id: sample-dotnet-tool\r\n===============================================================================\r\n[INFO] Installing environment for https://github.com/rkm/sample-dotnet-tool.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: command: ('/home/rkm/bin/dotnet', 'tool', 'install', '--tool-path', '/tmp/tmp6bk4v26x/repotefhurdg/dotnetenv-default/bin', '--add-source', 'pre-commit-build', 'Rkm')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout:\r\n /tmp/1873db78-d0a7-48ba-bbff-10a7ef85a2a6/restore.csproj : error NU1101: Unable to find package rkm. No packages exist with this id in source(s): /tmp/tmp6bk4v26x/repotefhurdg/pre-commit-build, nuget.org\r\n\r\nstderr:\r\n The tool package could not be restored.\r\n Tool 'rkm' failed to install. 
This failure may have been caused by:\r\n\r\n * You are attempting to install a preview release and did not use the --version option to specify the version.\r\n * A package by this name was found, but it was not a .NET tool.\r\n * The required NuGet feed cannot be accessed, perhaps because of an Internet connection problem.\r\n * You mistyped the name of the tool.\r\n\r\n For more reasons, including package naming enforcement, visit https://aka.ms/failure-installing-tool\r\n\r\nCheck the log at /home/rkm/.cache/pre-commit/pre-commit.log\r\n```\n\n### pre-commit --version\n\npre-commit 2.20.0\n\n### .pre-commit-config.yaml\n\n```yaml\nrepos:\r\n- repo: https://github.com/rkm/sample-dotnet-tool\r\n rev: e53a3601bc06bb038dac30da813572291dd8d58f\r\n hooks:\r\n - id: sample-dotnet-tool\n```\n\n\n### ~/.cache/pre-commit/pre-commit.log (if present)\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport os.path\nfrom typing import Generator\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\n\nENVIRONMENT_DIR = 'dotnetenv'\nBIN_DIR = 'bin'\n\nget_default_version = helpers.basic_get_default_version\nhealth_check = helpers.basic_health_check\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PATH', (os.path.join(venv, BIN_DIR), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix: Prefix) -> Generator[None, None, None]:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT)\n envdir = prefix.path(directory)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n helpers.assert_version_default('dotnet', version)\n helpers.assert_no_additional_deps('dotnet', additional_dependencies)\n\n envdir = prefix.path(helpers.environment_dir(ENVIRONMENT_DIR, version))\n with clean_path_on_failure(envdir):\n build_dir = 'pre-commit-build'\n\n # Build & pack nupkg file\n helpers.run_setup_cmd(\n prefix,\n (\n 'dotnet', 'pack',\n '--configuration', 'Release',\n '--output', build_dir,\n ),\n )\n\n # Determine tool from the packaged file <tool_name>.<version>.nupkg\n build_outputs = os.listdir(os.path.join(prefix.prefix_dir, build_dir))\n for output in build_outputs:\n tool_name = output.split('.')[0]\n\n # Install to bin dir\n helpers.run_setup_cmd(\n prefix,\n (\n 'dotnet', 'tool', 'install',\n '--tool-path', os.path.join(envdir, BIN_DIR),\n '--add-source', build_dir,\n tool_name,\n ),\n )\n\n # Clean the git dir, ignoring the environment dir\n clean_cmd = ('git', 'clean', '-ffxd', '-e', f'{ENVIRONMENT_DIR}-*')\n helpers.run_setup_cmd(prefix, clean_cmd)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> tuple[int, bytes]:\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/dotnet.py"}]}
1,969
492
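The golden diff above swaps filename-splitting for reading the tool id out of the package's nuspec manifest. A minimal standalone sketch of that parsing step, assuming a locally built `.nupkg` and the standard NuGet layout (the package path and name in the usage line are illustrative, not from the record):

```python
import re
import xml.etree.ElementTree
import zipfile

def read_tool_id(nupkg_path: str) -> str:
    """Return the NuGet package id declared in the .nuspec inside a .nupkg."""
    with zipfile.ZipFile(nupkg_path) as f:
        # A .nupkg is a zip archive; exactly one .nuspec is expected in it.
        nuspec, = (name for name in f.namelist() if name.endswith('.nuspec'))
        with f.open(nuspec) as spec:
            tree = xml.etree.ElementTree.parse(spec)

    # The nuspec root tag looks like '{http://schemas...}package', so the same
    # '{...}' namespace prefix must be attached to child element lookups.
    namespace = re.match(r'{.*}', tree.getroot().tag)
    prefix = namespace[0] if namespace else ''
    tool_id = tree.find(f'.//{prefix}id')
    if tool_id is None or not tool_id.text:
        raise AssertionError('no <id> element found in nuspec')
    return tool_id.text

# Hypothetical usage:
# read_tool_id('pre-commit-build/SampleDotnetTool.1.0.0.nupkg')  # -> 'SampleDotnetTool'
```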
gh_patches_debug_32731
rasdani/github-patches
git_diff
microsoft__ptvsd-1454
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Stop on entry hangs the debugger ## Environment data - PTVSD version: master - OS and version: windows - Python version (& distribution if applicable, e.g. Anaconda): 3.6 - Using VS Code or Visual Studio: VS or VSC ## Actual behavior Note that you have to change the default for STOP_ON_ENTRY in wrapper.py#1198 to True. Repros almost always on VS, but infrequently on VSC. ## Expected behavior Should run and break on entry ## Steps to reproduce: For VS: 1. Change wrapper.py:1198 default for STOP_ON_ENTRY to True. 2. Use F5 to start the debugger. For VSC: 1. Set `stopOnEntry` in launch json. </issue> <code> [start of src/ptvsd/debugger.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. See LICENSE in the project root 3 # for license information. 4 5 import sys 6 7 import ptvsd.log 8 from ptvsd._local import run_module, run_file, run_main 9 10 11 # TODO: not needed? 12 DONT_DEBUG = [] 13 14 LOCALHOST = 'localhost' 15 16 RUNNERS = { 17 'module': run_module, # python -m spam 18 'script': run_file, # python spam.py 19 'code': run_file, # python -c 'print("spam")' 20 None: run_file, # catchall 21 } 22 23 24 def debug(filename, port_num, debug_id, debug_options, run_as, 25 _runners=RUNNERS, _extra=None, *args, **kwargs): 26 27 ptvsd.log.to_file() 28 ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as)) 29 30 if _extra is None: 31 _extra = sys.argv[1:] 32 address = (LOCALHOST, port_num) 33 try: 34 run = _runners[run_as] 35 except KeyError: 36 # TODO: fail? 37 run = _runners[None] 38 if _extra: 39 args = _extra + list(args) 40 kwargs.setdefault('singlesession', True) 41 run(address, filename, *args, **kwargs) 42 43 44 def run(filename, port_num, run_as, 45 *args, **kwargs): 46 47 ptvsd.log.to_file() 48 ptvsd.log.info('run{0!r}', (filename, port_num, run_as)) 49 50 address = (LOCALHOST, port_num) 51 run_main(address, filename, run_as, *args, **kwargs) 52 [end of src/ptvsd/debugger.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/ptvsd/debugger.py b/src/ptvsd/debugger.py --- a/src/ptvsd/debugger.py +++ b/src/ptvsd/debugger.py @@ -2,50 +2,38 @@ # Licensed under the MIT License. See LICENSE in the project root # for license information. -import sys - import ptvsd.log -from ptvsd._local import run_module, run_file, run_main - - -# TODO: not needed? -DONT_DEBUG = [] +import ptvsd.options +from ptvsd.__main__ import run_file, run_module, run_code -LOCALHOST = 'localhost' RUNNERS = { - 'module': run_module, # python -m spam - 'script': run_file, # python spam.py - 'code': run_file, # python -c 'print("spam")' - None: run_file, # catchall + 'module': run_module, + 'script': run_file, + 'code': run_code, } +# Not actually used, but VS will try to add entries to it. +DONT_DEBUG = [] -def debug(filename, port_num, debug_id, debug_options, run_as, - _runners=RUNNERS, _extra=None, *args, **kwargs): +# A legacy entrypoint for Visual Studio, to allow older versions to work with new ptvsd. +# All new code should use the entrypoints in __main__ directly. +def debug(filename, port_num, debug_id, debug_options, run_as): ptvsd.log.to_file() ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as)) - if _extra is None: - _extra = sys.argv[1:] - address = (LOCALHOST, port_num) try: - run = _runners[run_as] + run = RUNNERS[run_as] except KeyError: - # TODO: fail? - run = _runners[None] - if _extra: - args = _extra + list(args) - kwargs.setdefault('singlesession', True) - run(address, filename, *args, **kwargs) - + raise ValueError('run_as must be one of: {0!r}'.format(tuple(RUNNERS.keys()))) -def run(filename, port_num, run_as, - *args, **kwargs): + ptvsd.options.target_kind = 'file' if run_as == 'script' else run_as + ptvsd.options.target = filename + ptvsd.options.port = port_num + ptvsd.options.client = True - ptvsd.log.to_file() - ptvsd.log.info('run{0!r}', (filename, port_num, run_as)) + # debug_id is ignored because it has no meaning in DAP. + # debug_options are ignored, because they will be passed later via DAP "launch" request. - address = (LOCALHOST, port_num) - run_main(address, filename, run_as, *args, **kwargs) + run()
{"golden_diff": "diff --git a/src/ptvsd/debugger.py b/src/ptvsd/debugger.py\n--- a/src/ptvsd/debugger.py\n+++ b/src/ptvsd/debugger.py\n@@ -2,50 +2,38 @@\n # Licensed under the MIT License. See LICENSE in the project root\n # for license information.\n \n-import sys\n-\n import ptvsd.log\n-from ptvsd._local import run_module, run_file, run_main\n-\n-\n-# TODO: not needed?\n-DONT_DEBUG = []\n+import ptvsd.options\n+from ptvsd.__main__ import run_file, run_module, run_code\n \n-LOCALHOST = 'localhost'\n \n RUNNERS = {\n- 'module': run_module, # python -m spam\n- 'script': run_file, # python spam.py\n- 'code': run_file, # python -c 'print(\"spam\")'\n- None: run_file, # catchall\n+ 'module': run_module,\n+ 'script': run_file,\n+ 'code': run_code,\n }\n \n+# Not actually used, but VS will try to add entries to it.\n+DONT_DEBUG = []\n \n-def debug(filename, port_num, debug_id, debug_options, run_as,\n- _runners=RUNNERS, _extra=None, *args, **kwargs):\n \n+# A legacy entrypoint for Visual Studio, to allow older versions to work with new ptvsd.\n+# All new code should use the entrypoints in __main__ directly.\n+def debug(filename, port_num, debug_id, debug_options, run_as):\n ptvsd.log.to_file()\n ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as))\n \n- if _extra is None:\n- _extra = sys.argv[1:]\n- address = (LOCALHOST, port_num)\n try:\n- run = _runners[run_as]\n+ run = RUNNERS[run_as]\n except KeyError:\n- # TODO: fail?\n- run = _runners[None]\n- if _extra:\n- args = _extra + list(args)\n- kwargs.setdefault('singlesession', True)\n- run(address, filename, *args, **kwargs)\n-\n+ raise ValueError('run_as must be one of: {0!r}'.format(tuple(RUNNERS.keys())))\n \n-def run(filename, port_num, run_as,\n- *args, **kwargs):\n+ ptvsd.options.target_kind = 'file' if run_as == 'script' else run_as\n+ ptvsd.options.target = filename\n+ ptvsd.options.port = port_num\n+ ptvsd.options.client = True\n \n- ptvsd.log.to_file()\n- ptvsd.log.info('run{0!r}', (filename, port_num, run_as))\n+ # debug_id is ignored because it has no meaning in DAP.\n+ # debug_options are ignored, because they will be passed later via DAP \"launch\" request.\n \n- address = (LOCALHOST, port_num)\n- run_main(address, filename, run_as, *args, **kwargs)\n+ run()\n", "issue": "Stop on entry hangs the debugger\n## Environment data\r\n\r\n- PTVSD version: master\r\n- OS and version: windows\r\n- Python version (& distribution if applicable, e.g. Anaconda): 3.6\r\n- Using VS Code or Visual Studio: VS or VSC\r\n\r\n## Actual behavior\r\n\r\nNote that you have to change the default for STOP_ON_ENTRY in wrapper.py#1198 to True.\r\nRepros almost always on VS, but infrequently on VSC.\r\n\r\n## Expected behavior\r\n\r\nShould run and break on entry\r\n\r\n## Steps to reproduce:\r\nFor VS:\r\n1. Change wrapper.py:1198 default for STOP_ON_ENTRY to True.\r\n2. Use F5 to start the debugger.\r\n\r\nFor VSC:\r\n1. Set `stopOnEntry` in launch json.\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport sys\n\nimport ptvsd.log\nfrom ptvsd._local import run_module, run_file, run_main\n\n\n# TODO: not needed?\nDONT_DEBUG = []\n\nLOCALHOST = 'localhost'\n\nRUNNERS = {\n 'module': run_module, # python -m spam\n 'script': run_file, # python spam.py\n 'code': run_file, # python -c 'print(\"spam\")'\n None: run_file, # catchall\n}\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as,\n _runners=RUNNERS, _extra=None, *args, **kwargs):\n\n ptvsd.log.to_file()\n ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as))\n\n if _extra is None:\n _extra = sys.argv[1:]\n address = (LOCALHOST, port_num)\n try:\n run = _runners[run_as]\n except KeyError:\n # TODO: fail?\n run = _runners[None]\n if _extra:\n args = _extra + list(args)\n kwargs.setdefault('singlesession', True)\n run(address, filename, *args, **kwargs)\n\n\ndef run(filename, port_num, run_as,\n *args, **kwargs):\n\n ptvsd.log.to_file()\n ptvsd.log.info('run{0!r}', (filename, port_num, run_as))\n\n address = (LOCALHOST, port_num)\n run_main(address, filename, run_as, *args, **kwargs)\n", "path": "src/ptvsd/debugger.py"}]}
1,191
707
gh_patches_debug_32318
rasdani/github-patches
git_diff
opsdroid__opsdroid-210
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add case-insensitive kwarg to the regex matcher It would be nice to specify case insensitive matching in the regex matcher. e.g ```python @match_regex(r'myregex', case_sensitive=False) async def myskill(opsdroid, config, message): pass ``` </issue> <code> [start of opsdroid/parsers/regex.py] 1 """A helper function for parsing and executing regex skills.""" 2 3 import logging 4 import re 5 6 7 _LOGGER = logging.getLogger(__name__) 8 9 10 async def parse_regex(opsdroid, message): 11 """Parse a message against all regex skills.""" 12 # pylint: disable=broad-except 13 # We want to catch all exceptions coming from a skill module and not 14 # halt the application. If a skill throws an exception it just doesn't 15 # give a response to the user, so an error response should be given. 16 for skill in opsdroid.skills: 17 if "regex" in skill: 18 regex = re.match(skill["regex"], message.text) 19 if regex: 20 message.regex = regex 21 try: 22 await skill["skill"](opsdroid, skill["config"], message) 23 except Exception: 24 await message.respond( 25 "Whoops there has been an error") 26 await message.respond( 27 "Check the log for details") 28 _LOGGER.exception("Exception when parsing '" + 29 message.text + 30 "' against skill '" + 31 skill["regex"] + "'") 32 [end of opsdroid/parsers/regex.py] [start of opsdroid/matchers.py] 1 """Decorator functions to use when creating skill modules.""" 2 3 import logging 4 5 from opsdroid.helper import get_opsdroid 6 from opsdroid.web import Web 7 8 9 _LOGGER = logging.getLogger(__name__) 10 11 12 def match_regex(regex): 13 """Return regex match decorator.""" 14 def matcher(func): 15 """Add decorated function to skills list for regex matching.""" 16 opsdroid = get_opsdroid() 17 opsdroid.skills.append({"regex": regex, "skill": func, 18 "config": 19 opsdroid.loader.current_import_config}) 20 return func 21 return matcher 22 23 24 def match_apiai_action(action): 25 """Return apiai action match decorator.""" 26 def matcher(func): 27 """Add decorated function to skills list for apiai matching.""" 28 opsdroid = get_opsdroid() 29 opsdroid.skills.append({"apiai_action": action, "skill": func, 30 "config": 31 opsdroid.loader.current_import_config}) 32 return func 33 return matcher 34 35 36 def match_apiai_intent(intent): 37 """Return apiai intent match decorator.""" 38 def matcher(func): 39 """Add decorated function to skills list for apiai matching.""" 40 opsdroid = get_opsdroid() 41 opsdroid.skills.append({"apiai_intent": intent, "skill": func, 42 "config": 43 opsdroid.loader.current_import_config}) 44 return func 45 return matcher 46 47 48 def match_crontab(crontab, timezone=None): 49 """Return crontab match decorator.""" 50 def matcher(func): 51 """Add decorated function to skills list for crontab matching.""" 52 opsdroid = get_opsdroid() 53 config = opsdroid.loader.current_import_config 54 opsdroid.skills.append({"crontab": crontab, "skill": func, 55 "config": config, "timezone": timezone}) 56 return func 57 return matcher 58 59 60 def match_webhook(webhook): 61 """Return webhook match decorator.""" 62 def matcher(func): 63 """Add decorated function to skills list for webhook matching.""" 64 opsdroid = get_opsdroid() 65 config = opsdroid.loader.current_import_config 66 opsdroid.skills.append({"webhook": webhook, "skill": func, 67 "config": config}) 68 69 async def wrapper(req, opsdroid=opsdroid, config=config): 70 """Wrap up the aiohttp handler.""" 71 
_LOGGER.info("Running skill %s via webhook", webhook) 72 opsdroid.stats["webhooks_called"] = \ 73 opsdroid.stats["webhooks_called"] + 1 74 await func(opsdroid, config, req) 75 return Web.build_response(200, {"called_skill": webhook}) 76 77 opsdroid.web_server.web_app.router.add_post( 78 "/skill/{}/{}".format(config["name"], webhook), wrapper) 79 opsdroid.web_server.web_app.router.add_post( 80 "/skill/{}/{}/".format(config["name"], webhook), wrapper) 81 82 return func 83 return matcher 84 [end of opsdroid/matchers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py --- a/opsdroid/matchers.py +++ b/opsdroid/matchers.py @@ -9,12 +9,14 @@ _LOGGER = logging.getLogger(__name__) -def match_regex(regex): +def match_regex(regex, case_sensitive=True): """Return regex match decorator.""" def matcher(func): """Add decorated function to skills list for regex matching.""" opsdroid = get_opsdroid() - opsdroid.skills.append({"regex": regex, "skill": func, + opsdroid.skills.append({"regex": {"expression": regex, + "case_sensitive": case_sensitive}, + "skill": func, "config": opsdroid.loader.current_import_config}) return func diff --git a/opsdroid/parsers/regex.py b/opsdroid/parsers/regex.py --- a/opsdroid/parsers/regex.py +++ b/opsdroid/parsers/regex.py @@ -15,7 +15,12 @@ # give a response to the user, so an error response should be given. for skill in opsdroid.skills: if "regex" in skill: - regex = re.match(skill["regex"], message.text) + if skill["regex"]["case_sensitive"]: + regex = re.match(skill["regex"]["expression"], + message.text) + else: + regex = re.match(skill["regex"]["expression"], + message.text, re.IGNORECASE) if regex: message.regex = regex try: @@ -28,4 +33,4 @@ _LOGGER.exception("Exception when parsing '" + message.text + "' against skill '" + - skill["regex"] + "'") + skill["regex"]["expression"] + "'")
{"golden_diff": "diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py\n--- a/opsdroid/matchers.py\n+++ b/opsdroid/matchers.py\n@@ -9,12 +9,14 @@\n _LOGGER = logging.getLogger(__name__)\n \n \n-def match_regex(regex):\n+def match_regex(regex, case_sensitive=True):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n opsdroid = get_opsdroid()\n- opsdroid.skills.append({\"regex\": regex, \"skill\": func,\n+ opsdroid.skills.append({\"regex\": {\"expression\": regex,\n+ \"case_sensitive\": case_sensitive},\n+ \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\ndiff --git a/opsdroid/parsers/regex.py b/opsdroid/parsers/regex.py\n--- a/opsdroid/parsers/regex.py\n+++ b/opsdroid/parsers/regex.py\n@@ -15,7 +15,12 @@\n # give a response to the user, so an error response should be given.\n for skill in opsdroid.skills:\n if \"regex\" in skill:\n- regex = re.match(skill[\"regex\"], message.text)\n+ if skill[\"regex\"][\"case_sensitive\"]:\n+ regex = re.match(skill[\"regex\"][\"expression\"],\n+ message.text)\n+ else:\n+ regex = re.match(skill[\"regex\"][\"expression\"],\n+ message.text, re.IGNORECASE)\n if regex:\n message.regex = regex\n try:\n@@ -28,4 +33,4 @@\n _LOGGER.exception(\"Exception when parsing '\" +\n message.text +\n \"' against skill '\" +\n- skill[\"regex\"] + \"'\")\n+ skill[\"regex\"][\"expression\"] + \"'\")\n", "issue": "Add case-insensitive kwarg to the regex matcher\nIt would be nice to specify case insensitive matching in the regex matcher.\r\n\r\ne.g\r\n```python\r\n@match_regex(r'myregex', case_sensitive=False)\r\nasync def myskill(opsdroid, config, message):\r\n pass\r\n```\n", "before_files": [{"content": "\"\"\"A helper function for parsing and executing regex skills.\"\"\"\n\nimport logging\nimport re\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_regex(opsdroid, message):\n \"\"\"Parse a message against all regex skills.\"\"\"\n # pylint: disable=broad-except\n # We want to catch all exceptions coming from a skill module and not\n # halt the application. 
If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n for skill in opsdroid.skills:\n if \"regex\" in skill:\n regex = re.match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n try:\n await skill[\"skill\"](opsdroid, skill[\"config\"], message)\n except Exception:\n await message.respond(\n \"Whoops there has been an error\")\n await message.respond(\n \"Check the log for details\")\n _LOGGER.exception(\"Exception when parsing '\" +\n message.text +\n \"' against skill '\" +\n skill[\"regex\"] + \"'\")\n", "path": "opsdroid/parsers/regex.py"}, {"content": "\"\"\"Decorator functions to use when creating skill modules.\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import get_opsdroid\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef match_regex(regex):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"regex\": regex, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_action(action):\n \"\"\"Return apiai action match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_action\": action, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_intent(intent):\n \"\"\"Return apiai intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_intent\": intent, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_crontab(crontab, timezone=None):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"crontab\": crontab, \"skill\": func,\n \"config\": config, \"timezone\": timezone})\n return func\n return matcher\n\n\ndef match_webhook(webhook):\n \"\"\"Return webhook match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for webhook matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"webhook\": webhook, \"skill\": func,\n \"config\": config})\n\n async def wrapper(req, opsdroid=opsdroid, config=config):\n \"\"\"Wrap up the aiohttp handler.\"\"\"\n _LOGGER.info(\"Running skill %s via webhook\", webhook)\n opsdroid.stats[\"webhooks_called\"] = \\\n opsdroid.stats[\"webhooks_called\"] + 1\n await func(opsdroid, config, req)\n return Web.build_response(200, {\"called_skill\": webhook})\n\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}\".format(config[\"name\"], webhook), wrapper)\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}/\".format(config[\"name\"], webhook), wrapper)\n\n return func\n return matcher\n", "path": "opsdroid/matchers.py"}]}
1,703
405
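A self-contained sketch of the matching behaviour the patch introduces, using plain `re` so it runs without opsdroid; `case_sensitive=False` in the decorator corresponds to passing `re.IGNORECASE` in the parser:

```python
import re

expression = r'hi opsdroid'

# case_sensitive=True (the default) keeps the old, exact-case behaviour:
assert re.match(expression, 'hi opsdroid')
assert re.match(expression, 'Hi Opsdroid') is None

# case_sensitive=False adds re.IGNORECASE, exactly as parse_regex now does:
assert re.match(expression, 'Hi Opsdroid', re.IGNORECASE)
```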
gh_patches_debug_14244
rasdani/github-patches
git_diff
scrapy__scrapy-1188
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Change the scrapy short description The Scrapy short description says: > Scrapy, a fast high-level screen scraping and web crawling framework. I think would be better: > Scrapy, a fast high-level web crawling and screen scraping framework. Because it highlights first its difference with simple screen scraping tools (i.e. Nokogiri. Mechanize, etc). Screen scraping can be done even with curl and grep, but I don't think you could do web crawling with such simple tools. Perhaps this can be an alternative: > Scrapy, a fast and scalable web crawling and screen scraping framework. Also the term "web data mining" can be a good fit for Scrapy (along with Scrapely and similar tools) and help to shape its roadmap. </issue> <code> [start of setup.py] 1 from os.path import dirname, join 2 from setuptools import setup, find_packages 3 4 5 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f: 6 version = f.read().decode('ascii').strip() 7 8 9 setup( 10 name='Scrapy', 11 version=version, 12 url='http://scrapy.org', 13 description='A high-level Web Crawling and Screen Scraping framework', 14 long_description=open('README.rst').read(), 15 author='Scrapy developers', 16 maintainer='Pablo Hoffman', 17 maintainer_email='[email protected]', 18 license='BSD', 19 packages=find_packages(exclude=('tests', 'tests.*')), 20 include_package_data=True, 21 zip_safe=False, 22 entry_points={ 23 'console_scripts': ['scrapy = scrapy.cmdline:execute'] 24 }, 25 classifiers=[ 26 'Framework :: Scrapy', 27 'Development Status :: 5 - Production/Stable', 28 'Environment :: Console', 29 'Intended Audience :: Developers', 30 'License :: OSI Approved :: BSD License', 31 'Operating System :: OS Independent', 32 'Programming Language :: Python', 33 'Programming Language :: Python :: 2', 34 'Programming Language :: Python :: 2.7', 35 'Topic :: Internet :: WWW/HTTP', 36 'Topic :: Software Development :: Libraries :: Application Frameworks', 37 'Topic :: Software Development :: Libraries :: Python Modules', 38 ], 39 install_requires=[ 40 'Twisted>=10.0.0', 41 'w3lib>=1.8.0', 42 'queuelib', 43 'lxml', 44 'pyOpenSSL', 45 'cssselect>=0.9', 46 'six>=1.5.2', 47 ], 48 ) 49 [end of setup.py] [start of scrapy/__init__.py] 1 """ 2 Scrapy - a web crawling and screen scraping framework written for Python 3 """ 4 5 __all__ = ['__version__', 'version_info', 'optional_features', 'twisted_version', 6 'Spider', 'Request', 'FormRequest', 'Selector', 'Item', 'Field'] 7 8 # Scrapy version 9 import pkgutil 10 __version__ = pkgutil.get_data(__package__, 'VERSION').decode('ascii').strip() 11 version_info = tuple(int(v) if v.isdigit() else v 12 for v in __version__.split('.')) 13 del pkgutil 14 15 # Check minimum required Python version 16 import sys 17 if sys.version_info < (2, 7): 18 print("Scrapy %s requires Python 2.7" % __version__) 19 sys.exit(1) 20 21 # Ignore noisy twisted deprecation warnings 22 import warnings 23 warnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted') 24 del warnings 25 26 # Apply monkey patches to fix issues in external libraries 27 from . import _monkeypatches 28 del _monkeypatches 29 30 # WARNING: optional_features set is deprecated and will be removed soon. Do not use. 
31 optional_features = set() 32 # TODO: backwards compatibility, remove for Scrapy 0.20 33 optional_features.add('ssl') 34 try: 35 import boto 36 del boto 37 except ImportError: 38 pass 39 else: 40 optional_features.add('boto') 41 42 from twisted import version as _txv 43 twisted_version = (_txv.major, _txv.minor, _txv.micro) 44 if twisted_version >= (11, 1, 0): 45 optional_features.add('http11') 46 47 # Declare top-level shortcuts 48 from scrapy.spider import Spider 49 from scrapy.http import Request, FormRequest 50 from scrapy.selector import Selector 51 from scrapy.item import Item, Field 52 53 del sys 54 [end of scrapy/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/__init__.py b/scrapy/__init__.py --- a/scrapy/__init__.py +++ b/scrapy/__init__.py @@ -1,5 +1,5 @@ """ -Scrapy - a web crawling and screen scraping framework written for Python +Scrapy - a web crawling and web scraping framework written for Python """ __all__ = ['__version__', 'version_info', 'optional_features', 'twisted_version', diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ name='Scrapy', version=version, url='http://scrapy.org', - description='A high-level Web Crawling and Screen Scraping framework', + description='A high-level Web Crawling and Web Scraping framework', long_description=open('README.rst').read(), author='Scrapy developers', maintainer='Pablo Hoffman',
{"golden_diff": "diff --git a/scrapy/__init__.py b/scrapy/__init__.py\n--- a/scrapy/__init__.py\n+++ b/scrapy/__init__.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-Scrapy - a web crawling and screen scraping framework written for Python\n+Scrapy - a web crawling and web scraping framework written for Python\n \"\"\"\n \n __all__ = ['__version__', 'version_info', 'optional_features', 'twisted_version',\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n name='Scrapy',\n version=version,\n url='http://scrapy.org',\n- description='A high-level Web Crawling and Screen Scraping framework',\n+ description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n", "issue": "Change the scrapy short description\nThe Scrapy short description says:\n\n> Scrapy, a fast high-level screen scraping and web crawling framework.\n\nI think would be better:\n\n> Scrapy, a fast high-level web crawling and screen scraping framework.\n\nBecause it highlights first its difference with simple screen scraping tools (i.e. Nokogiri. Mechanize, etc).\n\nScreen scraping can be done even with curl and grep, but I don't think you could do web crawling with such simple tools.\n\nPerhaps this can be an alternative:\n\n> Scrapy, a fast and scalable web crawling and screen scraping framework.\n\nAlso the term \"web data mining\" can be a good fit for Scrapy (along with Scrapely and similar tools) and help to shape its roadmap.\n\n", "before_files": [{"content": "from os.path import dirname, join\nfrom setuptools import setup, find_packages\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='http://scrapy.org',\n description='A high-level Web Crawling and Screen Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'Twisted>=10.0.0',\n 'w3lib>=1.8.0',\n 'queuelib',\n 'lxml',\n 'pyOpenSSL',\n 'cssselect>=0.9',\n 'six>=1.5.2',\n ],\n)\n", "path": "setup.py"}, {"content": "\"\"\"\nScrapy - a web crawling and screen scraping framework written for Python\n\"\"\"\n\n__all__ = ['__version__', 'version_info', 'optional_features', 'twisted_version',\n 'Spider', 'Request', 'FormRequest', 'Selector', 'Item', 'Field']\n\n# Scrapy version\nimport pkgutil\n__version__ = pkgutil.get_data(__package__, 'VERSION').decode('ascii').strip()\nversion_info = tuple(int(v) if v.isdigit() else v\n for v in __version__.split('.'))\ndel pkgutil\n\n# Check minimum required Python version\nimport sys\nif sys.version_info < (2, 7):\n print(\"Scrapy %s 
requires Python 2.7\" % __version__)\n sys.exit(1)\n\n# Ignore noisy twisted deprecation warnings\nimport warnings\nwarnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')\ndel warnings\n\n# Apply monkey patches to fix issues in external libraries\nfrom . import _monkeypatches\ndel _monkeypatches\n\n# WARNING: optional_features set is deprecated and will be removed soon. Do not use.\noptional_features = set()\n# TODO: backwards compatibility, remove for Scrapy 0.20\noptional_features.add('ssl')\ntry:\n import boto\n del boto\nexcept ImportError:\n pass\nelse:\n optional_features.add('boto')\n\nfrom twisted import version as _txv\ntwisted_version = (_txv.major, _txv.minor, _txv.micro)\nif twisted_version >= (11, 1, 0):\n optional_features.add('http11')\n\n# Declare top-level shortcuts\nfrom scrapy.spider import Spider\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom scrapy.item import Item, Field\n\ndel sys\n", "path": "scrapy/__init__.py"}]}
1,638
211
gh_patches_debug_59247
rasdani/github-patches
git_diff
projectmesa__mesa-1860
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> mesa.visualization.chartmodule doesn't work As shown in the picture, I run the boltzmann_wealth_model in the mesa example, but the line chart is not displayed normally. Can anyone help me? <img width="788" alt="屏幕截图 2023-11-04 183542" src="https://github.com/projectmesa/mesa/assets/75169342/89ba1b20-4011-471b-909e-5fea97da6b73"> </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import re 3 from codecs import open 4 5 from setuptools import find_packages, setup 6 7 requires = [ 8 "click", 9 "cookiecutter", 10 "matplotlib", 11 "mesa_viz_tornado", 12 "networkx", 13 "numpy", 14 "pandas", 15 "solara", 16 "tqdm", 17 ] 18 19 extras_require = { 20 "dev": [ 21 "black", 22 "ruff~=0.1.1", # Update periodically 23 "coverage", 24 "pytest >= 4.6", 25 "pytest-cov", 26 "sphinx", 27 ], 28 # Explicitly install ipykernel for Python 3.8. 29 # See https://stackoverflow.com/questions/28831854/how-do-i-add-python3-kernel-to-jupyter-ipython 30 # Could be removed in the future 31 "docs": [ 32 "sphinx", 33 "ipython", 34 "ipykernel", 35 "pydata_sphinx_theme", 36 "seaborn", 37 "myst-nb", 38 ], 39 } 40 41 version = "" 42 with open("mesa/__init__.py") as fd: 43 version = re.search( 44 r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE 45 ).group(1) 46 47 with open("README.rst", "rb", encoding="utf-8") as f: 48 readme = f.read() 49 50 51 setup( 52 name="Mesa", 53 version=version, 54 description="Agent-based modeling (ABM) in Python 3+", 55 long_description=readme, 56 author="Project Mesa Team", 57 author_email="[email protected]", 58 url="https://github.com/projectmesa/mesa", 59 packages=find_packages(), 60 package_data={ 61 "cookiecutter-mesa": ["cookiecutter-mesa/*"], 62 }, 63 include_package_data=True, 64 install_requires=requires, 65 extras_require=extras_require, 66 keywords="agent based modeling model ABM simulation multi-agent", 67 license="Apache 2.0", 68 zip_safe=False, 69 classifiers=[ 70 "Topic :: Scientific/Engineering", 71 "Topic :: Scientific/Engineering :: Artificial Life", 72 "Topic :: Scientific/Engineering :: Artificial Intelligence", 73 "Intended Audience :: Science/Research", 74 "Programming Language :: Python :: 3 :: Only", 75 "Programming Language :: Python :: 3.8", 76 "Programming Language :: Python :: 3.9", 77 "Programming Language :: Python :: 3.10", 78 "License :: OSI Approved :: Apache Software License", 79 "Operating System :: OS Independent", 80 "Development Status :: 3 - Alpha", 81 "Natural Language :: English", 82 ], 83 entry_points=""" 84 [console_scripts] 85 mesa=mesa.main:cli 86 """, 87 python_requires=">=3.8", 88 ) 89 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ "click", "cookiecutter", "matplotlib", - "mesa_viz_tornado", + "mesa_viz_tornado~=0.1.0,>=0.1.2", "networkx", "numpy", "pandas",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,7 +8,7 @@\n \"click\",\n \"cookiecutter\",\n \"matplotlib\",\n- \"mesa_viz_tornado\",\n+ \"mesa_viz_tornado~=0.1.0,>=0.1.2\",\n \"networkx\",\n \"numpy\",\n \"pandas\",\n", "issue": "mesa.visualization.chartmodule doesn't work\nAs shown in the picture, I run the boltzmann_wealth_model in the mesa example, but the line chart is not displayed normally. Can anyone help me?\r\n<img width=\"788\" alt=\"\u5c4f\u5e55\u622a\u56fe 2023-11-04 183542\" src=\"https://github.com/projectmesa/mesa/assets/75169342/89ba1b20-4011-471b-909e-5fea97da6b73\">\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport re\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\nrequires = [\n \"click\",\n \"cookiecutter\",\n \"matplotlib\",\n \"mesa_viz_tornado\",\n \"networkx\",\n \"numpy\",\n \"pandas\",\n \"solara\",\n \"tqdm\",\n]\n\nextras_require = {\n \"dev\": [\n \"black\",\n \"ruff~=0.1.1\", # Update periodically\n \"coverage\",\n \"pytest >= 4.6\",\n \"pytest-cov\",\n \"sphinx\",\n ],\n # Explicitly install ipykernel for Python 3.8.\n # See https://stackoverflow.com/questions/28831854/how-do-i-add-python3-kernel-to-jupyter-ipython\n # Could be removed in the future\n \"docs\": [\n \"sphinx\",\n \"ipython\",\n \"ipykernel\",\n \"pydata_sphinx_theme\",\n \"seaborn\",\n \"myst-nb\",\n ],\n}\n\nversion = \"\"\nwith open(\"mesa/__init__.py\") as fd:\n version = re.search(\n r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', fd.read(), re.MULTILINE\n ).group(1)\n\nwith open(\"README.rst\", \"rb\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n\nsetup(\n name=\"Mesa\",\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author=\"Project Mesa Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/projectmesa/mesa\",\n packages=find_packages(),\n package_data={\n \"cookiecutter-mesa\": [\"cookiecutter-mesa/*\"],\n },\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n keywords=\"agent based modeling model ABM simulation multi-agent\",\n license=\"Apache 2.0\",\n zip_safe=False,\n classifiers=[\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Life\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n \"Natural Language :: English\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n mesa=mesa.main:cli\n \"\"\",\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}]}
1,451
92
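The fix here is a dependency pin rather than a code change. A short sketch of what the combined specifier from the diff admits, using the third-party `packaging` library (version numbers chosen for illustration):

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.1.0,>=0.1.2")  # the constraint added in setup.py

# '~=0.1.0' is a compatible release: >=0.1.0,<0.2; '>=0.1.2' raises the floor.
assert "0.1.2" in spec
assert "0.1.9" in spec
assert "0.1.1" not in spec  # below the explicit floor
assert "0.2.0" not in spec  # outside the compatible-release range
```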
gh_patches_debug_11847
rasdani/github-patches
git_diff
CTFd__CTFd-2030
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SubmissionSchema needs more nested fields I'm having trouble accessing a user's name from a SubmissionSchema dump. This is probably because we need more Nested Fields on the Schema in addition to just the nested challenge schema. </issue> <code> [start of CTFd/schemas/submissions.py] 1 from marshmallow import fields 2 3 from CTFd.models import Submissions, ma 4 from CTFd.schemas.challenges import ChallengeSchema 5 from CTFd.utils import string_types 6 7 8 class SubmissionSchema(ma.ModelSchema): 9 challenge = fields.Nested(ChallengeSchema, only=["name", "category", "value"]) 10 11 class Meta: 12 model = Submissions 13 include_fk = True 14 dump_only = ("id",) 15 16 views = { 17 "admin": [ 18 "provided", 19 "ip", 20 "challenge_id", 21 "challenge", 22 "user", 23 "team", 24 "date", 25 "type", 26 "id", 27 ], 28 "user": ["challenge_id", "challenge", "user", "team", "date", "type", "id"], 29 } 30 31 def __init__(self, view=None, *args, **kwargs): 32 if view: 33 if isinstance(view, string_types): 34 kwargs["only"] = self.views[view] 35 elif isinstance(view, list): 36 kwargs["only"] = view 37 38 super(SubmissionSchema, self).__init__(*args, **kwargs) 39 [end of CTFd/schemas/submissions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/schemas/submissions.py b/CTFd/schemas/submissions.py --- a/CTFd/schemas/submissions.py +++ b/CTFd/schemas/submissions.py @@ -2,11 +2,15 @@ from CTFd.models import Submissions, ma from CTFd.schemas.challenges import ChallengeSchema +from CTFd.schemas.teams import TeamSchema +from CTFd.schemas.users import UserSchema from CTFd.utils import string_types class SubmissionSchema(ma.ModelSchema): - challenge = fields.Nested(ChallengeSchema, only=["name", "category", "value"]) + challenge = fields.Nested(ChallengeSchema, only=["id", "name", "category", "value"]) + user = fields.Nested(UserSchema, only=["id", "name"]) + team = fields.Nested(TeamSchema, only=["id", "name"]) class Meta: model = Submissions
{"golden_diff": "diff --git a/CTFd/schemas/submissions.py b/CTFd/schemas/submissions.py\n--- a/CTFd/schemas/submissions.py\n+++ b/CTFd/schemas/submissions.py\n@@ -2,11 +2,15 @@\n \n from CTFd.models import Submissions, ma\n from CTFd.schemas.challenges import ChallengeSchema\n+from CTFd.schemas.teams import TeamSchema\n+from CTFd.schemas.users import UserSchema\n from CTFd.utils import string_types\n \n \n class SubmissionSchema(ma.ModelSchema):\n- challenge = fields.Nested(ChallengeSchema, only=[\"name\", \"category\", \"value\"])\n+ challenge = fields.Nested(ChallengeSchema, only=[\"id\", \"name\", \"category\", \"value\"])\n+ user = fields.Nested(UserSchema, only=[\"id\", \"name\"])\n+ team = fields.Nested(TeamSchema, only=[\"id\", \"name\"])\n \n class Meta:\n model = Submissions\n", "issue": "SubmissionSchema needs more nested fields\nI'm having trouble accessing a user's name from a SubmissionSchema dump. This is probably because we need more Nested Fields on the Schema in addition to just the nested challenge schema. \n", "before_files": [{"content": "from marshmallow import fields\n\nfrom CTFd.models import Submissions, ma\nfrom CTFd.schemas.challenges import ChallengeSchema\nfrom CTFd.utils import string_types\n\n\nclass SubmissionSchema(ma.ModelSchema):\n challenge = fields.Nested(ChallengeSchema, only=[\"name\", \"category\", \"value\"])\n\n class Meta:\n model = Submissions\n include_fk = True\n dump_only = (\"id\",)\n\n views = {\n \"admin\": [\n \"provided\",\n \"ip\",\n \"challenge_id\",\n \"challenge\",\n \"user\",\n \"team\",\n \"date\",\n \"type\",\n \"id\",\n ],\n \"user\": [\"challenge_id\", \"challenge\", \"user\", \"team\", \"date\", \"type\", \"id\"],\n }\n\n def __init__(self, view=None, *args, **kwargs):\n if view:\n if isinstance(view, string_types):\n kwargs[\"only\"] = self.views[view]\n elif isinstance(view, list):\n kwargs[\"only\"] = view\n\n super(SubmissionSchema, self).__init__(*args, **kwargs)\n", "path": "CTFd/schemas/submissions.py"}]}
897
215
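A standalone marshmallow sketch of why the added nested fields matter when dumping a submission. It uses plain `Schema` classes and `SimpleNamespace` stand-ins instead of CTFd's `ma.ModelSchema` and SQLAlchemy models, so the object shapes are illustrative only:

```python
from types import SimpleNamespace
from marshmallow import Schema, fields

class UserSchema(Schema):
    id = fields.Int()
    name = fields.Str()

class SubmissionSchema(Schema):
    user = fields.Nested(UserSchema, only=["id", "name"])

submission = SimpleNamespace(user=SimpleNamespace(id=7, name="alice"))

# With the Nested field, the related user's name is reachable from the dump;
# marshmallow 3 returns the dict directly (marshmallow 2 wraps it in a result).
print(SubmissionSchema().dump(submission))  # {'user': {'id': 7, 'name': 'alice'}}
```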
gh_patches_debug_24458
rasdani/github-patches
git_diff
chainer__chainer-4108
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support optimization for rsqrt CUDA provides `rsqrt` function, which is faster than doing `1.0 / sqrt(x)`. It is better to provide custom kernel in Chainer or directly support in CuPy. (Note that NumPy does not provide `rsqrt`) </issue> <code> [start of chainer/functions/math/sqrt.py] 1 from chainer.backends import cuda 2 from chainer import function_node 3 from chainer import utils 4 from chainer.utils import type_check 5 6 7 class Sqrt(function_node.FunctionNode): 8 9 @property 10 def label(self): 11 return 'sqrt' 12 13 def check_type_forward(self, in_types): 14 type_check.expect( 15 in_types.size() == 1, 16 in_types[0].dtype.kind == 'f', 17 ) 18 19 def forward(self, x): 20 self.retain_outputs((0,)) 21 xp = cuda.get_array_module(*x) 22 return utils.force_array(xp.sqrt(x[0], dtype=x[0].dtype)), 23 24 def backward(self, indexes, grad_outputs): 25 gx = self.get_retained_outputs()[0] 26 gy = grad_outputs[0] 27 return gy / (gx * 2.0), 28 29 30 def sqrt(x): 31 """Elementwise square root function. 32 33 .. math:: 34 y_i = \\sqrt x_i. 35 36 If the value of :math:`x_i` is negative, it returns ``Nan`` for :math:`y_i` 37 respect to underlying numpy and cupy specification. 38 39 Args: 40 x (~chainer.Variable): Input variable. 41 42 Returns: 43 ~chainer.Variable: Output variable. 44 """ 45 return Sqrt().apply((x,))[0] 46 47 48 def rsqrt(x): 49 """Computes elementwise reciprocal of square root of input :math:`x_i`. 50 51 .. math:: 52 y_i = {1 \\over \\sqrt x_i}. 53 54 Args: 55 x (~chainer.Variable): Input variable. 56 57 Returns: 58 ~chainer.Variable: Output variable. 59 60 .. seealso:: :func:`~chainer.functions.sqrt` 61 """ 62 return 1.0 / sqrt(x) 63 [end of chainer/functions/math/sqrt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/math/sqrt.py b/chainer/functions/math/sqrt.py --- a/chainer/functions/math/sqrt.py +++ b/chainer/functions/math/sqrt.py @@ -1,3 +1,5 @@ +import numpy + from chainer.backends import cuda from chainer import function_node from chainer import utils @@ -27,6 +29,36 @@ return gy / (gx * 2.0), +class Rsqrt(function_node.FunctionNode): + + @property + def label(self): + return 'rsqrt' + + def check_type_forward(self, in_types): + type_check.expect( + in_types.size() == 1, + in_types[0].dtype.kind == 'f', + ) + + def forward(self, inputs): + self.retain_inputs((0,)) + x, = inputs + xp = cuda.get_array_module(x) + dtype = x.dtype + if xp is numpy: + out = xp.reciprocal(xp.sqrt(x, dtype=dtype), dtype=dtype) + else: + # CuPy provides `rsqrt` which is faster than `1.0 / sqrt(x)`. + out = cuda.cupyx.rsqrt(x, dtype=dtype) + return utils.force_array(out), + + def backward(self, indexes, grad_outputs): + x, = self.get_retained_inputs() + gy, = grad_outputs + return gy * (x ** -1.5) * -0.5, + + def sqrt(x): """Elementwise square root function. @@ -59,4 +91,4 @@ .. seealso:: :func:`~chainer.functions.sqrt` """ - return 1.0 / sqrt(x) + return Rsqrt().apply((x,))[0]
{"golden_diff": "diff --git a/chainer/functions/math/sqrt.py b/chainer/functions/math/sqrt.py\n--- a/chainer/functions/math/sqrt.py\n+++ b/chainer/functions/math/sqrt.py\n@@ -1,3 +1,5 @@\n+import numpy\n+\n from chainer.backends import cuda\n from chainer import function_node\n from chainer import utils\n@@ -27,6 +29,36 @@\n return gy / (gx * 2.0),\n \n \n+class Rsqrt(function_node.FunctionNode):\n+\n+ @property\n+ def label(self):\n+ return 'rsqrt'\n+\n+ def check_type_forward(self, in_types):\n+ type_check.expect(\n+ in_types.size() == 1,\n+ in_types[0].dtype.kind == 'f',\n+ )\n+\n+ def forward(self, inputs):\n+ self.retain_inputs((0,))\n+ x, = inputs\n+ xp = cuda.get_array_module(x)\n+ dtype = x.dtype\n+ if xp is numpy:\n+ out = xp.reciprocal(xp.sqrt(x, dtype=dtype), dtype=dtype)\n+ else:\n+ # CuPy provides `rsqrt` which is faster than `1.0 / sqrt(x)`.\n+ out = cuda.cupyx.rsqrt(x, dtype=dtype)\n+ return utils.force_array(out),\n+\n+ def backward(self, indexes, grad_outputs):\n+ x, = self.get_retained_inputs()\n+ gy, = grad_outputs\n+ return gy * (x ** -1.5) * -0.5,\n+\n+\n def sqrt(x):\n \"\"\"Elementwise square root function.\n \n@@ -59,4 +91,4 @@\n \n .. seealso:: :func:`~chainer.functions.sqrt`\n \"\"\"\n- return 1.0 / sqrt(x)\n+ return Rsqrt().apply((x,))[0]\n", "issue": "Support optimization for rsqrt\nCUDA provides `rsqrt` function, which is faster than doing `1.0 / sqrt(x)`.\r\nIt is better to provide custom kernel in Chainer or directly support in CuPy.\r\n(Note that NumPy does not provide `rsqrt`)\n", "before_files": [{"content": "from chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer import utils\nfrom chainer.utils import type_check\n\n\nclass Sqrt(function_node.FunctionNode):\n\n @property\n def label(self):\n return 'sqrt'\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n in_types[0].dtype.kind == 'f',\n )\n\n def forward(self, x):\n self.retain_outputs((0,))\n xp = cuda.get_array_module(*x)\n return utils.force_array(xp.sqrt(x[0], dtype=x[0].dtype)),\n\n def backward(self, indexes, grad_outputs):\n gx = self.get_retained_outputs()[0]\n gy = grad_outputs[0]\n return gy / (gx * 2.0),\n\n\ndef sqrt(x):\n \"\"\"Elementwise square root function.\n\n .. math::\n y_i = \\\\sqrt x_i.\n\n If the value of :math:`x_i` is negative, it returns ``Nan`` for :math:`y_i`\n respect to underlying numpy and cupy specification.\n\n Args:\n x (~chainer.Variable): Input variable.\n\n Returns:\n ~chainer.Variable: Output variable.\n \"\"\"\n return Sqrt().apply((x,))[0]\n\n\ndef rsqrt(x):\n \"\"\"Computes elementwise reciprocal of square root of input :math:`x_i`.\n\n .. math::\n y_i = {1 \\\\over \\\\sqrt x_i}.\n\n Args:\n x (~chainer.Variable): Input variable.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. seealso:: :func:`~chainer.functions.sqrt`\n \"\"\"\n return 1.0 / sqrt(x)\n", "path": "chainer/functions/math/sqrt.py"}]}
1,096
412
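A NumPy-only sketch checking the two identities the patch relies on: the forward value `rsqrt(x) == 1/sqrt(x)` and the analytic gradient `-0.5 * x**-1.5` returned by `backward`, compared against central finite differences. float64 keeps the comparison tight; CuPy's `rsqrt` itself is not exercised here:

```python
import numpy as np

x = np.linspace(0.5, 4.0, 8, dtype=np.float64)

# Forward: rsqrt(x) == 1 / sqrt(x)
y = np.reciprocal(np.sqrt(x))
np.testing.assert_allclose(y, x ** -0.5)

# Backward: d/dx x**-0.5 == -0.5 * x**-1.5, checked by central differences
eps = 1e-6
numeric = ((x + eps) ** -0.5 - (x - eps) ** -0.5) / (2 * eps)
analytic = -0.5 * x ** -1.5
np.testing.assert_allclose(numeric, analytic, rtol=1e-6)
```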
gh_patches_debug_16856
rasdani/github-patches
git_diff
googleapis__google-cloud-python-3348
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error reporting system tests needed Follow up to #3263. </issue> <code> [start of error_reporting/nox.py] 1 # Copyright 2016 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 17 import os 18 19 import nox 20 21 22 LOCAL_DEPS = ('../core/', '../logging/') 23 24 25 @nox.session 26 @nox.parametrize('python_version', ['2.7', '3.4', '3.5', '3.6']) 27 def unit_tests(session, python_version): 28 """Run the unit test suite.""" 29 30 # Run unit tests against all supported versions of Python. 31 session.interpreter = 'python{}'.format(python_version) 32 33 # Install all test dependencies, then install this package in-place. 34 session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS) 35 session.install('-e', '.') 36 37 # Run py.test against the unit tests. 38 session.run( 39 'py.test', '--quiet', '--cov=google.cloud.error_reporting', 40 '--cov=tests.unit', '--cov-append', '--cov-config=.coveragerc', 41 '--cov-report=', '--cov-fail-under=97', 'tests/unit', 42 ) 43 44 45 @nox.session 46 def lint(session): 47 """Run flake8. 48 49 Returns a failure if flake8 finds linting errors or sufficiently 50 serious code quality issues. 51 """ 52 session.interpreter = 'python3.6' 53 session.install('flake8', *LOCAL_DEPS) 54 session.install('.') 55 session.run('flake8', 'google/cloud/error_reporting') 56 57 58 @nox.session 59 def lint_setup_py(session): 60 """Verify that setup.py is valid (including RST check).""" 61 session.interpreter = 'python3.6' 62 session.install('docutils', 'Pygments') 63 session.run( 64 'python', 'setup.py', 'check', '--restructuredtext', '--strict') 65 66 67 @nox.session 68 def cover(session): 69 """Run the final coverage report. 70 71 This outputs the coverage report aggregating coverage from the unit 72 test runs (not system test runs), and then erases coverage data. 73 """ 74 session.interpreter = 'python3.6' 75 session.install('coverage', 'pytest-cov') 76 session.run('coverage', 'report', '--show-missing', '--fail-under=100') 77 session.run('coverage', 'erase') 78 [end of error_reporting/nox.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/error_reporting/nox.py b/error_reporting/nox.py --- a/error_reporting/nox.py +++ b/error_reporting/nox.py @@ -64,6 +64,28 @@ 'python', 'setup.py', 'check', '--restructuredtext', '--strict') [email protected] [email protected]('python_version', ['2.7', '3.6']) +def system_tests(session, python_version): + """Run the system test suite.""" + + # Sanity check: Only run system tests if the environment variable is set. + if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): + return + + # Run the system tests against latest Python 2 and Python 3 only. + session.interpreter = 'python{}'.format(python_version) + + # Install all test dependencies, then install this package into the + # virtualenv's dist-packages. + session.install('mock', 'pytest', *LOCAL_DEPS) + session.install('../test_utils/') + session.install('.') + + # Run py.test against the system tests. + session.run('py.test', '-vvv', 'tests/system.py') + + @nox.session def cover(session): """Run the final coverage report.
{"golden_diff": "diff --git a/error_reporting/nox.py b/error_reporting/nox.py\n--- a/error_reporting/nox.py\n+++ b/error_reporting/nox.py\n@@ -64,6 +64,28 @@\n 'python', 'setup.py', 'check', '--restructuredtext', '--strict')\n \n \[email protected]\[email protected]('python_version', ['2.7', '3.6'])\n+def system_tests(session, python_version):\n+ \"\"\"Run the system test suite.\"\"\"\n+\n+ # Sanity check: Only run system tests if the environment variable is set.\n+ if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):\n+ return\n+\n+ # Run the system tests against latest Python 2 and Python 3 only.\n+ session.interpreter = 'python{}'.format(python_version)\n+\n+ # Install all test dependencies, then install this package into the\n+ # virtualenv's dist-packages.\n+ session.install('mock', 'pytest', *LOCAL_DEPS)\n+ session.install('../test_utils/')\n+ session.install('.')\n+\n+ # Run py.test against the system tests.\n+ session.run('py.test', '-vvv', 'tests/system.py')\n+\n+\n @nox.session\n def cover(session):\n \"\"\"Run the final coverage report.\n", "issue": "Error reporting system tests needed\nFollow up to #3263.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport os\n\nimport nox\n\n\nLOCAL_DEPS = ('../core/', '../logging/')\n\n\[email protected]\[email protected]('python_version', ['2.7', '3.4', '3.5', '3.6'])\ndef unit_tests(session, python_version):\n \"\"\"Run the unit test suite.\"\"\"\n\n # Run unit tests against all supported versions of Python.\n session.interpreter = 'python{}'.format(python_version)\n\n # Install all test dependencies, then install this package in-place.\n session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)\n session.install('-e', '.')\n\n # Run py.test against the unit tests.\n session.run(\n 'py.test', '--quiet', '--cov=google.cloud.error_reporting',\n '--cov=tests.unit', '--cov-append', '--cov-config=.coveragerc',\n '--cov-report=', '--cov-fail-under=97', 'tests/unit',\n )\n\n\[email protected]\ndef lint(session):\n \"\"\"Run flake8.\n\n Returns a failure if flake8 finds linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n session.interpreter = 'python3.6'\n session.install('flake8', *LOCAL_DEPS)\n session.install('.')\n session.run('flake8', 'google/cloud/error_reporting')\n\n\[email protected]\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n session.interpreter = 'python3.6'\n session.install('docutils', 'Pygments')\n session.run(\n 'python', 'setup.py', 'check', '--restructuredtext', '--strict')\n\n\[email protected]\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.interpreter = 'python3.6'\n session.install('coverage', 'pytest-cov')\n session.run('coverage', 'report', '--show-missing', 
'--fail-under=100')\n session.run('coverage', 'erase')\n", "path": "error_reporting/nox.py"}]}
num_tokens_prompt: 1,302
num_tokens_diff: 279
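As context for the record above: the added `system_tests` nox session is gated on a service-account variable so CI runs without credentials skip it silently. A minimal stand-alone sketch of that gating pattern, using the same old-style `nox.py` API as the diff; the dependency list and test path are illustrative placeholders, not part of the record:

```python
import os

import nox


@nox.session
def system_tests(session):
    # Skip quietly when no Google service-account credentials are configured,
    # mirroring the environment guard in the golden diff above.
    if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
        return

    session.install('mock', 'pytest')  # illustrative test dependencies
    session.install('.')
    session.run('py.test', '-vvv', 'tests/system.py')  # placeholder path
```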
problem_id: gh_patches_debug_12060
source: rasdani/github-patches
task_type: git_diff
in_source_id: ivy-llc__ivy-21310
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> adaptive_avg_pool2d Close #20804 </issue> <code> [start of ivy/functional/frontends/mindspore/ops/function/nn_func.py] 1 """Includes Mindspore Frontend functions listed in the TODO list 2 https://github.com/unifyai/ivy/issues/14951.""" 3 4 # local 5 import ivy 6 from ivy.func_wrapper import with_supported_dtypes 7 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back 8 9 10 @with_supported_dtypes( 11 { 12 "2.0.0 and below": ( 13 "int8", 14 "int16", 15 "int32", 16 "int64", 17 "float16", 18 "float32", 19 "float64", 20 ) 21 }, 22 "mindspore", 23 ) 24 @to_ivy_arrays_and_back 25 def dropout2d(input, p=0.5, training=True): 26 return ivy.dropout2d(input, p, training=training, data_format="NCHW") 27 28 29 @with_supported_dtypes({"2.0.0 and below": ("float16", "float32")}, "mindspore") 30 @to_ivy_arrays_and_back 31 def selu(input_x): 32 return ivy.selu(input_x) 33 34 35 @with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore") 36 @to_ivy_arrays_and_back 37 def softsign(x): 38 return ivy.divide(x, ivy.add(1, ivy.abs(x))) 39 40 @with_supported_dtypes({"2.0 and below": ("int8", "int16", "int32", "int64", "float16", "float32", "float64")}, "mindspore") 41 @to_ivy_arrays_and_back 42 def pad(input, pad_width, mode='constant', constant_values=0): 43 return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values) [end of ivy/functional/frontends/mindspore/ops/function/nn_func.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/mindspore/ops/function/nn_func.py b/ivy/functional/frontends/mindspore/ops/function/nn_func.py --- a/ivy/functional/frontends/mindspore/ops/function/nn_func.py +++ b/ivy/functional/frontends/mindspore/ops/function/nn_func.py @@ -40,4 +40,10 @@ @with_supported_dtypes({"2.0 and below": ("int8", "int16", "int32", "int64", "float16", "float32", "float64")}, "mindspore") @to_ivy_arrays_and_back def pad(input, pad_width, mode='constant', constant_values=0): - return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values) \ No newline at end of file + return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values) + + +@with_supported_dtypes({"2.0.0 and below": ("float16", "float32", "float64")}, "mindspore") +@to_ivy_arrays_and_back +def adaptive_avg_pool2d(input, output_size): + return ivy.adaptive_avg_pool2d(input, output_size)
{"golden_diff": "diff --git a/ivy/functional/frontends/mindspore/ops/function/nn_func.py b/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n--- a/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n+++ b/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n@@ -40,4 +40,10 @@\n @with_supported_dtypes({\"2.0 and below\": (\"int8\", \"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")}, \"mindspore\")\n @to_ivy_arrays_and_back\n def pad(input, pad_width, mode='constant', constant_values=0):\n- return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values)\n\\ No newline at end of file\n+ return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values)\n+\n+\n+@with_supported_dtypes({\"2.0.0 and below\": (\"float16\", \"float32\", \"float64\")}, \"mindspore\")\n+@to_ivy_arrays_and_back\n+def adaptive_avg_pool2d(input, output_size):\n+ return ivy.adaptive_avg_pool2d(input, output_size)\n", "issue": "adaptive_avg_pool2d\nClose #20804\n", "before_files": [{"content": "\"\"\"Includes Mindspore Frontend functions listed in the TODO list\nhttps://github.com/unifyai/ivy/issues/14951.\"\"\"\n\n# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_supported_dtypes(\n {\n \"2.0.0 and below\": (\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float16\",\n \"float32\",\n \"float64\",\n )\n },\n \"mindspore\",\n)\n@to_ivy_arrays_and_back\ndef dropout2d(input, p=0.5, training=True):\n return ivy.dropout2d(input, p, training=training, data_format=\"NCHW\")\n\n\n@with_supported_dtypes({\"2.0.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef selu(input_x):\n return ivy.selu(input_x)\n\n\n@with_supported_dtypes({\"2.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef softsign(x):\n return ivy.divide(x, ivy.add(1, ivy.abs(x)))\n\n@with_supported_dtypes({\"2.0 and below\": (\"int8\", \"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef pad(input, pad_width, mode='constant', constant_values=0):\n return ivy.pad(input, pad_width, mode=mode, constant_values=constant_values)", "path": "ivy/functional/frontends/mindspore/ops/function/nn_func.py"}]}
num_tokens_prompt: 1,049
num_tokens_diff: 295
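A hedged usage sketch for the `adaptive_avg_pool2d` frontend added by the golden diff above. It assumes the active ivy backend implements `ivy.adaptive_avg_pool2d` (the new function merely forwards to it); the backend choice and shapes are illustrative, while the import path mirrors the patched file:

```python
import ivy
from ivy.functional.frontends.mindspore.ops.function.nn_func import (
    adaptive_avg_pool2d,
)

ivy.set_backend("torch")  # any backend with adaptive_avg_pool2d support

x = ivy.random_normal(shape=(1, 3, 32, 32))  # NCHW float input
y = adaptive_avg_pool2d(x, (8, 8))
print(y.shape)  # expected: (1, 3, 8, 8)
```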
problem_id: gh_patches_debug_10346
source: rasdani/github-patches
task_type: git_diff
in_source_id: liberapay__liberapay.com-180
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix connecting a GitHub org I tried connecting the GitHub liberapay org to the [LiberapayOrg](https://liberapay.com/LiberapayOrg/) account but I ended up on the take-over confirmation page asking me to transfer my personal account. </issue> <code> [start of liberapay/elsewhere/github.py] 1 from __future__ import absolute_import, division, print_function, unicode_literals 2 3 from liberapay.elsewhere import PlatformOAuth2 4 from liberapay.elsewhere._extractors import key 5 from liberapay.elsewhere._paginators import header_links_paginator 6 7 8 class GitHub(PlatformOAuth2): 9 10 # Platform attributes 11 name = 'github' 12 display_name = 'GitHub' 13 account_url = 'https://github.com/{user_name}' 14 allows_team_connect = True 15 16 # Auth attributes 17 auth_url = 'https://github.com/login/oauth/authorize' 18 access_token_url = 'https://github.com/login/oauth/access_token' 19 oauth_email_scope = 'user:email' 20 oauth_default_scope = ['read:org'] 21 22 # API attributes 23 api_format = 'json' 24 api_paginator = header_links_paginator() 25 api_url = 'https://api.github.com' 26 api_user_info_path = '/user/{user_id}' 27 api_user_name_info_path = '/users/{user_name}' 28 api_user_self_info_path = '/user' 29 api_team_members_path = '/orgs/{user_name}/public_members' 30 api_friends_path = '/users/{user_name}/following' 31 ratelimit_headers_prefix = 'x-ratelimit-' 32 33 # User info extractors 34 x_user_id = key('id') 35 x_user_name = key('login') 36 x_display_name = key('name') 37 x_email = key('email') 38 x_gravatar_id = key('gravatar_id') 39 x_avatar_url = key('avatar_url') 40 x_is_team = key('type', clean=lambda t: t.lower() == 'organization') 41 42 def is_team_admin(self, team_name, sess): 43 user_teams = self.api_parser(self.api_get('/user/teams', sess=sess)) 44 return any(team.get('organization', {}).get('login') == team_name and 45 team.get('permission') == 'admin' 46 for team in user_teams) 47 [end of liberapay/elsewhere/github.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/liberapay/elsewhere/github.py b/liberapay/elsewhere/github.py --- a/liberapay/elsewhere/github.py +++ b/liberapay/elsewhere/github.py @@ -39,8 +39,7 @@ x_avatar_url = key('avatar_url') x_is_team = key('type', clean=lambda t: t.lower() == 'organization') - def is_team_admin(self, team_name, sess): - user_teams = self.api_parser(self.api_get('/user/teams', sess=sess)) - return any(team.get('organization', {}).get('login') == team_name and - team.get('permission') == 'admin' - for team in user_teams) + def is_team_member(self, org_name, sess): + org_name = org_name.lower() + user_orgs = self.api_parser(self.api_get('/user/orgs', sess=sess)) + return any(org.get('login') == org_name for org in user_orgs)
{"golden_diff": "diff --git a/liberapay/elsewhere/github.py b/liberapay/elsewhere/github.py\n--- a/liberapay/elsewhere/github.py\n+++ b/liberapay/elsewhere/github.py\n@@ -39,8 +39,7 @@\n x_avatar_url = key('avatar_url')\n x_is_team = key('type', clean=lambda t: t.lower() == 'organization')\n \n- def is_team_admin(self, team_name, sess):\n- user_teams = self.api_parser(self.api_get('/user/teams', sess=sess))\n- return any(team.get('organization', {}).get('login') == team_name and\n- team.get('permission') == 'admin'\n- for team in user_teams)\n+ def is_team_member(self, org_name, sess):\n+ org_name = org_name.lower()\n+ user_orgs = self.api_parser(self.api_get('/user/orgs', sess=sess))\n+ return any(org.get('login') == org_name for org in user_orgs)\n", "issue": "Fix connecting a GitHub org\nI tried connecting the GitHub liberapay org to the [LiberapayOrg](https://liberapay.com/LiberapayOrg/) account but I ended up on the take-over confirmation page asking me to transfer my personal account.\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom liberapay.elsewhere import PlatformOAuth2\nfrom liberapay.elsewhere._extractors import key\nfrom liberapay.elsewhere._paginators import header_links_paginator\n\n\nclass GitHub(PlatformOAuth2):\n\n # Platform attributes\n name = 'github'\n display_name = 'GitHub'\n account_url = 'https://github.com/{user_name}'\n allows_team_connect = True\n\n # Auth attributes\n auth_url = 'https://github.com/login/oauth/authorize'\n access_token_url = 'https://github.com/login/oauth/access_token'\n oauth_email_scope = 'user:email'\n oauth_default_scope = ['read:org']\n\n # API attributes\n api_format = 'json'\n api_paginator = header_links_paginator()\n api_url = 'https://api.github.com'\n api_user_info_path = '/user/{user_id}'\n api_user_name_info_path = '/users/{user_name}'\n api_user_self_info_path = '/user'\n api_team_members_path = '/orgs/{user_name}/public_members'\n api_friends_path = '/users/{user_name}/following'\n ratelimit_headers_prefix = 'x-ratelimit-'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('login')\n x_display_name = key('name')\n x_email = key('email')\n x_gravatar_id = key('gravatar_id')\n x_avatar_url = key('avatar_url')\n x_is_team = key('type', clean=lambda t: t.lower() == 'organization')\n\n def is_team_admin(self, team_name, sess):\n user_teams = self.api_parser(self.api_get('/user/teams', sess=sess))\n return any(team.get('organization', {}).get('login') == team_name and\n team.get('permission') == 'admin'\n for team in user_teams)\n", "path": "liberapay/elsewhere/github.py"}]}
num_tokens_prompt: 1,109
num_tokens_diff: 225
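The fix above replaces a team-permission check with an org-membership check against GitHub's `/user/orgs` endpoint, which is why connecting an org no longer falls through to the personal-account take-over page. A stand-alone sketch of that logic using `requests` instead of the platform's session wrapper; the header construction and case folding are assumptions for illustration, not Liberapay code:

```python
import requests


def is_org_member(org_name, token):
    """Return True if the authenticated user belongs to the organization."""
    resp = requests.get(
        "https://api.github.com/user/orgs",
        headers={"Authorization": "token %s" % token},
    )
    resp.raise_for_status()
    org_name = org_name.lower()
    return any(org.get("login", "").lower() == org_name for org in resp.json())
```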
problem_id: gh_patches_debug_9374
source: rasdani/github-patches
task_type: git_diff
in_source_id: pyca__cryptography-1422
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Key versioning/key rotation support (wishlist) It would be nice if Fernet had built-in support for key versioning. I don't know exactly what the API would look like, but the idea would be to allow encrypting new data under a new key while still retaining the ability to decrypt old data that was encrypted under older keys. If Fernet supported this natively, then I could tell developers to "just use Fernet" when they first write their applications, and later when they get a real security engineer, that person could start doing key rotation without having to change application-level code or data structures. </issue> <code> [start of cryptography/fernet.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 # implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from __future__ import absolute_import, division, print_function 15 16 import base64 17 import binascii 18 import os 19 import struct 20 import time 21 22 import six 23 24 from cryptography.exceptions import InvalidSignature 25 from cryptography.hazmat.backends import default_backend 26 from cryptography.hazmat.primitives import hashes, padding 27 from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes 28 from cryptography.hazmat.primitives.hmac import HMAC 29 30 31 class InvalidToken(Exception): 32 pass 33 34 35 _MAX_CLOCK_SKEW = 60 36 37 38 class Fernet(object): 39 def __init__(self, key, backend=None): 40 if backend is None: 41 backend = default_backend() 42 43 key = base64.urlsafe_b64decode(key) 44 if len(key) != 32: 45 raise ValueError( 46 "Fernet key must be 32 url-safe base64-encoded bytes." 
47 ) 48 49 self._signing_key = key[:16] 50 self._encryption_key = key[16:] 51 self._backend = backend 52 53 @classmethod 54 def generate_key(cls): 55 return base64.urlsafe_b64encode(os.urandom(32)) 56 57 def encrypt(self, data): 58 current_time = int(time.time()) 59 iv = os.urandom(16) 60 return self._encrypt_from_parts(data, current_time, iv) 61 62 def _encrypt_from_parts(self, data, current_time, iv): 63 if not isinstance(data, bytes): 64 raise TypeError("data must be bytes.") 65 66 padder = padding.PKCS7(algorithms.AES.block_size).padder() 67 padded_data = padder.update(data) + padder.finalize() 68 encryptor = Cipher( 69 algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend 70 ).encryptor() 71 ciphertext = encryptor.update(padded_data) + encryptor.finalize() 72 73 basic_parts = ( 74 b"\x80" + struct.pack(">Q", current_time) + iv + ciphertext 75 ) 76 77 h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend) 78 h.update(basic_parts) 79 hmac = h.finalize() 80 return base64.urlsafe_b64encode(basic_parts + hmac) 81 82 def decrypt(self, token, ttl=None): 83 if not isinstance(token, bytes): 84 raise TypeError("token must be bytes.") 85 86 current_time = int(time.time()) 87 88 try: 89 data = base64.urlsafe_b64decode(token) 90 except (TypeError, binascii.Error): 91 raise InvalidToken 92 93 if not data or six.indexbytes(data, 0) != 0x80: 94 raise InvalidToken 95 96 try: 97 timestamp, = struct.unpack(">Q", data[1:9]) 98 except struct.error: 99 raise InvalidToken 100 if ttl is not None: 101 if timestamp + ttl < current_time: 102 raise InvalidToken 103 if current_time + _MAX_CLOCK_SKEW < timestamp: 104 raise InvalidToken 105 h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend) 106 h.update(data[:-32]) 107 try: 108 h.verify(data[-32:]) 109 except InvalidSignature: 110 raise InvalidToken 111 112 iv = data[9:25] 113 ciphertext = data[25:-32] 114 decryptor = Cipher( 115 algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend 116 ).decryptor() 117 plaintext_padded = decryptor.update(ciphertext) 118 try: 119 plaintext_padded += decryptor.finalize() 120 except ValueError: 121 raise InvalidToken 122 unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder() 123 124 unpadded = unpadder.update(plaintext_padded) 125 try: 126 unpadded += unpadder.finalize() 127 except ValueError: 128 raise InvalidToken 129 return unpadded 130 [end of cryptography/fernet.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cryptography/fernet.py b/cryptography/fernet.py --- a/cryptography/fernet.py +++ b/cryptography/fernet.py @@ -127,3 +127,24 @@ except ValueError: raise InvalidToken return unpadded + + +class MultiFernet(object): + def __init__(self, fernets): + fernets = list(fernets) + if not fernets: + raise ValueError( + "MultiFernet requires at least one Fernet instance" + ) + self._fernets = fernets + + def encrypt(self, msg): + return self._fernets[0].encrypt(msg) + + def decrypt(self, msg, ttl=None): + for f in self._fernets: + try: + return f.decrypt(msg, ttl) + except InvalidToken: + pass + raise InvalidToken
{"golden_diff": "diff --git a/cryptography/fernet.py b/cryptography/fernet.py\n--- a/cryptography/fernet.py\n+++ b/cryptography/fernet.py\n@@ -127,3 +127,24 @@\n except ValueError:\n raise InvalidToken\n return unpadded\n+\n+\n+class MultiFernet(object):\n+ def __init__(self, fernets):\n+ fernets = list(fernets)\n+ if not fernets:\n+ raise ValueError(\n+ \"MultiFernet requires at least one Fernet instance\"\n+ )\n+ self._fernets = fernets\n+\n+ def encrypt(self, msg):\n+ return self._fernets[0].encrypt(msg)\n+\n+ def decrypt(self, msg, ttl=None):\n+ for f in self._fernets:\n+ try:\n+ return f.decrypt(msg, ttl)\n+ except InvalidToken:\n+ pass\n+ raise InvalidToken\n", "issue": "Key versioning/key rotation support (wishlist)\nIt would be nice if Fernet had built-in support for key versioning. I don't know exactly what the API would look like, but the idea would be to allow encrypting new data under a new key while still retaining the ability to decrypt old data that was encrypted under older keys.\n\nIf Fernet supported this natively, then I could tell developers to \"just use Fernet\" when they first write their applications, and later when they get a real security engineer, that person could start doing key rotation without having to change application-level code or data structures.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport base64\nimport binascii\nimport os\nimport struct\nimport time\n\nimport six\n\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes, padding\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives.hmac import HMAC\n\n\nclass InvalidToken(Exception):\n pass\n\n\n_MAX_CLOCK_SKEW = 60\n\n\nclass Fernet(object):\n def __init__(self, key, backend=None):\n if backend is None:\n backend = default_backend()\n\n key = base64.urlsafe_b64decode(key)\n if len(key) != 32:\n raise ValueError(\n \"Fernet key must be 32 url-safe base64-encoded bytes.\"\n )\n\n self._signing_key = key[:16]\n self._encryption_key = key[16:]\n self._backend = backend\n\n @classmethod\n def generate_key(cls):\n return base64.urlsafe_b64encode(os.urandom(32))\n\n def encrypt(self, data):\n current_time = int(time.time())\n iv = os.urandom(16)\n return self._encrypt_from_parts(data, current_time, iv)\n\n def _encrypt_from_parts(self, data, current_time, iv):\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes.\")\n\n padder = padding.PKCS7(algorithms.AES.block_size).padder()\n padded_data = padder.update(data) + padder.finalize()\n encryptor = Cipher(\n algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend\n ).encryptor()\n ciphertext = encryptor.update(padded_data) + encryptor.finalize()\n\n basic_parts = (\n b\"\\x80\" + struct.pack(\">Q\", current_time) + iv + ciphertext\n )\n\n h = 
HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(basic_parts)\n hmac = h.finalize()\n return base64.urlsafe_b64encode(basic_parts + hmac)\n\n def decrypt(self, token, ttl=None):\n if not isinstance(token, bytes):\n raise TypeError(\"token must be bytes.\")\n\n current_time = int(time.time())\n\n try:\n data = base64.urlsafe_b64decode(token)\n except (TypeError, binascii.Error):\n raise InvalidToken\n\n if not data or six.indexbytes(data, 0) != 0x80:\n raise InvalidToken\n\n try:\n timestamp, = struct.unpack(\">Q\", data[1:9])\n except struct.error:\n raise InvalidToken\n if ttl is not None:\n if timestamp + ttl < current_time:\n raise InvalidToken\n if current_time + _MAX_CLOCK_SKEW < timestamp:\n raise InvalidToken\n h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(data[:-32])\n try:\n h.verify(data[-32:])\n except InvalidSignature:\n raise InvalidToken\n\n iv = data[9:25]\n ciphertext = data[25:-32]\n decryptor = Cipher(\n algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend\n ).decryptor()\n plaintext_padded = decryptor.update(ciphertext)\n try:\n plaintext_padded += decryptor.finalize()\n except ValueError:\n raise InvalidToken\n unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()\n\n unpadded = unpadder.update(plaintext_padded)\n try:\n unpadded += unpadder.finalize()\n except ValueError:\n raise InvalidToken\n return unpadded\n", "path": "cryptography/fernet.py"}]}
num_tokens_prompt: 1,915
num_tokens_diff: 208
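The `MultiFernet` class introduced by this diff answers the key-rotation wishlist directly: encrypt under the first key, decrypt by trying every key in order. A short usage sketch; the keys are generated on the fly, so all byte values are illustrative:

```python
from cryptography.fernet import Fernet, MultiFernet

old_key = Fernet(Fernet.generate_key())
new_key = Fernet(Fernet.generate_key())

# New tokens use new_key (the first entry); old tokens stay decryptable.
f = MultiFernet([new_key, old_key])

legacy_token = old_key.encrypt(b"issued before rotation")
assert f.decrypt(legacy_token) == b"issued before rotation"
assert f.decrypt(f.encrypt(b"fresh")) == b"fresh"
```

Because `decrypt` raises `InvalidToken` only after every key fails, rotating keys amounts to prepending a new key to the list.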
problem_id: gh_patches_debug_24010
source: rasdani/github-patches
task_type: git_diff
in_source_id: jazzband__pip-tools-798
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> --cert option for pip-sync pip-sync is missing the `--cert` and `--client-cert` options, that are (somehow, see #712) implemented for pip-compile. </issue> <code> [start of piptools/scripts/sync.py] 1 # coding: utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import os 5 import sys 6 7 from .. import click, sync 8 from .._compat import get_installed_distributions, parse_requirements 9 from ..exceptions import PipToolsError 10 from ..logging import log 11 from ..utils import flat_map 12 13 DEFAULT_REQUIREMENTS_FILE = "requirements.txt" 14 15 16 @click.command() 17 @click.version_option() 18 @click.option( 19 "-n", 20 "--dry-run", 21 is_flag=True, 22 help="Only show what would happen, don't change anything", 23 ) 24 @click.option("--force", is_flag=True, help="Proceed even if conflicts are found") 25 @click.option( 26 "-f", 27 "--find-links", 28 multiple=True, 29 help="Look for archives in this directory or on this HTML page", 30 envvar="PIP_FIND_LINKS", 31 ) 32 @click.option( 33 "-i", 34 "--index-url", 35 help="Change index URL (defaults to PyPI)", 36 envvar="PIP_INDEX_URL", 37 ) 38 @click.option( 39 "--extra-index-url", 40 multiple=True, 41 help="Add additional index URL to search", 42 envvar="PIP_EXTRA_INDEX_URL", 43 ) 44 @click.option( 45 "--trusted-host", 46 multiple=True, 47 help="Mark this host as trusted, even though it does not have valid or any HTTPS.", 48 ) 49 @click.option( 50 "--no-index", 51 is_flag=True, 52 help="Ignore package index (only looking at --find-links URLs instead)", 53 ) 54 @click.option("-q", "--quiet", default=False, is_flag=True, help="Give less output") 55 @click.option( 56 "--user", "user_only", is_flag=True, help="Restrict attention to user directory" 57 ) 58 @click.argument("src_files", required=False, type=click.Path(exists=True), nargs=-1) 59 def cli( 60 dry_run, 61 force, 62 find_links, 63 index_url, 64 extra_index_url, 65 trusted_host, 66 no_index, 67 quiet, 68 user_only, 69 src_files, 70 ): 71 """Synchronize virtual environment with requirements.txt.""" 72 if not src_files: 73 if os.path.exists(DEFAULT_REQUIREMENTS_FILE): 74 src_files = (DEFAULT_REQUIREMENTS_FILE,) 75 else: 76 msg = "No requirement files given and no {} found in the current directory" 77 log.error(msg.format(DEFAULT_REQUIREMENTS_FILE)) 78 sys.exit(2) 79 80 if any(src_file.endswith(".in") for src_file in src_files): 81 msg = ( 82 "Some input files have the .in extension, which is most likely an error " 83 "and can cause weird behaviour. You probably meant to use " 84 "the corresponding *.txt file?" 
85 ) 86 if force: 87 log.warning("WARNING: " + msg) 88 else: 89 log.error("ERROR: " + msg) 90 sys.exit(2) 91 92 requirements = flat_map( 93 lambda src: parse_requirements(src, session=True), src_files 94 ) 95 96 try: 97 requirements = sync.merge(requirements, ignore_conflicts=force) 98 except PipToolsError as e: 99 log.error(str(e)) 100 sys.exit(2) 101 102 installed_dists = get_installed_distributions(skip=[], user_only=user_only) 103 to_install, to_uninstall = sync.diff(requirements, installed_dists) 104 105 install_flags = [] 106 for link in find_links or []: 107 install_flags.extend(["-f", link]) 108 if no_index: 109 install_flags.append("--no-index") 110 if index_url: 111 install_flags.extend(["-i", index_url]) 112 if extra_index_url: 113 for extra_index in extra_index_url: 114 install_flags.extend(["--extra-index-url", extra_index]) 115 if trusted_host: 116 for host in trusted_host: 117 install_flags.extend(["--trusted-host", host]) 118 if user_only: 119 install_flags.append("--user") 120 121 sys.exit( 122 sync.sync( 123 to_install, 124 to_uninstall, 125 verbose=(not quiet), 126 dry_run=dry_run, 127 install_flags=install_flags, 128 ) 129 ) 130 [end of piptools/scripts/sync.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/piptools/scripts/sync.py b/piptools/scripts/sync.py --- a/piptools/scripts/sync.py +++ b/piptools/scripts/sync.py @@ -55,6 +55,12 @@ @click.option( "--user", "user_only", is_flag=True, help="Restrict attention to user directory" ) [email protected]("--cert", help="Path to alternate CA bundle.") [email protected]( + "--client-cert", + help="Path to SSL client certificate, a single file containing " + "the private key and the certificate in PEM format.", +) @click.argument("src_files", required=False, type=click.Path(exists=True), nargs=-1) def cli( dry_run, @@ -66,6 +72,8 @@ no_index, quiet, user_only, + cert, + client_cert, src_files, ): """Synchronize virtual environment with requirements.txt.""" @@ -117,6 +125,10 @@ install_flags.extend(["--trusted-host", host]) if user_only: install_flags.append("--user") + if cert: + install_flags.extend(["--cert", cert]) + if client_cert: + install_flags.extend(["--client-cert", client_cert]) sys.exit( sync.sync(
{"golden_diff": "diff --git a/piptools/scripts/sync.py b/piptools/scripts/sync.py\n--- a/piptools/scripts/sync.py\n+++ b/piptools/scripts/sync.py\n@@ -55,6 +55,12 @@\n @click.option(\n \"--user\", \"user_only\", is_flag=True, help=\"Restrict attention to user directory\"\n )\[email protected](\"--cert\", help=\"Path to alternate CA bundle.\")\[email protected](\n+ \"--client-cert\",\n+ help=\"Path to SSL client certificate, a single file containing \"\n+ \"the private key and the certificate in PEM format.\",\n+)\n @click.argument(\"src_files\", required=False, type=click.Path(exists=True), nargs=-1)\n def cli(\n dry_run,\n@@ -66,6 +72,8 @@\n no_index,\n quiet,\n user_only,\n+ cert,\n+ client_cert,\n src_files,\n ):\n \"\"\"Synchronize virtual environment with requirements.txt.\"\"\"\n@@ -117,6 +125,10 @@\n install_flags.extend([\"--trusted-host\", host])\n if user_only:\n install_flags.append(\"--user\")\n+ if cert:\n+ install_flags.extend([\"--cert\", cert])\n+ if client_cert:\n+ install_flags.extend([\"--client-cert\", client_cert])\n \n sys.exit(\n sync.sync(\n", "issue": "--cert option for pip-sync\npip-sync is missing the `--cert` and `--client-cert` options, that are (somehow, see #712) implemented for pip-compile.\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom .. import click, sync\nfrom .._compat import get_installed_distributions, parse_requirements\nfrom ..exceptions import PipToolsError\nfrom ..logging import log\nfrom ..utils import flat_map\n\nDEFAULT_REQUIREMENTS_FILE = \"requirements.txt\"\n\n\[email protected]()\[email protected]_option()\[email protected](\n \"-n\",\n \"--dry-run\",\n is_flag=True,\n help=\"Only show what would happen, don't change anything\",\n)\[email protected](\"--force\", is_flag=True, help=\"Proceed even if conflicts are found\")\[email protected](\n \"-f\",\n \"--find-links\",\n multiple=True,\n help=\"Look for archives in this directory or on this HTML page\",\n envvar=\"PIP_FIND_LINKS\",\n)\[email protected](\n \"-i\",\n \"--index-url\",\n help=\"Change index URL (defaults to PyPI)\",\n envvar=\"PIP_INDEX_URL\",\n)\[email protected](\n \"--extra-index-url\",\n multiple=True,\n help=\"Add additional index URL to search\",\n envvar=\"PIP_EXTRA_INDEX_URL\",\n)\[email protected](\n \"--trusted-host\",\n multiple=True,\n help=\"Mark this host as trusted, even though it does not have valid or any HTTPS.\",\n)\[email protected](\n \"--no-index\",\n is_flag=True,\n help=\"Ignore package index (only looking at --find-links URLs instead)\",\n)\[email protected](\"-q\", \"--quiet\", default=False, is_flag=True, help=\"Give less output\")\[email protected](\n \"--user\", \"user_only\", is_flag=True, help=\"Restrict attention to user directory\"\n)\[email protected](\"src_files\", required=False, type=click.Path(exists=True), nargs=-1)\ndef cli(\n dry_run,\n force,\n find_links,\n index_url,\n extra_index_url,\n trusted_host,\n no_index,\n quiet,\n user_only,\n src_files,\n):\n \"\"\"Synchronize virtual environment with requirements.txt.\"\"\"\n if not src_files:\n if os.path.exists(DEFAULT_REQUIREMENTS_FILE):\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n else:\n msg = \"No requirement files given and no {} found in the current directory\"\n log.error(msg.format(DEFAULT_REQUIREMENTS_FILE))\n sys.exit(2)\n\n if any(src_file.endswith(\".in\") for src_file in src_files):\n msg = (\n \"Some input files have the .in extension, which is most likely 
an error \"\n \"and can cause weird behaviour. You probably meant to use \"\n \"the corresponding *.txt file?\"\n )\n if force:\n log.warning(\"WARNING: \" + msg)\n else:\n log.error(\"ERROR: \" + msg)\n sys.exit(2)\n\n requirements = flat_map(\n lambda src: parse_requirements(src, session=True), src_files\n )\n\n try:\n requirements = sync.merge(requirements, ignore_conflicts=force)\n except PipToolsError as e:\n log.error(str(e))\n sys.exit(2)\n\n installed_dists = get_installed_distributions(skip=[], user_only=user_only)\n to_install, to_uninstall = sync.diff(requirements, installed_dists)\n\n install_flags = []\n for link in find_links or []:\n install_flags.extend([\"-f\", link])\n if no_index:\n install_flags.append(\"--no-index\")\n if index_url:\n install_flags.extend([\"-i\", index_url])\n if extra_index_url:\n for extra_index in extra_index_url:\n install_flags.extend([\"--extra-index-url\", extra_index])\n if trusted_host:\n for host in trusted_host:\n install_flags.extend([\"--trusted-host\", host])\n if user_only:\n install_flags.append(\"--user\")\n\n sys.exit(\n sync.sync(\n to_install,\n to_uninstall,\n verbose=(not quiet),\n dry_run=dry_run,\n install_flags=install_flags,\n )\n )\n", "path": "piptools/scripts/sync.py"}]}
num_tokens_prompt: 1,718
num_tokens_diff: 294
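The patch forwards the two new options to `pip install` untouched. A tiny sketch of the flag plumbing it adds, with placeholder certificate paths standing in for the values the click options would supply:

```python
# Equivalent CLI call after the patch (paths are placeholders):
#   pip-sync --cert /etc/ssl/internal-ca.pem --client-cert ~/.ssl/client.pem
cert = "/etc/ssl/internal-ca.pem"
client_cert = "~/.ssl/client.pem"

install_flags = []
if cert:
    install_flags.extend(["--cert", cert])
if client_cert:
    install_flags.extend(["--client-cert", client_cert])

assert install_flags == [
    "--cert", "/etc/ssl/internal-ca.pem",
    "--client-cert", "~/.ssl/client.pem",
]
```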
problem_id: gh_patches_debug_11925
source: rasdani/github-patches
task_type: git_diff
in_source_id: elastic__apm-agent-python-1155
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> RemovedInDjango41Warning **Describe the bug**: ... ```shell ../../../../.pyenv/versions/3.8.2/lib/python3.8/site-packages/django/apps/registry.py:91 /Users/mingyu.wu/.pyenv/versions/3.8.2/lib/python3.8/site-packages/django/apps/registry.py:91: RemovedInDjango41Warning: 'elasticapm.contrib.django' defines default_app_config = 'elasticapm.contrib.django.apps.ElasticAPMConfig'. Django now detects this configuration automatically. You can remove default_app_config. app_config = AppConfig.create(entry) -- Docs: https://docs.pytest.org/en/stable/warnings.html ``` **To Reproduce** 1. run unit test or start server **Environment (please complete the following information)** - OS: Linux/MacOS - Python version:3.8.2 - Framework and version: Django 3.2.2 - APM Server version: * - Agent version: 6.1.3 </issue> <code> [start of elasticapm/contrib/django/__init__.py] 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2019, Elasticsearch BV 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # 9 # * Redistributions of source code must retain the above copyright notice, this 10 # list of conditions and the following disclaimer. 11 # 12 # * Redistributions in binary form must reproduce the above copyright notice, 13 # this list of conditions and the following disclaimer in the documentation 14 # and/or other materials provided with the distribution. 15 # 16 # * Neither the name of the copyright holder nor the names of its 17 # contributors may be used to endorse or promote products derived from 18 # this software without specific prior written permission. 19 # 20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 31 from elasticapm.contrib.django.client import * # noqa E401 32 33 default_app_config = "elasticapm.contrib.django.apps.ElasticAPMConfig" 34 [end of elasticapm/contrib/django/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/elasticapm/contrib/django/__init__.py b/elasticapm/contrib/django/__init__.py --- a/elasticapm/contrib/django/__init__.py +++ b/elasticapm/contrib/django/__init__.py @@ -27,7 +27,9 @@ # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from django import VERSION as DJANGO_VERSION from elasticapm.contrib.django.client import * # noqa E401 -default_app_config = "elasticapm.contrib.django.apps.ElasticAPMConfig" +if DJANGO_VERSION < (3, 2): + default_app_config = "elasticapm.contrib.django.apps.ElasticAPMConfig"
{"golden_diff": "diff --git a/elasticapm/contrib/django/__init__.py b/elasticapm/contrib/django/__init__.py\n--- a/elasticapm/contrib/django/__init__.py\n+++ b/elasticapm/contrib/django/__init__.py\n@@ -27,7 +27,9 @@\n # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+from django import VERSION as DJANGO_VERSION\n \n from elasticapm.contrib.django.client import * # noqa E401\n \n-default_app_config = \"elasticapm.contrib.django.apps.ElasticAPMConfig\"\n+if DJANGO_VERSION < (3, 2):\n+ default_app_config = \"elasticapm.contrib.django.apps.ElasticAPMConfig\"\n", "issue": "RemovedInDjango41Warning\n**Describe the bug**: ...\r\n\r\n```shell\r\n../../../../.pyenv/versions/3.8.2/lib/python3.8/site-packages/django/apps/registry.py:91\r\n /Users/mingyu.wu/.pyenv/versions/3.8.2/lib/python3.8/site-packages/django/apps/registry.py:91: RemovedInDjango41Warning: 'elasticapm.contrib.django' defines default_app_config = 'elasticapm.contrib.django.apps.ElasticAPMConfig'. Django now detects this configuration automatically. You can remove default_app_config.\r\n app_config = AppConfig.create(entry)\r\n\r\n-- Docs: https://docs.pytest.org/en/stable/warnings.html\r\n```\r\n\r\n**To Reproduce**\r\n\r\n1. run unit test or start server\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux/MacOS\r\n- Python version:3.8.2\r\n- Framework and version: Django 3.2.2\r\n- APM Server version: *\r\n- Agent version: 6.1.3\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.contrib.django.client import * # noqa E401\n\ndefault_app_config = \"elasticapm.contrib.django.apps.ElasticAPMConfig\"\n", "path": "elasticapm/contrib/django/__init__.py"}]}
num_tokens_prompt: 1,216
num_tokens_diff: 203
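The fix gates `default_app_config` on the installed Django version: Django 3.2+ auto-detects a lone `AppConfig` subclass and warns, and 4.1 removes the attribute entirely. The same pattern applied to any app, with a placeholder dotted path:

```python
from django import VERSION as DJANGO_VERSION

if DJANGO_VERSION < (3, 2):
    # Only older Django needs the explicit pointer; newer versions
    # discover the AppConfig automatically and warn if this is set.
    default_app_config = "myapp.apps.MyAppConfig"  # placeholder path
```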
problem_id: gh_patches_debug_11882
source: rasdani/github-patches
task_type: git_diff
in_source_id: translate__pootle-6471
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: 'QuerySet' object has no attribute 'keys' The following appears when you first go to `/` and Pootle tries to redirect you to your language based on your Accept-Lang headers. Note this will set a cookie so you need to have cookies reset to replicate. ```python AttributeError: 'QuerySet' object has no attribute 'keys' File "pootle_app/views/index/index.py", line 94, in dispatch else self.languages).values_list('code', flat=True)) File "pootle/i18n/override.py", line 81, in get_lang_from_http_header for lang in supported.keys(): AttributeError: 'QuerySet' object has no attribute 'keys' ``` </issue> <code> [start of pootle/apps/pootle_app/views/index/index.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 from django.contrib.auth import REDIRECT_FIELD_NAME 10 from django.shortcuts import redirect 11 from django.urls import reverse 12 from django.utils.functional import cached_property 13 from django.utils.translation import get_language 14 from django.views.generic import TemplateView, View 15 16 from pootle.core.decorators import persistent_property 17 from pootle.core.delegate import revision, scores 18 from pootle.i18n.override import get_lang_from_http_header 19 from pootle_language.models import Language 20 from pootle_project.models import Project, ProjectSet 21 22 23 COOKIE_NAME = 'pootle-language' 24 25 26 class WelcomeView(TemplateView): 27 ns = "pootle.web.welcome" 28 template_name = "welcome.html" 29 30 @property 31 def revision(self): 32 return revision.get(self.project_set.directory.__class__)( 33 self.project_set.directory).get(key="stats") 34 35 @property 36 def cache_key(self): 37 return ( 38 "%s.%s.%s" 39 % (self.request.user.username, 40 self.revision, 41 self.request_lang)) 42 43 @cached_property 44 def project_set(self): 45 user_projects = Project.accessible_by_user(self.request.user) 46 user_projects = ( 47 Project.objects.for_user(self.request.user) 48 .filter(code__in=user_projects)) 49 return ProjectSet(user_projects) 50 51 @property 52 def request_lang(self): 53 return get_language() 54 55 @persistent_property 56 def score_data(self): 57 return scores.get(ProjectSet)( 58 self.project_set).display(language=self.request_lang) 59 60 def get_context_data(self, **kwargs): 61 context = super(WelcomeView, self).get_context_data(**kwargs) 62 context.update(dict(score_data=self.score_data)) 63 return context 64 65 66 class IndexView(View): 67 68 @property 69 def active_languages(self): 70 return Language.objects.filter( 71 translationproject__isnull=False, 72 translationproject__directory__obsolete=False) 73 74 @property 75 def all_languages(self): 76 return self.active_languages 77 78 @property 79 def languages(self): 80 return self.active_languages.filter( 81 translationproject__project__disabled=False) 82 83 def dispatch(self, request, *args, **kwargs): 84 if not request.user.is_authenticated: 85 ctx = { 86 'next': request.GET.get(REDIRECT_FIELD_NAME, '')} 87 return WelcomeView.as_view()(request, ctx) 88 lang = request.COOKIES.get(COOKIE_NAME, None) 89 if lang is None: 90 lang = get_lang_from_http_header( 91 request, 92 (self.all_languages 93 if request.user.is_superuser 94 else 
self.languages).values_list('code', flat=True)) 95 if lang is not None and lang not in ('projects', ''): 96 url = reverse('pootle-language-browse', args=[lang]) 97 else: 98 url = reverse('pootle-projects-browse') 99 # Preserve query strings 100 args = request.GET.urlencode() 101 qs = '?%s' % args if args else '' 102 redirect_url = '%s%s' % (url, qs) 103 return redirect(redirect_url) 104 105 106 class AboutView(TemplateView): 107 template_name = 'about.html' 108 109 def get_context_data(self, **kwargs): 110 from translate.__version__ import sver as toolkit_version 111 from pootle import __version__ 112 113 return { 114 'pootle_version': __version__, 115 'toolkit_version': toolkit_version, 116 } 117 [end of pootle/apps/pootle_app/views/index/index.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/apps/pootle_app/views/index/index.py b/pootle/apps/pootle_app/views/index/index.py --- a/pootle/apps/pootle_app/views/index/index.py +++ b/pootle/apps/pootle_app/views/index/index.py @@ -89,9 +89,9 @@ if lang is None: lang = get_lang_from_http_header( request, - (self.all_languages - if request.user.is_superuser - else self.languages).values_list('code', flat=True)) + dict((self.all_languages + if request.user.is_superuser + else self.languages).values_list('code', 'fullname'))) if lang is not None and lang not in ('projects', ''): url = reverse('pootle-language-browse', args=[lang]) else:
{"golden_diff": "diff --git a/pootle/apps/pootle_app/views/index/index.py b/pootle/apps/pootle_app/views/index/index.py\n--- a/pootle/apps/pootle_app/views/index/index.py\n+++ b/pootle/apps/pootle_app/views/index/index.py\n@@ -89,9 +89,9 @@\n if lang is None:\n lang = get_lang_from_http_header(\n request,\n- (self.all_languages\n- if request.user.is_superuser\n- else self.languages).values_list('code', flat=True))\n+ dict((self.all_languages\n+ if request.user.is_superuser\n+ else self.languages).values_list('code', 'fullname')))\n if lang is not None and lang not in ('projects', ''):\n url = reverse('pootle-language-browse', args=[lang])\n else:\n", "issue": "AttributeError: 'QuerySet' object has no attribute 'keys'\nThe following appears when you first go to `/` and Pootle tries to redirect you to your language based on your Accept-Lang headers. Note this will set a cookie so you need to have cookies reset to replicate.\r\n\r\n```python\r\nAttributeError: 'QuerySet' object has no attribute 'keys'\r\n File \"pootle_app/views/index/index.py\", line 94, in dispatch\r\n else self.languages).values_list('code', flat=True))\r\n File \"pootle/i18n/override.py\", line 81, in get_lang_from_http_header\r\n for lang in supported.keys():\r\n\r\nAttributeError: 'QuerySet' object has no attribute 'keys'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.contrib.auth import REDIRECT_FIELD_NAME\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import get_language\nfrom django.views.generic import TemplateView, View\n\nfrom pootle.core.decorators import persistent_property\nfrom pootle.core.delegate import revision, scores\nfrom pootle.i18n.override import get_lang_from_http_header\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project, ProjectSet\n\n\nCOOKIE_NAME = 'pootle-language'\n\n\nclass WelcomeView(TemplateView):\n ns = \"pootle.web.welcome\"\n template_name = \"welcome.html\"\n\n @property\n def revision(self):\n return revision.get(self.project_set.directory.__class__)(\n self.project_set.directory).get(key=\"stats\")\n\n @property\n def cache_key(self):\n return (\n \"%s.%s.%s\"\n % (self.request.user.username,\n self.revision,\n self.request_lang))\n\n @cached_property\n def project_set(self):\n user_projects = Project.accessible_by_user(self.request.user)\n user_projects = (\n Project.objects.for_user(self.request.user)\n .filter(code__in=user_projects))\n return ProjectSet(user_projects)\n\n @property\n def request_lang(self):\n return get_language()\n\n @persistent_property\n def score_data(self):\n return scores.get(ProjectSet)(\n self.project_set).display(language=self.request_lang)\n\n def get_context_data(self, **kwargs):\n context = super(WelcomeView, self).get_context_data(**kwargs)\n context.update(dict(score_data=self.score_data))\n return context\n\n\nclass IndexView(View):\n\n @property\n def active_languages(self):\n return Language.objects.filter(\n translationproject__isnull=False,\n translationproject__directory__obsolete=False)\n\n @property\n def all_languages(self):\n return self.active_languages\n\n @property\n def languages(self):\n return 
self.active_languages.filter(\n translationproject__project__disabled=False)\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_authenticated:\n ctx = {\n 'next': request.GET.get(REDIRECT_FIELD_NAME, '')}\n return WelcomeView.as_view()(request, ctx)\n lang = request.COOKIES.get(COOKIE_NAME, None)\n if lang is None:\n lang = get_lang_from_http_header(\n request,\n (self.all_languages\n if request.user.is_superuser\n else self.languages).values_list('code', flat=True))\n if lang is not None and lang not in ('projects', ''):\n url = reverse('pootle-language-browse', args=[lang])\n else:\n url = reverse('pootle-projects-browse')\n # Preserve query strings\n args = request.GET.urlencode()\n qs = '?%s' % args if args else ''\n redirect_url = '%s%s' % (url, qs)\n return redirect(redirect_url)\n\n\nclass AboutView(TemplateView):\n template_name = 'about.html'\n\n def get_context_data(self, **kwargs):\n from translate.__version__ import sver as toolkit_version\n from pootle import __version__\n\n return {\n 'pootle_version': __version__,\n 'toolkit_version': toolkit_version,\n }\n", "path": "pootle/apps/pootle_app/views/index/index.py"}]}
num_tokens_prompt: 1,749
num_tokens_diff: 184
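Why the one-line change works: `values_list('code', 'fullname')` yields 2-tuples, and wrapping them in `dict()` restores the `{code: fullname}` mapping whose `.keys()` the traceback shows `get_lang_from_http_header` iterating. A toy reproduction with made-up language pairs standing in for the queryset:

```python
pairs = [("fr", "French"), ("gl", "Galician")]  # stand-in for values_list(...)
supported = dict(pairs)

assert list(supported.keys()) == ["fr", "gl"]   # what the helper iterates
assert supported["gl"] == "Galician"
```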
problem_id: gh_patches_debug_10221
source: rasdani/github-patches
task_type: git_diff
in_source_id: data-for-change__anyway-164
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Move to postgres That's the database of choice hasadna uses <!--- @huboard:{"order":125.5,"milestone_order":81,"custom_state":""} --> </issue> <code> [start of config.py] 1 import os 2 3 # 4 # This is the configuration file of the application 5 # 6 # Please make sure you don't store here any secret information and use environment 7 # variables 8 # 9 10 11 SQLALCHEMY_DATABASE_URI = os.environ.get('CLEARDB_DATABASE_URL') 12 SQLALCHEMY_POOL_RECYCLE = 60 13 14 15 SECRET_KEY = 'aiosdjsaodjoidjioewnioewfnoeijfoisdjf' 16 17 FACEBOOK_KEY = "157028231131213" 18 FACEBOOK_SECRET = "0437ee70207dca46609219b990be0614" 19 [end of config.py] [start of mysqlshell.py] 1 #!/usr/bin/env python 2 import sys 3 import os 4 import urlparse 5 6 def main(): 7 DATABASE_URI = os.getenv('CLEARDB_DATABASE_URL') 8 9 if not DATABASE_URI: 10 print >>sys.stderr, 'Environment CLEARDB_DATABASE_URL not set' 11 sys.exit(1) 12 13 db = urlparse.urlparse(DATABASE_URI) 14 os.execlp('mysql', 'mysql', '-u', db.username, '-p' + db.password, '-h', db.hostname, db.path[1:]) 15 16 if __name__ == '__main__': 17 main() 18 [end of mysqlshell.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/config.py b/config.py --- a/config.py +++ b/config.py @@ -8,7 +8,7 @@ # -SQLALCHEMY_DATABASE_URI = os.environ.get('CLEARDB_DATABASE_URL') +SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') SQLALCHEMY_POOL_RECYCLE = 60 diff --git a/mysqlshell.py b/mysqlshell.py deleted file mode 100755 --- a/mysqlshell.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python -import sys -import os -import urlparse - -def main(): - DATABASE_URI = os.getenv('CLEARDB_DATABASE_URL') - - if not DATABASE_URI: - print >>sys.stderr, 'Environment CLEARDB_DATABASE_URL not set' - sys.exit(1) - - db = urlparse.urlparse(DATABASE_URI) - os.execlp('mysql', 'mysql', '-u', db.username, '-p' + db.password, '-h', db.hostname, db.path[1:]) - -if __name__ == '__main__': - main()
{"golden_diff": "diff --git a/config.py b/config.py\n--- a/config.py\n+++ b/config.py\n@@ -8,7 +8,7 @@\n #\n \n \n-SQLALCHEMY_DATABASE_URI = os.environ.get('CLEARDB_DATABASE_URL')\n+SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')\n SQLALCHEMY_POOL_RECYCLE = 60\n \n \ndiff --git a/mysqlshell.py b/mysqlshell.py\ndeleted file mode 100755\n--- a/mysqlshell.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-#!/usr/bin/env python\n-import sys\n-import os\n-import urlparse\n-\n-def main():\n- DATABASE_URI = os.getenv('CLEARDB_DATABASE_URL')\n-\n- if not DATABASE_URI:\n- print >>sys.stderr, 'Environment CLEARDB_DATABASE_URL not set'\n- sys.exit(1)\n-\n- db = urlparse.urlparse(DATABASE_URI)\n- os.execlp('mysql', 'mysql', '-u', db.username, '-p' + db.password, '-h', db.hostname, db.path[1:])\n-\n-if __name__ == '__main__':\n- main()\n", "issue": "Move to postgres\nThat's the database of choice hasadna uses\n\n<!---\n@huboard:{\"order\":125.5,\"milestone_order\":81,\"custom_state\":\"\"}\n-->\n\n", "before_files": [{"content": "import os\n\n#\n# This is the configuration file of the application\n#\n# Please make sure you don't store here any secret information and use environment\n# variables\n#\n\n\nSQLALCHEMY_DATABASE_URI = os.environ.get('CLEARDB_DATABASE_URL')\nSQLALCHEMY_POOL_RECYCLE = 60\n\n\nSECRET_KEY = 'aiosdjsaodjoidjioewnioewfnoeijfoisdjf'\n\nFACEBOOK_KEY = \"157028231131213\"\nFACEBOOK_SECRET = \"0437ee70207dca46609219b990be0614\"\n", "path": "config.py"}, {"content": "#!/usr/bin/env python\nimport sys\nimport os\nimport urlparse\n\ndef main():\n DATABASE_URI = os.getenv('CLEARDB_DATABASE_URL')\n\n if not DATABASE_URI:\n print >>sys.stderr, 'Environment CLEARDB_DATABASE_URL not set'\n sys.exit(1)\n\n db = urlparse.urlparse(DATABASE_URI)\n os.execlp('mysql', 'mysql', '-u', db.username, '-p' + db.password, '-h', db.hostname, db.path[1:])\n\nif __name__ == '__main__':\n main()\n", "path": "mysqlshell.py"}]}
907
243
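The golden diff above renames the connection variable to `DATABASE_URL` and deletes the MySQL-only `mysqlshell.py` helper outright. Purely for illustration — the patch adds no replacement — a Postgres counterpart of the deleted helper could reuse the same URL-parsing approach with `psql` and its `PGPASSWORD` convention; the file name `pgshell.py` and the use of `psql` are assumptions, not anything the repository ships:

```python
#!/usr/bin/env python
import os
import sys

try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse      # Python 2, as in the deleted mysqlshell.py

def main():
    database_uri = os.getenv('DATABASE_URL')
    if not database_uri:
        sys.stderr.write('Environment DATABASE_URL not set\n')
        sys.exit(1)
    db = urlparse(database_uri)
    # psql reads the password from PGPASSWORD rather than from argv.
    os.environ['PGPASSWORD'] = db.password or ''
    os.execlp('psql', 'psql', '-U', db.username, '-h', db.hostname,
              '-p', str(db.port or 5432), db.path[1:])

if __name__ == '__main__':
    main()
```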
gh_patches_debug_38347
rasdani/github-patches
git_diff
NVIDIA-Merlin__NVTabular-568
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fill Missing Op option to create a new binary column indicating the value was replaced. Fill Missing Op should have the option to create a new binary column indicating whether the column was filled or not for continuous variables. This is a common feature used when dealing with missing values of categoricals. </issue> <code> [start of nvtabular/ops/fill.py] 1 # 2 # Copyright (c) 2020, NVIDIA CORPORATION. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 # 16 import cudf 17 import dask_cudf 18 from nvtx import annotate 19 20 from .operator import ColumnNames, Operator 21 from .stat_operator import StatOperator 22 23 24 class FillMissing(Operator): 25 """ 26 This operation replaces missing values with a constant pre-defined value 27 28 Example usage:: 29 30 # Use FillMissing to define a workflow for continuous columns and specify the fill value 31 # Default is 0 32 cont_features = ['cont1', 'cont2', 'cont3'] >> ops.FillMissing() >> ... 33 processor = nvtabular.Workflow(cont_features) 34 35 Parameters 36 ----------- 37 fill_val : float, default 0 38 The constant value to replace missing values with. 39 """ 40 41 def __init__(self, fill_val=0): 42 super().__init__() 43 self.fill_val = fill_val 44 45 @annotate("FillMissing_op", color="darkgreen", domain="nvt_python") 46 def transform(self, columns, gdf: cudf.DataFrame) -> cudf.DataFrame: 47 return gdf[columns].fillna(self.fill_val) 48 49 transform.__doc__ = Operator.transform.__doc__ 50 51 52 class FillMedian(StatOperator): 53 """ 54 This operation replaces missing values with the median value for the column. 
55 56 Example usage:: 57 58 # Initialize the workflow 59 proc = nvt.Workflow( 60 cat_names=CATEGORICAL_COLUMNS, 61 cont_names=CONTINUOUS_COLUMNS, 62 label_name=LABEL_COLUMNS 63 ) 64 65 # Add FillMedian to the workflow for continuous columns 66 proc.add_cont_feature(nvt.ops.FillMedian()) 67 """ 68 69 def __init__(self): 70 super().__init__() 71 self.medians = {} 72 73 @annotate("FillMedian_transform", color="darkgreen", domain="nvt_python") 74 def transform(self, columns: ColumnNames, gdf: cudf.DataFrame) -> cudf.DataFrame: 75 if not self.medians: 76 raise RuntimeError("need to call 'fit' before running transform") 77 78 for col in columns: 79 gdf[col] = gdf[col].fillna(self.medians[col]) 80 return gdf 81 82 @annotate("FillMedian_fit", color="green", domain="nvt_python") 83 def fit(self, columns: ColumnNames, ddf: dask_cudf.DataFrame): 84 # TODO: Use `method="tidigest"` when crick supports device 85 dask_stats = ddf[columns].quantile(q=0.5, method="dask") 86 return dask_stats 87 88 @annotate("FillMedian_finalize", color="green", domain="nvt_python") 89 def fit_finalize(self, dask_stats): 90 for col in dask_stats.index.values_host: 91 self.medians[col] = float(dask_stats[col]) 92 93 transform.__doc__ = Operator.transform.__doc__ 94 fit.__doc__ = StatOperator.fit.__doc__ 95 fit_finalize.__doc__ = StatOperator.fit_finalize.__doc__ 96 97 def clear(self): 98 self.medians = {} 99 [end of nvtabular/ops/fill.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nvtabular/ops/fill.py b/nvtabular/ops/fill.py --- a/nvtabular/ops/fill.py +++ b/nvtabular/ops/fill.py @@ -36,18 +36,34 @@ ----------- fill_val : float, default 0 The constant value to replace missing values with. + add_binary_cols : boolean, default False + When True, adds binary columns that indicate whether cells in each column were filled """ - def __init__(self, fill_val=0): + def __init__(self, fill_val=0, add_binary_cols=False): super().__init__() self.fill_val = fill_val + self.add_binary_cols = add_binary_cols @annotate("FillMissing_op", color="darkgreen", domain="nvt_python") def transform(self, columns, gdf: cudf.DataFrame) -> cudf.DataFrame: - return gdf[columns].fillna(self.fill_val) + if self.add_binary_cols: + for col in columns: + gdf[f"{col}_filled"] = gdf[col].isna() + gdf[col] = gdf[col].fillna(self.fill_val) + else: + gdf[columns] = gdf[columns].fillna(self.fill_val) + + return gdf transform.__doc__ = Operator.transform.__doc__ + def output_column_names(self, columns: ColumnNames) -> ColumnNames: + output_cols = columns[:] + if self.add_binary_cols: + output_cols.extend([f"{col}_filled" for col in columns]) + return output_cols + class FillMedian(StatOperator): """ @@ -64,10 +80,16 @@ # Add FillMedian to the workflow for continuous columns proc.add_cont_feature(nvt.ops.FillMedian()) + + Parameters + ----------- + add_binary_cols : boolean, default False + When True, adds binary columns that indicate whether cells in each column were filled """ - def __init__(self): + def __init__(self, add_binary_cols=False): super().__init__() + self.add_binary_cols = add_binary_cols self.medians = {} @annotate("FillMedian_transform", color="darkgreen", domain="nvt_python") @@ -76,6 +98,8 @@ raise RuntimeError("need to call 'fit' before running transform") for col in columns: + if self.add_binary_cols: + gdf[f"{col}_filled"] = gdf[col].isna() gdf[col] = gdf[col].fillna(self.medians[col]) return gdf @@ -96,3 +120,9 @@ def clear(self): self.medians = {} + + def output_column_names(self, columns: ColumnNames) -> ColumnNames: + output_cols = columns[:] + if self.add_binary_cols: + output_cols.extend([f"{col}_filled" for col in columns]) + return output_cols
{"golden_diff": "diff --git a/nvtabular/ops/fill.py b/nvtabular/ops/fill.py\n--- a/nvtabular/ops/fill.py\n+++ b/nvtabular/ops/fill.py\n@@ -36,18 +36,34 @@\n -----------\n fill_val : float, default 0\n The constant value to replace missing values with.\n+ add_binary_cols : boolean, default False\n+ When True, adds binary columns that indicate whether cells in each column were filled\n \"\"\"\n \n- def __init__(self, fill_val=0):\n+ def __init__(self, fill_val=0, add_binary_cols=False):\n super().__init__()\n self.fill_val = fill_val\n+ self.add_binary_cols = add_binary_cols\n \n @annotate(\"FillMissing_op\", color=\"darkgreen\", domain=\"nvt_python\")\n def transform(self, columns, gdf: cudf.DataFrame) -> cudf.DataFrame:\n- return gdf[columns].fillna(self.fill_val)\n+ if self.add_binary_cols:\n+ for col in columns:\n+ gdf[f\"{col}_filled\"] = gdf[col].isna()\n+ gdf[col] = gdf[col].fillna(self.fill_val)\n+ else:\n+ gdf[columns] = gdf[columns].fillna(self.fill_val)\n+\n+ return gdf\n \n transform.__doc__ = Operator.transform.__doc__\n \n+ def output_column_names(self, columns: ColumnNames) -> ColumnNames:\n+ output_cols = columns[:]\n+ if self.add_binary_cols:\n+ output_cols.extend([f\"{col}_filled\" for col in columns])\n+ return output_cols\n+\n \n class FillMedian(StatOperator):\n \"\"\"\n@@ -64,10 +80,16 @@\n \n # Add FillMedian to the workflow for continuous columns\n proc.add_cont_feature(nvt.ops.FillMedian())\n+\n+ Parameters\n+ -----------\n+ add_binary_cols : boolean, default False\n+ When True, adds binary columns that indicate whether cells in each column were filled\n \"\"\"\n \n- def __init__(self):\n+ def __init__(self, add_binary_cols=False):\n super().__init__()\n+ self.add_binary_cols = add_binary_cols\n self.medians = {}\n \n @annotate(\"FillMedian_transform\", color=\"darkgreen\", domain=\"nvt_python\")\n@@ -76,6 +98,8 @@\n raise RuntimeError(\"need to call 'fit' before running transform\")\n \n for col in columns:\n+ if self.add_binary_cols:\n+ gdf[f\"{col}_filled\"] = gdf[col].isna()\n gdf[col] = gdf[col].fillna(self.medians[col])\n return gdf\n \n@@ -96,3 +120,9 @@\n \n def clear(self):\n self.medians = {}\n+\n+ def output_column_names(self, columns: ColumnNames) -> ColumnNames:\n+ output_cols = columns[:]\n+ if self.add_binary_cols:\n+ output_cols.extend([f\"{col}_filled\" for col in columns])\n+ return output_cols\n", "issue": "Fill Missing Op option to create a new binary column indicating the value was replaced.\nFill Missing Op should have the option to create a new binary column indicating whether the column was filled or not for continuous variables.\r\n\r\nThis is a common feature used when dealing with missing values of categoricals.\n", "before_files": [{"content": "#\n# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport cudf\nimport dask_cudf\nfrom nvtx import annotate\n\nfrom .operator import ColumnNames, Operator\nfrom .stat_operator import StatOperator\n\n\nclass FillMissing(Operator):\n \"\"\"\n This 
operation replaces missing values with a constant pre-defined value\n\n Example usage::\n\n # Use FillMissing to define a workflow for continuous columns and specify the fill value\n # Default is 0\n cont_features = ['cont1', 'cont2', 'cont3'] >> ops.FillMissing() >> ...\n processor = nvtabular.Workflow(cont_features)\n\n Parameters\n -----------\n fill_val : float, default 0\n The constant value to replace missing values with.\n \"\"\"\n\n def __init__(self, fill_val=0):\n super().__init__()\n self.fill_val = fill_val\n\n @annotate(\"FillMissing_op\", color=\"darkgreen\", domain=\"nvt_python\")\n def transform(self, columns, gdf: cudf.DataFrame) -> cudf.DataFrame:\n return gdf[columns].fillna(self.fill_val)\n\n transform.__doc__ = Operator.transform.__doc__\n\n\nclass FillMedian(StatOperator):\n \"\"\"\n This operation replaces missing values with the median value for the column.\n\n Example usage::\n\n # Initialize the workflow\n proc = nvt.Workflow(\n cat_names=CATEGORICAL_COLUMNS,\n cont_names=CONTINUOUS_COLUMNS,\n label_name=LABEL_COLUMNS\n )\n\n # Add FillMedian to the workflow for continuous columns\n proc.add_cont_feature(nvt.ops.FillMedian())\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.medians = {}\n\n @annotate(\"FillMedian_transform\", color=\"darkgreen\", domain=\"nvt_python\")\n def transform(self, columns: ColumnNames, gdf: cudf.DataFrame) -> cudf.DataFrame:\n if not self.medians:\n raise RuntimeError(\"need to call 'fit' before running transform\")\n\n for col in columns:\n gdf[col] = gdf[col].fillna(self.medians[col])\n return gdf\n\n @annotate(\"FillMedian_fit\", color=\"green\", domain=\"nvt_python\")\n def fit(self, columns: ColumnNames, ddf: dask_cudf.DataFrame):\n # TODO: Use `method=\"tidigest\"` when crick supports device\n dask_stats = ddf[columns].quantile(q=0.5, method=\"dask\")\n return dask_stats\n\n @annotate(\"FillMedian_finalize\", color=\"green\", domain=\"nvt_python\")\n def fit_finalize(self, dask_stats):\n for col in dask_stats.index.values_host:\n self.medians[col] = float(dask_stats[col])\n\n transform.__doc__ = Operator.transform.__doc__\n fit.__doc__ = StatOperator.fit.__doc__\n fit_finalize.__doc__ = StatOperator.fit_finalize.__doc__\n\n def clear(self):\n self.medians = {}\n", "path": "nvtabular/ops/fill.py"}]}
1,560
683
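The golden diff above makes the indicator opt-in through an `add_binary_cols` flag, names the new columns `<col>_filled`, and extends `output_column_names` accordingly. A usage sketch adapted from the operator's own docstring example; `cont1`/`cont2` are placeholder column names, and the flag assumes an NVTabular build that includes this patch:

```python
import nvtabular as nvt
from nvtabular import ops

# Any continuous columns work; these names are invented.
cont_features = (
    ['cont1', 'cont2']
    >> ops.FillMissing(fill_val=0, add_binary_cols=True)
)
workflow = nvt.Workflow(cont_features)

# After workflow.fit(...).transform(...), the output holds the filled
# columns plus boolean indicators: cont1, cont2, cont1_filled, cont2_filled.
```

Advertising the extra columns through `output_column_names` is what keeps the workflow graph consistent: without it, downstream operators would not know the `_filled` columns exist.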
gh_patches_debug_10704
rasdani/github-patches
git_diff
facebookresearch__fairseq-4805
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [fairseq] Guard call to `shape_as_tensor` with `is_in_onnx_export()` This is a no-op in eager and in ONNX export, but it's better for other tracers if this is preserved as shapes directly instead of converted to a tensor. There is a little annoying code duplication with `torch.jit.is_scripting()`, which is unforunately necessary because we didn't implement compile-time short circuiting correctly in TorchScript lol. </issue> <code> [start of fairseq/modules/sinusoidal_positional_embedding.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. 2 # 3 # This source code is licensed under the MIT license found in the 4 # LICENSE file in the root directory of this source tree. 5 6 import math 7 from typing import Any, Optional 8 9 import torch 10 import torch.onnx.operators 11 from fairseq import utils 12 from torch import Tensor, nn 13 14 15 class SinusoidalPositionalEmbedding(nn.Module): 16 """This module produces sinusoidal positional embeddings of any length. 17 18 Padding symbols are ignored. 19 """ 20 21 def __init__(self, embedding_dim, padding_idx, init_size=1024): 22 super().__init__() 23 self.embedding_dim = embedding_dim 24 self.padding_idx = padding_idx if padding_idx is not None else 0 25 self.weights = SinusoidalPositionalEmbedding.get_embedding( 26 init_size, embedding_dim, padding_idx 27 ) 28 self.onnx_trace = False 29 self.register_buffer("_float_tensor", torch.FloatTensor(1)) 30 self.max_positions = int(1e5) 31 32 def prepare_for_onnx_export_(self): 33 self.onnx_trace = True 34 35 @staticmethod 36 def get_embedding( 37 num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None 38 ): 39 """Build sinusoidal embeddings. 40 41 This matches the implementation in tensor2tensor, but differs slightly 42 from the description in Section 3.5 of "Attention Is All You Need". 
43 """ 44 half_dim = embedding_dim // 2 45 emb = math.log(10000) / (half_dim - 1) 46 emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) 47 emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze( 48 1 49 ) * emb.unsqueeze(0) 50 emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view( 51 num_embeddings, -1 52 ) 53 if embedding_dim % 2 == 1: 54 # zero pad 55 emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) 56 if padding_idx is not None: 57 emb[padding_idx, :] = 0 58 return emb 59 60 def forward( 61 self, 62 input, 63 incremental_state: Optional[Any] = None, 64 timestep: Optional[Tensor] = None, 65 positions: Optional[Any] = None, 66 ): 67 """Input is expected to be of size [bsz x seqlen].""" 68 if torch.jit.is_scripting(): 69 bspair = torch.onnx.operators.shape_as_tensor(input) 70 elif torch.onnx.is_in_onnx_export(): 71 bspair = torch.onnx.operators.shape_as_tensor(input) 72 else: 73 bspair = input.size() 74 bsz, seq_len = bspair[0], bspair[1] 75 max_pos = self.padding_idx + 1 + seq_len 76 if self.weights is None or max_pos > self.weights.size(0): 77 # recompute/expand embeddings if needed 78 self.weights = SinusoidalPositionalEmbedding.get_embedding( 79 max_pos, self.embedding_dim, self.padding_idx 80 ) 81 self.weights = self.weights.to(self._float_tensor) 82 83 if incremental_state is not None: 84 # positions is the same for every token when decoding a single step 85 pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len 86 if self.onnx_trace: 87 return ( 88 self.weights.index_select(index=self.padding_idx + pos, dim=0) 89 .unsqueeze(1) 90 .repeat(bsz, 1, 1) 91 ) 92 return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1) 93 94 positions = utils.make_positions( 95 input, self.padding_idx, onnx_trace=self.onnx_trace 96 ) 97 if self.onnx_trace: 98 flat_embeddings = self.weights.detach().index_select(0, positions.view(-1)) 99 embedding_shape = torch.cat( 100 (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long)) 101 ) 102 embeddings = torch.onnx.operators.reshape_from_tensor_shape( 103 flat_embeddings, embedding_shape 104 ) 105 return embeddings 106 return ( 107 self.weights.index_select(0, positions.view(-1)) 108 .view(bsz, seq_len, -1) 109 .detach() 110 ) 111 [end of fairseq/modules/sinusoidal_positional_embedding.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fairseq/modules/sinusoidal_positional_embedding.py b/fairseq/modules/sinusoidal_positional_embedding.py --- a/fairseq/modules/sinusoidal_positional_embedding.py +++ b/fairseq/modules/sinusoidal_positional_embedding.py @@ -97,7 +97,7 @@ if self.onnx_trace: flat_embeddings = self.weights.detach().index_select(0, positions.view(-1)) embedding_shape = torch.cat( - (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long)) + (bsz, seq_len, torch.tensor([-1], dtype=torch.long)) ) embeddings = torch.onnx.operators.reshape_from_tensor_shape( flat_embeddings, embedding_shape
{"golden_diff": "diff --git a/fairseq/modules/sinusoidal_positional_embedding.py b/fairseq/modules/sinusoidal_positional_embedding.py\n--- a/fairseq/modules/sinusoidal_positional_embedding.py\n+++ b/fairseq/modules/sinusoidal_positional_embedding.py\n@@ -97,7 +97,7 @@\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n- (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))\n+ (bsz, seq_len, torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n", "issue": "[fairseq] Guard call to `shape_as_tensor` with `is_in_onnx_export()`\nThis is a no-op in eager and in ONNX export, but it's better for other\ntracers if this is preserved as shapes directly instead of converted to\na tensor.\n\nThere is a little annoying code duplication with\n`torch.jit.is_scripting()`, which is unforunately necessary because we\ndidn't implement compile-time short circuiting correctly in TorchScript\nlol.\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Any, Optional\n\nimport torch\nimport torch.onnx.operators\nfrom fairseq import utils\nfrom torch import Tensor, nn\n\n\nclass SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n\n Padding symbols are ignored.\n \"\"\"\n\n def __init__(self, embedding_dim, padding_idx, init_size=1024):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.padding_idx = padding_idx if padding_idx is not None else 0\n self.weights = SinusoidalPositionalEmbedding.get_embedding(\n init_size, embedding_dim, padding_idx\n )\n self.onnx_trace = False\n self.register_buffer(\"_float_tensor\", torch.FloatTensor(1))\n self.max_positions = int(1e5)\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n @staticmethod\n def get_embedding(\n num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None\n ):\n \"\"\"Build sinusoidal embeddings.\n\n This matches the implementation in tensor2tensor, but differs slightly\n from the description in Section 3.5 of \"Attention Is All You Need\".\n \"\"\"\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)\n emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(\n 1\n ) * emb.unsqueeze(0)\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(\n num_embeddings, -1\n )\n if embedding_dim % 2 == 1:\n # zero pad\n emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)\n if padding_idx is not None:\n emb[padding_idx, :] = 0\n return emb\n\n def forward(\n self,\n input,\n incremental_state: Optional[Any] = None,\n timestep: Optional[Tensor] = None,\n positions: Optional[Any] = None,\n ):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n if torch.jit.is_scripting():\n bspair = torch.onnx.operators.shape_as_tensor(input)\n elif torch.onnx.is_in_onnx_export():\n bspair = torch.onnx.operators.shape_as_tensor(input)\n else:\n bspair = input.size()\n bsz, seq_len = bspair[0], bspair[1]\n max_pos = self.padding_idx + 1 + seq_len\n if self.weights is None or max_pos > self.weights.size(0):\n # recompute/expand embeddings if needed\n self.weights = 
SinusoidalPositionalEmbedding.get_embedding(\n max_pos, self.embedding_dim, self.padding_idx\n )\n self.weights = self.weights.to(self._float_tensor)\n\n if incremental_state is not None:\n # positions is the same for every token when decoding a single step\n pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len\n if self.onnx_trace:\n return (\n self.weights.index_select(index=self.padding_idx + pos, dim=0)\n .unsqueeze(1)\n .repeat(bsz, 1, 1)\n )\n return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)\n\n positions = utils.make_positions(\n input, self.padding_idx, onnx_trace=self.onnx_trace\n )\n if self.onnx_trace:\n flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))\n embedding_shape = torch.cat(\n (bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))\n )\n embeddings = torch.onnx.operators.reshape_from_tensor_shape(\n flat_embeddings, embedding_shape\n )\n return embeddings\n return (\n self.weights.index_select(0, positions.view(-1))\n .view(bsz, seq_len, -1)\n .detach()\n )\n", "path": "fairseq/modules/sinusoidal_positional_embedding.py"}]}
1,825
165
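The issue asks that shapes stay plain sizes outside scripting and ONNX export — including the duplicated `is_scripting()` branch it apologizes for — and the golden diff then drops the `.view(1)` calls that assumed the shapes were tensors. A self-contained sketch of the guarded-shape pattern (not fairseq's actual module):

```python
import torch
import torch.onnx.operators

def get_batch_and_seq(input: torch.Tensor):
    # Duplicated branches on purpose: TorchScript cannot short-circuit
    # `is_scripting() or is_in_onnx_export()` at compile time.
    if torch.jit.is_scripting():
        bspair = torch.onnx.operators.shape_as_tensor(input)
    elif torch.onnx.is_in_onnx_export():
        bspair = torch.onnx.operators.shape_as_tensor(input)
    else:
        bspair = input.size()  # plain ints: friendlier to other tracers
    return bspair[0], bspair[1]

bsz, seq_len = get_batch_and_seq(torch.zeros(4, 7))
assert (bsz, seq_len) == (4, 7)
```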
gh_patches_debug_8031
rasdani/github-patches
git_diff
ansible__ansible-22664
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> syslog_json callback doesn't cast SYSLOG_PORT value from environment as integer ##### ISSUE TYPE <!--- Pick one below and delete the rest: --> - Bug Report ##### COMPONENT NAME <!--- Name of the module/plugin/task/feature --> syslog_json callback plugin ##### ANSIBLE VERSION <!--- Paste verbatim output from “ansible --version” between quotes below --> ``` ansible 2.2.1.0 ``` ##### CONFIGURATION Specifying SYSLOG_PORT environment variable to target a non-default port: SYSLOG_PORT=1514 ##### OS / ENVIRONMENT Redhat 6 (but this should affect any UNIX platform) ##### SUMMARY The syslog_json callback plugin allows configuration of its target port by specifying the SYSLOG_PORT environment variable; however, it doesn't cast the value it obtains from get_env as int, so specifying a port number this way results in errors on each task and no syslog output: Traceback (most recent call last): File "/usr/lib64/python2.6/logging/handlers.py", line 806, in emit self.socket.sendto(msg, self.address) TypeError: an integer is required ##### STEPS TO REPRODUCE For any playbook, specify the syslog_json callback plugin and the syslog port environment variable: <!--- Paste example playbooks or commands between quotes below --> ```yaml ANSIBLE_STDOUT_CALLBACK=syslog_json SYSLOG_PORT=1514 ansible-playbook playbook.yml ``` <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> No output to stdout, JSON output directed to syslog for each task. ##### ACTUAL RESULTS <!--- What actually happened? If possible run with extra verbosity (-vvvv) --> <!--- Paste verbatim command output between quotes below --> ``` Traceback (most recent call last): File "/usr/lib64/python2.6/logging/handlers.py", line 806, in emit self.socket.sendto(msg, self.address) TypeError: an integer is required ``` </issue> <code> [start of lib/ansible/plugins/callback/syslog_json.py] 1 # Make coding more python3-ish 2 from __future__ import (absolute_import, division, print_function) 3 __metaclass__ = type 4 5 import os 6 import json 7 8 import logging 9 import logging.handlers 10 11 import socket 12 13 from ansible.plugins.callback import CallbackBase 14 15 class CallbackModule(CallbackBase): 16 """ 17 logs ansible-playbook and ansible runs to a syslog server in json format 18 make sure you have in ansible.cfg: 19 callback_plugins = <path_to_callback_plugins_folder> 20 and put the plugin in <path_to_callback_plugins_folder> 21 22 This plugin makes use of the following environment variables: 23 SYSLOG_SERVER (optional): defaults to localhost 24 SYSLOG_PORT (optional): defaults to 514 25 SYSLOG_FACILITY (optional): defaults to user 26 """ 27 CALLBACK_VERSION = 2.0 28 CALLBACK_TYPE = 'aggregate' 29 CALLBACK_NAME = 'syslog_json' 30 CALLBACK_NEEDS_WHITELIST = True 31 32 def __init__(self): 33 34 super(CallbackModule, self).__init__() 35 36 self.logger = logging.getLogger('ansible logger') 37 self.logger.setLevel(logging.DEBUG) 38 39 self.handler = logging.handlers.SysLogHandler( 40 address = (os.getenv('SYSLOG_SERVER','localhost'), 41 os.getenv('SYSLOG_PORT',514)), 42 facility= os.getenv('SYSLOG_FACILITY',logging.handlers.SysLogHandler.LOG_USER) 43 ) 44 self.logger.addHandler(self.handler) 45 self.hostname = socket.gethostname() 46 47 48 def runner_on_failed(self, host, res, ignore_errors=False): 49 self.logger.error('%s ansible-command: 
task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) 50 51 def runner_on_ok(self, host, res): 52 self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) 53 54 def runner_on_skipped(self, host, item=None): 55 self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname,host, 'skipped')) 56 57 def runner_on_unreachable(self, host, res): 58 self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) 59 60 def runner_on_async_failed(self, host, res, jid): 61 self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) 62 63 def playbook_on_import_for_host(self, host, imported_file): 64 self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s' % (self.hostname,host,imported_file)) 65 66 def playbook_on_not_import_for_host(self, host, missing_file): 67 self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s' % (self.hostname,host,missing_file)) 68 [end of lib/ansible/plugins/callback/syslog_json.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/ansible/plugins/callback/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py --- a/lib/ansible/plugins/callback/syslog_json.py +++ b/lib/ansible/plugins/callback/syslog_json.py @@ -38,7 +38,7 @@ self.handler = logging.handlers.SysLogHandler( address = (os.getenv('SYSLOG_SERVER','localhost'), - os.getenv('SYSLOG_PORT',514)), + int(os.getenv('SYSLOG_PORT',514))), facility= os.getenv('SYSLOG_FACILITY',logging.handlers.SysLogHandler.LOG_USER) ) self.logger.addHandler(self.handler)
{"golden_diff": "diff --git a/lib/ansible/plugins/callback/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py\n--- a/lib/ansible/plugins/callback/syslog_json.py\n+++ b/lib/ansible/plugins/callback/syslog_json.py\n@@ -38,7 +38,7 @@\n \n self.handler = logging.handlers.SysLogHandler(\n address = (os.getenv('SYSLOG_SERVER','localhost'),\n- os.getenv('SYSLOG_PORT',514)),\n+ int(os.getenv('SYSLOG_PORT',514))),\n facility= os.getenv('SYSLOG_FACILITY',logging.handlers.SysLogHandler.LOG_USER)\n )\n self.logger.addHandler(self.handler)\n", "issue": "syslog_json callback doesn't cast SYSLOG_PORT value from environment as integer\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest: -->\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\n<!--- Name of the module/plugin/task/feature -->\r\nsyslog_json callback plugin\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\r\n```\r\nansible 2.2.1.0\r\n```\r\n\r\n##### CONFIGURATION\r\nSpecifying SYSLOG_PORT environment variable to target a non-default port:\r\nSYSLOG_PORT=1514\r\n\r\n##### OS / ENVIRONMENT\r\nRedhat 6 (but this should affect any UNIX platform)\r\n\r\n##### SUMMARY\r\nThe syslog_json callback plugin allows configuration of its target port by specifying the SYSLOG_PORT environment variable; however, it doesn't cast the value it obtains from get_env as int, so specifying a port number this way results in errors on each task and no syslog output:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python2.6/logging/handlers.py\", line 806, in emit\r\n self.socket.sendto(msg, self.address)\r\nTypeError: an integer is required\r\n\r\n##### STEPS TO REPRODUCE\r\nFor any playbook, specify the syslog_json callback plugin and the syslog port environment variable:\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\nANSIBLE_STDOUT_CALLBACK=syslog_json SYSLOG_PORT=1514 ansible-playbook playbook.yml\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nNo output to stdout, JSON output directed to syslog for each task.\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? 
If possible run with extra verbosity (-vvvv) -->\r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python2.6/logging/handlers.py\", line 806, in emit\r\n self.socket.sendto(msg, self.address)\r\nTypeError: an integer is required\r\n```\r\n\n", "before_files": [{"content": "# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport json\n\nimport logging\nimport logging.handlers\n\nimport socket\n\nfrom ansible.plugins.callback import CallbackBase\n\nclass CallbackModule(CallbackBase):\n \"\"\"\n logs ansible-playbook and ansible runs to a syslog server in json format\n make sure you have in ansible.cfg:\n callback_plugins = <path_to_callback_plugins_folder>\n and put the plugin in <path_to_callback_plugins_folder>\n\n This plugin makes use of the following environment variables:\n SYSLOG_SERVER (optional): defaults to localhost\n SYSLOG_PORT (optional): defaults to 514\n SYSLOG_FACILITY (optional): defaults to user\n \"\"\"\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = 'aggregate'\n CALLBACK_NAME = 'syslog_json'\n CALLBACK_NEEDS_WHITELIST = True\n\n def __init__(self):\n\n super(CallbackModule, self).__init__()\n\n self.logger = logging.getLogger('ansible logger')\n self.logger.setLevel(logging.DEBUG)\n\n self.handler = logging.handlers.SysLogHandler(\n address = (os.getenv('SYSLOG_SERVER','localhost'),\n os.getenv('SYSLOG_PORT',514)),\n facility= os.getenv('SYSLOG_FACILITY',logging.handlers.SysLogHandler.LOG_USER)\n )\n self.logger.addHandler(self.handler)\n self.hostname = socket.gethostname()\n\n\n def runner_on_failed(self, host, res, ignore_errors=False):\n self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))\n\n def runner_on_ok(self, host, res):\n self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))\n\n def runner_on_skipped(self, host, item=None):\n self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname,host, 'skipped'))\n\n def runner_on_unreachable(self, host, res):\n self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))\n\n def runner_on_async_failed(self, host, res, jid):\n self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res)))\n\n def playbook_on_import_for_host(self, host, imported_file):\n self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s' % (self.hostname,host,imported_file))\n\n def playbook_on_not_import_for_host(self, host, missing_file):\n self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s' % (self.hostname,host,missing_file))\n", "path": "lib/ansible/plugins/callback/syslog_json.py"}]}
1,792
141
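The traceback reduces to `SysLogHandler` requiring an `int` port while `os.getenv` hands back a string; the golden diff fixes it with a single `int(...)` cast. A standalone sketch of the corrected wiring — the logger name and test message are placeholders:

```python
import logging
import logging.handlers
import os

handler = logging.handlers.SysLogHandler(
    address=(os.getenv('SYSLOG_SERVER', 'localhost'),
             int(os.getenv('SYSLOG_PORT', 514))),  # the cast is the fix
    facility=os.getenv('SYSLOG_FACILITY',
                       logging.handlers.SysLogHandler.LOG_USER),
)
logger = logging.getLogger('syslog_json_demo')
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.info('SYSLOG_PORT=1514 now reaches the handler as an integer port')
```

Since the handler speaks UDP by default, the send succeeds even without a listener; the original bug only surfaced once `sendto` rejected the string port.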
gh_patches_debug_26977
rasdani/github-patches
git_diff
dotkom__onlineweb4-781
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Registration form needs help text To be a valid online user the user must register with their student emails. This is not indicated in the registration form. </issue> <code> [start of apps/authentication/forms.py] 1 # -*- coding: utf-8 -*- 2 3 import datetime 4 import re 5 6 from django import forms 7 from django.contrib import auth 8 from django.utils.translation import ugettext as _ 9 10 from apps.authentication.models import OnlineUser as User, Email 11 12 class LoginForm(forms.Form): 13 username = forms.CharField(widget=forms.TextInput(), label=_("Brukernavn"), max_length=50) 14 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Passord")) 15 user = None 16 17 def clean(self): 18 if self._errors: 19 return 20 21 user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password']) 22 23 if user: 24 if user.is_active: 25 self.user = user 26 else: 27 self._errors['username'] = self.error_class([_(u"Din konto er ikke aktiv. Forsøk gjenoppretning av passord.")]) 28 else: 29 self._errors['username'] = self.error_class([_(u"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.")]) 30 return self.cleaned_data 31 32 def login(self, request): 33 try: 34 User.objects.get(username=request.POST['username']) 35 except: 36 return False 37 if self.is_valid(): 38 auth.login(request, self.user) 39 return True 40 return False 41 42 class RegisterForm(forms.Form): 43 username = forms.CharField(label=_("Brukernavn"), max_length=20) 44 first_name = forms.CharField(label=_("Fornavn"), max_length=50) 45 last_name = forms.CharField(label=_("Etternavn"), max_length=50) 46 email = forms.EmailField(label=_("Epost"), max_length=50) 47 password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Passord")) 48 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Gjenta passord")) 49 address = forms.CharField(label=_("Adresse"), max_length=100, required=False) 50 zip_code = forms.CharField(label=_("Postnummer"), max_length=4, required=False) 51 phone = forms.CharField(label=_("Telefon"), max_length=20, required=False) 52 53 def clean(self): 54 super(RegisterForm, self).clean() 55 if self.is_valid(): 56 cleaned_data = self.cleaned_data 57 58 # Check passwords 59 if cleaned_data['password'] != cleaned_data['repeat_password']: 60 self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")]) 61 62 # Check username 63 username = cleaned_data['username'] 64 if User.objects.filter(username=username).count() > 0: 65 self._errors['username'] = self.error_class([_(u"Brukernavnet er allerede registrert.")]) 66 if not re.match("^[a-zA-Z0-9_-]+$", username): 67 self._errors['username'] = self.error_class([_(u"Ditt brukernavn inneholdt ulovlige tegn. 
Lovlige tegn: a-Z 0-9 - _")]) 68 69 # Check email 70 email = cleaned_data['email'].lower() 71 if Email.objects.filter(email=email).count() > 0: 72 self._errors['email'] = self.error_class([_(u"Det fins allerede en bruker med denne epostadressen.")]) 73 74 # ZIP code digits only 75 zip_code = cleaned_data['zip_code'] 76 if len(zip_code) != 0: 77 if len(zip_code) != 4 or not zip_code.isdigit(): 78 self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")]) 79 80 return cleaned_data 81 82 class RecoveryForm(forms.Form): 83 email = forms.EmailField(label="Email", max_length=50) 84 85 class ChangePasswordForm(forms.Form): 86 new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Nytt passord")) 87 repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u"Gjenta passord")) 88 89 def clean(self): 90 super(ChangePasswordForm, self).clean() 91 if self.is_valid(): 92 cleaned_data = self.cleaned_data 93 94 # Check passwords 95 if cleaned_data['new_password'] != cleaned_data['repeat_password']: 96 self._errors['repeat_password'] = self.error_class([_(u"Passordene er ikke like.")]) 97 98 return cleaned_data 99 100 101 class NewEmailForm(forms.Form): 102 new_email = forms.EmailField(label=_(u"ny epostadresse")) 103 [end of apps/authentication/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py --- a/apps/authentication/forms.py +++ b/apps/authentication/forms.py @@ -40,14 +40,14 @@ return False class RegisterForm(forms.Form): - username = forms.CharField(label=_("Brukernavn"), max_length=20) - first_name = forms.CharField(label=_("Fornavn"), max_length=50) + username = forms.CharField(label=_("Brukernavn"), max_length=20, help_text=u'Valgfritt brukernavn') + first_name = forms.CharField(label=_("Fornavn"), max_length=50, help_text=u'Mellomnavn inkluderer du etter fornavnet ditt') last_name = forms.CharField(label=_("Etternavn"), max_length=50) - email = forms.EmailField(label=_("Epost"), max_length=50) + email = forms.EmailField(label=_("Epost"), max_length=50, help_text=u'Du kan legge til flere epostadresser senere i din profil.') password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Passord")) repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_("Gjenta passord")) - address = forms.CharField(label=_("Adresse"), max_length=100, required=False) - zip_code = forms.CharField(label=_("Postnummer"), max_length=4, required=False) + address = forms.CharField(label=_("Adresse"), max_length=100, required=False, widget=forms.Textarea(attrs={'rows':3})) + zip_code = forms.CharField(label=_("Postnummer"), max_length=4, required=False, help_text=u'Vi henter by basert på postnummer') phone = forms.CharField(label=_("Telefon"), max_length=20, required=False) def clean(self):
{"golden_diff": "diff --git a/apps/authentication/forms.py b/apps/authentication/forms.py\n--- a/apps/authentication/forms.py\n+++ b/apps/authentication/forms.py\n@@ -40,14 +40,14 @@\n return False\n \n class RegisterForm(forms.Form):\n- username = forms.CharField(label=_(\"Brukernavn\"), max_length=20)\n- first_name = forms.CharField(label=_(\"Fornavn\"), max_length=50)\n+ username = forms.CharField(label=_(\"Brukernavn\"), max_length=20, help_text=u'Valgfritt brukernavn')\n+ first_name = forms.CharField(label=_(\"Fornavn\"), max_length=50, help_text=u'Mellomnavn inkluderer du etter fornavnet ditt')\n last_name = forms.CharField(label=_(\"Etternavn\"), max_length=50)\n- email = forms.EmailField(label=_(\"Epost\"), max_length=50)\n+ email = forms.EmailField(label=_(\"Epost\"), max_length=50, help_text=u'Du kan legge til flere epostadresser senere i din profil.')\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Gjenta passord\"))\n- address = forms.CharField(label=_(\"Adresse\"), max_length=100, required=False)\n- zip_code = forms.CharField(label=_(\"Postnummer\"), max_length=4, required=False)\n+ address = forms.CharField(label=_(\"Adresse\"), max_length=100, required=False, widget=forms.Textarea(attrs={'rows':3}))\n+ zip_code = forms.CharField(label=_(\"Postnummer\"), max_length=4, required=False, help_text=u'Vi henter by basert p\u00e5 postnummer')\n phone = forms.CharField(label=_(\"Telefon\"), max_length=20, required=False)\n \n def clean(self):\n", "issue": "Registration form needs help text\nTo be a valid online user the user must register with their student emails. This is not indicated in the registration form.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser as User, Email\n\nclass LoginForm(forms.Form):\n username = forms.CharField(widget=forms.TextInput(), label=_(\"Brukernavn\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Passord\"))\n user = None\n\n def clean(self):\n if self._errors:\n return\n \n user = auth.authenticate(username=self.cleaned_data['username'], password=self.cleaned_data['password'])\n\n if user:\n if user.is_active:\n self.user = user\n else:\n self._errors['username'] = self.error_class([_(u\"Din konto er ikke aktiv. 
Fors\u00f8k gjenoppretning av passord.\")])\n else:\n self._errors['username'] = self.error_class([_(u\"Kontoen eksisterer ikke, eller kombinasjonen av brukernavn og passord er feil.\")])\n return self.cleaned_data\n\n def login(self, request):\n try:\n User.objects.get(username=request.POST['username'])\n except:\n return False\n if self.is_valid():\n auth.login(request, self.user)\n return True\n return False\n\nclass RegisterForm(forms.Form):\n username = forms.CharField(label=_(\"Brukernavn\"), max_length=20)\n first_name = forms.CharField(label=_(\"Fornavn\"), max_length=50)\n last_name = forms.CharField(label=_(\"Etternavn\"), max_length=50)\n email = forms.EmailField(label=_(\"Epost\"), max_length=50)\n password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(\"Gjenta passord\"))\n address = forms.CharField(label=_(\"Adresse\"), max_length=100, required=False)\n zip_code = forms.CharField(label=_(\"Postnummer\"), max_length=4, required=False)\n phone = forms.CharField(label=_(\"Telefon\"), max_length=20, required=False)\n \n def clean(self):\n super(RegisterForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n # Check username\n username = cleaned_data['username']\n if User.objects.filter(username=username).count() > 0:\n self._errors['username'] = self.error_class([_(u\"Brukernavnet er allerede registrert.\")])\n if not re.match(\"^[a-zA-Z0-9_-]+$\", username):\n self._errors['username'] = self.error_class([_(u\"Ditt brukernavn inneholdt ulovlige tegn. Lovlige tegn: a-Z 0-9 - _\")])\n\n # Check email\n email = cleaned_data['email'].lower()\n if Email.objects.filter(email=email).count() > 0:\n self._errors['email'] = self.error_class([_(u\"Det fins allerede en bruker med denne epostadressen.\")])\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 0:\n if len(zip_code) != 4 or not zip_code.isdigit():\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data \n\nclass RecoveryForm(forms.Form):\n email = forms.EmailField(label=\"Email\", max_length=50)\n\nclass ChangePasswordForm(forms.Form):\n new_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Nytt passord\"))\n repeat_password = forms.CharField(widget=forms.PasswordInput(render_value=False), label=_(u\"Gjenta passord\"))\n\n def clean(self):\n super(ChangePasswordForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # Check passwords\n if cleaned_data['new_password'] != cleaned_data['repeat_password']:\n self._errors['repeat_password'] = self.error_class([_(u\"Passordene er ikke like.\")])\n\n return cleaned_data\n\n\nclass NewEmailForm(forms.Form):\n new_email = forms.EmailField(label=_(u\"ny epostadresse\"))\n", "path": "apps/authentication/forms.py"}]}
1,745
403
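The golden diff attaches Norwegian `help_text` to the registration fields (roughly: the username is free-form, middle names go after the first name, further email addresses can be added later in the profile, and the city is derived from the postal code). A reduced English sketch of the same Django `help_text` pattern; the student-email wording is an assumption taken from the issue text, not from the patch itself:

```python
from django import forms

class RegisterForm(forms.Form):
    username = forms.CharField(max_length=20, help_text='Optional username')
    email = forms.EmailField(
        max_length=50,
        # The issue: users must register with their student email,
        # and nothing in the form said so before this change.
        help_text='Use your student email address to become a valid member',
    )
    zip_code = forms.CharField(
        max_length=4, required=False,
        help_text='City is looked up from the postal code',
    )
```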