Dataset schema (one field per record; ranges are the observed min-max across rows):

  problem_id          string   (18-22 chars)
  source              string   (1 distinct value)
  task_type           string   (1 distinct value)
  in_source_id        string   (13-58 chars)
  prompt              string   (1.71k-9.01k chars)
  golden_diff         string   (151-4.94k chars)
  verification_info   string   (465-11.3k chars)
  num_tokens_prompt   int64    (557-2.05k)
  num_tokens_diff     int64    (48-1.02k)
gh_patches_debug_14335
rasdani/github-patches
git_diff
web2py__web2py-2099
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Extend RConn to be able to connect to different Redis servers from within the same web2py application Right now it's not possible to connect to different Redis servers from within the same web2py application. Taking a look at the [code of RConn class](https://github.com/web2py/web2py/blob/f06c60b963a373f661e3bb09d5af49d2098902ec/gluon/contrib/redis_utils.py#L39), you can see that the first established connection made to a Redis server is linked to the current web2py application. And subsequent calls to RConn from within that web2py application will return the first created connection, no matter what the connection parameters are. This is a problem if you need to connect to different Redis servers from within the same web2py application. Notice this is also a problem if some of the connection arguments change (host, port, password, etc). I'm not sure what the reason is for always returning the first established connection, but I think a couple of fixes could be done in order to avoid these issues. I'll prepare a pull request with a proposal. </issue> <code> [start of gluon/contrib/redis_utils.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """ 4 Developed by [email protected] 5 License MIT/BSD/GPL 6 7 Serves as base to implement Redis connection object and various utils 8 for redis_cache, redis_session and redis_scheduler in the future 9 Should-could be overriden in case redis doesn't keep up (e.g. cluster support) 10 to ensure compatibility with another - similar - library 11 """ 12 13 import logging 14 from threading import Lock 15 import time 16 from gluon import current 17 18 logger = logging.getLogger("web2py.redis_utils") 19 20 try: 21 import redis 22 from redis.exceptions import WatchError as RWatchError 23 from redis.exceptions import ConnectionError as RConnectionError 24 except ImportError: 25 logger.error("Needs redis library to work") 26 raise RuntimeError('Needs redis library to work') 27 28 29 locker = Lock() 30 31 32 def RConn(*args, **vars): 33 """ 34 Istantiates a StrictRedis connection with parameters, at the first time 35 only 36 """ 37 locker.acquire() 38 try: 39 instance_name = 'redis_conn_' + current.request.application 40 if not hasattr(RConn, instance_name): 41 setattr(RConn, instance_name, redis.StrictRedis(*args, **vars)) 42 return getattr(RConn, instance_name) 43 finally: 44 locker.release() 45 46 def acquire_lock(conn, lockname, identifier, ltime=10): 47 while True: 48 if conn.set(lockname, identifier, ex=ltime, nx=True): 49 return identifier 50 time.sleep(.01) 51 52 53 _LUA_RELEASE_LOCK = """ 54 if redis.call("get", KEYS[1]) == ARGV[1] 55 then 56 return redis.call("del", KEYS[1]) 57 else 58 return 0 59 end 60 """ 61 62 63 def release_lock(instance, lockname, identifier): 64 return instance._release_script( 65 keys=[lockname], args=[identifier]) 66 67 68 def register_release_lock(conn): 69 rtn = conn.register_script(_LUA_RELEASE_LOCK) 70 return rtn 71 [end of gluon/contrib/redis_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gluon/contrib/redis_utils.py b/gluon/contrib/redis_utils.py --- a/gluon/contrib/redis_utils.py +++ b/gluon/contrib/redis_utils.py @@ -29,14 +29,16 @@ locker = Lock() -def RConn(*args, **vars): +def RConn(application=None, *args, **vars): """ Istantiates a StrictRedis connection with parameters, at the first time only """ locker.acquire() try: - instance_name = 'redis_conn_' + current.request.application + if application is None: + application = current.request.application + instance_name = 'redis_conn_' + application if not hasattr(RConn, instance_name): setattr(RConn, instance_name, redis.StrictRedis(*args, **vars)) return getattr(RConn, instance_name)
{"golden_diff": "diff --git a/gluon/contrib/redis_utils.py b/gluon/contrib/redis_utils.py\n--- a/gluon/contrib/redis_utils.py\n+++ b/gluon/contrib/redis_utils.py\n@@ -29,14 +29,16 @@\n locker = Lock()\n \n \n-def RConn(*args, **vars):\n+def RConn(application=None, *args, **vars):\n \"\"\"\n Istantiates a StrictRedis connection with parameters, at the first time\n only\n \"\"\"\n locker.acquire()\n try:\n- instance_name = 'redis_conn_' + current.request.application\n+ if application is None:\n+ application = current.request.application\n+ instance_name = 'redis_conn_' + application\n if not hasattr(RConn, instance_name):\n setattr(RConn, instance_name, redis.StrictRedis(*args, **vars))\n return getattr(RConn, instance_name)\n", "issue": "Extend RConn to be able to connect to different Redis servers from within the same web2py application\nRight now it's not possible to connect to different Redis servers from within the same web2py application. Taking a look at the [code of RConn class](https://github.com/web2py/web2py/blob/f06c60b963a373f661e3bb09d5af49d2098902ec/gluon/contrib/redis_utils.py#L39), you can see that the first stablished connection made to a Redis server is linked to the current web2py application. And subsequent calls to RConn from within that web2py application will return the first created connection, no matter what the connection parameters are.\r\n\r\nThis is a problem if you need to connect to different Redis servers from within the same web2py application. Notice this is also a problem if some of the connection arguments change (host, port, password, etc). \r\n\r\nI'm not shure what's the reason for returning always the first stablished connection, but I think a couple of fixes could be done in order to avoid this issues. I'll prepare a pull request with a proposal. \r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDeveloped by [email protected]\nLicense MIT/BSD/GPL\n\nServes as base to implement Redis connection object and various utils\nfor redis_cache, redis_session and redis_scheduler in the future\nShould-could be overriden in case redis doesn't keep up (e.g. 
cluster support)\nto ensure compatibility with another - similar - library\n\"\"\"\n\nimport logging\nfrom threading import Lock\nimport time\nfrom gluon import current\n\nlogger = logging.getLogger(\"web2py.redis_utils\")\n\ntry:\n import redis\n from redis.exceptions import WatchError as RWatchError\n from redis.exceptions import ConnectionError as RConnectionError\nexcept ImportError:\n logger.error(\"Needs redis library to work\")\n raise RuntimeError('Needs redis library to work')\n\n\nlocker = Lock()\n\n\ndef RConn(*args, **vars):\n \"\"\"\n Istantiates a StrictRedis connection with parameters, at the first time\n only\n \"\"\"\n locker.acquire()\n try:\n instance_name = 'redis_conn_' + current.request.application\n if not hasattr(RConn, instance_name):\n setattr(RConn, instance_name, redis.StrictRedis(*args, **vars))\n return getattr(RConn, instance_name)\n finally:\n locker.release()\n\ndef acquire_lock(conn, lockname, identifier, ltime=10):\n while True:\n if conn.set(lockname, identifier, ex=ltime, nx=True):\n return identifier\n time.sleep(.01)\n\n\n_LUA_RELEASE_LOCK = \"\"\"\nif redis.call(\"get\", KEYS[1]) == ARGV[1]\nthen\n return redis.call(\"del\", KEYS[1])\nelse\n return 0\nend\n\"\"\"\n\n\ndef release_lock(instance, lockname, identifier):\n return instance._release_script(\n keys=[lockname], args=[identifier])\n\n\ndef register_release_lock(conn):\n rtn = conn.register_script(_LUA_RELEASE_LOCK)\n return rtn\n", "path": "gluon/contrib/redis_utils.py"}]}
1,365
197
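For context on the record above: the golden diff keys each cached connection by an explicit `application` name instead of always using `current.request.application`. A minimal usage sketch of the patched function, assuming the patched `gluon.contrib.redis_utils` is importable (i.e., inside a web2py environment); the key names and Redis hostnames are made up for illustration:

```python
from gluon.contrib.redis_utils import RConn  # patched version sketched above

# Each distinct `application` key caches its own StrictRedis instance, so one
# web2py app can now hold connections to two different Redis servers.
cache_conn = RConn("myapp_cache", host="redis-cache.local", port=6379)
session_conn = RConn("myapp_sessions", host="redis-sessions.local", port=6380)

assert cache_conn is not session_conn
# Repeat calls with the same key return the already-cached connection; passing
# `application` explicitly also avoids touching current.request entirely.
assert RConn("myapp_cache") is cache_conn
```

Note the patch keeps the original caching behavior per key: connection parameters passed on a second call with the same `application` value are still ignored in favor of the cached instance.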
gh_patches_debug_11402
rasdani/github-patches
git_diff
xorbitsai__inference-777
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> XINFERENCE_HOME environment variable problem hi, I have set the XINFERENCE_HOME environment variable, but when I look in the specified directory, the models inside are all symlinks. What is the reason for this? Thanks! ![image](https://github.com/xorbitsai/inference/assets/9452272/2dade7e0-b1b7-45e1-b1fa-a0ca52ef18e4) </issue> <code> [start of xinference/constants.py] 1 # Copyright 2022-2023 XProbe Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 from pathlib import Path 17 18 XINFERENCE_ENV_ENDPOINT = "XINFERENCE_ENDPOINT" 19 XINFERENCE_ENV_MODEL_SRC = "XINFERENCE_MODEL_SRC" 20 XINFERENCE_ENV_HOME_PATH = "XINFERENCE_HOME" 21 XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS = "XINFERENCE_HEALTH_CHECK_ATTEMPTS" 22 XINFERENCE_ENV_HEALTH_CHECK_INTERVAL = "XINFERENCE_HEALTH_CHECK_INTERVAL" 23 XINFERENCE_ENV_DISABLE_VLLM = "XINFERENCE_DISABLE_VLLM" 24 25 26 def get_xinference_home(): 27 return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / ".xinference")) 28 29 30 XINFERENCE_HOME = get_xinference_home() 31 XINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, "cache") 32 XINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, "model") 33 XINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, "logs") 34 XINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, "image") 35 36 XINFERENCE_DEFAULT_LOCAL_HOST = "127.0.0.1" 37 XINFERENCE_DEFAULT_DISTRIBUTED_HOST = "0.0.0.0" 38 XINFERENCE_DEFAULT_ENDPOINT_PORT = 9997 39 XINFERENCE_DEFAULT_LOG_FILE_NAME = "xinference.log" 40 XINFERENCE_LOG_MAX_BYTES = 100 * 1024 * 1024 41 XINFERENCE_LOG_BACKUP_COUNT = 30 42 XINFERENCE_HEALTH_CHECK_ATTEMPTS = int( 43 os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS, 3) 44 ) 45 XINFERENCE_HEALTH_CHECK_INTERVAL = int( 46 os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_INTERVAL, 3) 47 ) 48 XINFERENCE_DISABLE_VLLM = bool(int(os.environ.get(XINFERENCE_ENV_DISABLE_VLLM, 0))) 49 [end of xinference/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xinference/constants.py b/xinference/constants.py --- a/xinference/constants.py +++ b/xinference/constants.py @@ -23,8 +23,15 @@ XINFERENCE_ENV_DISABLE_VLLM = "XINFERENCE_DISABLE_VLLM" -def get_xinference_home(): - return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / ".xinference")) +def get_xinference_home() -> str: + home_path = os.environ.get(XINFERENCE_ENV_HOME_PATH) + if home_path is None: + home_path = str(Path.home() / ".xinference") + else: + # if user has already set `XINFERENCE_HOME` env, change huggingface and modelscope default download path + os.environ["HUGGINGFACE_HUB_CACHE"] = os.path.join(home_path, "huggingface") + os.environ["MODELSCOPE_CACHE"] = os.path.join(home_path, "modelscope") + return home_path XINFERENCE_HOME = get_xinference_home()
{"golden_diff": "diff --git a/xinference/constants.py b/xinference/constants.py\n--- a/xinference/constants.py\n+++ b/xinference/constants.py\n@@ -23,8 +23,15 @@\n XINFERENCE_ENV_DISABLE_VLLM = \"XINFERENCE_DISABLE_VLLM\"\n \n \n-def get_xinference_home():\n- return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / \".xinference\"))\n+def get_xinference_home() -> str:\n+ home_path = os.environ.get(XINFERENCE_ENV_HOME_PATH)\n+ if home_path is None:\n+ home_path = str(Path.home() / \".xinference\")\n+ else:\n+ # if user has already set `XINFERENCE_HOME` env, change huggingface and modelscope default download path\n+ os.environ[\"HUGGINGFACE_HUB_CACHE\"] = os.path.join(home_path, \"huggingface\")\n+ os.environ[\"MODELSCOPE_CACHE\"] = os.path.join(home_path, \"modelscope\")\n+ return home_path\n \n \n XINFERENCE_HOME = get_xinference_home()\n", "issue": "XINFERENCE_HOME\u73af\u5883\u53d8\u91cf\u95ee\u9898\nhi , \u6211\u8fd9\u8fb9\u8bbe\u7f6e\u4e86XINFERENCE_HOME\u73af\u5883\u53d8\u91cf\uff0c\u4f46\u662f\u53bb\u6307\u5b9a\u7684\u76ee\u5f55\u4e0b\u770b\u5230\u91cc\u9762\u7684\u6a21\u578b\u90fd\u662f\u8f6f\u8fde\u63a5\uff0c\u8fd9\u662f\u4ec0\u4e48\u539f\u56e0\uff0c\u8c22\u8c22!\r\n\r\n![image](https://github.com/xorbitsai/inference/assets/9452272/2dade7e0-b1b7-45e1-b1fa-a0ca52ef18e4)\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom pathlib import Path\n\nXINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\nXINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\nXINFERENCE_ENV_HOME_PATH = \"XINFERENCE_HOME\"\nXINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS = \"XINFERENCE_HEALTH_CHECK_ATTEMPTS\"\nXINFERENCE_ENV_HEALTH_CHECK_INTERVAL = \"XINFERENCE_HEALTH_CHECK_INTERVAL\"\nXINFERENCE_ENV_DISABLE_VLLM = \"XINFERENCE_DISABLE_VLLM\"\n\n\ndef get_xinference_home():\n return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / \".xinference\"))\n\n\nXINFERENCE_HOME = get_xinference_home()\nXINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, \"cache\")\nXINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, \"model\")\nXINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, \"logs\")\nXINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, \"image\")\n\nXINFERENCE_DEFAULT_LOCAL_HOST = \"127.0.0.1\"\nXINFERENCE_DEFAULT_DISTRIBUTED_HOST = \"0.0.0.0\"\nXINFERENCE_DEFAULT_ENDPOINT_PORT = 9997\nXINFERENCE_DEFAULT_LOG_FILE_NAME = \"xinference.log\"\nXINFERENCE_LOG_MAX_BYTES = 100 * 1024 * 1024\nXINFERENCE_LOG_BACKUP_COUNT = 30\nXINFERENCE_HEALTH_CHECK_ATTEMPTS = int(\n os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS, 3)\n)\nXINFERENCE_HEALTH_CHECK_INTERVAL = int(\n os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_INTERVAL, 3)\n)\nXINFERENCE_DISABLE_VLLM = bool(int(os.environ.get(XINFERENCE_ENV_DISABLE_VLLM, 0)))\n", "path": "xinference/constants.py"}]}
1,245
237
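A short note on the fix above: the symlinks the reporter saw appear to come from the Hugging Face hub cache living outside `XINFERENCE_HOME`; the patch points `HUGGINGFACE_HUB_CACHE` and `MODELSCOPE_CACHE` inside it whenever the variable is set, so downloads land there directly. A sketch of the resulting behavior, using a hypothetical path:

```python
import os

os.environ["XINFERENCE_HOME"] = "/data/xinference"  # hypothetical location

# Importing the module runs get_xinference_home() at import time.
from xinference.constants import XINFERENCE_HOME

assert XINFERENCE_HOME == "/data/xinference"
# With the patch, model files are downloaded under XINFERENCE_HOME instead of
# being symlinked from the default ~/.cache/huggingface location.
assert os.environ["HUGGINGFACE_HUB_CACHE"] == "/data/xinference/huggingface"
assert os.environ["MODELSCOPE_CACHE"] == "/data/xinference/modelscope"
```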
gh_patches_debug_22760
rasdani/github-patches
git_diff
carpentries__amy-1065
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bulk import workflow encounters IntegrityError when saving an organization Currently, we allow organizations with the domain that contains the `www` subdomain. For eg: Google can exist as `www.google.com` as well as `google.com`, leading to `IntegrityError` while saving the first while the second exists. Shouldn't we enforce one URL pattern and trim/add `www` to the `domain` field when saving an organization? Testcase: ``` py In [5]: Organization.objects.create(fullname='Google', domain='google.com') Out[5]: <Organization: google.com> In [6]: Organization.objects.create(fullname='Google', domain='www.google.com') --------------------------------------------------------------------------- IntegrityError Traceback (most recent call last) ``` </issue> <code> [start of pydata/api.py] 1 from functools import lru_cache 2 from json import JSONDecodeError 3 from urllib.parse import urljoin, urlparse 4 5 import requests 6 from django.conf import settings 7 8 from workshops.models import ( 9 Person, 10 Role, 11 Organization, 12 Sponsorship, 13 Task, 14 ) 15 from workshops.util import create_username 16 17 18 class BaseAPIClient(requests.Session): 19 """ 20 An API client that abstracts away the work of dealing with URLs. 21 Usage: 22 > client = APIClient(event) 23 > list(client) -> returns a list of all objects returned by the API. 24 > client[23] -> returns the object with pk=23 25 """ 26 ROOT_ENDPOINT = 'api/' 27 28 @lru_cache(maxsize=None) 29 def __new__(cls, event): 30 """ 31 Returns an instance of APIClient. 32 Throws NotImplementedError if an API does not exist at the root URL. 33 """ 34 try: 35 r = requests.get(urljoin(event.url, cls.ROOT_ENDPOINT)) 36 r.raise_for_status() 37 r.json() 38 except (requests.exceptions.HTTPError, JSONDecodeError): 39 raise NotImplementedError('Conference site does not support an API') 40 return super().__new__(cls) 41 42 def __init__(self, event): 43 '''Populate API endpoint and set up basic authentication''' 44 super().__init__() 45 self.event = event 46 self.endpoint = urljoin(event.url, self.ENDPOINT) 47 self.auth = ( 48 settings.PYDATA_USERNAME_SECRET, settings.PYDATA_PASSWORD_SECRET) 49 50 def __iter__(self): 51 try: 52 r = self.get(self.endpoint) 53 r.raise_for_status() 54 pydata_objs = r.json() 55 except (requests.exceptions.HTTPError, JSONDecodeError) as e: 56 raise IOError('Cannot fetch instances from API: {}'.format(str(e))) 57 for obj in pydata_objs: 58 yield self.parse(obj) 59 60 def __contains__(self, pk): 61 try: 62 self.get(self.endpoint + str(pk)).raise_for_status() 63 except requests.exceptions.HTTPError: 64 return False 65 else: 66 return True 67 68 def __getitem__(self, pk): 69 if pk not in self: 70 raise KeyError( 71 '{} does not exist'.format(self.model._meta.verbose_name) 72 ) 73 obj = self.get(self.endpoint + str(pk)).json() 74 return self.parse(obj) 75 76 77 class PersonAPIClient(BaseAPIClient): 78 ENDPOINT = 'api/speaker/' 79 model = Person 80 81 def parse(self, speaker): 82 speaker['name'] = speaker['name'].strip() 83 personal = speaker['name'].rsplit(' ', 1)[0] 84 family = speaker['name'].rsplit(' ', 1)[-1] 85 return Person( 86 username=speaker['username'], 87 personal=personal, 88 family=family, 89 email=speaker['email'], 90 url=speaker['absolute_url'], 91 ) 92 93 94 class TaskAPIClient(BaseAPIClient): 95 ENDPOINT = 'api/presentation/' 96 model = Task 97 98 def parse(self, presentation): 99 return Task( 100 event=self.event, 101 
person=Person.objects.get_or_create( 102 email=presentation['speaker']['email'], 103 defaults={ 104 'username': create_username('', presentation['speaker']['username']), 105 'personal': presentation['speaker']['name'].rsplit(' ', 1)[0], 106 'family': presentation['speaker']['name'].rsplit(' ', 1)[-1], 107 'url': presentation['speaker']['absolute_url'], 108 } 109 )[0], 110 role=Role.objects.get(name='presenter'), 111 title=presentation['title'], 112 url=presentation['absolute_url'], 113 ) 114 115 116 class SponsorshipAPIClient(BaseAPIClient): 117 ENDPOINT = 'api/sponsor/' 118 model = Sponsorship 119 120 def parse(self, sponsor): 121 return Sponsorship( 122 organization=Organization.objects.get_or_create( 123 domain=urlparse(sponsor['external_url']).netloc, 124 defaults={ 125 'fullname': sponsor['name'], 126 'notes': sponsor['annotation'], 127 }, 128 )[0], 129 event=self.event, 130 amount=sponsor['level']['cost'], 131 contact=Person.objects.get_or_create( 132 email=sponsor['contact_email'], 133 defaults={ 134 'username': create_username('', sponsor['contact_name']), 135 'personal': sponsor['contact_name'].rsplit(' ', 1)[0], 136 'family': sponsor['contact_name'].rsplit(' ', 1)[-1], 137 }, 138 )[0], 139 ) 140 [end of pydata/api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pydata/api.py b/pydata/api.py --- a/pydata/api.py +++ b/pydata/api.py @@ -4,6 +4,7 @@ import requests from django.conf import settings +from django.db.models import Q from workshops.models import ( Person, @@ -118,14 +119,18 @@ model = Sponsorship def parse(self, sponsor): + domain = urlparse(sponsor['external_url']).netloc + organization = Organization.objects.filter( + Q(fullname=sponsor['name']) | Q(domain=domain) + ).first() + if not organization: + organization = Organization.objects.create( + fullname=sponsor['name'], + domain=domain, + notes=sponsor['annotation'], + ) return Sponsorship( - organization=Organization.objects.get_or_create( - domain=urlparse(sponsor['external_url']).netloc, - defaults={ - 'fullname': sponsor['name'], - 'notes': sponsor['annotation'], - }, - )[0], + organization=organization, event=self.event, amount=sponsor['level']['cost'], contact=Person.objects.get_or_create(
{"golden_diff": "diff --git a/pydata/api.py b/pydata/api.py\n--- a/pydata/api.py\n+++ b/pydata/api.py\n@@ -4,6 +4,7 @@\n \n import requests\n from django.conf import settings\n+from django.db.models import Q\n \n from workshops.models import (\n Person,\n@@ -118,14 +119,18 @@\n model = Sponsorship\n \n def parse(self, sponsor):\n+ domain = urlparse(sponsor['external_url']).netloc\n+ organization = Organization.objects.filter(\n+ Q(fullname=sponsor['name']) | Q(domain=domain)\n+ ).first()\n+ if not organization:\n+ organization = Organization.objects.create(\n+ fullname=sponsor['name'],\n+ domain=domain,\n+ notes=sponsor['annotation'],\n+ )\n return Sponsorship(\n- organization=Organization.objects.get_or_create(\n- domain=urlparse(sponsor['external_url']).netloc,\n- defaults={\n- 'fullname': sponsor['name'],\n- 'notes': sponsor['annotation'],\n- },\n- )[0],\n+ organization=organization,\n event=self.event,\n amount=sponsor['level']['cost'],\n contact=Person.objects.get_or_create(\n", "issue": "Bulk import workflow encounters IntegrityError when saving an organization\nCurrently, we allow organizations with the domain that contains the `www` subdomain. For eg: Google can exist as `www.google.com` as well as `google.com`, leading to `IntegrityError` while saving the first while the second exists.\n\nShouldn't we enforce one URL pattern and trim/add `www` to the `domain` field when saving an organization?\n\nTestcase:\n\n``` py\nIn [5]: Organization.objects.create(fullname='Google', domain='google.com')\nOut[5]: <Organization: google.com>\n\nIn [6]: Organization.objects.create(fullname='Google', domain='www.google.com')\n---------------------------------------------------------------------------\nIntegrityError Traceback (most recent call last)\n```\n\n", "before_files": [{"content": "from functools import lru_cache\nfrom json import JSONDecodeError\nfrom urllib.parse import urljoin, urlparse\n\nimport requests\nfrom django.conf import settings\n\nfrom workshops.models import (\n Person,\n Role,\n Organization,\n Sponsorship,\n Task,\n)\nfrom workshops.util import create_username\n\n\nclass BaseAPIClient(requests.Session):\n \"\"\"\n An API client that abstracts away the work of dealing with URLs.\n Usage:\n > client = APIClient(event)\n > list(client) -> returns a list of all objects returned by the API.\n > client[23] -> returns the object with pk=23\n \"\"\"\n ROOT_ENDPOINT = 'api/'\n\n @lru_cache(maxsize=None)\n def __new__(cls, event):\n \"\"\"\n Returns an instance of APIClient.\n Throws NotImplementedError if an API does not exist at the root URL.\n \"\"\"\n try:\n r = requests.get(urljoin(event.url, cls.ROOT_ENDPOINT))\n r.raise_for_status()\n r.json()\n except (requests.exceptions.HTTPError, JSONDecodeError):\n raise NotImplementedError('Conference site does not support an API')\n return super().__new__(cls)\n\n def __init__(self, event):\n '''Populate API endpoint and set up basic authentication'''\n super().__init__()\n self.event = event\n self.endpoint = urljoin(event.url, self.ENDPOINT)\n self.auth = (\n settings.PYDATA_USERNAME_SECRET, settings.PYDATA_PASSWORD_SECRET)\n\n def __iter__(self):\n try:\n r = self.get(self.endpoint)\n r.raise_for_status()\n pydata_objs = r.json()\n except (requests.exceptions.HTTPError, JSONDecodeError) as e:\n raise IOError('Cannot fetch instances from API: {}'.format(str(e)))\n for obj in pydata_objs:\n yield self.parse(obj)\n\n def __contains__(self, pk):\n try:\n self.get(self.endpoint + str(pk)).raise_for_status()\n except 
requests.exceptions.HTTPError:\n return False\n else:\n return True\n\n def __getitem__(self, pk):\n if pk not in self:\n raise KeyError(\n '{} does not exist'.format(self.model._meta.verbose_name)\n )\n obj = self.get(self.endpoint + str(pk)).json()\n return self.parse(obj)\n\n\nclass PersonAPIClient(BaseAPIClient):\n ENDPOINT = 'api/speaker/'\n model = Person\n\n def parse(self, speaker):\n speaker['name'] = speaker['name'].strip()\n personal = speaker['name'].rsplit(' ', 1)[0]\n family = speaker['name'].rsplit(' ', 1)[-1]\n return Person(\n username=speaker['username'],\n personal=personal,\n family=family,\n email=speaker['email'],\n url=speaker['absolute_url'],\n )\n\n\nclass TaskAPIClient(BaseAPIClient):\n ENDPOINT = 'api/presentation/'\n model = Task\n\n def parse(self, presentation):\n return Task(\n event=self.event,\n person=Person.objects.get_or_create(\n email=presentation['speaker']['email'],\n defaults={\n 'username': create_username('', presentation['speaker']['username']),\n 'personal': presentation['speaker']['name'].rsplit(' ', 1)[0],\n 'family': presentation['speaker']['name'].rsplit(' ', 1)[-1],\n 'url': presentation['speaker']['absolute_url'],\n }\n )[0],\n role=Role.objects.get(name='presenter'),\n title=presentation['title'],\n url=presentation['absolute_url'],\n )\n\n\nclass SponsorshipAPIClient(BaseAPIClient):\n ENDPOINT = 'api/sponsor/'\n model = Sponsorship\n\n def parse(self, sponsor):\n return Sponsorship(\n organization=Organization.objects.get_or_create(\n domain=urlparse(sponsor['external_url']).netloc,\n defaults={\n 'fullname': sponsor['name'],\n 'notes': sponsor['annotation'],\n },\n )[0],\n event=self.event,\n amount=sponsor['level']['cost'],\n contact=Person.objects.get_or_create(\n email=sponsor['contact_email'],\n defaults={\n 'username': create_username('', sponsor['contact_name']),\n 'personal': sponsor['contact_name'].rsplit(' ', 1)[0],\n 'family': sponsor['contact_name'].rsplit(' ', 1)[-1],\n },\n )[0],\n )\n", "path": "pydata/api.py"}]}
1,941
264
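For the record above, the essence of the patched `parse()` is a lookup that matches an existing `Organization` by either name or parsed domain before creating one, so `google.com` and `www.google.com` no longer collide with an `IntegrityError`. A standalone sketch of that lookup, with hypothetical data values:

```python
from urllib.parse import urlparse

from django.db.models import Q
from workshops.models import Organization

external_url = "https://www.google.com/sponsors"  # hypothetical sponsor URL
domain = urlparse(external_url).netloc            # -> "www.google.com"

# Match on fullname OR domain first; only create when neither matches.
organization = Organization.objects.filter(
    Q(fullname="Google") | Q(domain=domain)
).first()
if organization is None:
    organization = Organization.objects.create(
        fullname="Google", domain=domain, notes=""
    )
```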
gh_patches_debug_23049
rasdani/github-patches
git_diff
StackStorm__st2-5775
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add query type to linux.dig action ## SUMMARY I would like the ability to query TXT records and noticed there is no way to specify a query type to the dig action. ### STACKSTORM VERSION `st2 3.6.0, on Python 3.6.8` ## Steps to reproduce the problem I attempted a few ways to add "TXT" to the query by adding to queryopts or try appending to the string hostname. Upon looking at the code I realized nothing like that would work. ## Expected Results Get a list returned of TXT records ## Some sample code to add it ``` class DigAction(Action): def run(self, rand, count, nameserver, hostname, queryopts, querytype): # Add querytype parameter opt_list = [] output = [] cmd_args = ["dig"] if nameserver: nameserver = "@" + nameserver cmd_args.append(nameserver) if isinstance(queryopts, str) and "," in queryopts: opt_list = queryopts.split(",") else: opt_list.append(queryopts) cmd_args.extend(["+" + option for option in opt_list]) cmd_args.append(hostname) cmd_args.append(querytype) # append query type (Default is set to "A" in dig.yaml) try: raw_result = subprocess.Popen( cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE ).communicate()[0] if sys.version_info >= (3,): # This function might call getpreferred encoding unless we pass # do_setlocale=False. encoding = locale.getpreferredencoding(do_setlocale=False) result_list_str = raw_result.decode(encoding) else: result_list_str = str(raw_result) if querytype.lower() == "txt": # improve the output formatting result of TXT records result_list_str = result_list_str.replace('"', '') # strip quotes so we don't see \" wrapped around output result_list = list(filter(None, result_list_str.split("\n"))) ``` I only spent a few minutes on this code to test making it work for me. It could be improved on to make sure works for other types as well. I added inline comments to show the only lines I added </issue> <code> [start of contrib/linux/actions/dig.py] 1 #! /usr/bin/python 2 3 # Copyright 2020 The StackStorm Authors. 4 # Copyright 2019 Extreme Networks, Inc. 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 
17 18 import errno 19 import locale 20 import subprocess 21 import random 22 import sys 23 24 from st2common.runners.base_action import Action 25 26 27 class DigAction(Action): 28 def run(self, rand, count, nameserver, hostname, queryopts): 29 opt_list = [] 30 output = [] 31 32 cmd_args = ["dig"] 33 if nameserver: 34 nameserver = "@" + nameserver 35 cmd_args.append(nameserver) 36 37 if isinstance(queryopts, str) and "," in queryopts: 38 opt_list = queryopts.split(",") 39 else: 40 opt_list.append(queryopts) 41 42 cmd_args.extend(["+" + option for option in opt_list]) 43 44 cmd_args.append(hostname) 45 46 try: 47 raw_result = subprocess.Popen( 48 cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE 49 ).communicate()[0] 50 51 if sys.version_info >= (3,): 52 # This function might call getpreferred encoding unless we pass 53 # do_setlocale=False. 54 encoding = locale.getpreferredencoding(do_setlocale=False) 55 result_list_str = raw_result.decode(encoding) 56 else: 57 result_list_str = str(raw_result) 58 59 result_list = list(filter(None, result_list_str.split("\n"))) 60 61 # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat 62 # for Python3: 63 # except FileNotFoundError as e: 64 except OSError as e: 65 if e.errno == errno.ENOENT: 66 return ( 67 False, 68 "Can't find dig installed in the path (usually /usr/bin/dig). If " 69 "dig isn't installed, you can install it with 'sudo yum install " 70 "bind-utils' or 'sudo apt install dnsutils'", 71 ) 72 else: 73 raise e 74 75 if int(count) > len(result_list) or count <= 0: 76 count = len(result_list) 77 78 output = result_list[0:count] 79 if rand is True: 80 random.shuffle(output) 81 return output 82 [end of contrib/linux/actions/dig.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/contrib/linux/actions/dig.py b/contrib/linux/actions/dig.py --- a/contrib/linux/actions/dig.py +++ b/contrib/linux/actions/dig.py @@ -25,7 +25,7 @@ class DigAction(Action): - def run(self, rand, count, nameserver, hostname, queryopts): + def run(self, rand, count, nameserver, hostname, queryopts, querytype): opt_list = [] output = [] @@ -42,6 +42,7 @@ cmd_args.extend(["+" + option for option in opt_list]) cmd_args.append(hostname) + cmd_args.append(querytype) try: raw_result = subprocess.Popen( @@ -56,6 +57,10 @@ else: result_list_str = str(raw_result) + # Better format the output when the type is TXT + if querytype.lower() == "txt": + result_list_str = result_list_str.replace('"', "") + result_list = list(filter(None, result_list_str.split("\n"))) # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat
{"golden_diff": "diff --git a/contrib/linux/actions/dig.py b/contrib/linux/actions/dig.py\n--- a/contrib/linux/actions/dig.py\n+++ b/contrib/linux/actions/dig.py\n@@ -25,7 +25,7 @@\n \n \n class DigAction(Action):\n- def run(self, rand, count, nameserver, hostname, queryopts):\n+ def run(self, rand, count, nameserver, hostname, queryopts, querytype):\n opt_list = []\n output = []\n \n@@ -42,6 +42,7 @@\n cmd_args.extend([\"+\" + option for option in opt_list])\n \n cmd_args.append(hostname)\n+ cmd_args.append(querytype)\n \n try:\n raw_result = subprocess.Popen(\n@@ -56,6 +57,10 @@\n else:\n result_list_str = str(raw_result)\n \n+ # Better format the output when the type is TXT\n+ if querytype.lower() == \"txt\":\n+ result_list_str = result_list_str.replace('\"', \"\")\n+\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\n \n # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat\n", "issue": "Add query type to linux.dig action\n## SUMMARY\r\n\r\nI would like the ability to query TXT records and noticed there is no way to specify a query type to the dig action. \r\n\r\n### STACKSTORM VERSION\r\n\r\n`st2 3.6.0, on Python 3.6.8`\r\n\r\n## Steps to reproduce the problem\r\n\r\nI attempted a few ways to add \"TXT\" to the query by adding to queryopts or try appending to the string hostname. Upon looking at the code I realized nothing like that would work.\r\n\r\n## Expected Results\r\n\r\nGet a list returned of TXT records\r\n\r\n## Some sample code to add it\r\n\r\n```\r\nclass DigAction(Action):\r\n def run(self, rand, count, nameserver, hostname, queryopts, querytype): # Add querytype parameter\r\n opt_list = []\r\n output = []\r\n\r\n cmd_args = [\"dig\"]\r\n if nameserver:\r\n nameserver = \"@\" + nameserver\r\n cmd_args.append(nameserver)\r\n\r\n if isinstance(queryopts, str) and \",\" in queryopts:\r\n opt_list = queryopts.split(\",\")\r\n else:\r\n opt_list.append(queryopts)\r\n\r\n cmd_args.extend([\"+\" + option for option in opt_list])\r\n\r\n cmd_args.append(hostname)\r\n cmd_args.append(querytype) # append query type (Default is set to \"A\" in dig.yaml)\r\n\r\n try:\r\n raw_result = subprocess.Popen(\r\n cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE\r\n ).communicate()[0]\r\n\r\n if sys.version_info >= (3,):\r\n # This function might call getpreferred encoding unless we pass\r\n # do_setlocale=False.\r\n encoding = locale.getpreferredencoding(do_setlocale=False)\r\n result_list_str = raw_result.decode(encoding)\r\n else:\r\n result_list_str = str(raw_result)\r\n\r\n if querytype.lower() == \"txt\": # improve the output formatting result of TXT records\r\n result_list_str = result_list_str.replace('\"', '') # strip quotes so we don't see \\\" wrapped around output\r\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\r\n```\r\n\r\nI only spent a few minutes on this code to test making it work for me. It could be improved on to make sure works for other types as well. I added inline comments to show the only lines I added\n", "before_files": [{"content": "#! 
/usr/bin/python\n\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport errno\nimport locale\nimport subprocess\nimport random\nimport sys\n\nfrom st2common.runners.base_action import Action\n\n\nclass DigAction(Action):\n def run(self, rand, count, nameserver, hostname, queryopts):\n opt_list = []\n output = []\n\n cmd_args = [\"dig\"]\n if nameserver:\n nameserver = \"@\" + nameserver\n cmd_args.append(nameserver)\n\n if isinstance(queryopts, str) and \",\" in queryopts:\n opt_list = queryopts.split(\",\")\n else:\n opt_list.append(queryopts)\n\n cmd_args.extend([\"+\" + option for option in opt_list])\n\n cmd_args.append(hostname)\n\n try:\n raw_result = subprocess.Popen(\n cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE\n ).communicate()[0]\n\n if sys.version_info >= (3,):\n # This function might call getpreferred encoding unless we pass\n # do_setlocale=False.\n encoding = locale.getpreferredencoding(do_setlocale=False)\n result_list_str = raw_result.decode(encoding)\n else:\n result_list_str = str(raw_result)\n\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\n\n # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat\n # for Python3:\n # except FileNotFoundError as e:\n except OSError as e:\n if e.errno == errno.ENOENT:\n return (\n False,\n \"Can't find dig installed in the path (usually /usr/bin/dig). If \"\n \"dig isn't installed, you can install it with 'sudo yum install \"\n \"bind-utils' or 'sudo apt install dnsutils'\",\n )\n else:\n raise e\n\n if int(count) > len(result_list) or count <= 0:\n count = len(result_list)\n\n output = result_list[0:count]\n if rand is True:\n random.shuffle(output)\n return output\n", "path": "contrib/linux/actions/dig.py"}]}
1,746
262
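To make the dig record concrete: the golden diff appends the new `querytype` argument after the hostname and strips double quotes from TXT answers. The argv the patched action would assemble for a TXT query, with hypothetical values:

```python
# Mirrors the patched run() building its command line.
nameserver = "8.8.8.8"     # hypothetical
queryopts = "short"        # hypothetical; comma-separated lists are also accepted
hostname = "example.com"   # hypothetical
querytype = "TXT"          # the new parameter, defaulted to "A" in dig.yaml

cmd_args = ["dig"]
if nameserver:
    cmd_args.append("@" + nameserver)
cmd_args.extend(["+" + opt for opt in [queryopts]])
cmd_args.append(hostname)
cmd_args.append(querytype)

print(" ".join(cmd_args))  # -> dig @8.8.8.8 +short example.com TXT
# For TXT lookups the patch then runs result_list_str.replace('"', "") so the
# returned answers are not wrapped in literal double quotes.
```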
gh_patches_debug_12944
rasdani/github-patches
git_diff
Nitrate__Nitrate-438
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Drop Django 1.11 AC: - Remove from `tox.ini` - Remove from `.travis.yml` - Update Django verison range in `setup.py` </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 -*- 2 3 from setuptools import setup, find_packages 4 5 6 with open('VERSION.txt', 'r') as f: 7 pkg_version = f.read().strip() 8 9 10 def get_long_description(): 11 with open('README.rst', 'r') as f: 12 return f.read() 13 14 15 install_requires = [ 16 'beautifulsoup4 >= 4.1.1', 17 'django >= 1.11,<3.0', 18 'django-contrib-comments == 1.8.0', 19 'django-tinymce == 2.7.0', 20 'django-uuslug == 1.1.8', 21 'html2text', 22 'odfpy >= 0.9.6', 23 'python-bugzilla', 24 'xmltodict', 25 'kobo == 0.9.0' 26 ] 27 28 extras_require = { 29 'mysql': ['mysqlclient >= 1.2.3'], 30 'pgsql': ['psycopg2 == 2.7.5'], 31 32 # Required for tcms.auth.backends.KerberosBackend 33 'krbauth': [ 34 'kerberos == 1.2.5' 35 ], 36 37 # Packages for building documentation 38 'docs': [ 39 'Sphinx >= 1.1.2', 40 'sphinx_rtd_theme', 41 ], 42 43 # Necessary packages for running tests 44 'tests': [ 45 'beautifulsoup4', 46 'coverage', 47 'factory_boy', 48 'flake8', 49 'mock', 50 'pytest < 4.2.0', 51 'pytest-cov', 52 'pytest-django', 53 ], 54 55 # Contain tools that assists the development 56 'devtools': [ 57 'django-debug-toolbar == 1.7', 58 'tox', 59 'django-extensions', 60 'pygraphviz', 61 'future-breakpoint', 62 ], 63 64 # Required packages required to run async tasks 65 'async': [ 66 'celery == 4.2.0', 67 ], 68 69 'multiauth': [ 70 'social-auth-app-django == 3.1.0', 71 ] 72 } 73 74 setup( 75 name='Nitrate', 76 version=pkg_version, 77 description='Test Case Management System', 78 long_description=get_long_description(), 79 author='Nitrate Team', 80 maintainer='Chenxiong Qi', 81 maintainer_email='[email protected]', 82 url='https://github.com/Nitrate/Nitrate/', 83 license='GPLv2+', 84 keywords='test case', 85 install_requires=install_requires, 86 extras_require=extras_require, 87 python_requires='>=3.6', 88 package_dir={'': 'src'}, 89 packages=find_packages('src', exclude=['test*']), 90 include_package_data=True, 91 zip_safe=False, 92 classifiers=[ 93 'Framework :: Django', 94 'Framework :: Django :: 1.11', 95 'Framework :: Django :: 2.0', 96 'Framework :: Django :: 2.1', 97 'Intended Audience :: Developers', 98 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 99 'Programming Language :: Python :: 3', 100 'Programming Language :: Python :: 3.6', 101 'Programming Language :: Python :: 3.7', 102 'Programming Language :: Python :: 3 :: Only', 103 'Topic :: Software Development :: Quality Assurance', 104 'Topic :: Software Development :: Testing', 105 ], 106 project_urls={ 107 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues', 108 'Source Code': 'https://github.com/Nitrate/Nitrate', 109 'Documentation': 'https://nitrate.readthedocs.io/', 110 }, 111 ) 112 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ install_requires = [ 'beautifulsoup4 >= 4.1.1', - 'django >= 1.11,<3.0', + 'django >= 2.0,<3.0', 'django-contrib-comments == 1.8.0', 'django-tinymce == 2.7.0', 'django-uuslug == 1.1.8', @@ -91,7 +91,6 @@ zip_safe=False, classifiers=[ 'Framework :: Django', - 'Framework :: Django :: 1.11', 'Framework :: Django :: 2.0', 'Framework :: Django :: 2.1', 'Intended Audience :: Developers',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,7 @@\n \n install_requires = [\n 'beautifulsoup4 >= 4.1.1',\n- 'django >= 1.11,<3.0',\n+ 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n@@ -91,7 +91,6 @@\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n- 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n", "issue": "Drop Django 1.11\nAC:\r\n\r\n- Remove from `tox.ini`\r\n- Remove from `.travis.yml`\r\n- Update Django verison range in `setup.py`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest < 4.2.0',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]}
1,595
190
gh_patches_debug_5982
rasdani/github-patches
git_diff
mdn__kuma-6250
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Macro search results are mangled for non-en-US locales See for example https://wiki.developer.mozilla.org/en-US/search?locale=*&kumascript_macros=WebExtAllExamples&topic=none This lists all pages that call WebExtAllExamples, across all locales. One entry looks like: <img width="893" alt="Screen Shot 2019-11-21 at 4 30 25 PM" src="https://user-images.githubusercontent.com/432915/69387936-3e5d4780-0c7c-11ea-9347-5916d638d12d.png"> This is the German translation of the https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Examples page. But the first link, "**Beispiele für Erweiterungen**", has the en-US locale in the URL, like this: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - note the translated slug but the en-US locale. If I click it, I get "Create a new page", because that page doesn't exist. After the short description, the entry is supposed to have "`${url} Score: 82.20941 translated from ${original}`, where `url` is the localized page, and `original` is the en-US version. But these are wrong too: * `url`: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - nonexistent page with en-US locale but de slug * `original`: https://developer.mozilla.org/de/docs/Mozilla/Add-ons/WebExtensions/Beispiele - the proper value for `url` I've seen some cases where the "`${url} Score: 82.20941 translated from ${original}` bit doesn't appear, and then there is no usable link to the actual page, and I have to guess what the locale is, to be able to fix the link. </issue> <code> [start of kuma/search/fields.py] 1 from django.conf import settings 2 from rest_framework import serializers 3 4 from kuma.core.urlresolvers import reverse 5 6 7 class SearchQueryField(serializers.ReadOnlyField): 8 """ 9 Field that returns the search query of the current request. 10 """ 11 def __init__(self, *args, **kwargs): 12 kwargs['source'] = '*' 13 super(SearchQueryField, self).__init__(*args, **kwargs) 14 15 def to_representation(self, value): 16 request = self.context.get('request') 17 if request is None: 18 return '' 19 else: 20 return request.query_params.get('q', None) 21 22 23 class SiteURLField(serializers.ReadOnlyField): 24 """ 25 A serializer field for creating URL for the given objects with the 26 given ``args``/``kwargs`` and a required ``locale`` attribute. 27 """ 28 def __init__(self, url_name, args=None, kwargs=None): 29 self.url_name = url_name 30 self.args = args or [] 31 self.kwargs = kwargs or [] 32 super(SiteURLField, self).__init__(source='*') 33 34 def to_representation(self, value): 35 if not value: 36 return None 37 args = [getattr(value, arg) for arg in self.args] 38 kwargs = {arg: getattr(value, arg) for arg in self.kwargs} 39 locale = getattr(value, 'locale', settings.LANGUAGE_CODE) 40 path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs) 41 return '%s%s' % (settings.SITE_URL, path) 42 [end of kuma/search/fields.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/search/fields.py b/kuma/search/fields.py --- a/kuma/search/fields.py +++ b/kuma/search/fields.py @@ -37,5 +37,4 @@ args = [getattr(value, arg) for arg in self.args] kwargs = {arg: getattr(value, arg) for arg in self.kwargs} locale = getattr(value, 'locale', settings.LANGUAGE_CODE) - path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs) - return '%s%s' % (settings.SITE_URL, path) + return reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)
{"golden_diff": "diff --git a/kuma/search/fields.py b/kuma/search/fields.py\n--- a/kuma/search/fields.py\n+++ b/kuma/search/fields.py\n@@ -37,5 +37,4 @@\n args = [getattr(value, arg) for arg in self.args]\n kwargs = {arg: getattr(value, arg) for arg in self.kwargs}\n locale = getattr(value, 'locale', settings.LANGUAGE_CODE)\n- path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n- return '%s%s' % (settings.SITE_URL, path)\n+ return reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n", "issue": "Macro search results are mangled for non-en-US locales\nSee for example https://wiki.developer.mozilla.org/en-US/search?locale=*&kumascript_macros=WebExtAllExamples&topic=none\r\n\r\nThis lists all pages that call WebExtAllExamples, across all locales. One entry looks like:\r\n\r\n<img width=\"893\" alt=\"Screen Shot 2019-11-21 at 4 30 25 PM\" src=\"https://user-images.githubusercontent.com/432915/69387936-3e5d4780-0c7c-11ea-9347-5916d638d12d.png\">\r\n\r\nThis is the German translation of the https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Examples page.\r\n\r\nBut the first link, \"**Beispiele f\u00fcr Erweiterungen**\", has the en-US locale in the URL, like this: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - note the translated slug but the en-US locale. If I click it, I get \"Create a new page\", because that page doesn't exist.\r\n\r\nAfter the short description, the entry is supposed to have \"`${url} Score: 82.20941 translated from ${original}`, where `url` is the localized page, and `original` is the en-US version. But these are wrong too:\r\n\r\n* `url`: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - nonexistent page with en-US locale but de slug\r\n* `original`: https://developer.mozilla.org/de/docs/Mozilla/Add-ons/WebExtensions/Beispiele - the proper value for `url`\r\n\r\n I've seen some cases where the \"`${url} Score: 82.20941 translated from ${original}` bit doesn't appear, and then there is no usable link to the actual page, and I have to guess what the locale is, to be able to fix the link.\r\n\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom rest_framework import serializers\n\nfrom kuma.core.urlresolvers import reverse\n\n\nclass SearchQueryField(serializers.ReadOnlyField):\n \"\"\"\n Field that returns the search query of the current request.\n \"\"\"\n def __init__(self, *args, **kwargs):\n kwargs['source'] = '*'\n super(SearchQueryField, self).__init__(*args, **kwargs)\n\n def to_representation(self, value):\n request = self.context.get('request')\n if request is None:\n return ''\n else:\n return request.query_params.get('q', None)\n\n\nclass SiteURLField(serializers.ReadOnlyField):\n \"\"\"\n A serializer field for creating URL for the given objects with the\n given ``args``/``kwargs`` and a required ``locale`` attribute.\n \"\"\"\n def __init__(self, url_name, args=None, kwargs=None):\n self.url_name = url_name\n self.args = args or []\n self.kwargs = kwargs or []\n super(SiteURLField, self).__init__(source='*')\n\n def to_representation(self, value):\n if not value:\n return None\n args = [getattr(value, arg) for arg in self.args]\n kwargs = {arg: getattr(value, arg) for arg in self.kwargs}\n locale = getattr(value, 'locale', settings.LANGUAGE_CODE)\n path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n return '%s%s' % (settings.SITE_URL, path)\n", "path": "kuma/search/fields.py"}]}
1,384
150
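To make the kuma fix above concrete, here is a minimal, self-contained Python sketch; the settings class, the reverse() helper, and the 'document' URL name are stand-ins invented for illustration, not kuma's real Django/DRF code.

class settings:
    # Stand-ins for the Django settings the field reads.
    SITE_URL = 'https://developer.mozilla.org'
    LANGUAGE_CODE = 'en-US'

def reverse(url_name, locale, args=None, kwargs=None):
    # Hypothetical locale-aware URL reversal, loosely modelled on kuma's.
    return '/%s/docs/%s' % (locale, (kwargs or {}).get('slug', ''))

def old_to_representation(value):
    # Pre-fix: an absolute URL glued together from SITE_URL and the path.
    path = reverse('document', locale=value['locale'], kwargs={'slug': value['slug']})
    return '%s%s' % (settings.SITE_URL, path)

def new_to_representation(value):
    # Post-fix: return the locale-aware relative path unchanged.
    return reverse('document', locale=value['locale'], kwargs={'slug': value['slug']})

doc = {'locale': 'de', 'slug': 'Mozilla/Add-ons/WebExtensions/Beispiele'}
print(old_to_representation(doc))  # absolute form the old field produced
print(new_to_representation(doc))  # relative, locale-aware path after the fix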
gh_patches_debug_10682
rasdani/github-patches
git_diff
encode__starlette-1609
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Gzip Middleware content-length is incorrect The following exception is thrown when I use uvicorn to drive my starlette project. After control variates, I am sure this is caused by Gzip Middleware. ``` File "C:\Users\AberS\Documents\Github\index.py\.venv\lib\site-packages\h11\_writers.py", line 102, in send_eom raise LocalProtocolError("Too little data for declared Content-Length") h11._util.LocalProtocolError: Too little data for declared Content-Length ``` </issue> <code> [start of starlette/middleware/base.py] 1 import typing 2 3 import anyio 4 5 from starlette.requests import Request 6 from starlette.responses import Response, StreamingResponse 7 from starlette.types import ASGIApp, Receive, Scope, Send 8 9 RequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]] 10 DispatchFunction = typing.Callable[ 11 [Request, RequestResponseEndpoint], typing.Awaitable[Response] 12 ] 13 14 15 class BaseHTTPMiddleware: 16 def __init__( 17 self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None 18 ) -> None: 19 self.app = app 20 self.dispatch_func = self.dispatch if dispatch is None else dispatch 21 22 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: 23 if scope["type"] != "http": 24 await self.app(scope, receive, send) 25 return 26 27 async def call_next(request: Request) -> Response: 28 app_exc: typing.Optional[Exception] = None 29 send_stream, recv_stream = anyio.create_memory_object_stream() 30 31 async def coro() -> None: 32 nonlocal app_exc 33 34 async with send_stream: 35 try: 36 await self.app(scope, request.receive, send_stream.send) 37 except Exception as exc: 38 app_exc = exc 39 40 task_group.start_soon(coro) 41 42 try: 43 message = await recv_stream.receive() 44 except anyio.EndOfStream: 45 if app_exc is not None: 46 raise app_exc 47 raise RuntimeError("No response returned.") 48 49 assert message["type"] == "http.response.start" 50 51 async def body_stream() -> typing.AsyncGenerator[bytes, None]: 52 async with recv_stream: 53 async for message in recv_stream: 54 assert message["type"] == "http.response.body" 55 yield message.get("body", b"") 56 57 if app_exc is not None: 58 raise app_exc 59 60 response = StreamingResponse( 61 status_code=message["status"], content=body_stream() 62 ) 63 response.raw_headers = message["headers"] 64 return response 65 66 async with anyio.create_task_group() as task_group: 67 request = Request(scope, receive=receive) 68 response = await self.dispatch_func(request, call_next) 69 await response(scope, receive, send) 70 task_group.cancel_scope.cancel() 71 72 async def dispatch( 73 self, request: Request, call_next: RequestResponseEndpoint 74 ) -> Response: 75 raise NotImplementedError() # pragma: no cover 76 [end of starlette/middleware/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py
--- a/starlette/middleware/base.py
+++ b/starlette/middleware/base.py
@@ -52,7 +52,11 @@
                 async with recv_stream:
                     async for message in recv_stream:
                         assert message["type"] == "http.response.body"
-                        yield message.get("body", b"")
+                        body = message.get("body", b"")
+                        if body:
+                            yield body
+                        if not message.get("more_body", False):
+                            break
 
             if app_exc is not None:
                 raise app_exc
{"golden_diff": "diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py\n--- a/starlette/middleware/base.py\n+++ b/starlette/middleware/base.py\n@@ -52,7 +52,11 @@\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n- yield message.get(\"body\", b\"\")\n+ body = message.get(\"body\", b\"\")\n+ if body:\n+ yield body\n+ if not message.get(\"more_body\", False):\n+ break\n \n if app_exc is not None:\n raise app_exc\n", "issue": "Gzip Middleware content-length is incorrect\nThe following exception is thrown when I use uvicorn to drive my starlette project. After control variates, I am sure this is caused by Gzip Middleware.\r\n\r\n```\r\n File \"C:\\Users\\AberS\\Documents\\Github\\index.py\\.venv\\lib\\site-packages\\h11\\_writers.py\", line 102, in send_eom\r\n raise LocalProtocolError(\"Too little data for declared Content-Length\") \r\nh11._util.LocalProtocolError: Too little data for declared Content-Length\r\n```\r\n\n", "before_files": [{"content": "import typing\n\nimport anyio\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response, StreamingResponse\nfrom starlette.types import ASGIApp, Receive, Scope, Send\n\nRequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]\nDispatchFunction = typing.Callable[\n [Request, RequestResponseEndpoint], typing.Awaitable[Response]\n]\n\n\nclass BaseHTTPMiddleware:\n def __init__(\n self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None\n ) -> None:\n self.app = app\n self.dispatch_func = self.dispatch if dispatch is None else dispatch\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n async def call_next(request: Request) -> Response:\n app_exc: typing.Optional[Exception] = None\n send_stream, recv_stream = anyio.create_memory_object_stream()\n\n async def coro() -> None:\n nonlocal app_exc\n\n async with send_stream:\n try:\n await self.app(scope, request.receive, send_stream.send)\n except Exception as exc:\n app_exc = exc\n\n task_group.start_soon(coro)\n\n try:\n message = await recv_stream.receive()\n except anyio.EndOfStream:\n if app_exc is not None:\n raise app_exc\n raise RuntimeError(\"No response returned.\")\n\n assert message[\"type\"] == \"http.response.start\"\n\n async def body_stream() -> typing.AsyncGenerator[bytes, None]:\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n yield message.get(\"body\", b\"\")\n\n if app_exc is not None:\n raise app_exc\n\n response = StreamingResponse(\n status_code=message[\"status\"], content=body_stream()\n )\n response.raw_headers = message[\"headers\"]\n return response\n\n async with anyio.create_task_group() as task_group:\n request = Request(scope, receive=receive)\n response = await self.dispatch_func(request, call_next)\n await response(scope, receive, send)\n task_group.cancel_scope.cancel()\n\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n raise NotImplementedError() # pragma: no cover\n", "path": "starlette/middleware/base.py"}]}
1,331
138
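A runnable sketch of the behaviour the starlette golden diff restores, with a plain list standing in for the anyio receive stream (only the ASGI message shapes are assumed): empty chunks are skipped and iteration stops at the final more_body=False message, so the bytes yielded match the declared Content-Length.

import asyncio

async def body_stream(messages):
    for message in messages:  # stands in for `async for ... in recv_stream`
        assert message["type"] == "http.response.body"
        body = message.get("body", b"")
        if body:
            yield body  # never emit empty chunks
        if not message.get("more_body", False):
            break  # final message reached: stop instead of waiting for close

async def main():
    messages = [
        {"type": "http.response.body", "body": b"hello ", "more_body": True},
        {"type": "http.response.body", "body": b"world", "more_body": False},
    ]
    chunks = [chunk async for chunk in body_stream(messages)]
    assert b"".join(chunks) == b"hello world"
    print(b"".join(chunks))

asyncio.run(main())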
gh_patches_debug_23570
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-452
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Implement GA tracking of downloads From Luis: _I've done some research about how to track the number of downloads in the website. We can track those events using Google Analytics as you suggested. There is a slight change of code that has to be implemented following Google Analytic's developer manual [here](https://developers.google.com/analytics/devguides/collection/analyticsjs/events). It is a bit more refined than copying and pasting code, although at a glance it doesn't seem to be extremely complicated._ </issue> <code> [start of ckanext-metadata_fields/ckanext/metadata_fields/plugin.py] 1 ''' 2 Created on Apr 10, 2014 3 4 @author:alexandru-m-g 5 ''' 6 import logging 7 8 import ckan.plugins as plugins 9 import ckan.plugins.toolkit as tk 10 from routes.mapper import SubMapper 11 12 import ckanext.metadata_fields.custom_validator as vd 13 import ckanext.metadata_fields.update as update 14 15 def list_of_all_groups(): 16 groups = tk.get_action('group_list')(data_dict={'all_fields': True}) 17 return groups 18 19 20 class HdxMetadataFieldsPlugin(plugins.SingletonPlugin, tk.DefaultDatasetForm): 21 plugins.implements(plugins.IConfigurer, inherit=False) 22 plugins.implements(plugins.IRoutes, inherit=True) 23 plugins.implements(plugins.IDatasetForm, inherit=False) 24 plugins.implements(plugins.ITemplateHelpers) 25 plugins.implements(plugins.IActions) 26 27 def update_config(self, config): 28 tk.add_template_directory(config, 'templates') 29 30 def before_map(self, map): 31 with SubMapper(map, controller='ckanext.metadata_fields.dataset_controller:DatasetController') as m: 32 m.connect('add dataset', '/dataset/new', action='new') 33 m.connect('/dataset/{action}/{id}', 34 requirements=dict(action='|'.join([ 35 'new_metadata', 36 'new_resource', 37 ]))) 38 return map 39 40 def is_fallback(self): 41 return True 42 43 def package_types(self): 44 # default - no specific package type 45 return [] 46 47 def _modify_package_schema(self, schema): 48 49 schema.update({ 50 'package_creator': [tk.get_validator('not_empty'), 51 tk.get_converter('convert_to_extras')], 52 'groups_list': [vd.groups_not_empty], 53 'caveats' : [tk.get_validator('ignore_missing'), 54 tk.get_converter('convert_to_extras')], 55 'dataset_source' : [tk.get_validator('not_empty'), 56 tk.get_converter('convert_to_extras')], 57 'dataset_date' : [tk.get_validator('ignore_missing'), 58 tk.get_converter('convert_to_extras')], 59 'methodology' : [tk.get_validator('ignore_missing'), 60 tk.get_converter('convert_to_extras')], 61 }) 62 63 return schema 64 65 66 def create_package_schema(self): 67 schema = super(HdxMetadataFieldsPlugin, self).create_package_schema() 68 schema = self._modify_package_schema(schema) 69 return schema 70 71 def update_package_schema(self): 72 schema = super(HdxMetadataFieldsPlugin, self).update_package_schema() 73 schema = self._modify_package_schema(schema) 74 return schema 75 76 def show_package_schema(self): 77 schema = super(HdxMetadataFieldsPlugin, self).show_package_schema() 78 79 schema.update({ 80 'package_creator': [tk.get_converter('convert_from_extras'), 81 tk.get_validator('ignore_missing')], 82 'caveats' : [tk.get_converter('convert_from_extras'), 83 tk.get_validator('ignore_missing')], 84 'dataset_source' : [tk.get_converter('convert_from_extras'), 85 tk.get_validator('ignore_missing')], 86 'dataset_date' : [tk.get_converter('convert_from_extras'), 87 tk.get_validator('ignore_missing')], 88 
'methodology' : [tk.get_converter('convert_from_extras'), 89 tk.get_validator('ignore_missing')], 90 }) 91 return schema 92 93 94 def get_helpers(self): 95 return {'list_of_all_groups': list_of_all_groups} 96 97 def get_actions(self): 98 return {'package_update': update.package_update} 99 100 101 [end of ckanext-metadata_fields/ckanext/metadata_fields/plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py
--- a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py
+++ b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py
@@ -47,6 +47,7 @@
     def _modify_package_schema(self, schema):
 
         schema.update({
+            'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required
             'package_creator': [tk.get_validator('not_empty'),
                                 tk.get_converter('convert_to_extras')],
             'groups_list': [vd.groups_not_empty],
@@ -75,8 +76,8 @@
 
     def show_package_schema(self):
         schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()
-
         schema.update({
+            'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required
             'package_creator': [tk.get_converter('convert_from_extras'),
                                 tk.get_validator('ignore_missing')],
             'caveats' : [tk.get_converter('convert_from_extras'),
{"golden_diff": "diff --git a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n--- a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n+++ b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n@@ -47,6 +47,7 @@\n def _modify_package_schema(self, schema):\n \n schema.update({\n+ 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required\n 'package_creator': [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'groups_list': [vd.groups_not_empty],\n@@ -75,8 +76,8 @@\n \n def show_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()\n-\n schema.update({\n+ 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required\n 'package_creator': [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'caveats' : [tk.get_converter('convert_from_extras'),\n", "issue": "Implement GA tracking of downloads\nFrom Luis: \n\n_I've done some research about how to track the number of downloads in the website. We can track those events using Google Analytics as you suggested. There is a slight change of code that has to be implemented following Google Analytic's developer manual [here](https://developers.google.com/analytics/devguides/collection/analyticsjs/events). It is a bit more refined than copying and pasting code, although at a glance it doesn't seem to be extremely complicated._\n\n", "before_files": [{"content": "'''\nCreated on Apr 10, 2014\n\n@author:alexandru-m-g\n'''\nimport logging\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nfrom routes.mapper import SubMapper\n\nimport ckanext.metadata_fields.custom_validator as vd\nimport ckanext.metadata_fields.update as update\n\ndef list_of_all_groups():\n groups = tk.get_action('group_list')(data_dict={'all_fields': True})\n return groups\n\n\nclass HdxMetadataFieldsPlugin(plugins.SingletonPlugin, tk.DefaultDatasetForm):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.IDatasetForm, inherit=False)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def before_map(self, map):\n with SubMapper(map, controller='ckanext.metadata_fields.dataset_controller:DatasetController') as m:\n m.connect('add dataset', '/dataset/new', action='new')\n m.connect('/dataset/{action}/{id}',\n requirements=dict(action='|'.join([\n 'new_metadata',\n 'new_resource',\n ])))\n return map\n \n def is_fallback(self):\n return True\n\n def package_types(self):\n # default - no specific package type\n return []\n\n def _modify_package_schema(self, schema):\n \n schema.update({\n 'package_creator': [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'groups_list': [vd.groups_not_empty],\n 'caveats' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'dataset_source' : [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'dataset_date' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'methodology' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n })\n\n return schema\n\n\n def create_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).create_package_schema()\n 
schema = self._modify_package_schema(schema)\n return schema\n\n def update_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).update_package_schema()\n schema = self._modify_package_schema(schema)\n return schema\n\n def show_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()\n\n schema.update({\n 'package_creator': [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'caveats' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_source' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_date' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'methodology' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n })\n return schema\n \n \n def get_helpers(self):\n return {'list_of_all_groups': list_of_all_groups}\n \n def get_actions(self):\n return {'package_update': update.package_update}\n\n\n", "path": "ckanext-metadata_fields/ckanext/metadata_fields/plugin.py"}]}
1,590
261
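The golden diff above hinges on CKAN's not_empty validator; a toy version in plain Python (the validate helper and schema shape are simplifications invented here, not ckan.lib.navl) shows the intended effect of requiring the notes/description field.

def not_empty(value):
    if value is None or value == '':
        raise ValueError("Missing value")
    return value

def validate(data, schema):
    # Run each field's validator chain, raising on the first failure.
    out = {}
    for field, validators in schema.items():
        value = data.get(field)
        for v in validators:
            value = v(value)
        out[field] = value
    return out

schema = {'notes': [not_empty]}
print(validate({'notes': 'A dataset description'}, schema))
try:
    validate({'notes': ''}, schema)
except ValueError as e:
    print('rejected:', e)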
gh_patches_debug_14000
rasdani/github-patches
git_diff
ivy-llc__ivy-22412
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> scan </issue> <code> [start of ivy/functional/frontends/jax/lax/control_flow_operators.py] 1 # global 2 import ivy 3 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back 4 5 6 @to_ivy_arrays_and_back 7 def cond(pred, true_fun, false_fun, *operands, operand=None, linear=None): 8 if operand is not None: 9 if operands: 10 raise ivy.utils.exceptions.IvyException( 11 "if `operand` is passed, positional `operands` should not be passed" 12 ) 13 operands = (operand,) 14 15 if pred: 16 return true_fun(*operands) 17 return false_fun(*operands) 18 19 20 @to_ivy_arrays_and_back 21 def map(f, xs): 22 return ivy.stack([f(x) for x in xs]) 23 24 25 @to_ivy_arrays_and_back 26 def switch(index, branches, *operands, operand=None): 27 if operand is not None: 28 if operands: 29 raise ivy.utils.exceptions.IvyException( 30 "if `operand` is passed, positional `operands` should not be passed" 31 ) 32 operands = (operand,) 33 34 index = max(index, 0) 35 index = min(len(branches) - 1, index) 36 return branches[index](*operands) 37 38 39 @to_ivy_arrays_and_back 40 def fori_loop(lower, upper, body_fun, init_val): 41 if not (callable(body_fun)): 42 raise ivy.exceptions.IvyException( 43 "jax.lax.fori_loop: Argument body_fun should be callable." 44 ) 45 val = init_val 46 for i in range(lower, upper): 47 val = body_fun(i, val) 48 return val 49 50 51 @to_ivy_arrays_and_back 52 def while_loop(cond_fun, body_fun, init_val): 53 if not (callable(body_fun) and callable(cond_fun)): 54 raise ivy.exceptions.IvyException( 55 "jax.lax.while_loop: Arguments body_fun and cond_fun should be callable." 56 ) 57 val = init_val 58 while cond_fun(val): 59 val = body_fun(val) 60 return val 61 [end of ivy/functional/frontends/jax/lax/control_flow_operators.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/jax/lax/control_flow_operators.py b/ivy/functional/frontends/jax/lax/control_flow_operators.py
--- a/ivy/functional/frontends/jax/lax/control_flow_operators.py
+++ b/ivy/functional/frontends/jax/lax/control_flow_operators.py
@@ -58,3 +58,29 @@
     while cond_fun(val):
         val = body_fun(val)
     return val
+
+
+@to_ivy_arrays_and_back
+def scan(f, init, xs, length=None, reverse=False, unroll=1):
+    if not (callable(f)):
+        raise ivy.exceptions.IvyException(
+            "jax.lax.scan: Argument f should be callable."
+        )
+    if xs is None and length is None:
+        raise ivy.exceptions.IvyException(
+            "jax.lax.scan: Either xs or length must be provided."
+        )
+
+    if length is not None and (not isinstance(length, int) or length < 0):
+        raise ivy.exceptions.IvyException(
+            "jax.lax.scan: length must be a non-negative integer."
+        )
+    if xs is None:
+        xs = [None] * length
+
+    carry = init
+    ys = []
+    for x in xs:
+        carry, y = f(carry, x)
+        ys.append(y)
+    return carry, ivy.stack(ys)
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/lax/control_flow_operators.py b/ivy/functional/frontends/jax/lax/control_flow_operators.py\n--- a/ivy/functional/frontends/jax/lax/control_flow_operators.py\n+++ b/ivy/functional/frontends/jax/lax/control_flow_operators.py\n@@ -58,3 +58,29 @@\n while cond_fun(val):\n val = body_fun(val)\n return val\n+\n+\n+@to_ivy_arrays_and_back\n+def scan(f, init, xs, length=None, reverse=False, unroll=1):\n+ if not (callable(f)):\n+ raise ivy.exceptions.IvyException(\n+ \"jax.lax.scan: Argument f should be callable.\"\n+ )\n+ if xs is None and length is None:\n+ raise ivy.exceptions.IvyException(\n+ \"jax.lax.scan: Either xs or length must be provided.\"\n+ )\n+\n+ if length is not None and (not isinstance(length, int) or length < 0):\n+ raise ivy.exceptions.IvyException(\n+ \"jax.lax.scan: length must be a non-negative integer.\"\n+ )\n+ if xs is None:\n+ xs = [None] * length\n+\n+ carry = init\n+ ys = []\n+ for x in xs:\n+ carry, y = f(carry, x)\n+ ys.append(y)\n+ return carry, ivy.stack(ys)\n", "issue": "scan\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\ndef cond(pred, true_fun, false_fun, *operands, operand=None, linear=None):\n if operand is not None:\n if operands:\n raise ivy.utils.exceptions.IvyException(\n \"if `operand` is passed, positional `operands` should not be passed\"\n )\n operands = (operand,)\n\n if pred:\n return true_fun(*operands)\n return false_fun(*operands)\n\n\n@to_ivy_arrays_and_back\ndef map(f, xs):\n return ivy.stack([f(x) for x in xs])\n\n\n@to_ivy_arrays_and_back\ndef switch(index, branches, *operands, operand=None):\n if operand is not None:\n if operands:\n raise ivy.utils.exceptions.IvyException(\n \"if `operand` is passed, positional `operands` should not be passed\"\n )\n operands = (operand,)\n\n index = max(index, 0)\n index = min(len(branches) - 1, index)\n return branches[index](*operands)\n\n\n@to_ivy_arrays_and_back\ndef fori_loop(lower, upper, body_fun, init_val):\n if not (callable(body_fun)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.fori_loop: Argument body_fun should be callable.\"\n )\n val = init_val\n for i in range(lower, upper):\n val = body_fun(i, val)\n return val\n\n\n@to_ivy_arrays_and_back\ndef while_loop(cond_fun, body_fun, init_val):\n if not (callable(body_fun) and callable(cond_fun)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.while_loop: Arguments body_fun and cond_fun should be callable.\"\n )\n val = init_val\n while cond_fun(val):\n val = body_fun(val)\n return val\n", "path": "ivy/functional/frontends/jax/lax/control_flow_operators.py"}]}
1,097
325
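The carry/stack semantics that the new scan frontend implements, reduced to plain Python (lists stand in for ivy arrays and ivy.stack):

def scan(f, init, xs):
    # Thread a carry through f, collecting one output per step.
    carry, ys = init, []
    for x in xs:
        carry, y = f(carry, x)
        ys.append(y)
    return carry, ys

# Running sum: the carry accumulates, y echoes the running total each step.
final, outputs = scan(lambda c, x: (c + x, c + x), 0, [1, 2, 3, 4])
assert final == 10 and outputs == [1, 3, 6, 10]
print(final, outputs)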
gh_patches_debug_27218
rasdani/github-patches
git_diff
fedora-infra__bodhi-2906
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Drop bodhi.server.services.zz_redirects This module exists to redirect legacy Bodhi 1 URLs to the Bodhi 2 counterparts, but I don't think we need it anymore. Bodhi 2 is not backwards compatible with Bodhi 1, and Bodhi 4 will also be further incompatible. </issue> <code> [start of bodhi/server/services/zz_redirects.py] 1 # Copyright © 2015-2017 Red Hat, Inc. 2 # 3 # This file is part of Bodhi. 4 # 5 # This program is free software; you can redistribute it and/or 6 # modify it under the terms of the GNU General Public License 7 # as published by the Free Software Foundation; either version 2 8 # of the License, or (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with this program; if not, write to the Free Software 17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 18 """ 19 Handle general redirect stuff. 20 21 This module name gets a 'zz_' tacked on the front so that it comes last. 22 We need to catch /updates/{id}/request and /updates/{id}/edit first and those 23 get defined in the other service modules. 24 """ 25 26 from cornice import Service 27 from pyramid.httpexceptions import HTTPFound 28 29 import bodhi.server.security 30 31 32 zz_bodhi1_update_redirect = Service( 33 name='bodhi1_update_redirect', path='/updates/{id}/{title}', 34 description='Redirect to old updates/ALIAS/TITLE urls', 35 cors_origins=bodhi.server.security.cors_origins_rw) 36 37 38 @zz_bodhi1_update_redirect.get() 39 def zz_get_bodhi1_update_redirect(request): 40 """ 41 Redirect users from the Bodhi 1 update URL to the new path. 42 43 Args: 44 request (pyramid.request): The current web request. 45 Returns: 46 pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL 47 heirarchy. 48 """ 49 return HTTPFound("/updates/{0}".format(request.matchdict['id'])) 50 [end of bodhi/server/services/zz_redirects.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bodhi/server/services/zz_redirects.py b/bodhi/server/services/zz_redirects.py deleted file mode 100644 --- a/bodhi/server/services/zz_redirects.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright © 2015-2017 Red Hat, Inc. -# -# This file is part of Bodhi. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -""" -Handle general redirect stuff. - -This module name gets a 'zz_' tacked on the front so that it comes last. -We need to catch /updates/{id}/request and /updates/{id}/edit first and those -get defined in the other service modules. -""" - -from cornice import Service -from pyramid.httpexceptions import HTTPFound - -import bodhi.server.security - - -zz_bodhi1_update_redirect = Service( - name='bodhi1_update_redirect', path='/updates/{id}/{title}', - description='Redirect to old updates/ALIAS/TITLE urls', - cors_origins=bodhi.server.security.cors_origins_rw) - - -@zz_bodhi1_update_redirect.get() -def zz_get_bodhi1_update_redirect(request): - """ - Redirect users from the Bodhi 1 update URL to the new path. - - Args: - request (pyramid.request): The current web request. - Returns: - pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL - heirarchy. - """ - return HTTPFound("/updates/{0}".format(request.matchdict['id']))
{"golden_diff": "diff --git a/bodhi/server/services/zz_redirects.py b/bodhi/server/services/zz_redirects.py\ndeleted file mode 100644\n--- a/bodhi/server/services/zz_redirects.py\n+++ /dev/null\n@@ -1,49 +0,0 @@\n-# Copyright \u00a9 2015-2017 Red Hat, Inc.\n-#\n-# This file is part of Bodhi.\n-#\n-# This program is free software; you can redistribute it and/or\n-# modify it under the terms of the GNU General Public License\n-# as published by the Free Software Foundation; either version 2\n-# of the License, or (at your option) any later version.\n-#\n-# This program is distributed in the hope that it will be useful,\n-# but WITHOUT ANY WARRANTY; without even the implied warranty of\n-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n-# GNU General Public License for more details.\n-#\n-# You should have received a copy of the GNU General Public License\n-# along with this program; if not, write to the Free Software\n-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n-\"\"\"\n-Handle general redirect stuff.\n-\n-This module name gets a 'zz_' tacked on the front so that it comes last.\n-We need to catch /updates/{id}/request and /updates/{id}/edit first and those\n-get defined in the other service modules.\n-\"\"\"\n-\n-from cornice import Service\n-from pyramid.httpexceptions import HTTPFound\n-\n-import bodhi.server.security\n-\n-\n-zz_bodhi1_update_redirect = Service(\n- name='bodhi1_update_redirect', path='/updates/{id}/{title}',\n- description='Redirect to old updates/ALIAS/TITLE urls',\n- cors_origins=bodhi.server.security.cors_origins_rw)\n-\n-\n-@zz_bodhi1_update_redirect.get()\n-def zz_get_bodhi1_update_redirect(request):\n- \"\"\"\n- Redirect users from the Bodhi 1 update URL to the new path.\n-\n- Args:\n- request (pyramid.request): The current web request.\n- Returns:\n- pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL\n- heirarchy.\n- \"\"\"\n- return HTTPFound(\"/updates/{0}\".format(request.matchdict['id']))\n", "issue": "Drop bodhi.server.services.zz_redirects\nThis module exists to redirect legacy Bodhi 1 URLs to the Bodhi 2 counterparts, but I don't think we need it anymore. Bodhi 2 is not backwards compatible with Bodhi 1, and Bodhi 4 will also be further incompatible.\n", "before_files": [{"content": "# Copyright \u00a9 2015-2017 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nHandle general redirect stuff.\n\nThis module name gets a 'zz_' tacked on the front so that it comes last.\nWe need to catch /updates/{id}/request and /updates/{id}/edit first and those\nget defined in the other service modules.\n\"\"\"\n\nfrom cornice import Service\nfrom pyramid.httpexceptions import HTTPFound\n\nimport bodhi.server.security\n\n\nzz_bodhi1_update_redirect = Service(\n name='bodhi1_update_redirect', path='/updates/{id}/{title}',\n description='Redirect to old updates/ALIAS/TITLE urls',\n cors_origins=bodhi.server.security.cors_origins_rw)\n\n\n@zz_bodhi1_update_redirect.get()\ndef zz_get_bodhi1_update_redirect(request):\n \"\"\"\n Redirect users from the Bodhi 1 update URL to the new path.\n\n Args:\n request (pyramid.request): The current web request.\n Returns:\n pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL\n heirarchy.\n \"\"\"\n return HTTPFound(\"/updates/{0}\".format(request.matchdict['id']))\n", "path": "bodhi/server/services/zz_redirects.py"}]}
1,134
539
gh_patches_debug_30030
rasdani/github-patches
git_diff
OCA__server-tools-316
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [8.0][dead_mans_switch_client] Module crashes runbots I'm seeing more and more runbots with :x: because of this module. [This seems the offending line](https://github.com/OCA/server-tools/blob/8.0/dead_mans_switch_client/models/dead_mans_switch_client.py#L54). Any clue on how to fix it? Example runbot: https://runbot.odoo-community.org/runbot/build/3137787 CC @hbrunn. </issue> <code> [start of dead_mans_switch_client/__openerp__.py] 1 # -*- coding: utf-8 -*- 2 # © 2015 Therp BV <http://therp.nl> 3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 4 { 5 "name": "Dead man's switch (client)", 6 "version": "8.0.1.0.0", 7 "author": "Therp BV,Odoo Community Association (OCA)", 8 "license": "AGPL-3", 9 "category": "Monitoring", 10 "summary": "Be notified when customers' odoo instances go down", 11 "depends": [ 12 'base', 13 ], 14 "data": [ 15 "data/ir_actions.xml", 16 "data/ir_cron.xml", 17 ], 18 } 19 [end of dead_mans_switch_client/__openerp__.py] [start of dead_mans_switch_client/models/dead_mans_switch_client.py] 1 # -*- coding: utf-8 -*- 2 # © 2015 Therp BV <http://therp.nl> 3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 4 import json 5 import logging 6 import os 7 try: 8 import psutil 9 except ImportError: 10 psutil = None 11 import urllib2 12 from openerp import api, models 13 14 15 class DeadMansSwitchClient(models.AbstractModel): 16 _name = 'dead.mans.switch.client' 17 _register = True 18 19 @api.model 20 def _get_data(self): 21 ram = 0 22 cpu = 0 23 if psutil: 24 process = psutil.Process(os.getpid()) 25 # psutil changed its api through versions 26 if process.parent: 27 if hasattr(process.parent, '__call__'): 28 process = process.parent() 29 else: 30 process = process.parent 31 if hasattr(process, 'memory_percent'): 32 ram = process.memory_percent() 33 if hasattr(process, 'cpu_percent'): 34 cpu = process.cpu_percent() 35 user_count = 0 36 if 'im_chat.presence' in self.env.registry: 37 user_count = len(self.env['im_chat.presence'].search([ 38 ('status', '!=', 'offline'), 39 ])) 40 return { 41 'database_uuid': self.env['ir.config_parameter'].get_param( 42 'database.uuid'), 43 'cpu': cpu, 44 'ram': ram, 45 'user_count': user_count, 46 } 47 48 @api.model 49 def alive(self): 50 url = self.env['ir.config_parameter'].get_param( 51 'dead_mans_switch_client.url') 52 logger = logging.getLogger(__name__) 53 if not url: 54 logger.error('No server configured!') 55 return 56 data = self._get_data() 57 logger.debug('sending %s', data) 58 urllib2.urlopen( 59 urllib2.Request( 60 url, 61 json.dumps({ 62 'jsonrpc': '2.0', 63 'method': 'call', 64 'params': data, 65 }), 66 { 67 'Content-Type': 'application/json', 68 })) 69 [end of dead_mans_switch_client/models/dead_mans_switch_client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dead_mans_switch_client/__openerp__.py b/dead_mans_switch_client/__openerp__.py --- a/dead_mans_switch_client/__openerp__.py +++ b/dead_mans_switch_client/__openerp__.py @@ -3,7 +3,7 @@ # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). { "name": "Dead man's switch (client)", - "version": "8.0.1.0.0", + "version": "8.0.1.0.1", "author": "Therp BV,Odoo Community Association (OCA)", "license": "AGPL-3", "category": "Monitoring", @@ -15,4 +15,7 @@ "data/ir_actions.xml", "data/ir_cron.xml", ], + "demo": [ + "demo/dead_mans_switch_client_demo.yml", + ], } diff --git a/dead_mans_switch_client/models/dead_mans_switch_client.py b/dead_mans_switch_client/models/dead_mans_switch_client.py --- a/dead_mans_switch_client/models/dead_mans_switch_client.py +++ b/dead_mans_switch_client/models/dead_mans_switch_client.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # © 2015 Therp BV <http://therp.nl> +# © 2015 Grupo ESOC Ingeniería de Servicios, S.L.U. - Jairo Llopis # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). import json import logging @@ -66,3 +67,19 @@ { 'Content-Type': 'application/json', })) + + @api.model + def _install_default_url(self): + """Set up a default URL.""" + conf = self.env["ir.config_parameter"] + name = "dead_mans_switch_client.url" + param = conf.get_param(name) + + if not param: + url = "{}/dead_mans_switch/alive".format( + conf.get_param( + "report.url", + conf.get_param( + "web.base.url", + "http://localhost"))) + conf.set_param(name, url)
{"golden_diff": "diff --git a/dead_mans_switch_client/__openerp__.py b/dead_mans_switch_client/__openerp__.py\n--- a/dead_mans_switch_client/__openerp__.py\n+++ b/dead_mans_switch_client/__openerp__.py\n@@ -3,7 +3,7 @@\n # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n {\n \"name\": \"Dead man's switch (client)\",\n- \"version\": \"8.0.1.0.0\",\n+ \"version\": \"8.0.1.0.1\",\n \"author\": \"Therp BV,Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Monitoring\",\n@@ -15,4 +15,7 @@\n \"data/ir_actions.xml\",\n \"data/ir_cron.xml\",\n ],\n+ \"demo\": [\n+ \"demo/dead_mans_switch_client_demo.yml\",\n+ ],\n }\ndiff --git a/dead_mans_switch_client/models/dead_mans_switch_client.py b/dead_mans_switch_client/models/dead_mans_switch_client.py\n--- a/dead_mans_switch_client/models/dead_mans_switch_client.py\n+++ b/dead_mans_switch_client/models/dead_mans_switch_client.py\n@@ -1,5 +1,6 @@\n # -*- coding: utf-8 -*-\n # \u00a9 2015 Therp BV <http://therp.nl>\n+# \u00a9 2015 Grupo ESOC Ingenier\u00eda de Servicios, S.L.U. - Jairo Llopis\n # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n import json\n import logging\n@@ -66,3 +67,19 @@\n {\n 'Content-Type': 'application/json',\n }))\n+\n+ @api.model\n+ def _install_default_url(self):\n+ \"\"\"Set up a default URL.\"\"\"\n+ conf = self.env[\"ir.config_parameter\"]\n+ name = \"dead_mans_switch_client.url\"\n+ param = conf.get_param(name)\n+\n+ if not param:\n+ url = \"{}/dead_mans_switch/alive\".format(\n+ conf.get_param(\n+ \"report.url\",\n+ conf.get_param(\n+ \"web.base.url\",\n+ \"http://localhost\")))\n+ conf.set_param(name, url)\n", "issue": "[8.0][dead_mans_switch_client] Module crashes runbots\nI'm seeing more and more runbots with :x: because of this module. [This seems the offending line](https://github.com/OCA/server-tools/blob/8.0/dead_mans_switch_client/models/dead_mans_switch_client.py#L54). 
Any clue on how to fix it?\n\nExample runbot: https://runbot.odoo-community.org/runbot/build/3137787\n\nCC @hbrunn.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2015 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n{\n \"name\": \"Dead man's switch (client)\",\n \"version\": \"8.0.1.0.0\",\n \"author\": \"Therp BV,Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Monitoring\",\n \"summary\": \"Be notified when customers' odoo instances go down\",\n \"depends\": [\n 'base',\n ],\n \"data\": [\n \"data/ir_actions.xml\",\n \"data/ir_cron.xml\",\n ],\n}\n", "path": "dead_mans_switch_client/__openerp__.py"}, {"content": "# -*- coding: utf-8 -*-\n# \u00a9 2015 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\nimport json\nimport logging\nimport os\ntry:\n import psutil\nexcept ImportError:\n psutil = None\nimport urllib2\nfrom openerp import api, models\n\n\nclass DeadMansSwitchClient(models.AbstractModel):\n _name = 'dead.mans.switch.client'\n _register = True\n\n @api.model\n def _get_data(self):\n ram = 0\n cpu = 0\n if psutil:\n process = psutil.Process(os.getpid())\n # psutil changed its api through versions\n if process.parent:\n if hasattr(process.parent, '__call__'):\n process = process.parent()\n else:\n process = process.parent\n if hasattr(process, 'memory_percent'):\n ram = process.memory_percent()\n if hasattr(process, 'cpu_percent'):\n cpu = process.cpu_percent()\n user_count = 0\n if 'im_chat.presence' in self.env.registry:\n user_count = len(self.env['im_chat.presence'].search([\n ('status', '!=', 'offline'),\n ]))\n return {\n 'database_uuid': self.env['ir.config_parameter'].get_param(\n 'database.uuid'),\n 'cpu': cpu,\n 'ram': ram,\n 'user_count': user_count,\n }\n\n @api.model\n def alive(self):\n url = self.env['ir.config_parameter'].get_param(\n 'dead_mans_switch_client.url')\n logger = logging.getLogger(__name__)\n if not url:\n logger.error('No server configured!')\n return\n data = self._get_data()\n logger.debug('sending %s', data)\n urllib2.urlopen(\n urllib2.Request(\n url,\n json.dumps({\n 'jsonrpc': '2.0',\n 'method': 'call',\n 'params': data,\n }),\n {\n 'Content-Type': 'application/json',\n }))\n", "path": "dead_mans_switch_client/models/dead_mans_switch_client.py"}]}
1,456
529
gh_patches_debug_20868
rasdani/github-patches
git_diff
pytorch__vision-2654
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Docs of some functions written are missing ## 📚 Documentation A simple issue, Docs are missing on the torchvision website for following functions written in torchvision. I guess we should add these docs on the webpage, as end-users will benefit from using these functions. Most people will not look at source code to find these functions but refer to docs. Missing docs that I found - [x] Image reading functions [here](https://github.com/pytorch/vision/blob/master/torchvision/io/image.py) We have docs for video io functions, so maybe image should too be there. - [x] Torchvision ops from [boxes.py](https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py). Docs are added for NMS. but we are missing IoU, Box area and some classes. Partly fixed in #2642 Please do let me know if some other docs or missing as well. Also, I can raise a PR to fix these, please do let me know if it is needed! </issue> <code> [start of torchvision/io/__init__.py] 1 from ._video_opt import ( 2 Timebase, 3 VideoMetaData, 4 _HAS_VIDEO_OPT, 5 _probe_video_from_file, 6 _probe_video_from_memory, 7 _read_video_from_file, 8 _read_video_from_memory, 9 _read_video_timestamps_from_file, 10 _read_video_timestamps_from_memory, 11 ) 12 from .video import ( 13 read_video, 14 read_video_timestamps, 15 write_video, 16 ) 17 18 19 __all__ = [ 20 "write_video", 21 "read_video", 22 "read_video_timestamps", 23 "_read_video_from_file", 24 "_read_video_timestamps_from_file", 25 "_probe_video_from_file", 26 "_read_video_from_memory", 27 "_read_video_timestamps_from_memory", 28 "_probe_video_from_memory", 29 "_HAS_VIDEO_OPT", 30 "_read_video_clip_from_memory", 31 "_read_video_meta_data", 32 "VideoMetaData", 33 "Timebase" 34 ] 35 [end of torchvision/io/__init__.py] [start of torchvision/ops/__init__.py] 1 from .boxes import nms, box_iou 2 from .new_empty_tensor import _new_empty_tensor 3 from .deform_conv import deform_conv2d, DeformConv2d 4 from .roi_align import roi_align, RoIAlign 5 from .roi_pool import roi_pool, RoIPool 6 from .ps_roi_align import ps_roi_align, PSRoIAlign 7 from .ps_roi_pool import ps_roi_pool, PSRoIPool 8 from .poolers import MultiScaleRoIAlign 9 from .feature_pyramid_network import FeaturePyramidNetwork 10 11 from ._register_onnx_ops import _register_custom_op 12 13 _register_custom_op() 14 15 16 __all__ = [ 17 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool', 18 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool', 19 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork' 20 ] 21 [end of torchvision/ops/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/io/__init__.py b/torchvision/io/__init__.py --- a/torchvision/io/__init__.py +++ b/torchvision/io/__init__.py @@ -15,7 +15,6 @@ write_video, ) - __all__ = [ "write_video", "read_video", diff --git a/torchvision/ops/__init__.py b/torchvision/ops/__init__.py --- a/torchvision/ops/__init__.py +++ b/torchvision/ops/__init__.py @@ -1,4 +1,4 @@ -from .boxes import nms, box_iou +from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou from .new_empty_tensor import _new_empty_tensor from .deform_conv import deform_conv2d, DeformConv2d from .roi_align import roi_align, RoIAlign @@ -14,7 +14,8 @@ __all__ = [ - 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool', + 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes', + 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool', 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool', 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork' ]
{"golden_diff": "diff --git a/torchvision/io/__init__.py b/torchvision/io/__init__.py\n--- a/torchvision/io/__init__.py\n+++ b/torchvision/io/__init__.py\n@@ -15,7 +15,6 @@\n write_video,\n )\n \n-\n __all__ = [\n \"write_video\",\n \"read_video\",\ndiff --git a/torchvision/ops/__init__.py b/torchvision/ops/__init__.py\n--- a/torchvision/ops/__init__.py\n+++ b/torchvision/ops/__init__.py\n@@ -1,4 +1,4 @@\n-from .boxes import nms, box_iou\n+from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou\n from .new_empty_tensor import _new_empty_tensor\n from .deform_conv import deform_conv2d, DeformConv2d\n from .roi_align import roi_align, RoIAlign\n@@ -14,7 +14,8 @@\n \n \n __all__ = [\n- 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool',\n+ 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes',\n+ 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool',\n 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',\n 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'\n ]\n", "issue": "Docs of some functions written are missing\n## \ud83d\udcda Documentation\r\n\r\nA simple issue, Docs are missing on the torchvision website for following functions written in torchvision.\r\n\r\nI guess we should add these docs on the webpage, as end-users will benefit from using these functions. \r\n\r\nMost people will not look at source code to find these functions but refer to docs.\r\n\r\nMissing docs that I found\r\n\r\n- [x] Image reading functions [here](https://github.com/pytorch/vision/blob/master/torchvision/io/image.py)\r\nWe have docs for video io functions, so maybe image should too be there.\r\n\r\n- [x] Torchvision ops from [boxes.py](https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py). Docs are added for NMS. but we are missing IoU, Box area and some classes. 
Partly fixed in #2642 \r\n\r\nPlease do let me know if some other docs or missing as well.\r\n\r\nAlso, I can raise a PR to fix these, please do let me know if it is needed!\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from ._video_opt import (\n Timebase,\n VideoMetaData,\n _HAS_VIDEO_OPT,\n _probe_video_from_file,\n _probe_video_from_memory,\n _read_video_from_file,\n _read_video_from_memory,\n _read_video_timestamps_from_file,\n _read_video_timestamps_from_memory,\n)\nfrom .video import (\n read_video,\n read_video_timestamps,\n write_video,\n)\n\n\n__all__ = [\n \"write_video\",\n \"read_video\",\n \"read_video_timestamps\",\n \"_read_video_from_file\",\n \"_read_video_timestamps_from_file\",\n \"_probe_video_from_file\",\n \"_read_video_from_memory\",\n \"_read_video_timestamps_from_memory\",\n \"_probe_video_from_memory\",\n \"_HAS_VIDEO_OPT\",\n \"_read_video_clip_from_memory\",\n \"_read_video_meta_data\",\n \"VideoMetaData\",\n \"Timebase\"\n]\n", "path": "torchvision/io/__init__.py"}, {"content": "from .boxes import nms, box_iou\nfrom .new_empty_tensor import _new_empty_tensor\nfrom .deform_conv import deform_conv2d, DeformConv2d\nfrom .roi_align import roi_align, RoIAlign\nfrom .roi_pool import roi_pool, RoIPool\nfrom .ps_roi_align import ps_roi_align, PSRoIAlign\nfrom .ps_roi_pool import ps_roi_pool, PSRoIPool\nfrom .poolers import MultiScaleRoIAlign\nfrom .feature_pyramid_network import FeaturePyramidNetwork\n\nfrom ._register_onnx_ops import _register_custom_op\n\n_register_custom_op()\n\n\n__all__ = [\n 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool',\n 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',\n 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'\n]\n", "path": "torchvision/ops/__init__.py"}]}
1,283
377
gh_patches_debug_59180
rasdani/github-patches
git_diff
TheAlgorithms__Python-295
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ProjectEuler -- Problem 1 -- solv2.py -- Error For the Input ```1000``` I get ```233366.4```. The correct answer should be ```233168``` See [file](https://github.com/TheAlgorithms/Python/blob/master/Project%20Euler/Problem%2001/sol2.py) </issue> <code> [start of Project Euler/Problem 01/sol2.py] 1 ''' 2 Problem Statement: 3 If we list all the natural numbers below 10 that are multiples of 3 or 5, 4 we get 3,5,6 and 9. The sum of these multiples is 23. 5 Find the sum of all the multiples of 3 or 5 below N. 6 ''' 7 from __future__ import print_function 8 try: 9 raw_input # Python 2 10 except NameError: 11 raw_input = input # Python 3 12 n = int(raw_input().strip()) 13 sum = 0 14 terms = (n-1)/3 15 sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P. 16 terms = (n-1)/5 17 sum+= ((terms)*(10+(terms-1)*5))/2 18 terms = (n-1)/15 19 sum-= ((terms)*(30+(terms-1)*15))/2 20 print(sum) 21 [end of Project Euler/Problem 01/sol2.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Project Euler/Problem 01/sol2.py b/Project Euler/Problem 01/sol2.py --- a/Project Euler/Problem 01/sol2.py +++ b/Project Euler/Problem 01/sol2.py @@ -11,10 +11,10 @@ raw_input = input # Python 3 n = int(raw_input().strip()) sum = 0 -terms = (n-1)/3 -sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P. -terms = (n-1)/5 -sum+= ((terms)*(10+(terms-1)*5))/2 -terms = (n-1)/15 -sum-= ((terms)*(30+(terms-1)*15))/2 +terms = (n-1)//3 +sum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P. +terms = (n-1)//5 +sum+= ((terms)*(10+(terms-1)*5))//2 +terms = (n-1)//15 +sum-= ((terms)*(30+(terms-1)*15))//2 print(sum)
{"golden_diff": "diff --git a/Project Euler/Problem 01/sol2.py b/Project Euler/Problem 01/sol2.py\n--- a/Project Euler/Problem 01/sol2.py\t\n+++ b/Project Euler/Problem 01/sol2.py\t\n@@ -11,10 +11,10 @@\n raw_input = input # Python 3\n n = int(raw_input().strip())\n sum = 0\n-terms = (n-1)/3\n-sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.\n-terms = (n-1)/5\n-sum+= ((terms)*(10+(terms-1)*5))/2\n-terms = (n-1)/15\n-sum-= ((terms)*(30+(terms-1)*15))/2\n+terms = (n-1)//3\n+sum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P.\n+terms = (n-1)//5\n+sum+= ((terms)*(10+(terms-1)*5))//2\n+terms = (n-1)//15\n+sum-= ((terms)*(30+(terms-1)*15))//2\n print(sum)\n", "issue": "ProjectEuler -- Problem 1 -- solv2.py -- Error\nFor the Input ```1000``` I get ```233366.4```. The correct answer should be ```233168``` \r\nSee [file](https://github.com/TheAlgorithms/Python/blob/master/Project%20Euler/Problem%2001/sol2.py)\n", "before_files": [{"content": "'''\nProblem Statement:\nIf we list all the natural numbers below 10 that are multiples of 3 or 5,\nwe get 3,5,6 and 9. The sum of these multiples is 23.\nFind the sum of all the multiples of 3 or 5 below N.\n'''\nfrom __future__ import print_function\ntry:\n raw_input # Python 2\nexcept NameError:\n raw_input = input # Python 3\nn = int(raw_input().strip())\nsum = 0\nterms = (n-1)/3\nsum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.\nterms = (n-1)/5\nsum+= ((terms)*(10+(terms-1)*5))/2\nterms = (n-1)/15\nsum-= ((terms)*(30+(terms-1)*15))/2\nprint(sum)\n", "path": "Project Euler/Problem 01/sol2.py"}]}
862
278
gh_patches_debug_18615
rasdani/github-patches
git_diff
vyperlang__vyper-555
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Log topic and data allow byte array longer than 32 bytes. ### What's your issue about? When packing data/topic for log, if the the actual argument is a byte array variable, there is no check for the actual length of the variable. e.g., ``` MyLog: __log__({arg1: indexed(bytes<=2000)}) @public def foo(): a: bytes<=100 log.MyLog(a) ``` This program should be rejected by is not. ### How can it be fixed? Add check in event_sig, pack_arg_by_32 and pack_logging_topic. #### Cute Animal Picture ![image](https://user-images.githubusercontent.com/5641590/33631206-753fab08-d9cf-11e7-89b4-f2d71f844453.png) </issue> <code> [start of viper/signatures/event_signature.py] 1 from viper.types import get_size_of_type, canonicalize_type, parse_type, \ 2 ByteArrayType 3 from viper.utils import sha3, is_varname_valid, bytes_to_int 4 import ast 5 from viper.function_signature import VariableRecord 6 from viper.exceptions import InvalidTypeException, VariableDeclarationException 7 8 9 # Event signature object 10 class EventSignature(): 11 def __init__(self, name, args, indexed_list, event_id, sig): 12 self.name = name 13 self.args = args 14 self.indexed_list = indexed_list 15 self.sig = sig 16 self.event_id = event_id 17 18 # Get a signature from an event declaration 19 @classmethod 20 def from_declaration(cls, code): 21 name = code.target.id 22 pos = 0 23 # Determine the arguments, expects something of the form def foo(arg1: num, arg2: num ... 24 args = [] 25 indexed_list = [] 26 topics_count = 1 27 if code.annotation.args: 28 keys = code.annotation.args[0].keys 29 values = code.annotation.args[0].values 30 for i in range(len(keys)): 31 typ = values[i] 32 arg = keys[i].id 33 if isinstance(typ, ast.Call): 34 # Check to see if argument is a topic 35 if typ.func.id == 'indexed': 36 typ = values[i].args[0] 37 indexed_list.append(True) 38 topics_count += 1 39 else: 40 raise VariableDeclarationException("Only indexed keyword is allowed", arg) 41 else: 42 if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32: 43 raise VariableDeclarationException("Can only log a maximum of 32 bytes at a time.") 44 indexed_list.append(False) 45 if topics_count > 4: 46 raise VariableDeclarationException("Maximum of 3 topics {} given".format(topics_count - 1), arg) 47 if not isinstance(arg, str): 48 raise VariableDeclarationException("Argument name invalid", arg) 49 if not typ: 50 raise InvalidTypeException("Argument must have type", arg) 51 if not is_varname_valid(arg): 52 raise VariableDeclarationException("Argument name invalid or reserved: " + arg, arg) 53 if arg in (x.name for x in args): 54 raise VariableDeclarationException("Duplicate function argument name: " + arg, arg) 55 parsed_type = parse_type(typ, None) 56 args.append(VariableRecord(arg, pos, parsed_type, False)) 57 if isinstance(parsed_type, ByteArrayType): 58 pos += 32 59 else: 60 pos += get_size_of_type(parsed_type) * 32 61 sig = name + '(' + ','.join([canonicalize_type(arg.typ, True) for arg in args]) + ')' 62 event_id = bytes_to_int(sha3(bytes(sig, 'utf-8'))) 63 return cls(name, args, indexed_list, event_id, sig) 64 65 def to_abi_dict(self): 66 return { 67 "name": self.name, 68 "inputs": [{"type": canonicalize_type(arg.typ, True), "name": arg.name, "indexed": self.indexed_list[pos]} for pos, arg in enumerate(self.args)] if self.args else [], 69 "anonymous": False, 70 "type": "event" 71 } 72 [end of viper/signatures/event_signature.py] 
</code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/viper/signatures/event_signature.py b/viper/signatures/event_signature.py --- a/viper/signatures/event_signature.py +++ b/viper/signatures/event_signature.py @@ -39,9 +39,9 @@ else: raise VariableDeclarationException("Only indexed keyword is allowed", arg) else: - if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32: - raise VariableDeclarationException("Can only log a maximum of 32 bytes at a time.") indexed_list.append(False) + if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32: + raise VariableDeclarationException("Can only log a maximum of 32 bytes at a time.") if topics_count > 4: raise VariableDeclarationException("Maximum of 3 topics {} given".format(topics_count - 1), arg) if not isinstance(arg, str):
{"golden_diff": "diff --git a/viper/signatures/event_signature.py b/viper/signatures/event_signature.py\n--- a/viper/signatures/event_signature.py\n+++ b/viper/signatures/event_signature.py\n@@ -39,9 +39,9 @@\n else:\n raise VariableDeclarationException(\"Only indexed keyword is allowed\", arg)\n else:\n- if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n- raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n indexed_list.append(False)\n+ if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n+ raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n if topics_count > 4:\n raise VariableDeclarationException(\"Maximum of 3 topics {} given\".format(topics_count - 1), arg)\n if not isinstance(arg, str):\n", "issue": "Log topic and data allow byte array longer than 32 bytes.\n### What's your issue about?\r\nWhen packing data/topic for log, if the the actual argument is a byte array variable, there is no check for the actual length of the variable.\r\ne.g.,\r\n```\r\nMyLog: __log__({arg1: indexed(bytes<=2000)})\r\n\r\n@public\r\ndef foo():\r\n a: bytes<=100\r\n log.MyLog(a)\r\n```\r\nThis program should be rejected by is not.\r\n\r\n### How can it be fixed?\r\n\r\nAdd check in event_sig, pack_arg_by_32 and pack_logging_topic.\r\n\r\n#### Cute Animal Picture\r\n![image](https://user-images.githubusercontent.com/5641590/33631206-753fab08-d9cf-11e7-89b4-f2d71f844453.png)\r\n\r\n\n", "before_files": [{"content": "from viper.types import get_size_of_type, canonicalize_type, parse_type, \\\n ByteArrayType\nfrom viper.utils import sha3, is_varname_valid, bytes_to_int\nimport ast\nfrom viper.function_signature import VariableRecord\nfrom viper.exceptions import InvalidTypeException, VariableDeclarationException\n\n\n# Event signature object\nclass EventSignature():\n def __init__(self, name, args, indexed_list, event_id, sig):\n self.name = name\n self.args = args\n self.indexed_list = indexed_list\n self.sig = sig\n self.event_id = event_id\n\n # Get a signature from an event declaration\n @classmethod\n def from_declaration(cls, code):\n name = code.target.id\n pos = 0\n # Determine the arguments, expects something of the form def foo(arg1: num, arg2: num ...\n args = []\n indexed_list = []\n topics_count = 1\n if code.annotation.args:\n keys = code.annotation.args[0].keys\n values = code.annotation.args[0].values\n for i in range(len(keys)):\n typ = values[i]\n arg = keys[i].id\n if isinstance(typ, ast.Call):\n # Check to see if argument is a topic\n if typ.func.id == 'indexed':\n typ = values[i].args[0]\n indexed_list.append(True)\n topics_count += 1\n else:\n raise VariableDeclarationException(\"Only indexed keyword is allowed\", arg)\n else:\n if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n indexed_list.append(False)\n if topics_count > 4:\n raise VariableDeclarationException(\"Maximum of 3 topics {} given\".format(topics_count - 1), arg)\n if not isinstance(arg, str):\n raise VariableDeclarationException(\"Argument name invalid\", arg)\n if not typ:\n raise InvalidTypeException(\"Argument must have type\", arg)\n if not is_varname_valid(arg):\n raise VariableDeclarationException(\"Argument name invalid or reserved: \" + arg, arg)\n if arg in (x.name for x in args):\n raise VariableDeclarationException(\"Duplicate function argument name: \" + arg, 
arg)\n parsed_type = parse_type(typ, None)\n args.append(VariableRecord(arg, pos, parsed_type, False))\n if isinstance(parsed_type, ByteArrayType):\n pos += 32\n else:\n pos += get_size_of_type(parsed_type) * 32\n sig = name + '(' + ','.join([canonicalize_type(arg.typ, True) for arg in args]) + ')'\n event_id = bytes_to_int(sha3(bytes(sig, 'utf-8')))\n return cls(name, args, indexed_list, event_id, sig)\n\n def to_abi_dict(self):\n return {\n \"name\": self.name,\n \"inputs\": [{\"type\": canonicalize_type(arg.typ, True), \"name\": arg.name, \"indexed\": self.indexed_list[pos]} for pos, arg in enumerate(self.args)] if self.args else [],\n \"anonymous\": False,\n \"type\": \"event\"\n }\n", "path": "viper/signatures/event_signature.py"}]}
1572
223
gh_patches_debug_1433
rasdani/github-patches
git_diff
translate__translate-3603
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> po2ts fails with ascii encode error on py2 (should use utf-8) Test file: [octave.zip](https://github.com/translate/translate/files/870288/octave.zip) ``` $ po2ts octave.po oct.ts processing 1 files... po2ts: WARNING: Error processing: input octave.po, output oct.ts, template None: 'ascii' codec can't encode characters in position 187-188: ordinal not in range(128) [###########################################] 100% $ python --version Python 2.7.12 ``` </issue> <code> [start of translate/convert/po2ts.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright 2004-2006 Zuza Software Foundation 5 # 6 # This file is part of translate. 7 # 8 # translate is free software; you can redistribute it and/or modify 9 # it under the terms of the GNU General Public License as published by 10 # the Free Software Foundation; either version 2 of the License, or 11 # (at your option) any later version. 12 # 13 # translate is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU General Public License for more details. 17 # 18 # You should have received a copy of the GNU General Public License 19 # along with this program; if not, see <http://www.gnu.org/licenses/>. 20 21 """Convert Gettext PO localization files to Qt Linguist (.ts) files. 22 23 See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ts2po.html 24 for examples and usage instructions. 25 """ 26 27 from translate.storage import po, ts 28 29 30 class po2ts(object): 31 32 def convertstore(self, inputstore, templatefile=None, context=None): 33 """converts a .po file to .ts format (using a template .ts file if given)""" 34 if templatefile is None: 35 tsfile = ts.QtTsParser() 36 else: 37 tsfile = ts.QtTsParser(templatefile) 38 for inputunit in inputstore.units: 39 if inputunit.isheader() or inputunit.isblank(): 40 continue 41 source = inputunit.source 42 translation = inputunit.target 43 comment = inputunit.getnotes("translator") 44 transtype = None 45 if not inputunit.istranslated(): 46 transtype = "unfinished" 47 elif inputunit.getnotes("developer") == "(obsolete)": 48 transtype = "obsolete" 49 if isinstance(source, bytes): 50 source = source.decode("utf-8") 51 if isinstance(translation, bytes): 52 translation = translation.decode("utf-8") 53 for sourcelocation in inputunit.getlocations(): 54 if context is None: 55 if "#" in sourcelocation: 56 contextname = sourcelocation[:sourcelocation.find("#")] 57 else: 58 contextname = sourcelocation 59 else: 60 contextname = context 61 tsfile.addtranslation(contextname, source, translation, comment, transtype, createifmissing=True) 62 return tsfile.getxml() 63 64 65 def convertpo(inputfile, outputfile, templatefile, context): 66 """reads in stdin using fromfileclass, converts using convertorclass, writes to stdout""" 67 inputstore = po.pofile(inputfile) 68 if inputstore.isempty(): 69 return 0 70 convertor = po2ts() 71 outputstring = convertor.convertstore(inputstore, templatefile, context) 72 outputfile.write(outputstring) 73 return 1 74 75 76 def main(argv=None): 77 from translate.convert import convert 78 formats = {"po": ("ts", convertpo), ("po", "ts"): ("ts", convertpo)} 79 parser = convert.ConvertOptionParser(formats, usepots=False, usetemplates=True, description=__doc__) 80 parser.add_option("-c", "--context", 
dest="context", default=None, 81 help="use supplied context instead of the one in the .po file comment") 82 parser.passthrough.append("context") 83 parser.run(argv) 84 85 86 if __name__ == '__main__': 87 main() 88 [end of translate/convert/po2ts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/translate/convert/po2ts.py b/translate/convert/po2ts.py --- a/translate/convert/po2ts.py +++ b/translate/convert/po2ts.py @@ -69,7 +69,7 @@ return 0 convertor = po2ts() outputstring = convertor.convertstore(inputstore, templatefile, context) - outputfile.write(outputstring) + outputfile.write(outputstring.encode('utf-8')) return 1
{"golden_diff": "diff --git a/translate/convert/po2ts.py b/translate/convert/po2ts.py\n--- a/translate/convert/po2ts.py\n+++ b/translate/convert/po2ts.py\n@@ -69,7 +69,7 @@\n return 0\n convertor = po2ts()\n outputstring = convertor.convertstore(inputstore, templatefile, context)\n- outputfile.write(outputstring)\n+ outputfile.write(outputstring.encode('utf-8'))\n return 1\n", "issue": "po2ts fails with ascii encode error on py2 (should use utf-8)\nTest file:\r\n[octave.zip](https://github.com/translate/translate/files/870288/octave.zip)\r\n\r\n```\r\n$ po2ts octave.po oct.ts\r\nprocessing 1 files...\r\npo2ts: WARNING: Error processing: input octave.po, output oct.ts, template None: 'ascii' codec can't encode characters in position 187-188: ordinal not in range(128)\r\n[###########################################] 100%\r\n\r\n$ python --version\r\nPython 2.7.12\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2004-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Convert Gettext PO localization files to Qt Linguist (.ts) files.\n\nSee: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ts2po.html\nfor examples and usage instructions.\n\"\"\"\n\nfrom translate.storage import po, ts\n\n\nclass po2ts(object):\n\n def convertstore(self, inputstore, templatefile=None, context=None):\n \"\"\"converts a .po file to .ts format (using a template .ts file if given)\"\"\"\n if templatefile is None:\n tsfile = ts.QtTsParser()\n else:\n tsfile = ts.QtTsParser(templatefile)\n for inputunit in inputstore.units:\n if inputunit.isheader() or inputunit.isblank():\n continue\n source = inputunit.source\n translation = inputunit.target\n comment = inputunit.getnotes(\"translator\")\n transtype = None\n if not inputunit.istranslated():\n transtype = \"unfinished\"\n elif inputunit.getnotes(\"developer\") == \"(obsolete)\":\n transtype = \"obsolete\"\n if isinstance(source, bytes):\n source = source.decode(\"utf-8\")\n if isinstance(translation, bytes):\n translation = translation.decode(\"utf-8\")\n for sourcelocation in inputunit.getlocations():\n if context is None:\n if \"#\" in sourcelocation:\n contextname = sourcelocation[:sourcelocation.find(\"#\")]\n else:\n contextname = sourcelocation\n else:\n contextname = context\n tsfile.addtranslation(contextname, source, translation, comment, transtype, createifmissing=True)\n return tsfile.getxml()\n\n\ndef convertpo(inputfile, outputfile, templatefile, context):\n \"\"\"reads in stdin using fromfileclass, converts using convertorclass, writes to stdout\"\"\"\n inputstore = po.pofile(inputfile)\n if inputstore.isempty():\n return 0\n convertor = po2ts()\n outputstring = convertor.convertstore(inputstore, templatefile, context)\n outputfile.write(outputstring)\n return 1\n\n\ndef main(argv=None):\n from translate.convert 
import convert\n formats = {\"po\": (\"ts\", convertpo), (\"po\", \"ts\"): (\"ts\", convertpo)}\n parser = convert.ConvertOptionParser(formats, usepots=False, usetemplates=True, description=__doc__)\n parser.add_option(\"-c\", \"--context\", dest=\"context\", default=None,\n help=\"use supplied context instead of the one in the .po file comment\")\n parser.passthrough.append(\"context\")\n parser.run(argv)\n\n\nif __name__ == '__main__':\n main()\n", "path": "translate/convert/po2ts.py"}]}
1596
116
gh_patches_debug_17889
rasdani/github-patches
git_diff
akvo__akvo-rsr-1763
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Sector vocabulary saved value not updated ## Test plan GIVEN the project editor WHEN the sector vocabulary AND sector code are filled in THEN the 'saved-value' attribute of the vocabulary should be correctly updated </issue> <code> [start of akvo/rsr/models/sector.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 from django.db import models 9 from django.db.models.signals import post_save 10 from django.dispatch import receiver 11 from django.core.validators import MaxValueValidator, MinValueValidator 12 from django.utils.translation import ugettext_lazy as _ 13 14 from ..fields import ValidXMLCharField 15 16 from akvo.codelists import models as codelist_models 17 from akvo.codelists.store.codelists_v201 import SECTOR_VOCABULARY 18 from akvo.utils import codelist_choices, codelist_value 19 20 21 class Sector(models.Model): 22 project = models.ForeignKey('Project', verbose_name=_(u'project'), related_name='sectors') 23 sector_code = ValidXMLCharField( 24 _(u'sector code'), blank=True, max_length=5, 25 help_text=_(u'Enter the sector code of the sectors that the project is working within.<br>' 26 u'See these lists for the DAC-5 and DAC-3 sector codes:<br>' 27 u'- <a href="http://iatistandard.org/201/codelists/Sector/" target="_blank">' 28 u'DAC-5 sector codes</a><br>' 29 u'- <a href="http://iatistandard.org/201/codelists/SectorCategory/" ' 30 u'target="_blank">DAC-3 sector codes</a>') 31 ) 32 text = ValidXMLCharField( 33 _(u'description'), blank=True, max_length=100, help_text=_(u'(max 100 characters)') 34 ) 35 vocabulary = ValidXMLCharField( 36 _(u'vocabulary'), blank=True, max_length=5, choices=codelist_choices(SECTOR_VOCABULARY) 37 ) 38 percentage = models.DecimalField( 39 _(u'sector percentage'), blank=True, null=True, max_digits=4, decimal_places=1, 40 validators=[MaxValueValidator(100), MinValueValidator(0)], 41 help_text=_(u'You can set the percentage of the project that is relevant for ' 42 u'this sector here.') 43 ) 44 45 def __unicode__(self): 46 if self.sector_code: 47 try: 48 sector_unicode = self.iati_sector().name.capitalize() 49 except Exception as e: 50 sector_unicode = u'%s' % _(u'Sector code not found') 51 else: 52 sector_unicode = u'%s' % _(u'No sector code specified') 53 54 if self.percentage: 55 sector_unicode += u' (%s%%)' % str(self.percentage) 56 57 return sector_unicode 58 59 60 def iati_sector_codes(self): 61 if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'): 62 return self.sector_code, codelist_value(codelist_models.Sector, self, 'sector_code') 63 elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'): 64 return self.sector_code, codelist_value(codelist_models.SectorCategory, 65 self, 66 'sector_code') 67 else: 68 return self.sector_code, self.sector_code 69 70 def iati_sector(self): 71 if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'): 72 return codelist_value(codelist_models.Sector, self, 'sector_code') 73 elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'): 74 return codelist_value(codelist_models.SectorCategory, self, 'sector_code') 75 else: 76 return self.sector_code 77 78 def iati_vocabulary(self): 
79 return codelist_value(codelist_models.SectorVocabulary, self, 'vocabulary') 80 81 class Meta: 82 app_label = 'rsr' 83 verbose_name = _(u'sector') 84 verbose_name_plural = _(u'sectors') 85 86 @receiver(post_save, sender=Sector) 87 def update_vocabulary(sender, **kwargs): 88 "Updates the vocabulary if not specified." 89 sector = kwargs['instance'] 90 if not sector.vocabulary and sector.sector_code: 91 if len(sector.sector_code) == 3: 92 sector.vocabulary = '2' 93 elif len(sector.sector_code) == 5: 94 sector.vocabulary = '1' 95 sector.save() 96 [end of akvo/rsr/models/sector.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rsr/models/sector.py b/akvo/rsr/models/sector.py --- a/akvo/rsr/models/sector.py +++ b/akvo/rsr/models/sector.py @@ -6,8 +6,6 @@ from django.db import models -from django.db.models.signals import post_save -from django.dispatch import receiver from django.core.validators import MaxValueValidator, MinValueValidator from django.utils.translation import ugettext_lazy as _ @@ -82,14 +80,3 @@ app_label = 'rsr' verbose_name = _(u'sector') verbose_name_plural = _(u'sectors') - -@receiver(post_save, sender=Sector) -def update_vocabulary(sender, **kwargs): - "Updates the vocabulary if not specified." - sector = kwargs['instance'] - if not sector.vocabulary and sector.sector_code: - if len(sector.sector_code) == 3: - sector.vocabulary = '2' - elif len(sector.sector_code) == 5: - sector.vocabulary = '1' - sector.save()
{"golden_diff": "diff --git a/akvo/rsr/models/sector.py b/akvo/rsr/models/sector.py\n--- a/akvo/rsr/models/sector.py\n+++ b/akvo/rsr/models/sector.py\n@@ -6,8 +6,6 @@\n \n \n from django.db import models\n-from django.db.models.signals import post_save\n-from django.dispatch import receiver\n from django.core.validators import MaxValueValidator, MinValueValidator\n from django.utils.translation import ugettext_lazy as _\n \n@@ -82,14 +80,3 @@\n app_label = 'rsr'\n verbose_name = _(u'sector')\n verbose_name_plural = _(u'sectors')\n-\n-@receiver(post_save, sender=Sector)\n-def update_vocabulary(sender, **kwargs):\n- \"Updates the vocabulary if not specified.\"\n- sector = kwargs['instance']\n- if not sector.vocabulary and sector.sector_code:\n- if len(sector.sector_code) == 3:\n- sector.vocabulary = '2'\n- elif len(sector.sector_code) == 5:\n- sector.vocabulary = '1'\n- sector.save()\n", "issue": "Sector vocabulary saved value not updated\n## Test plan\n\nGIVEN the project editor\nWHEN the sector vocabulary AND sector code are filled in\nTHEN the 'saved-value' attribute of the vocabulary should be correctly updated\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import ValidXMLCharField\n\nfrom akvo.codelists import models as codelist_models\nfrom akvo.codelists.store.codelists_v201 import SECTOR_VOCABULARY\nfrom akvo.utils import codelist_choices, codelist_value\n\n\nclass Sector(models.Model):\n project = models.ForeignKey('Project', verbose_name=_(u'project'), related_name='sectors')\n sector_code = ValidXMLCharField(\n _(u'sector code'), blank=True, max_length=5,\n help_text=_(u'Enter the sector code of the sectors that the project is working within.<br>'\n u'See these lists for the DAC-5 and DAC-3 sector codes:<br>'\n u'- <a href=\"http://iatistandard.org/201/codelists/Sector/\" target=\"_blank\">'\n u'DAC-5 sector codes</a><br>'\n u'- <a href=\"http://iatistandard.org/201/codelists/SectorCategory/\" '\n u'target=\"_blank\">DAC-3 sector codes</a>')\n )\n text = ValidXMLCharField(\n _(u'description'), blank=True, max_length=100, help_text=_(u'(max 100 characters)')\n )\n vocabulary = ValidXMLCharField(\n _(u'vocabulary'), blank=True, max_length=5, choices=codelist_choices(SECTOR_VOCABULARY)\n )\n percentage = models.DecimalField(\n _(u'sector percentage'), blank=True, null=True, max_digits=4, decimal_places=1,\n validators=[MaxValueValidator(100), MinValueValidator(0)],\n help_text=_(u'You can set the percentage of the project that is relevant for '\n u'this sector here.')\n )\n\n def __unicode__(self):\n if self.sector_code:\n try:\n sector_unicode = self.iati_sector().name.capitalize()\n except Exception as e:\n sector_unicode = u'%s' % _(u'Sector code not found')\n else:\n sector_unicode = u'%s' % _(u'No sector code specified')\n\n if self.percentage:\n sector_unicode += u' (%s%%)' % str(self.percentage)\n\n return sector_unicode\n\n\n def iati_sector_codes(self):\n if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return 
self.sector_code, codelist_value(codelist_models.Sector, self, 'sector_code')\n elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return self.sector_code, codelist_value(codelist_models.SectorCategory,\n self,\n 'sector_code')\n else:\n return self.sector_code, self.sector_code\n\n def iati_sector(self):\n if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return codelist_value(codelist_models.Sector, self, 'sector_code')\n elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return codelist_value(codelist_models.SectorCategory, self, 'sector_code')\n else:\n return self.sector_code\n\n def iati_vocabulary(self):\n return codelist_value(codelist_models.SectorVocabulary, self, 'vocabulary')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'sector')\n verbose_name_plural = _(u'sectors')\n\n@receiver(post_save, sender=Sector)\ndef update_vocabulary(sender, **kwargs):\n \"Updates the vocabulary if not specified.\"\n sector = kwargs['instance']\n if not sector.vocabulary and sector.sector_code:\n if len(sector.sector_code) == 3:\n sector.vocabulary = '2'\n elif len(sector.sector_code) == 5:\n sector.vocabulary = '1'\n sector.save()\n", "path": "akvo/rsr/models/sector.py"}]}
1748
247
gh_patches_debug_13378
rasdani/github-patches
git_diff
TheAlgorithms__Python-6467
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Enter the logic for hash table ### Describe your change: * [ ] Add an algorithm? * [ ] Fix a bug or typo in an existing algorithm? * [x] Documentation change? ### Checklist: * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`. </issue> <code> [start of data_structures/hashing/double_hash.py] 1 #!/usr/bin/env python3 2 from .hash_table import HashTable 3 from .number_theory.prime_numbers import is_prime, next_prime 4 5 6 class DoubleHash(HashTable): 7 """ 8 Hash Table example with open addressing and Double Hash 9 """ 10 11 def __init__(self, *args, **kwargs): 12 super().__init__(*args, **kwargs) 13 14 def __hash_function_2(self, value, data): 15 16 next_prime_gt = ( 17 next_prime(value % self.size_table) 18 if not is_prime(value % self.size_table) 19 else value % self.size_table 20 ) # gt = bigger than 21 return next_prime_gt - (data % next_prime_gt) 22 23 def __hash_double_function(self, key, data, increment): 24 return (increment * self.__hash_function_2(key, data)) % self.size_table 25 26 def _collision_resolution(self, key, data=None): 27 i = 1 28 new_key = self.hash_function(data) 29 30 while self.values[new_key] is not None and self.values[new_key] != key: 31 new_key = ( 32 self.__hash_double_function(key, data, i) 33 if self.balanced_factor() >= self.lim_charge 34 else None 35 ) 36 if new_key is None: 37 break 38 else: 39 i += 1 40 41 return new_key 42 [end of data_structures/hashing/double_hash.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -1,4 +1,16 @@ #!/usr/bin/env python3 +""" +Double hashing is a collision resolving technique in Open Addressed Hash tables. +Double hashing uses the idea of applying a second hash function to key when a collision +occurs. The advantage of Double hashing is that it is one of the best form of probing, +producing a uniform distribution of records throughout a hash table. This technique +does not yield any clusters. It is one of effective method for resolving collisions. + +Double hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE +Where hash1() and hash2() are hash functions and TABLE_SIZE is size of hash table. + +Reference: https://en.wikipedia.org/wiki/Double_hashing +""" from .hash_table import HashTable from .number_theory.prime_numbers import is_prime, next_prime
{"golden_diff": "diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py\n--- a/data_structures/hashing/double_hash.py\n+++ b/data_structures/hashing/double_hash.py\n@@ -1,4 +1,16 @@\n #!/usr/bin/env python3\n+\"\"\"\n+Double hashing is a collision resolving technique in Open Addressed Hash tables.\n+Double hashing uses the idea of applying a second hash function to key when a collision\n+occurs. The advantage of Double hashing is that it is one of the best form of probing,\n+producing a uniform distribution of records throughout a hash table. This technique\n+does not yield any clusters. It is one of effective method for resolving collisions.\n+\n+Double hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE\n+Where hash1() and hash2() are hash functions and TABLE_SIZE is size of hash table.\n+\n+Reference: https://en.wikipedia.org/wiki/Double_hashing\n+\"\"\"\n from .hash_table import HashTable\n from .number_theory.prime_numbers import is_prime, next_prime\n", "issue": "Enter the logic for hash table\n### Describe your change:\r\n\r\n\r\n\r\n* [ ] Add an algorithm?\r\n* [ ] Fix a bug or typo in an existing algorithm?\r\n* [x] Documentation change?\r\n\r\n### Checklist:\r\n* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).\r\n* [x] This pull request is all my own work -- I have not plagiarized.\r\n* [x] I know that pull requests will not be merged if they fail the automated tests.\r\n* [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.\r\n* [ ] All new Python files are placed inside an existing directory.\r\n* [x] All filenames are in all lowercase characters with no spaces or dashes.\r\n* [x] All functions and variable names follow Python naming conventions.\r\n* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).\r\n* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.\r\n* [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.\r\n* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nfrom .hash_table import HashTable\nfrom .number_theory.prime_numbers import is_prime, next_prime\n\n\nclass DoubleHash(HashTable):\n \"\"\"\n Hash Table example with open addressing and Double Hash\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __hash_function_2(self, value, data):\n\n next_prime_gt = (\n next_prime(value % self.size_table)\n if not is_prime(value % self.size_table)\n else value % self.size_table\n ) # gt = bigger than\n return next_prime_gt - (data % next_prime_gt)\n\n def __hash_double_function(self, key, data, increment):\n return (increment * self.__hash_function_2(key, data)) % self.size_table\n\n def _collision_resolution(self, key, data=None):\n i = 1\n new_key = self.hash_function(data)\n\n while self.values[new_key] is not None and self.values[new_key] != key:\n new_key = (\n self.__hash_double_function(key, data, i)\n if self.balanced_factor() >= self.lim_charge\n else None\n )\n if new_key is None:\n break\n else:\n i += 1\n\n return new_key\n", "path": "data_structures/hashing/double_hash.py"}]}
1210
244
gh_patches_debug_29279
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-121
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Type Inference 1: Check column against a type **Problem** <!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.--> Different types in Mathesar will enable different operations; for example, strings could be aggregated by concatenating, but numeric types could be aggregated by summing or multiplying. So far, while we can reflect different types, we have no way to determine the type most appropriate for a column. **Proposed solution** <!-- A clear and concise description of your proposed solution or feature. --> Given a `schema`, `table_name`, `column_name`, and `type`, we need to be able to return a boolean giving whether the column can be cast to that type. **Additional context** <!-- Add any other context or screenshots about the feature request here.--> We may need to take an optional sample size parameter to do this for large data. Performance testing will be necessary. </issue> <code> [start of db/types/base.py] 1 from sqlalchemy import create_engine 2 from db import constants 3 4 SCHEMA = f"{constants.MATHESAR_PREFIX}types" 5 # Since we want to have our identifiers quoted appropriately for use in 6 # PostgreSQL, we want to use the postgres dialect preparer to set this up. 7 preparer = create_engine("postgresql://").dialect.identifier_preparer 8 9 10 def get_qualified_name(name): 11 return ".".join([preparer.quote_schema(SCHEMA), name]) 12 [end of db/types/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/db/types/base.py b/db/types/base.py --- a/db/types/base.py +++ b/db/types/base.py @@ -1,5 +1,6 @@ -from sqlalchemy import create_engine +from sqlalchemy import create_engine, MetaData, Table, DDL from db import constants +from db.types import email SCHEMA = f"{constants.MATHESAR_PREFIX}types" # Since we want to have our identifiers quoted appropriately for use in @@ -9,3 +10,41 @@ def get_qualified_name(name): return ".".join([preparer.quote_schema(SCHEMA), name]) + + +def get_supported_alter_column_types(engine): + dialect_types = engine.dialect.ischema_names + type_map = { + # Default Postgres types + "boolean": dialect_types.get("boolean"), + "interval": dialect_types.get("interval"), + "numeric": dialect_types.get("numeric"), + "string": dialect_types.get("name"), + # Custom Mathesar types + "email": dialect_types.get(email.QUALIFIED_EMAIL) + } + return {k: v for k, v in type_map.items() if v is not None} + + +def alter_column_type( + schema, table_name, column_name, target_type_str, engine +): + _preparer = engine.dialect.identifier_preparer + supported_types = get_supported_alter_column_types(engine) + target_type = supported_types.get(target_type_str.lower()) + with engine.begin() as conn: + metadata = MetaData(bind=engine, schema=schema) + table = Table( + table_name, metadata, schema=schema, autoload_with=engine + ) + column = table.columns[column_name] + prepared_table_name = _preparer.format_table(table) + prepared_column_name = _preparer.format_column(column) + prepared_type_name = target_type().compile(dialect=engine.dialect) + alter_stmt = f""" + ALTER TABLE {prepared_table_name} + ALTER COLUMN {prepared_column_name} + TYPE {prepared_type_name} + USING {prepared_column_name}::{prepared_type_name}; + """ + conn.execute(DDL(alter_stmt))
{"golden_diff": "diff --git a/db/types/base.py b/db/types/base.py\n--- a/db/types/base.py\n+++ b/db/types/base.py\n@@ -1,5 +1,6 @@\n-from sqlalchemy import create_engine\n+from sqlalchemy import create_engine, MetaData, Table, DDL\n from db import constants\n+from db.types import email\n \n SCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n # Since we want to have our identifiers quoted appropriately for use in\n@@ -9,3 +10,41 @@\n \n def get_qualified_name(name):\n return \".\".join([preparer.quote_schema(SCHEMA), name])\n+\n+\n+def get_supported_alter_column_types(engine):\n+ dialect_types = engine.dialect.ischema_names\n+ type_map = {\n+ # Default Postgres types\n+ \"boolean\": dialect_types.get(\"boolean\"),\n+ \"interval\": dialect_types.get(\"interval\"),\n+ \"numeric\": dialect_types.get(\"numeric\"),\n+ \"string\": dialect_types.get(\"name\"),\n+ # Custom Mathesar types\n+ \"email\": dialect_types.get(email.QUALIFIED_EMAIL)\n+ }\n+ return {k: v for k, v in type_map.items() if v is not None}\n+\n+\n+def alter_column_type(\n+ schema, table_name, column_name, target_type_str, engine\n+):\n+ _preparer = engine.dialect.identifier_preparer\n+ supported_types = get_supported_alter_column_types(engine)\n+ target_type = supported_types.get(target_type_str.lower())\n+ with engine.begin() as conn:\n+ metadata = MetaData(bind=engine, schema=schema)\n+ table = Table(\n+ table_name, metadata, schema=schema, autoload_with=engine\n+ )\n+ column = table.columns[column_name]\n+ prepared_table_name = _preparer.format_table(table)\n+ prepared_column_name = _preparer.format_column(column)\n+ prepared_type_name = target_type().compile(dialect=engine.dialect)\n+ alter_stmt = f\"\"\"\n+ ALTER TABLE {prepared_table_name}\n+ ALTER COLUMN {prepared_column_name}\n+ TYPE {prepared_type_name}\n+ USING {prepared_column_name}::{prepared_type_name};\n+ \"\"\"\n+ conn.execute(DDL(alter_stmt))\n", "issue": "Type Inference 1: Check column against a type\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\n\r\nDifferent types in Mathesar will enable different operations; for example, strings could be aggregated by concatenating, but numeric types could be aggregated by summing or multiplying. So far, while we can reflect different types, we have no way to determine the type most appropriate for a column.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\n\r\nGiven a `schema`, `table_name`, `column_name`, and `type`, we need to be able to return a boolean giving whether the column can be cast to that type.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n\r\nWe may need to take an optional sample size parameter to do this for large data. Performance testing will be necessary.\r\n\n", "before_files": [{"content": "from sqlalchemy import create_engine\nfrom db import constants\n\nSCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n# Since we want to have our identifiers quoted appropriately for use in\n# PostgreSQL, we want to use the postgres dialect preparer to set this up.\npreparer = create_engine(\"postgresql://\").dialect.identifier_preparer\n\n\ndef get_qualified_name(name):\n return \".\".join([preparer.quote_schema(SCHEMA), name])\n", "path": "db/types/base.py"}]}
840
488
gh_patches_debug_19467
rasdani/github-patches
git_diff
mlcommons__GaNDLF-675
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Perform penalty calculation after all sanity checks are completed **Is your feature request related to a problem? Please describe.** The penalty calculation takes a long time, and there are sanity checks that happen after this, which can be a pain. **Describe the solution you'd like** It would be great to have these checks before the penalty calculation for quality-of-life improvements. **Describe alternatives you've considered** N.A. **Additional context** From Evan C. </issue> <code> [start of GANDLF/compute/generic.py] 1 from GANDLF.models import get_model 2 from GANDLF.schedulers import get_scheduler 3 from GANDLF.optimizers import get_optimizer 4 from GANDLF.data import ( 5 get_train_loader, 6 get_validation_loader, 7 ) 8 from GANDLF.utils import ( 9 populate_header_in_parameters, 10 parseTrainingCSV, 11 send_model_to_device, 12 get_class_imbalance_weights, 13 ) 14 15 16 def create_pytorch_objects(parameters, train_csv=None, val_csv=None, device="cpu"): 17 """ 18 This function creates all the PyTorch objects needed for training. 19 20 Args: 21 parameters (dict): The parameters dictionary. 22 train_csv (str): The path to the training CSV file. 23 val_csv (str): The path to the validation CSV file. 24 device (str): The device to perform computations on. 25 26 Returns: 27 model (torch.nn.Module): The model to use for training. 28 optimizer (Optimizer): The optimizer to use for training. 29 train_loader (torch.utils.data.DataLoader): The training data loader. 30 val_loader (torch.utils.data.DataLoader): The validation data loader. 31 scheduler (object): The scheduler to use for training. 32 parameters (dict): The updated parameters dictionary. 
33 """ 34 # initialize train and val loaders 35 train_loader, val_loader = None, None 36 headers_to_populate_train, headers_to_populate_val = None, None 37 38 if train_csv is not None: 39 # populate the data frames 40 parameters["training_data"], headers_to_populate_train = parseTrainingCSV( 41 train_csv, train=True 42 ) 43 parameters = populate_header_in_parameters( 44 parameters, headers_to_populate_train 45 ) 46 # get the train loader 47 train_loader = get_train_loader(parameters) 48 parameters["training_samples_size"] = len(train_loader) 49 50 # Calculate the weights here 51 ( 52 parameters["weights"], 53 parameters["class_weights"], 54 ) = get_class_imbalance_weights(parameters["training_data"], parameters) 55 56 if val_csv is not None: 57 parameters["validation_data"], headers_to_populate_val = parseTrainingCSV( 58 val_csv, train=False 59 ) 60 if headers_to_populate_train is None: 61 parameters = populate_header_in_parameters( 62 parameters, headers_to_populate_val 63 ) 64 # get the validation loader 65 val_loader = get_validation_loader(parameters) 66 67 # get the model 68 model = get_model(parameters) 69 parameters["model_parameters"] = model.parameters() 70 71 # get the optimizer 72 optimizer = get_optimizer(parameters) 73 parameters["optimizer_object"] = optimizer 74 75 # send model to correct device 76 ( 77 model, 78 parameters["model"]["amp"], 79 parameters["device"], 80 parameters["device_id"], 81 ) = send_model_to_device( 82 model, amp=parameters["model"]["amp"], device=device, optimizer=optimizer 83 ) 84 85 # only need to create scheduler if training 86 if train_csv is not None: 87 if not ("step_size" in parameters["scheduler"]): 88 parameters["scheduler"]["step_size"] = ( 89 parameters["training_samples_size"] / parameters["learning_rate"] 90 ) 91 92 scheduler = get_scheduler(parameters) 93 else: 94 scheduler = None 95 96 # these keys contain generators, and are not needed beyond this point in params 97 generator_keys_to_remove = ["optimizer_object", "model_parameters"] 98 for key in generator_keys_to_remove: 99 parameters.pop(key, None) 100 101 return model, optimizer, train_loader, val_loader, scheduler, parameters 102 [end of GANDLF/compute/generic.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/GANDLF/compute/generic.py b/GANDLF/compute/generic.py --- a/GANDLF/compute/generic.py +++ b/GANDLF/compute/generic.py @@ -47,12 +47,6 @@ train_loader = get_train_loader(parameters) parameters["training_samples_size"] = len(train_loader) - # Calculate the weights here - ( - parameters["weights"], - parameters["class_weights"], - ) = get_class_imbalance_weights(parameters["training_data"], parameters) - if val_csv is not None: parameters["validation_data"], headers_to_populate_val = parseTrainingCSV( val_csv, train=False @@ -90,6 +84,13 @@ ) scheduler = get_scheduler(parameters) + + # Calculate the weights here + ( + parameters["weights"], + parameters["class_weights"], + ) = get_class_imbalance_weights(parameters["training_data"], parameters) + else: scheduler = None
{"golden_diff": "diff --git a/GANDLF/compute/generic.py b/GANDLF/compute/generic.py\n--- a/GANDLF/compute/generic.py\n+++ b/GANDLF/compute/generic.py\n@@ -47,12 +47,6 @@\n train_loader = get_train_loader(parameters)\n parameters[\"training_samples_size\"] = len(train_loader)\n \n- # Calculate the weights here\n- (\n- parameters[\"weights\"],\n- parameters[\"class_weights\"],\n- ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n-\n if val_csv is not None:\n parameters[\"validation_data\"], headers_to_populate_val = parseTrainingCSV(\n val_csv, train=False\n@@ -90,6 +84,13 @@\n )\n \n scheduler = get_scheduler(parameters)\n+\n+ # Calculate the weights here\n+ (\n+ parameters[\"weights\"],\n+ parameters[\"class_weights\"],\n+ ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n+\n else:\n scheduler = None\n", "issue": "Perform penalty calculation after all sanity checks are completed\n**Is your feature request related to a problem? Please describe.**\r\nThe penalty calculation takes a long time, and there are sanity checks that happen after this, which can be a pain.\r\n\r\n**Describe the solution you'd like**\r\nIt would be great to have these checks before the penalty calculation for quality-of-life improvements.\r\n\r\n**Describe alternatives you've considered**\r\nN.A.\r\n\r\n**Additional context**\r\nFrom Evan C.\n", "before_files": [{"content": "from GANDLF.models import get_model\nfrom GANDLF.schedulers import get_scheduler\nfrom GANDLF.optimizers import get_optimizer\nfrom GANDLF.data import (\n get_train_loader,\n get_validation_loader,\n)\nfrom GANDLF.utils import (\n populate_header_in_parameters,\n parseTrainingCSV,\n send_model_to_device,\n get_class_imbalance_weights,\n)\n\n\ndef create_pytorch_objects(parameters, train_csv=None, val_csv=None, device=\"cpu\"):\n \"\"\"\n This function creates all the PyTorch objects needed for training.\n\n Args:\n parameters (dict): The parameters dictionary.\n train_csv (str): The path to the training CSV file.\n val_csv (str): The path to the validation CSV file.\n device (str): The device to perform computations on.\n\n Returns:\n model (torch.nn.Module): The model to use for training.\n optimizer (Optimizer): The optimizer to use for training.\n train_loader (torch.utils.data.DataLoader): The training data loader.\n val_loader (torch.utils.data.DataLoader): The validation data loader.\n scheduler (object): The scheduler to use for training.\n parameters (dict): The updated parameters dictionary.\n \"\"\"\n # initialize train and val loaders\n train_loader, val_loader = None, None\n headers_to_populate_train, headers_to_populate_val = None, None\n\n if train_csv is not None:\n # populate the data frames\n parameters[\"training_data\"], headers_to_populate_train = parseTrainingCSV(\n train_csv, train=True\n )\n parameters = populate_header_in_parameters(\n parameters, headers_to_populate_train\n )\n # get the train loader\n train_loader = get_train_loader(parameters)\n parameters[\"training_samples_size\"] = len(train_loader)\n\n # Calculate the weights here\n (\n parameters[\"weights\"],\n parameters[\"class_weights\"],\n ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n\n if val_csv is not None:\n parameters[\"validation_data\"], headers_to_populate_val = parseTrainingCSV(\n val_csv, train=False\n )\n if headers_to_populate_train is None:\n parameters = populate_header_in_parameters(\n parameters, headers_to_populate_val\n )\n # get the validation loader\n 
val_loader = get_validation_loader(parameters)\n\n # get the model\n model = get_model(parameters)\n parameters[\"model_parameters\"] = model.parameters()\n\n # get the optimizer\n optimizer = get_optimizer(parameters)\n parameters[\"optimizer_object\"] = optimizer\n\n # send model to correct device\n (\n model,\n parameters[\"model\"][\"amp\"],\n parameters[\"device\"],\n parameters[\"device_id\"],\n ) = send_model_to_device(\n model, amp=parameters[\"model\"][\"amp\"], device=device, optimizer=optimizer\n )\n\n # only need to create scheduler if training\n if train_csv is not None:\n if not (\"step_size\" in parameters[\"scheduler\"]):\n parameters[\"scheduler\"][\"step_size\"] = (\n parameters[\"training_samples_size\"] / parameters[\"learning_rate\"]\n )\n\n scheduler = get_scheduler(parameters)\n else:\n scheduler = None\n\n # these keys contain generators, and are not needed beyond this point in params\n generator_keys_to_remove = [\"optimizer_object\", \"model_parameters\"]\n for key in generator_keys_to_remove:\n parameters.pop(key, None)\n\n return model, optimizer, train_loader, val_loader, scheduler, parameters\n", "path": "GANDLF/compute/generic.py"}]}
num_tokens_prompt: 1,568
num_tokens_diff: 225

problem_id: gh_patches_debug_32962
source: rasdani/github-patches
task_type: git_diff
in_source_id: microsoft__torchgeo-250
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> torchgeo.models.RFC should have a seed argument The parameters of this model are randomly initialized, but it is not trainable. To have repeatable results with this we need a seed parameter so we can guarantee that parameter init happens the same. </issue> <code> [start of torchgeo/models/rcf.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 """Implementation of a random convolutional feature projection model.""" 5 6 from typing import cast 7 8 import torch 9 import torch.nn.functional as F 10 from torch import Tensor 11 from torch.nn.modules import Conv2d, Module 12 13 Module.__module__ = "torch.nn" 14 Conv2d.__module__ = "torch.nn" 15 16 17 class RCF(Module): 18 """This model extracts random convolutional features (RCFs) from its input. 19 20 RCFs are used in Multi-task Observation using Satellite Imagery & Kitchen Sinks 21 (MOSAIKS) method proposed in https://www.nature.com/articles/s41467-021-24638-z. 22 23 .. note:: 24 25 This Module is *not* trainable. It is only used as a feature extractor. 26 """ 27 28 def __init__( 29 self, 30 in_channels: int = 4, 31 features: int = 16, 32 kernel_size: int = 3, 33 bias: float = -1.0, 34 ) -> None: 35 """Initializes the RCF model. 36 37 This is a static model that serves to extract fixed length feature vectors from 38 input patches. 39 40 Args: 41 in_channels: number of input channels 42 features: number of features to compute, must be divisible by 2 43 kernel_size: size of the kernel used to compute the RCFs 44 bias: bias of the convolutional layer 45 """ 46 super().__init__() 47 48 assert features % 2 == 0 49 50 # We register the weight and bias tensors as "buffers". This does two things: 51 # makes them behave correctly when we call .to(...) on the module, and makes 52 # them explicitely _not_ Parameters of the model (which might get updated) if 53 # a user tries to train with this model. 54 self.register_buffer( 55 "weights", 56 torch.randn( 57 features // 2, 58 in_channels, 59 kernel_size, 60 kernel_size, 61 requires_grad=False, 62 ), 63 ) 64 self.register_buffer( 65 "biases", 66 torch.zeros( # type: ignore[attr-defined] 67 features // 2, requires_grad=False 68 ) 69 + bias, 70 ) 71 72 def forward(self, x: Tensor) -> Tensor: 73 """Forward pass of the RCF model. 74 75 Args: 76 x: a tensor with shape (B, C, H, W) 77 78 Returns: 79 a tensor of size (B, ``self.num_features``) 80 """ 81 x1a = F.relu( 82 F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0), 83 inplace=True, 84 ) 85 x1b = F.relu( 86 -F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0), 87 inplace=False, 88 ) 89 90 x1a = F.adaptive_avg_pool2d(x1a, (1, 1)).squeeze() 91 x1b = F.adaptive_avg_pool2d(x1b, (1, 1)).squeeze() 92 93 if len(x1a.shape) == 1: # case where we passed a single input 94 output = torch.cat((x1a, x1b), dim=0) # type: ignore[attr-defined] 95 return cast(Tensor, output) 96 else: # case where we passed a batch of > 1 inputs 97 assert len(x1a.shape) == 2 98 output = torch.cat((x1a, x1b), dim=1) # type: ignore[attr-defined] 99 return cast(Tensor, output) 100 [end of torchgeo/models/rcf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchgeo/models/rcf.py b/torchgeo/models/rcf.py --- a/torchgeo/models/rcf.py +++ b/torchgeo/models/rcf.py @@ -3,7 +3,7 @@ """Implementation of a random convolutional feature projection model.""" -from typing import cast +from typing import Optional, cast import torch import torch.nn.functional as F @@ -31,6 +31,7 @@ features: int = 16, kernel_size: int = 3, bias: float = -1.0, + seed: Optional[int] = None, ) -> None: """Initializes the RCF model. @@ -42,11 +43,19 @@ features: number of features to compute, must be divisible by 2 kernel_size: size of the kernel used to compute the RCFs bias: bias of the convolutional layer + seed: random seed used to initialize the convolutional layer """ super().__init__() assert features % 2 == 0 + if seed is None: + generator = None + else: + generator = torch.Generator().manual_seed( # type: ignore[attr-defined] + seed + ) + # We register the weight and bias tensors as "buffers". This does two things: # makes them behave correctly when we call .to(...) on the module, and makes # them explicitely _not_ Parameters of the model (which might get updated) if @@ -59,6 +68,7 @@ kernel_size, kernel_size, requires_grad=False, + generator=generator, ), ) self.register_buffer(
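The patch threads an optional `seed` into a dedicated `torch.Generator`, so the "random" convolutional filters become reproducible without touching torch's global RNG. A minimal self-contained sketch of that mechanism (not the torchgeo module itself):

```python
import torch

def random_weights(features, in_channels, kernel_size, seed=None):
    # A per-call Generator keeps reproducibility local instead of
    # seeding torch's global RNG state.
    generator = None if seed is None else torch.Generator().manual_seed(seed)
    return torch.randn(
        features // 2, in_channels, kernel_size, kernel_size,
        requires_grad=False, generator=generator,
    )

w1 = random_weights(16, 4, 3, seed=42)
w2 = random_weights(16, 4, 3, seed=42)
assert torch.equal(w1, w2)  # same seed, identical "random" filters
```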
{"golden_diff": "diff --git a/torchgeo/models/rcf.py b/torchgeo/models/rcf.py\n--- a/torchgeo/models/rcf.py\n+++ b/torchgeo/models/rcf.py\n@@ -3,7 +3,7 @@\n \n \"\"\"Implementation of a random convolutional feature projection model.\"\"\"\n \n-from typing import cast\n+from typing import Optional, cast\n \n import torch\n import torch.nn.functional as F\n@@ -31,6 +31,7 @@\n features: int = 16,\n kernel_size: int = 3,\n bias: float = -1.0,\n+ seed: Optional[int] = None,\n ) -> None:\n \"\"\"Initializes the RCF model.\n \n@@ -42,11 +43,19 @@\n features: number of features to compute, must be divisible by 2\n kernel_size: size of the kernel used to compute the RCFs\n bias: bias of the convolutional layer\n+ seed: random seed used to initialize the convolutional layer\n \"\"\"\n super().__init__()\n \n assert features % 2 == 0\n \n+ if seed is None:\n+ generator = None\n+ else:\n+ generator = torch.Generator().manual_seed( # type: ignore[attr-defined]\n+ seed\n+ )\n+\n # We register the weight and bias tensors as \"buffers\". This does two things:\n # makes them behave correctly when we call .to(...) on the module, and makes\n # them explicitely _not_ Parameters of the model (which might get updated) if\n@@ -59,6 +68,7 @@\n kernel_size,\n kernel_size,\n requires_grad=False,\n+ generator=generator,\n ),\n )\n self.register_buffer(\n", "issue": "torchgeo.models.RFC should have a seed argument\nThe parameters of this model are randomly initialized, but it is not trainable. To have repeatable results with this we need a seed parameter so we can guarantee that parameter init happens the same.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Implementation of a random convolutional feature projection model.\"\"\"\n\nfrom typing import cast\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.nn.modules import Conv2d, Module\n\nModule.__module__ = \"torch.nn\"\nConv2d.__module__ = \"torch.nn\"\n\n\nclass RCF(Module):\n \"\"\"This model extracts random convolutional features (RCFs) from its input.\n\n RCFs are used in Multi-task Observation using Satellite Imagery & Kitchen Sinks\n (MOSAIKS) method proposed in https://www.nature.com/articles/s41467-021-24638-z.\n\n .. note::\n\n This Module is *not* trainable. It is only used as a feature extractor.\n \"\"\"\n\n def __init__(\n self,\n in_channels: int = 4,\n features: int = 16,\n kernel_size: int = 3,\n bias: float = -1.0,\n ) -> None:\n \"\"\"Initializes the RCF model.\n\n This is a static model that serves to extract fixed length feature vectors from\n input patches.\n\n Args:\n in_channels: number of input channels\n features: number of features to compute, must be divisible by 2\n kernel_size: size of the kernel used to compute the RCFs\n bias: bias of the convolutional layer\n \"\"\"\n super().__init__()\n\n assert features % 2 == 0\n\n # We register the weight and bias tensors as \"buffers\". This does two things:\n # makes them behave correctly when we call .to(...) 
on the module, and makes\n # them explicitely _not_ Parameters of the model (which might get updated) if\n # a user tries to train with this model.\n self.register_buffer(\n \"weights\",\n torch.randn(\n features // 2,\n in_channels,\n kernel_size,\n kernel_size,\n requires_grad=False,\n ),\n )\n self.register_buffer(\n \"biases\",\n torch.zeros( # type: ignore[attr-defined]\n features // 2, requires_grad=False\n )\n + bias,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Forward pass of the RCF model.\n\n Args:\n x: a tensor with shape (B, C, H, W)\n\n Returns:\n a tensor of size (B, ``self.num_features``)\n \"\"\"\n x1a = F.relu(\n F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),\n inplace=True,\n )\n x1b = F.relu(\n -F.conv2d(x, self.weights, bias=self.biases, stride=1, padding=0),\n inplace=False,\n )\n\n x1a = F.adaptive_avg_pool2d(x1a, (1, 1)).squeeze()\n x1b = F.adaptive_avg_pool2d(x1b, (1, 1)).squeeze()\n\n if len(x1a.shape) == 1: # case where we passed a single input\n output = torch.cat((x1a, x1b), dim=0) # type: ignore[attr-defined]\n return cast(Tensor, output)\n else: # case where we passed a batch of > 1 inputs\n assert len(x1a.shape) == 2\n output = torch.cat((x1a, x1b), dim=1) # type: ignore[attr-defined]\n return cast(Tensor, output)\n", "path": "torchgeo/models/rcf.py"}]}
num_tokens_prompt: 1,579
num_tokens_diff: 381

problem_id: gh_patches_debug_1893
source: rasdani/github-patches
task_type: git_diff
in_source_id: rasterio__rasterio-778
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Copy colormap when rasters are merged I'm running `rio merge` over a few single band images that contain a colormap. During the merge, the colormap is not copied to the new raster. Can we modify `rio merge` to preserve the colormap? I have an initial pass of this change at: https://github.com/kapadia/rasterio/tree/rio-merge-colormap </issue> <code> [start of rasterio/rio/merge.py] 1 """Merge command.""" 2 3 import logging 4 5 import click 6 from cligj import files_inout_arg, format_opt 7 8 from .helpers import resolve_inout 9 from . import options 10 import rasterio 11 12 13 @click.command(short_help="Merge a stack of raster datasets.") 14 @files_inout_arg 15 @options.output_opt 16 @format_opt 17 @options.bounds_opt 18 @options.resolution_opt 19 @options.nodata_opt 20 @options.force_overwrite_opt 21 @click.option('--precision', type=int, default=7, 22 help="Number of decimal places of precision in alignment of " 23 "pixels") 24 @options.creation_options 25 @click.pass_context 26 def merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite, 27 precision, creation_options): 28 """Copy valid pixels from input files to an output file. 29 30 All files must have the same number of bands, data type, and 31 coordinate reference system. 32 33 Input files are merged in their listed order using the reverse 34 painter's algorithm. If the output file exists, its values will be 35 overwritten by input values. 36 37 Geospatial bounds and resolution of a new output file in the 38 units of the input file coordinate reference system may be provided 39 and are otherwise taken from the first input file. 40 41 Note: --res changed from 2 parameters in 0.25. 42 43 \b 44 --res 0.1 0.1 => --res 0.1 (square) 45 --res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular) 46 """ 47 from rasterio.merge import merge as merge_tool 48 49 verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1 50 51 output, files = resolve_inout( 52 files=files, output=output, force_overwrite=force_overwrite) 53 54 with rasterio.Env(CPL_DEBUG=verbosity > 2): 55 sources = [rasterio.open(f) for f in files] 56 dest, output_transform = merge_tool(sources, bounds=bounds, res=res, 57 nodata=nodata, precision=precision) 58 59 profile = sources[0].profile 60 profile.pop('affine') 61 profile['transform'] = output_transform 62 profile['height'] = dest.shape[1] 63 profile['width'] = dest.shape[2] 64 profile['driver'] = driver 65 66 profile.update(**creation_options) 67 68 with rasterio.open(output, 'w', **profile) as dst: 69 dst.write(dest) 70 [end of rasterio/rio/merge.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py --- a/rasterio/rio/merge.py +++ b/rasterio/rio/merge.py @@ -67,3 +67,10 @@ with rasterio.open(output, 'w', **profile) as dst: dst.write(dest) + + # uses the colormap in the first input raster. + try: + colormap = sources[0].colormap(1) + dst.write_colormap(1, colormap) + except ValueError: + pass
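The added lines reduce to a small, reusable pattern: try to read band 1's colormap from the first source and, if one exists, write it into the output. A standalone sketch of that pattern (file paths are placeholders; the dataset at `dst_path` must already exist):

```python
import rasterio

def copy_colormap(src_path, dst_path, band=1):
    """Copy `band`'s colormap from src to dst, silently skipping if absent."""
    with rasterio.open(src_path) as src:
        try:
            colormap = src.colormap(band)  # raises ValueError if no colormap
        except ValueError:
            return
    with rasterio.open(dst_path, "r+") as dst:  # open existing file for update
        dst.write_colormap(band, colormap)
```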
{"golden_diff": "diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py\n--- a/rasterio/rio/merge.py\n+++ b/rasterio/rio/merge.py\n@@ -67,3 +67,10 @@\n \n with rasterio.open(output, 'w', **profile) as dst:\n dst.write(dest)\n+\n+ # uses the colormap in the first input raster.\n+ try:\n+ colormap = sources[0].colormap(1)\n+ dst.write_colormap(1, colormap)\n+ except ValueError:\n+ pass\n", "issue": "Copy colormap when rasters are merged\nI'm running `rio merge` over a few single band images that contain a colormap. During the merge, the colormap is not copied to the new raster. Can we modify `rio merge` to preserve the colormap?\n\nI have an initial pass of this change at:\n\nhttps://github.com/kapadia/rasterio/tree/rio-merge-colormap\n\n", "before_files": [{"content": "\"\"\"Merge command.\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\n\n\[email protected](short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\[email protected]_opt\n@format_opt\[email protected]_opt\[email protected]_opt\[email protected]_opt\[email protected]_overwrite_opt\[email protected]('--precision', type=int, default=7,\n help=\"Number of decimal places of precision in alignment of \"\n \"pixels\")\[email protected]_options\[email protected]_context\ndef merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite,\n precision, creation_options):\n \"\"\"Copy valid pixels from input files to an output file.\n\n All files must have the same number of bands, data type, and\n coordinate reference system.\n\n Input files are merged in their listed order using the reverse\n painter's algorithm. If the output file exists, its values will be\n overwritten by input values.\n\n Geospatial bounds and resolution of a new output file in the\n units of the input file coordinate reference system may be provided\n and are otherwise taken from the first input file.\n\n Note: --res changed from 2 parameters in 0.25.\n\n \\b\n --res 0.1 0.1 => --res 0.1 (square)\n --res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular)\n \"\"\"\n from rasterio.merge import merge as merge_tool\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n\n output, files = resolve_inout(\n files=files, output=output, force_overwrite=force_overwrite)\n\n with rasterio.Env(CPL_DEBUG=verbosity > 2):\n sources = [rasterio.open(f) for f in files]\n dest, output_transform = merge_tool(sources, bounds=bounds, res=res,\n nodata=nodata, precision=precision)\n\n profile = sources[0].profile\n profile.pop('affine')\n profile['transform'] = output_transform\n profile['height'] = dest.shape[1]\n profile['width'] = dest.shape[2]\n profile['driver'] = driver\n\n profile.update(**creation_options)\n\n with rasterio.open(output, 'w', **profile) as dst:\n dst.write(dest)\n", "path": "rasterio/rio/merge.py"}]}
num_tokens_prompt: 1,301
num_tokens_diff: 130

problem_id: gh_patches_debug_36937
source: rasdani/github-patches
task_type: git_diff
in_source_id: comic__grand-challenge.org-1923
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `get_follow_object_pk` errors out if `obj.follow_object` is `None` Occurs when the follow object has been deleted and the follow is not cleaned up. See https://sentry.io/organizations/grand-challenge/issues/2511041483/?project=303639&query=is%3Aunresolved </issue> <code> [start of app/grandchallenge/notifications/signals.py] 1 from actstream import action 2 from actstream.actions import follow 3 from actstream.models import Action, Follow, followers 4 from django.db.models.signals import post_save 5 from django.dispatch import receiver 6 from guardian.shortcuts import assign_perm 7 from machina.apps.forum_conversation.models import Post, Topic 8 9 from grandchallenge.notifications.models import Notification 10 11 12 @receiver(post_save, sender=Topic) 13 def create_topic_action(sender, *, instance, created, **_): 14 if created: 15 follow( 16 user=instance.poster, 17 obj=instance, 18 actor_only=False, 19 send_action=False, 20 ) 21 22 if int(instance.type) == int(Topic.TOPIC_ANNOUNCE): 23 action.send( 24 sender=instance.poster, 25 verb="announced", 26 action_object=instance, 27 target=instance.forum, 28 context_class="info", 29 ) 30 else: 31 action.send( 32 sender=instance.poster, 33 verb="posted", 34 action_object=instance, 35 target=instance.forum, 36 ) 37 38 39 @receiver(post_save, sender=Post) 40 def create_post_action(sender, *, instance, created, **_): 41 if ( 42 created 43 and instance.topic.posts_count != 0 44 and not instance.is_topic_head 45 ): 46 follow( 47 user=instance.poster, 48 obj=instance.topic, 49 actor_only=False, 50 send_action=False, 51 ) 52 53 action.send( 54 sender=instance.poster, verb="replied to", target=instance.topic, 55 ) 56 57 58 @receiver(post_save, sender=Action) 59 def create_notification(*, instance, **_): 60 if instance.target: 61 follower_group = followers(instance.target) 62 for follower in follower_group: 63 # only send notifications to followers other than the poster 64 if follower != instance.actor: 65 Notification(user=follower, action=instance).save() 66 else: 67 follower_group = followers(instance.actor) 68 for follower in follower_group: 69 # only send notifications to followers other than the poster 70 if follower != instance.actor: 71 Notification(user=follower, action=instance).save() 72 73 74 @receiver(post_save, sender=Follow) 75 def add_permissions(*, instance, created, **_): 76 if created: 77 assign_perm("change_follow", instance.user, instance) 78 assign_perm("delete_follow", instance.user, instance) 79 assign_perm("view_follow", instance.user, instance) 80 [end of app/grandchallenge/notifications/signals.py] [start of app/grandchallenge/forum_conversation/templatetags/forum_extras.py] 1 from actstream.models import Follow 2 from django import template 3 from django.contrib.contenttypes.models import ContentType 4 5 from grandchallenge.notifications.forms import FollowForm 6 7 register = template.Library() 8 9 10 @register.simple_tag 11 def get_follow_object_pk(user, follow_object): 12 object_follows_for_user = Follow.objects.filter( 13 user=user, 14 content_type=ContentType.objects.get( 15 app_label=follow_object._meta.app_label, 16 model=follow_object._meta.model_name, 17 ), 18 ).all() 19 current_follow_object = [] 20 for obj in object_follows_for_user: 21 if obj.follow_object.id == follow_object.id: 22 current_follow_object = obj.pk 23 return current_follow_object 24 25 26 @register.simple_tag 27 def follow_form(*, user, 
object_id, content_type): 28 return FollowForm( 29 user=user, 30 initial={ 31 "object_id": object_id, 32 "content_type": content_type, 33 "actor_only": False, 34 }, 35 ) 36 37 38 @register.simple_tag() 39 def get_content_type(follow_object): 40 ct = ContentType.objects.get( 41 app_label=follow_object._meta.app_label, 42 model=follow_object._meta.model_name, 43 ) 44 return ct 45 [end of app/grandchallenge/forum_conversation/templatetags/forum_extras.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py --- a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py +++ b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py @@ -16,10 +16,16 @@ model=follow_object._meta.model_name, ), ).all() - current_follow_object = [] - for obj in object_follows_for_user: - if obj.follow_object.id == follow_object.id: - current_follow_object = obj.pk + + if not object_follows_for_user: + current_follow_object = [] + else: + current_follow_object = [] + for obj in object_follows_for_user: + if not obj.follow_object: + continue + elif obj.follow_object.id == follow_object.id: + current_follow_object = obj.pk return current_follow_object @@ -37,8 +43,11 @@ @register.simple_tag() def get_content_type(follow_object): - ct = ContentType.objects.get( - app_label=follow_object._meta.app_label, - model=follow_object._meta.model_name, - ) + try: + ct = ContentType.objects.get( + app_label=follow_object._meta.app_label, + model=follow_object._meta.model_name, + ) + except AttributeError: + ct = None return ct diff --git a/app/grandchallenge/notifications/signals.py b/app/grandchallenge/notifications/signals.py --- a/app/grandchallenge/notifications/signals.py +++ b/app/grandchallenge/notifications/signals.py @@ -1,9 +1,11 @@ from actstream import action from actstream.actions import follow from actstream.models import Action, Follow, followers -from django.db.models.signals import post_save +from django.contrib.contenttypes.models import ContentType +from django.db.models.signals import post_save, pre_delete from django.dispatch import receiver from guardian.shortcuts import assign_perm +from machina.apps.forum.models import Forum from machina.apps.forum_conversation.models import Post, Topic from grandchallenge.notifications.models import Notification @@ -77,3 +79,13 @@ assign_perm("change_follow", instance.user, instance) assign_perm("delete_follow", instance.user, instance) assign_perm("view_follow", instance.user, instance) + + +@receiver(pre_delete, sender=Topic) +@receiver(pre_delete, sender=Forum) +@receiver(pre_delete, sender=Post) +def clean_up_follows(*, instance, **_): + ct = ContentType.objects.filter( + app_label=instance._meta.app_label, model=instance._meta.model_name + ).get() + Follow.objects.filter(content_type=ct, object_id=instance.pk).delete()
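Both hunks in the diff apply the same defensive idea: a `Follow` row can outlive the object it points to, so `follow_object` may come back as `None` and must be skipped instead of dereferenced. Stripped of the Django/actstream specifics, a sketch of the guard (names mirror the template tag, the rest is illustrative):

```python
def get_follow_object_pk(follows, follow_object_id):
    # Skip stale Follow rows whose target was deleted (follow_object is None).
    current = []  # the template tag's "no follow" sentinel is an empty list
    for obj in follows:
        if obj.follow_object is None:
            continue
        if obj.follow_object.id == follow_object_id:
            current = obj.pk
    return current
```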
{"golden_diff": "diff --git a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n--- a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n+++ b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n@@ -16,10 +16,16 @@\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n- current_follow_object = []\r\n- for obj in object_follows_for_user:\r\n- if obj.follow_object.id == follow_object.id:\r\n- current_follow_object = obj.pk\r\n+\r\n+ if not object_follows_for_user:\r\n+ current_follow_object = []\r\n+ else:\r\n+ current_follow_object = []\r\n+ for obj in object_follows_for_user:\r\n+ if not obj.follow_object:\r\n+ continue\r\n+ elif obj.follow_object.id == follow_object.id:\r\n+ current_follow_object = obj.pk\r\n return current_follow_object\r\n \r\n \r\n@@ -37,8 +43,11 @@\n \r\n @register.simple_tag()\r\n def get_content_type(follow_object):\r\n- ct = ContentType.objects.get(\r\n- app_label=follow_object._meta.app_label,\r\n- model=follow_object._meta.model_name,\r\n- )\r\n+ try:\r\n+ ct = ContentType.objects.get(\r\n+ app_label=follow_object._meta.app_label,\r\n+ model=follow_object._meta.model_name,\r\n+ )\r\n+ except AttributeError:\r\n+ ct = None\r\n return ct\r\ndiff --git a/app/grandchallenge/notifications/signals.py b/app/grandchallenge/notifications/signals.py\n--- a/app/grandchallenge/notifications/signals.py\n+++ b/app/grandchallenge/notifications/signals.py\n@@ -1,9 +1,11 @@\n from actstream import action\n from actstream.actions import follow\n from actstream.models import Action, Follow, followers\n-from django.db.models.signals import post_save\n+from django.contrib.contenttypes.models import ContentType\n+from django.db.models.signals import post_save, pre_delete\n from django.dispatch import receiver\n from guardian.shortcuts import assign_perm\n+from machina.apps.forum.models import Forum\n from machina.apps.forum_conversation.models import Post, Topic\n \n from grandchallenge.notifications.models import Notification\n@@ -77,3 +79,13 @@\n assign_perm(\"change_follow\", instance.user, instance)\n assign_perm(\"delete_follow\", instance.user, instance)\n assign_perm(\"view_follow\", instance.user, instance)\n+\n+\n+@receiver(pre_delete, sender=Topic)\n+@receiver(pre_delete, sender=Forum)\n+@receiver(pre_delete, sender=Post)\n+def clean_up_follows(*, instance, **_):\n+ ct = ContentType.objects.filter(\n+ app_label=instance._meta.app_label, model=instance._meta.model_name\n+ ).get()\n+ Follow.objects.filter(content_type=ct, object_id=instance.pk).delete()\n", "issue": "`get_follow_object_pk` errors out if `obj.follow_object` is `None`\nOccurs when the follow object has been deleted and the follow is not cleaned up. 
See https://sentry.io/organizations/grand-challenge/issues/2511041483/?project=303639&query=is%3Aunresolved\n", "before_files": [{"content": "from actstream import action\nfrom actstream.actions import follow\nfrom actstream.models import Action, Follow, followers\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom guardian.shortcuts import assign_perm\nfrom machina.apps.forum_conversation.models import Post, Topic\n\nfrom grandchallenge.notifications.models import Notification\n\n\n@receiver(post_save, sender=Topic)\ndef create_topic_action(sender, *, instance, created, **_):\n if created:\n follow(\n user=instance.poster,\n obj=instance,\n actor_only=False,\n send_action=False,\n )\n\n if int(instance.type) == int(Topic.TOPIC_ANNOUNCE):\n action.send(\n sender=instance.poster,\n verb=\"announced\",\n action_object=instance,\n target=instance.forum,\n context_class=\"info\",\n )\n else:\n action.send(\n sender=instance.poster,\n verb=\"posted\",\n action_object=instance,\n target=instance.forum,\n )\n\n\n@receiver(post_save, sender=Post)\ndef create_post_action(sender, *, instance, created, **_):\n if (\n created\n and instance.topic.posts_count != 0\n and not instance.is_topic_head\n ):\n follow(\n user=instance.poster,\n obj=instance.topic,\n actor_only=False,\n send_action=False,\n )\n\n action.send(\n sender=instance.poster, verb=\"replied to\", target=instance.topic,\n )\n\n\n@receiver(post_save, sender=Action)\ndef create_notification(*, instance, **_):\n if instance.target:\n follower_group = followers(instance.target)\n for follower in follower_group:\n # only send notifications to followers other than the poster\n if follower != instance.actor:\n Notification(user=follower, action=instance).save()\n else:\n follower_group = followers(instance.actor)\n for follower in follower_group:\n # only send notifications to followers other than the poster\n if follower != instance.actor:\n Notification(user=follower, action=instance).save()\n\n\n@receiver(post_save, sender=Follow)\ndef add_permissions(*, instance, created, **_):\n if created:\n assign_perm(\"change_follow\", instance.user, instance)\n assign_perm(\"delete_follow\", instance.user, instance)\n assign_perm(\"view_follow\", instance.user, instance)\n", "path": "app/grandchallenge/notifications/signals.py"}, {"content": "from actstream.models import Follow\r\nfrom django import template\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nfrom grandchallenge.notifications.forms import FollowForm\r\n\r\nregister = template.Library()\r\n\r\n\r\[email protected]_tag\r\ndef get_follow_object_pk(user, follow_object):\r\n object_follows_for_user = Follow.objects.filter(\r\n user=user,\r\n content_type=ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n current_follow_object = []\r\n for obj in object_follows_for_user:\r\n if obj.follow_object.id == follow_object.id:\r\n current_follow_object = obj.pk\r\n return current_follow_object\r\n\r\n\r\[email protected]_tag\r\ndef follow_form(*, user, object_id, content_type):\r\n return FollowForm(\r\n user=user,\r\n initial={\r\n \"object_id\": object_id,\r\n \"content_type\": content_type,\r\n \"actor_only\": False,\r\n },\r\n )\r\n\r\n\r\[email protected]_tag()\r\ndef get_content_type(follow_object):\r\n ct = ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n )\r\n return ct\r\n", "path": 
"app/grandchallenge/forum_conversation/templatetags/forum_extras.py"}]}
num_tokens_prompt: 1,642
num_tokens_diff: 646

problem_id: gh_patches_debug_23374
source: rasdani/github-patches
task_type: git_diff
in_source_id: gratipay__gratipay.com-4390
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Localhost not loading in Firefox Just found this problem in Firefox while setting up Gratipay locally on @dmk246 laptop. For some reason the page never loads when you `make run` and try to open localhost:8537 in Firefox it hangs. We believe it is because `gratipay.report_uri.io` </issue> <code> [start of gratipay/security/__init__.py] 1 from aspen import Response 2 3 4 _requesting_asset = lambda r: r.path.raw.startswith('/assets/') 5 6 7 def only_allow_certain_methods(request): 8 method = request.method.upper() 9 whitelist = ('GET', 'HEAD') if _requesting_asset(request) else ('GET', 'HEAD', 'POST') 10 # POSTing to /assets/ interferes with the csrf.* functions if we're not careful 11 if method not in whitelist: 12 raise Response(405) 13 14 15 def add_headers_to_response(response): 16 """Add security headers. 17 """ 18 19 # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options 20 if 'X-Frame-Options' not in response.headers: 21 response.headers['X-Frame-Options'] = 'SAMEORIGIN' 22 elif response.headers['X-Frame-Options'] == 'ALLOWALL': 23 24 # ALLOWALL is non-standard. It's useful as a signal from a simplate 25 # that it doesn't want X-Frame-Options set at all, but because it's 26 # non-standard we don't send it. Instead we unset the header entirely, 27 # which has the desired effect of allowing framing indiscriminately. 28 # 29 # Refs.: 30 # 31 # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options 32 # http://ipsec.pl/node/1094 33 34 del response.headers['X-Frame-Options'] 35 36 # https://www.owasp.org/index.php/List_of_useful_HTTP_headers 37 if 'X-Content-Type-Options' not in response.headers: 38 response.headers['X-Content-Type-Options'] = 'nosniff' 39 40 # https://www.owasp.org/index.php/List_of_useful_HTTP_headers 41 if 'X-XSS-Protection' not in response.headers: 42 response.headers['X-XSS-Protection'] = '1; mode=block' 43 44 # https://www.w3.org/TR/referrer-policy/ 45 if 'Referrer-Policy' not in response.headers: 46 response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin' 47 48 # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP 49 if 'content-security-policy-report-only' not in response.headers: 50 response.headers['content-security-policy-report-only'] = ( 51 "default-src 'self';" 52 "script-src 'self' assets.gratipay.com 'unsafe-inline';" 53 "style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;" 54 "img-src *;" 55 "font-src 'self' assets.gratipay.com cloud.typography.com data:;" 56 "upgrade-insecure-requests;" 57 "block-all-mixed-content;" 58 "reflected-xss block;" 59 "report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;" 60 ) 61 [end of gratipay/security/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gratipay/security/__init__.py b/gratipay/security/__init__.py --- a/gratipay/security/__init__.py +++ b/gratipay/security/__init__.py @@ -43,7 +43,8 @@ # https://www.w3.org/TR/referrer-policy/ if 'Referrer-Policy' not in response.headers: - response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin' + response.headers['Referrer-Policy'] = \ + 'no-referrer-when-downgrade, strict-origin-when-cross-origin' # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP if 'content-security-policy-report-only' not in response.headers: @@ -53,8 +54,6 @@ "style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;" "img-src *;" "font-src 'self' assets.gratipay.com cloud.typography.com data:;" - "upgrade-insecure-requests;" "block-all-mixed-content;" - "reflected-xss block;" "report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;" )
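Whatever the root cause of the hang, the diff above only touches two response headers. A quick way to inspect what a local instance actually sends (assumes the dev server from `make run` is listening on port 8537 and that the `requests` package is installed):

```python
import requests  # assumption: gratipay dev server running on localhost:8537

resp = requests.get("http://localhost:8537/", timeout=5)
for name in ("Referrer-Policy", "content-security-policy-report-only"):
    # requests header lookup is case-insensitive; None means header absent.
    print(name, "->", resp.headers.get(name))
```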
{"golden_diff": "diff --git a/gratipay/security/__init__.py b/gratipay/security/__init__.py\n--- a/gratipay/security/__init__.py\n+++ b/gratipay/security/__init__.py\n@@ -43,7 +43,8 @@\n \n # https://www.w3.org/TR/referrer-policy/\n if 'Referrer-Policy' not in response.headers:\n- response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'\n+ response.headers['Referrer-Policy'] = \\\n+ 'no-referrer-when-downgrade, strict-origin-when-cross-origin'\n \n # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\n if 'content-security-policy-report-only' not in response.headers:\n@@ -53,8 +54,6 @@\n \"style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;\"\n \"img-src *;\"\n \"font-src 'self' assets.gratipay.com cloud.typography.com data:;\"\n- \"upgrade-insecure-requests;\"\n \"block-all-mixed-content;\"\n- \"reflected-xss block;\"\n \"report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;\"\n )\n", "issue": "Localhost not loading in Firefox\nJust found this problem in Firefox while setting up Gratipay locally on @dmk246 laptop. For some reason the page never loads when you `make run` and try to open localhost:8537 in Firefox it hangs. We believe it is because `gratipay.report_uri.io` \n", "before_files": [{"content": "from aspen import Response\n\n\n_requesting_asset = lambda r: r.path.raw.startswith('/assets/')\n\n\ndef only_allow_certain_methods(request):\n method = request.method.upper()\n whitelist = ('GET', 'HEAD') if _requesting_asset(request) else ('GET', 'HEAD', 'POST')\n # POSTing to /assets/ interferes with the csrf.* functions if we're not careful\n if method not in whitelist:\n raise Response(405)\n\n\ndef add_headers_to_response(response):\n \"\"\"Add security headers.\n \"\"\"\n\n # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options\n if 'X-Frame-Options' not in response.headers:\n response.headers['X-Frame-Options'] = 'SAMEORIGIN'\n elif response.headers['X-Frame-Options'] == 'ALLOWALL':\n\n # ALLOWALL is non-standard. It's useful as a signal from a simplate\n # that it doesn't want X-Frame-Options set at all, but because it's\n # non-standard we don't send it. 
Instead we unset the header entirely,\n # which has the desired effect of allowing framing indiscriminately.\n #\n # Refs.:\n #\n # http://en.wikipedia.org/wiki/Clickjacking#X-Frame-Options\n # http://ipsec.pl/node/1094\n\n del response.headers['X-Frame-Options']\n\n # https://www.owasp.org/index.php/List_of_useful_HTTP_headers\n if 'X-Content-Type-Options' not in response.headers:\n response.headers['X-Content-Type-Options'] = 'nosniff'\n\n # https://www.owasp.org/index.php/List_of_useful_HTTP_headers\n if 'X-XSS-Protection' not in response.headers:\n response.headers['X-XSS-Protection'] = '1; mode=block'\n\n # https://www.w3.org/TR/referrer-policy/\n if 'Referrer-Policy' not in response.headers:\n response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\n if 'content-security-policy-report-only' not in response.headers:\n response.headers['content-security-policy-report-only'] = (\n \"default-src 'self';\"\n \"script-src 'self' assets.gratipay.com 'unsafe-inline';\"\n \"style-src 'self' assets.gratipay.com downloads.gratipay.com cloud.typography.com;\"\n \"img-src *;\"\n \"font-src 'self' assets.gratipay.com cloud.typography.com data:;\"\n \"upgrade-insecure-requests;\"\n \"block-all-mixed-content;\"\n \"reflected-xss block;\"\n \"report-uri https://gratipay.report-uri.io/r/default/csp/reportOnly;\"\n )\n", "path": "gratipay/security/__init__.py"}]}
num_tokens_prompt: 1,344
num_tokens_diff: 270

problem_id: gh_patches_debug_1920
source: rasdani/github-patches
task_type: git_diff
in_source_id: mozilla__bugbug-598
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use new 'everchanged' operator instead of changedafter 1970 Depends on https://bugzilla.mozilla.org/show_bug.cgi?id=1546624. </issue> <code> [start of scripts/get_type_labels.py] 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import argparse 7 import csv 8 import sys 9 10 import requests 11 12 13 def parse_args(args): 14 parser = argparse.ArgumentParser() 15 parser.add_argument( 16 "--types", 17 help="Types to retrieve", 18 default=["defect", "enhancement", "task"], 19 nargs="*", 20 ) 21 return parser.parse_args(args) 22 23 24 def main(args): 25 params = { 26 "columnlist": "bug_type", 27 "order": "bug_id", 28 "j_top": "OR", 29 "f1": "bug_type", 30 "o1": "changedafter", 31 "v1": "1970-01-01", 32 "f2": "OP", 33 "f3": "bug_type", 34 "o3": "anyexact", 35 "v3": "task,enhancement", 36 "f4": "bug_id", 37 "o4": "greaterthan", 38 "v4": 1540807, 39 "f5": "CP", 40 "ctype": "csv", 41 } 42 43 r = requests.get("https://bugzilla.mozilla.org/buglist.cgi", params=params) 44 r.raise_for_status() 45 46 with open("bugbug/labels/defect_enhancement_task_h.csv", "r") as f: 47 reader = csv.reader(f) 48 headers = next(reader) 49 bug_type_map = {int(row[0]): row[1] for row in reader} 50 51 # We add to our csv both labels that were changed, and labels that are in 52 # the list of requested types. 53 reader = csv.reader(r.text.splitlines()) 54 next(reader) 55 for row in reader: 56 if int(row[0]) in bug_type_map or row[1] in args.types: 57 bug_type_map[int(row[0])] = row[1] 58 59 with open("bugbug/labels/defect_enhancement_task_h.csv", "w") as f: 60 writer = csv.writer(f) 61 writer.writerow(headers) 62 writer.writerows(sorted(bug_type_map.items())) 63 64 65 if __name__ == "__main__": 66 main(parse_args(sys.argv[1:])) 67 [end of scripts/get_type_labels.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/get_type_labels.py b/scripts/get_type_labels.py --- a/scripts/get_type_labels.py +++ b/scripts/get_type_labels.py @@ -27,8 +27,7 @@ "order": "bug_id", "j_top": "OR", "f1": "bug_type", - "o1": "changedafter", - "v1": "1970-01-01", + "o1": "everchanged", "f2": "OP", "f3": "bug_type", "o3": "anyexact",
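The whole fix is a two-line change to the query params. A small offline sketch that builds the new query URL (trimmed to the fields the diff touches plus the bug-id filter already present in the script) so the `everchanged` operator can be eyeballed in the encoded request:

```python
import requests

params = {
    "columnlist": "bug_type",
    "f1": "bug_type",
    "o1": "everchanged",  # replaces o1=changedafter / v1=1970-01-01
    "f4": "bug_id",
    "o4": "greaterthan",
    "v4": 1540807,
    "ctype": "csv",
}
# Build the URL without firing the (potentially slow) request.
url = requests.Request(
    "GET", "https://bugzilla.mozilla.org/buglist.cgi", params=params
).prepare().url
print(url)
```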
{"golden_diff": "diff --git a/scripts/get_type_labels.py b/scripts/get_type_labels.py\n--- a/scripts/get_type_labels.py\n+++ b/scripts/get_type_labels.py\n@@ -27,8 +27,7 @@\n \"order\": \"bug_id\",\n \"j_top\": \"OR\",\n \"f1\": \"bug_type\",\n- \"o1\": \"changedafter\",\n- \"v1\": \"1970-01-01\",\n+ \"o1\": \"everchanged\",\n \"f2\": \"OP\",\n \"f3\": \"bug_type\",\n \"o3\": \"anyexact\",\n", "issue": "Use new 'everchanged' operator instead of changedafter 1970\nDepends on https://bugzilla.mozilla.org/show_bug.cgi?id=1546624.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport sys\n\nimport requests\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--types\",\n help=\"Types to retrieve\",\n default=[\"defect\", \"enhancement\", \"task\"],\n nargs=\"*\",\n )\n return parser.parse_args(args)\n\n\ndef main(args):\n params = {\n \"columnlist\": \"bug_type\",\n \"order\": \"bug_id\",\n \"j_top\": \"OR\",\n \"f1\": \"bug_type\",\n \"o1\": \"changedafter\",\n \"v1\": \"1970-01-01\",\n \"f2\": \"OP\",\n \"f3\": \"bug_type\",\n \"o3\": \"anyexact\",\n \"v3\": \"task,enhancement\",\n \"f4\": \"bug_id\",\n \"o4\": \"greaterthan\",\n \"v4\": 1540807,\n \"f5\": \"CP\",\n \"ctype\": \"csv\",\n }\n\n r = requests.get(\"https://bugzilla.mozilla.org/buglist.cgi\", params=params)\n r.raise_for_status()\n\n with open(\"bugbug/labels/defect_enhancement_task_h.csv\", \"r\") as f:\n reader = csv.reader(f)\n headers = next(reader)\n bug_type_map = {int(row[0]): row[1] for row in reader}\n\n # We add to our csv both labels that were changed, and labels that are in\n # the list of requested types.\n reader = csv.reader(r.text.splitlines())\n next(reader)\n for row in reader:\n if int(row[0]) in bug_type_map or row[1] in args.types:\n bug_type_map[int(row[0])] = row[1]\n\n with open(\"bugbug/labels/defect_enhancement_task_h.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerow(headers)\n writer.writerows(sorted(bug_type_map.items()))\n\n\nif __name__ == \"__main__\":\n main(parse_args(sys.argv[1:]))\n", "path": "scripts/get_type_labels.py"}]}
num_tokens_prompt: 1,223
num_tokens_diff: 134

problem_id: gh_patches_debug_17121
source: rasdani/github-patches
task_type: git_diff
in_source_id: opendatacube__datacube-core-905
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update release process documentation Many steps described in the document have since been automated, documentation should reflect that: - Upload to pypi is done by Travis - Updates for conda-forge are done by some bot that creates PR </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 5 tests_require = [ 6 'compliance-checker>=4.0.0', 7 'hypothesis', 8 'mock', 9 'pycodestyle', 10 'pylint', 11 'pytest', 12 'pytest-cov', 13 'pytest-timeout', 14 'pytest-httpserver', 15 'moto', 16 ] 17 18 extras_require = { 19 'performance': ['ciso8601', 'bottleneck'], 20 'interactive': ['matplotlib', 'fiona'], 21 'distributed': ['distributed', 'dask[distributed]'], 22 'doc': ['Sphinx', 'setuptools'], 23 'replicas': ['paramiko', 'sshtunnel', 'tqdm'], 24 'celery': ['celery>=4', 'redis'], 25 's3': ['boto3'], 26 'test': tests_require, 27 } 28 # An 'all' option, following ipython naming conventions. 29 extras_require['all'] = sorted(set(sum(extras_require.values(), []))) 30 31 extra_plugins = dict(read=[], write=[], index=[]) 32 33 setup( 34 name='datacube', 35 python_requires='>=3.5.2', 36 37 url='https://github.com/opendatacube/datacube-core', 38 author='Open Data Cube', 39 maintainer='Open Data Cube', 40 maintainer_email='', 41 description='An analysis environment for satellite and other earth observation data', 42 long_description=open('README.rst').read(), 43 long_description_content_type='text/x-rst', 44 license='Apache License 2.0', 45 classifiers=[ 46 "Development Status :: 4 - Beta", 47 "Intended Audience :: Developers", 48 "Intended Audience :: Science/Research", 49 "License :: OSI Approved :: Apache Software License", 50 "Natural Language :: English", 51 "Operating System :: MacOS :: MacOS X", 52 "Operating System :: POSIX", 53 "Operating System :: POSIX :: BSD", 54 "Operating System :: POSIX :: Linux", 55 "Operating System :: Microsoft :: Windows", 56 "Programming Language :: Python", 57 "Programming Language :: Python :: 3", 58 "Programming Language :: Python :: 3.5", 59 "Programming Language :: Python :: 3.6", 60 "Topic :: Scientific/Engineering :: GIS", 61 "Topic :: Scientific/Engineering :: Information Analysis", 62 ], 63 64 packages=find_packages( 65 exclude=('tests', 'tests.*', 66 'integration_tests', 'integration_tests.*') 67 ), 68 package_data={ 69 '': ['*.yaml', '*/*.yaml'], 70 }, 71 scripts=[ 72 'datacube_apps/scripts/pbs_helpers.sh' 73 ], 74 install_requires=[ 75 'affine', 76 'pyproj>=2.5', 77 'shapely>=1.6.4', 78 'cachetools', 79 'click>=5.0', 80 'cloudpickle>=0.4', 81 'dask[array]', 82 'distributed', 83 'jsonschema', 84 'netcdf4', 85 'numpy', 86 'psycopg2', 87 'lark-parser>=0.6.7', 88 'python-dateutil', 89 'pyyaml', 90 'rasterio>=1.0.2', # Multi-band re-project fixed in that version 91 'sqlalchemy', 92 'toolz', 93 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost 94 ], 95 extras_require=extras_require, 96 tests_require=tests_require, 97 98 entry_points={ 99 'console_scripts': [ 100 'datacube = datacube.scripts.cli_app:cli', 101 'datacube-search = datacube.scripts.search_tool:cli', 102 'datacube-stacker = datacube_apps.stacker:main', 103 'datacube-worker = datacube.execution.worker:main', 104 'datacube-fixer = datacube_apps.stacker:fixer_main', 105 'datacube-ncml = datacube_apps.ncml:ncml_app', 106 'pixeldrill = datacube_apps.pixeldrill:main [interactive]', 107 'movie_generator = 
datacube_apps.movie_generator:main', 108 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]' 109 ], 110 'datacube.plugins.io.read': [ 111 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init', 112 *extra_plugins['read'], 113 ], 114 'datacube.plugins.io.write': [ 115 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init', 116 *extra_plugins['write'], 117 ], 118 'datacube.plugins.index': [ 119 'default = datacube.index.index:index_driver_init', 120 *extra_plugins['index'], 121 ], 122 }, 123 ) 124 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -32,7 +32,7 @@ setup( name='datacube', - python_requires='>=3.5.2', + python_requires='>=3.6.0', url='https://github.com/opendatacube/datacube-core', author='Open Data Cube', @@ -55,8 +55,8 @@ "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", "Topic :: Scientific/Engineering :: GIS", "Topic :: Scientific/Engineering :: Information Analysis", ],
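Only metadata changes here, but the effect is enforceable: pip reads `python_requires` as a PEP 440 specifier. A quick check with the `packaging` library (the same machinery pip uses) that the new bound excludes Python 3.5:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=3.6.0")  # the new python_requires value
for version in ("3.5.2", "3.6.0", "3.7.4"):
    # 3.5.2 is now refused at install time; 3.6 and later pass.
    print(version, "allowed:", version in spec)
```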
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,7 +32,7 @@\n \n setup(\n name='datacube',\n- python_requires='>=3.5.2',\n+ python_requires='>=3.6.0',\n \n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n@@ -55,8 +55,8 @@\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n", "issue": "Update release process documentation\nMany steps described in the document have since been automated, documentation should reflect that:\r\n\r\n- Upload to pypi is done by Travis\r\n- Updates for conda-forge are done by some bot that creates PR\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'compliance-checker>=4.0.0',\n 'hypothesis',\n 'mock',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.5.2',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark-parser>=0.6.7',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.0.2', # Multi-band re-project fixed in that version\n 'sqlalchemy',\n 'toolz',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n 
extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 1,839
num_tokens_diff: 188

problem_id: gh_patches_debug_27280
source: rasdani/github-patches
task_type: git_diff
in_source_id: Pylons__pyramid-2620
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pcreate -s shows wrong link to tutorials after a ``` pcreate -s alchemy scaffold-alchemy ``` I see a link to tutorials, but this link is a 404: ``` Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials ``` </issue> <code> [start of pyramid/scaffolds/__init__.py] 1 import binascii 2 import os 3 from textwrap import dedent 4 5 from pyramid.compat import native_ 6 7 from pyramid.scaffolds.template import Template # API 8 9 class PyramidTemplate(Template): 10 """ 11 A class that can be used as a base class for Pyramid scaffolding 12 templates. 13 """ 14 def pre(self, command, output_dir, vars): 15 """ Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding 16 several variables to the default variables list (including 17 ``random_string``, and ``package_logger``). It also prevents common 18 misnamings (such as naming a package "site" or naming a package 19 logger "root". 20 """ 21 vars['random_string'] = native_(binascii.hexlify(os.urandom(20))) 22 package_logger = vars['package'] 23 if package_logger == 'root': 24 # Rename the app logger in the rare case a project is named 'root' 25 package_logger = 'app' 26 vars['package_logger'] = package_logger 27 return Template.pre(self, command, output_dir, vars) 28 29 def post(self, command, output_dir, vars): # pragma: no cover 30 """ Overrides :meth:`pyramid.scaffolds.template.Template.post`, to 31 print "Welcome to Pyramid. Sorry for the convenience." after a 32 successful scaffolding rendering.""" 33 34 separator = "=" * 79 35 msg = dedent( 36 """ 37 %(separator)s 38 Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials 39 Documentation: http://docs.pylonsproject.org/projects/pyramid 40 41 Twitter (tips & updates): http://twitter.com/pylons 42 Mailing List: http://groups.google.com/group/pylons-discuss 43 44 Welcome to Pyramid. Sorry for the convenience. 45 %(separator)s 46 """ % {'separator': separator}) 47 48 self.out(msg) 49 return Template.post(self, command, output_dir, vars) 50 51 def out(self, msg): # pragma: no cover (replaceable testing hook) 52 print(msg) 53 54 class StarterProjectTemplate(PyramidTemplate): 55 _template_dir = 'starter' 56 summary = 'Pyramid starter project' 57 58 class ZODBProjectTemplate(PyramidTemplate): 59 _template_dir = 'zodb' 60 summary = 'Pyramid ZODB project using traversal' 61 62 class AlchemyProjectTemplate(PyramidTemplate): 63 _template_dir = 'alchemy' 64 summary = 'Pyramid SQLAlchemy project using url dispatch' 65 [end of pyramid/scaffolds/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyramid/scaffolds/__init__.py b/pyramid/scaffolds/__init__.py --- a/pyramid/scaffolds/__init__.py +++ b/pyramid/scaffolds/__init__.py @@ -35,11 +35,10 @@ msg = dedent( """ %(separator)s - Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials - Documentation: http://docs.pylonsproject.org/projects/pyramid - - Twitter (tips & updates): http://twitter.com/pylons - Mailing List: http://groups.google.com/group/pylons-discuss + Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/ + Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/ + Twitter: https://twitter.com/trypyramid + Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss Welcome to Pyramid. Sorry for the convenience. %(separator)s @@ -53,12 +52,13 @@ class StarterProjectTemplate(PyramidTemplate): _template_dir = 'starter' - summary = 'Pyramid starter project' + summary = 'Pyramid starter project using URL dispatch and Chameleon' class ZODBProjectTemplate(PyramidTemplate): _template_dir = 'zodb' - summary = 'Pyramid ZODB project using traversal' + summary = 'Pyramid project using ZODB, traversal, and Chameleon' class AlchemyProjectTemplate(PyramidTemplate): _template_dir = 'alchemy' - summary = 'Pyramid SQLAlchemy project using url dispatch' + summary = 'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and' + ' Chameleon'
{"golden_diff": "diff --git a/pyramid/scaffolds/__init__.py b/pyramid/scaffolds/__init__.py\n--- a/pyramid/scaffolds/__init__.py\n+++ b/pyramid/scaffolds/__init__.py\n@@ -35,11 +35,10 @@\n msg = dedent(\n \"\"\"\n %(separator)s\n- Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n- Documentation: http://docs.pylonsproject.org/projects/pyramid\n-\n- Twitter (tips & updates): http://twitter.com/pylons\n- Mailing List: http://groups.google.com/group/pylons-discuss\n+ Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/\n+ Documentation: http://docs.pylonsproject.org/projects/pyramid/en/latest/\n+ Twitter: https://twitter.com/trypyramid\n+ Mailing List: https://groups.google.com/forum/#!forum/pylons-discuss\n \n Welcome to Pyramid. Sorry for the convenience.\n %(separator)s\n@@ -53,12 +52,13 @@\n \n class StarterProjectTemplate(PyramidTemplate):\n _template_dir = 'starter'\n- summary = 'Pyramid starter project'\n+ summary = 'Pyramid starter project using URL dispatch and Chameleon'\n \n class ZODBProjectTemplate(PyramidTemplate):\n _template_dir = 'zodb'\n- summary = 'Pyramid ZODB project using traversal'\n+ summary = 'Pyramid project using ZODB, traversal, and Chameleon'\n \n class AlchemyProjectTemplate(PyramidTemplate):\n _template_dir = 'alchemy'\n- summary = 'Pyramid SQLAlchemy project using url dispatch'\n+ summary = 'Pyramid project using SQLAlchemy, SQLite, URL dispatch, and'\n+ ' Chameleon'\n", "issue": "pcreate -s shows wrong link to tutorials\nafter a \n\n```\npcreate -s alchemy scaffold-alchemy\n```\n\nI see a link to tutorials, but this link is a 404: \n\n```\nTutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n```\n\n", "before_files": [{"content": "import binascii\nimport os\nfrom textwrap import dedent\n\nfrom pyramid.compat import native_\n\nfrom pyramid.scaffolds.template import Template # API\n\nclass PyramidTemplate(Template):\n \"\"\"\n A class that can be used as a base class for Pyramid scaffolding\n templates.\n \"\"\"\n def pre(self, command, output_dir, vars):\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.pre`, adding\n several variables to the default variables list (including\n ``random_string``, and ``package_logger``). It also prevents common\n misnamings (such as naming a package \"site\" or naming a package\n logger \"root\".\n \"\"\"\n vars['random_string'] = native_(binascii.hexlify(os.urandom(20)))\n package_logger = vars['package']\n if package_logger == 'root':\n # Rename the app logger in the rare case a project is named 'root'\n package_logger = 'app'\n vars['package_logger'] = package_logger\n return Template.pre(self, command, output_dir, vars)\n\n def post(self, command, output_dir, vars): # pragma: no cover\n \"\"\" Overrides :meth:`pyramid.scaffolds.template.Template.post`, to\n print \"Welcome to Pyramid. Sorry for the convenience.\" after a\n successful scaffolding rendering.\"\"\"\n\n separator = \"=\" * 79\n msg = dedent(\n \"\"\"\n %(separator)s\n Tutorials: http://docs.pylonsproject.org/projects/pyramid_tutorials\n Documentation: http://docs.pylonsproject.org/projects/pyramid\n\n Twitter (tips & updates): http://twitter.com/pylons\n Mailing List: http://groups.google.com/group/pylons-discuss\n\n Welcome to Pyramid. 
Sorry for the convenience.\n %(separator)s\n \"\"\" % {'separator': separator})\n\n self.out(msg)\n return Template.post(self, command, output_dir, vars)\n\n def out(self, msg): # pragma: no cover (replaceable testing hook)\n print(msg)\n\nclass StarterProjectTemplate(PyramidTemplate):\n _template_dir = 'starter'\n summary = 'Pyramid starter project'\n\nclass ZODBProjectTemplate(PyramidTemplate):\n _template_dir = 'zodb'\n summary = 'Pyramid ZODB project using traversal'\n\nclass AlchemyProjectTemplate(PyramidTemplate):\n _template_dir = 'alchemy'\n summary = 'Pyramid SQLAlchemy project using url dispatch'\n", "path": "pyramid/scaffolds/__init__.py"}]}
1,258
399
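A detail worth flagging in the golden diff above: the `AlchemyProjectTemplate` hunk ends with two adjacent string literals on separate lines. Implicit string concatenation in Python only happens inside a single expression, so as patched the trailing `' Chameleon'` literal is a standalone no-op statement and the stored summary silently loses that word. A corrected fragment of `pyramid/scaffolds/__init__.py` — the parenthesization is my suggestion, not part of the record:

```python
class AlchemyProjectTemplate(PyramidTemplate):
    _template_dir = 'alchemy'
    # Parentheses make the two literals one expression, so Python joins them
    summary = ('Pyramid project using SQLAlchemy, SQLite, URL dispatch, and'
               ' Chameleon')
```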
gh_patches_debug_9196
rasdani/github-patches
git_diff
conda__conda-build-1470
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> conda metapackage Hello, I was wondering why the behaviour of `conda metapackage` has changed. Previously, it outputted helpful information about the location of the recently created package. However, this is the output now: ``` BUILD START: cgat-devel-0.4-py27r3.2.2_6 Package: cgat-devel-0.4-py27r3.2.2_6 source tree in: /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780260959/work number of files: 1 Fixing permissions Detected hard-coded path in text file bin/cgat Fixing permissions ``` Moreover, the command also creates temporary folders that are left empty after the package has been built: ``` sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720264845 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476695297317 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718035758 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718312877 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476721899323 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476698228374 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476696744782 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476719724225 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720123351 /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780047095 ``` Is this required? Here is additional info about my environment: ``` $ conda info Current conda install: platform : linux-64 conda version : 4.2.9 conda is private : False conda-env version : 4.2.9 conda-build version : 2.0.6 python version : 2.7.12.final.0 requests version : 2.11.1 root environment : /sebastian/conda/conda-build/build-testing (writable) default environment : /sebastian/conda/conda-build/build-testing envs directories : /sebastian/conda/conda-build/build-testing/envs package cache : /sebastian/conda/conda-build/build-testing/pkgs channel URLs : https://conda.anaconda.org/cgat/linux-64/ https://conda.anaconda.org/cgat/noarch/ https://repo.continuum.io/pkgs/free/linux-64/ https://repo.continuum.io/pkgs/free/noarch/ https://repo.continuum.io/pkgs/pro/linux-64/ https://repo.continuum.io/pkgs/pro/noarch/ https://conda.anaconda.org/conda-forge/linux-64/ https://conda.anaconda.org/conda-forge/noarch/ https://conda.anaconda.org/r/linux-64/ https://conda.anaconda.org/r/noarch/ https://conda.anaconda.org/bioconda/linux-64/ https://conda.anaconda.org/bioconda/noarch/ config file : /ifs/home/sebastian/.condarc offline mode : False ``` Many thanks, Sebastian </issue> <code> [start of conda_build/metapackage.py] 1 from collections import defaultdict 2 from conda_build.config import Config 3 from conda_build.metadata import MetaData 4 5 6 def create_metapackage(name, version, entry_points=(), build_string=None, build_number=0, 7 dependencies=(), home=None, license_name=None, summary=None, config=None): 8 # local import to avoid circular import, we provid create_metapackage in api 9 from conda_build.build import build 10 11 if not config: 12 config = Config() 13 14 d = defaultdict(dict) 15 d['package']['name'] = name 16 d['package']['version'] = version 17 d['build']['number'] = build_number 18 d['build']['entry_points'] = entry_points 19 # MetaData does the auto stuff if the build string is None 20 d['build']['string'] = build_string 21 d['requirements']['run'] = dependencies 22 d['about']['home'] = 
home 23 d['about']['license'] = license_name 24 d['about']['summary'] = summary 25 d = dict(d) 26 m = MetaData.fromdict(d, config=config) 27 config.compute_build_id(m.name()) 28 29 return build(m, config=config, need_source_download=False) 30 [end of conda_build/metapackage.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conda_build/metapackage.py b/conda_build/metapackage.py --- a/conda_build/metapackage.py +++ b/conda_build/metapackage.py @@ -6,7 +6,7 @@ def create_metapackage(name, version, entry_points=(), build_string=None, build_number=0, dependencies=(), home=None, license_name=None, summary=None, config=None): # local import to avoid circular import, we provid create_metapackage in api - from conda_build.build import build + from conda_build.api import build if not config: config = Config()
{"golden_diff": "diff --git a/conda_build/metapackage.py b/conda_build/metapackage.py\n--- a/conda_build/metapackage.py\n+++ b/conda_build/metapackage.py\n@@ -6,7 +6,7 @@\n def create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,\n dependencies=(), home=None, license_name=None, summary=None, config=None):\n # local import to avoid circular import, we provid create_metapackage in api\n- from conda_build.build import build\n+ from conda_build.api import build\n \n if not config:\n config = Config()\n", "issue": "conda metapackage \nHello,\n\nI was wondering why the behaviour of `conda metapackage` has changed. Previously, it outputted helpful information about the location of the recently created package. However, this is the output now:\n\n```\nBUILD START: cgat-devel-0.4-py27r3.2.2_6\nPackage: cgat-devel-0.4-py27r3.2.2_6\nsource tree in: /sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780260959/work\nnumber of files: 1\nFixing permissions\nDetected hard-coded path in text file bin/cgat\nFixing permissions\n```\n\nMoreover, the command also creates temporary folders that are left empty after the package has been built:\n\n```\nsebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720264845\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476695297317\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718035758\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476718312877\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476721899323\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476698228374\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476696744782\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476719724225\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476720123351\n/sebastian/conda/conda-build/build-testing/conda-bld/cgat-devel_1476780047095\n```\n\nIs this required?\n\nHere is additional info about my environment:\n\n```\n$ conda info\nCurrent conda install:\n\n platform : linux-64\n conda version : 4.2.9\n conda is private : False\n conda-env version : 4.2.9\n conda-build version : 2.0.6\n python version : 2.7.12.final.0\n requests version : 2.11.1\n root environment : /sebastian/conda/conda-build/build-testing (writable)\n default environment : /sebastian/conda/conda-build/build-testing\n envs directories : /sebastian/conda/conda-build/build-testing/envs\n package cache : /sebastian/conda/conda-build/build-testing/pkgs\n channel URLs : https://conda.anaconda.org/cgat/linux-64/\n https://conda.anaconda.org/cgat/noarch/\n https://repo.continuum.io/pkgs/free/linux-64/\n https://repo.continuum.io/pkgs/free/noarch/\n https://repo.continuum.io/pkgs/pro/linux-64/\n https://repo.continuum.io/pkgs/pro/noarch/\n https://conda.anaconda.org/conda-forge/linux-64/\n https://conda.anaconda.org/conda-forge/noarch/\n https://conda.anaconda.org/r/linux-64/\n https://conda.anaconda.org/r/noarch/\n https://conda.anaconda.org/bioconda/linux-64/\n https://conda.anaconda.org/bioconda/noarch/\n config file : /ifs/home/sebastian/.condarc\n offline mode : False\n```\n\nMany thanks,\nSebastian\n\n", "before_files": [{"content": "from collections import defaultdict\nfrom conda_build.config import Config\nfrom conda_build.metadata import MetaData\n\n\ndef create_metapackage(name, version, entry_points=(), build_string=None, build_number=0,\n dependencies=(), home=None, license_name=None, summary=None, 
config=None):\n # local import to avoid circular import, we provid create_metapackage in api\n from conda_build.build import build\n\n if not config:\n config = Config()\n\n d = defaultdict(dict)\n d['package']['name'] = name\n d['package']['version'] = version\n d['build']['number'] = build_number\n d['build']['entry_points'] = entry_points\n # MetaData does the auto stuff if the build string is None\n d['build']['string'] = build_string\n d['requirements']['run'] = dependencies\n d['about']['home'] = home\n d['about']['license'] = license_name\n d['about']['summary'] = summary\n d = dict(d)\n m = MetaData.fromdict(d, config=config)\n config.compute_build_id(m.name())\n\n return build(m, config=config, need_source_download=False)\n", "path": "conda_build/metapackage.py"}]}
1,741
138
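The whole fix above is one import: the metapackage builder now calls the public `conda_build.api.build` instead of the low-level `conda_build.build.build`. Read in light of the issue, the plausible intent is that the api-level wrapper performs the end-of-build bookkeeping the reporter misses (printing the built package's location, tidying per-build work directories) while the low-level function only assembles the package — the record doesn't show either implementation, so treat that as a reading rather than a fact. The patched function starts like this, with the rest of its body unchanged:

```python
# Patched import per the golden diff; everything below the import is
# identical to conda_build/metapackage.py as shown in the record.
def create_metapackage(name, version, entry_points=(), build_string=None,
                       build_number=0, dependencies=(), home=None,
                       license_name=None, summary=None, config=None):
    # local import to avoid a circular import; use the public API wrapper
    from conda_build.api import build
    ...  # body unchanged from the record above
```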
gh_patches_debug_17275
rasdani/github-patches
git_diff
zestedesavoir__zds-site-5892
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue>
Remove private messages from the Django admin interface
At present, a super-admin can read every private message (MP) on the site through the Django interface. Admittedly, the interface is hardly practical for this (no notion of threads, etc.), but I still find it quite undesirable.

![Look at all these private messages in the Django admin interface.](https://user-images.githubusercontent.com/1417570/88059844-b199bf00-cb65-11ea-8dc9-fc78310001c7.png)

After discussing this with @gcodeur, I therefore propose to **remove private messages from the Django admin interface**. Someone with production access could still read them (since they are not end-to-end encrypted), but it would reduce the exposure accordingly.
</issue>
<code>
[start of zds/mp/admin.py]
1 from django.contrib import admin
2 
3 from .models import PrivatePost, PrivateTopic, PrivateTopicRead
4 
5 
6 class PrivatePostAdmin(admin.ModelAdmin):
7 
8     """Representation of PrivatePost model in the admin interface."""
9 
10     list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic')
11     raw_id_fields = ('privatetopic', 'author')
12 
13 
14 class PrivateTopicAdmin(admin.ModelAdmin):
15 
16     """Representation of PrivateTopic model in the admin interface."""
17 
18     list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate')
19     raw_id_fields = ('author', 'participants', 'last_message')
20 
21 
22 class PrivateTopicReadAdmin(admin.ModelAdmin):
23 
24     """Representation of PrivateTopicRead model in the admin interface."""
25 
26     list_display = ('privatetopic', 'privatepost', 'user')
27     raw_id_fields = ('privatetopic', 'privatepost', 'user')
28 
29 
30 admin.site.register(PrivatePost, PrivatePostAdmin)
31 admin.site.register(PrivateTopic, PrivateTopicAdmin)
32 admin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)
[end of zds/mp/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/mp/admin.py b/zds/mp/admin.py deleted file mode 100644 --- a/zds/mp/admin.py +++ /dev/null @@ -1,32 +0,0 @@ -from django.contrib import admin - -from .models import PrivatePost, PrivateTopic, PrivateTopicRead - - -class PrivatePostAdmin(admin.ModelAdmin): - - """Representation of PrivatePost model in the admin interface.""" - - list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic') - raw_id_fields = ('privatetopic', 'author') - - -class PrivateTopicAdmin(admin.ModelAdmin): - - """Representation of PrivateTopic model in the admin interface.""" - - list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate') - raw_id_fields = ('author', 'participants', 'last_message') - - -class PrivateTopicReadAdmin(admin.ModelAdmin): - - """Representation of PrivateTopicRead model in the admin interface.""" - - list_display = ('privatetopic', 'privatepost', 'user') - raw_id_fields = ('privatetopic', 'privatepost', 'user') - - -admin.site.register(PrivatePost, PrivatePostAdmin) -admin.site.register(PrivateTopic, PrivateTopicAdmin) -admin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)
{"golden_diff": "diff --git a/zds/mp/admin.py b/zds/mp/admin.py\ndeleted file mode 100644\n--- a/zds/mp/admin.py\n+++ /dev/null\n@@ -1,32 +0,0 @@\n-from django.contrib import admin\n-\n-from .models import PrivatePost, PrivateTopic, PrivateTopicRead\n-\n-\n-class PrivatePostAdmin(admin.ModelAdmin):\n-\n- \"\"\"Representation of PrivatePost model in the admin interface.\"\"\"\n-\n- list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic')\n- raw_id_fields = ('privatetopic', 'author')\n-\n-\n-class PrivateTopicAdmin(admin.ModelAdmin):\n-\n- \"\"\"Representation of PrivateTopic model in the admin interface.\"\"\"\n-\n- list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate')\n- raw_id_fields = ('author', 'participants', 'last_message')\n-\n-\n-class PrivateTopicReadAdmin(admin.ModelAdmin):\n-\n- \"\"\"Representation of PrivateTopicRead model in the admin interface.\"\"\"\n-\n- list_display = ('privatetopic', 'privatepost', 'user')\n- raw_id_fields = ('privatetopic', 'privatepost', 'user')\n-\n-\n-admin.site.register(PrivatePost, PrivatePostAdmin)\n-admin.site.register(PrivateTopic, PrivateTopicAdmin)\n-admin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)\n", "issue": "Supprimer les messages priv\u00e9s de l'interface d'administration de Django\n\u00c0 l'heure actuelle, un super-admin peut, via l'interface de Django, lire tous les MPs du site. Certes, l'interface est peu pratique pour \u00e7a (aucune notion de fil, etc.), mais je trouve tout de m\u00eame bien peu souhaitable.\r\n\r\n![Regardez-moi tous ces MPs dans l'interface d'administration de Django.](https://user-images.githubusercontent.com/1417570/88059844-b199bf00-cb65-11ea-8dc9-fc78310001c7.png)\r\n\r\nApr\u00e8s discussion avec @gcodeur sur ce sujet, je propose donc de **supprimer les MPs de l'interface d'administration de Django**. Une personne avec les acc\u00e8s prod pourrait toujours les lire (vu qu'ils ne sont pas chiffr\u00e9s de bout en bout), mais \u00e7a limiterait d'autant l'exposition.\nSupprimer les messages priv\u00e9s de l'interface d'administration de Django\n\u00c0 l'heure actuelle, un super-admin peut, via l'interface de Django, lire tous les MPs du site. Certes, l'interface est peu pratique pour \u00e7a (aucune notion de fil, etc.), mais je trouve tout de m\u00eame bien peu souhaitable.\r\n\r\n![Regardez-moi tous ces MPs dans l'interface d'administration de Django.](https://user-images.githubusercontent.com/1417570/88059844-b199bf00-cb65-11ea-8dc9-fc78310001c7.png)\r\n\r\nApr\u00e8s discussion avec @gcodeur sur ce sujet, je propose donc de **supprimer les MPs de l'interface d'administration de Django**. 
Une personne avec les acc\u00e8s prod pourrait toujours les lire (vu qu'ils ne sont pas chiffr\u00e9s de bout en bout), mais \u00e7a limiterait d'autant l'exposition.\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom .models import PrivatePost, PrivateTopic, PrivateTopicRead\n\n\nclass PrivatePostAdmin(admin.ModelAdmin):\n\n \"\"\"Representation of PrivatePost model in the admin interface.\"\"\"\n\n list_display = ('privatetopic', 'author', 'pubdate', 'update', 'position_in_topic')\n raw_id_fields = ('privatetopic', 'author')\n\n\nclass PrivateTopicAdmin(admin.ModelAdmin):\n\n \"\"\"Representation of PrivateTopic model in the admin interface.\"\"\"\n\n list_display = ('title', 'subtitle', 'author', 'last_message', 'pubdate')\n raw_id_fields = ('author', 'participants', 'last_message')\n\n\nclass PrivateTopicReadAdmin(admin.ModelAdmin):\n\n \"\"\"Representation of PrivateTopicRead model in the admin interface.\"\"\"\n\n list_display = ('privatetopic', 'privatepost', 'user')\n raw_id_fields = ('privatetopic', 'privatepost', 'user')\n\n\nadmin.site.register(PrivatePost, PrivatePostAdmin)\nadmin.site.register(PrivateTopic, PrivateTopicAdmin)\nadmin.site.register(PrivateTopicRead, PrivateTopicReadAdmin)\n", "path": "zds/mp/admin.py"}]}
1,274
300
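This fix is pure deletion: with `zds/mp/admin.py` gone, Django's admin autodiscovery finds no `ModelAdmin` registrations for private messages, so they vanish from the admin site. A hypothetical alternative — mine, not the project's — would keep the module importable but register nothing, which has the same visible effect:

```python
# zds/mp/admin.py -- hypothetical stand-in for outright deletion: the module
# stays importable but intentionally registers no ModelAdmin classes, so
# PrivatePost, PrivateTopic and PrivateTopicRead never appear in the admin.
# (The merged patch simply removes the file, which is simpler.)
```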
gh_patches_debug_15406
rasdani/github-patches
git_diff
vega__altair-3303
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Verify versions of both VegaFusion packages See https://github.com/altair-viz/altair/pull/3281#issuecomment-1867599879 We should check the version of `vegafusion-python-embed` as well as the version of `vegafusion` since it's possible for these to get out of sync. </issue> <code> [start of altair/utils/_importers.py] 1 from types import ModuleType 2 from packaging.version import Version 3 from importlib.metadata import version as importlib_version 4 5 6 def import_vegafusion() -> ModuleType: 7 min_version = "1.5.0" 8 try: 9 version = importlib_version("vegafusion") 10 if Version(version) < Version(min_version): 11 raise RuntimeError( 12 f"The vegafusion package must be version {min_version} or greater. " 13 f"Found version {version}" 14 ) 15 import vegafusion as vf # type: ignore 16 17 return vf 18 except ImportError as err: 19 raise ImportError( 20 'The "vegafusion" data transformer and chart.transformed_data feature requires\n' 21 f"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\n" 22 "These can be installed with pip using:\n" 23 f' pip install "vegafusion[embed]>={min_version}"\n' 24 "Or with conda using:\n" 25 f' conda install -c conda-forge "vegafusion-python-embed>={min_version}" ' 26 f'"vegafusion>={min_version}"\n\n' 27 f"ImportError: {err.args[0]}" 28 ) from err 29 30 31 def import_vl_convert() -> ModuleType: 32 min_version = "1.1.0" 33 try: 34 version = importlib_version("vl-convert-python") 35 if Version(version) < Version(min_version): 36 raise RuntimeError( 37 f"The vl-convert-python package must be version {min_version} or greater. " 38 f"Found version {version}" 39 ) 40 import vl_convert as vlc 41 42 return vlc 43 except ImportError as err: 44 raise ImportError( 45 f"The vl-convert Vega-Lite compiler and file export feature requires\n" 46 f"version {min_version} or greater of the 'vl-convert-python' package. \n" 47 f"This can be installed with pip using:\n" 48 f' pip install "vl-convert-python>={min_version}"\n' 49 "or conda:\n" 50 f' conda install -c conda-forge "vl-convert-python>={min_version}"\n\n' 51 f"ImportError: {err.args[0]}" 52 ) from err 53 54 55 def vl_version_for_vl_convert() -> str: 56 from ..vegalite import SCHEMA_VERSION 57 58 # Compute VlConvert's vl_version string (of the form 'v5_2') 59 # from SCHEMA_VERSION (of the form 'v5.2.0') 60 return "_".join(SCHEMA_VERSION.split(".")[:2]) 61 62 63 def import_pyarrow_interchange() -> ModuleType: 64 min_version = "11.0.0" 65 try: 66 version = importlib_version("pyarrow") 67 68 if Version(version) < Version(min_version): 69 raise RuntimeError( 70 f"The pyarrow package must be version {min_version} or greater. " 71 f"Found version {version}" 72 ) 73 import pyarrow.interchange as pi 74 75 return pi 76 except ImportError as err: 77 raise ImportError( 78 f"Usage of the DataFrame Interchange Protocol requires\n" 79 f"version {min_version} or greater of the pyarrow package. 
\n" 80 f"This can be installed with pip using:\n" 81 f' pip install "pyarrow>={min_version}"\n' 82 "or conda:\n" 83 f' conda install -c conda-forge "pyarrow>={min_version}"\n\n' 84 f"ImportError: {err.args[0]}" 85 ) from err 86 87 88 def pyarrow_available() -> bool: 89 try: 90 import_pyarrow_interchange() 91 return True 92 except ImportError: 93 return False 94 [end of altair/utils/_importers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/altair/utils/_importers.py b/altair/utils/_importers.py --- a/altair/utils/_importers.py +++ b/altair/utils/_importers.py @@ -7,10 +7,14 @@ min_version = "1.5.0" try: version = importlib_version("vegafusion") - if Version(version) < Version(min_version): + embed_version = importlib_version("vegafusion-python-embed") + if version != embed_version or Version(version) < Version(min_version): raise RuntimeError( - f"The vegafusion package must be version {min_version} or greater. " - f"Found version {version}" + "The versions of the vegafusion and vegafusion-python-embed packages must match\n" + f"and must be version {min_version} or greater.\n" + f"Found:\n" + f" - vegafusion=={version}\n" + f" - vegafusion-python-embed=={embed_version}\n" ) import vegafusion as vf # type: ignore
{"golden_diff": "diff --git a/altair/utils/_importers.py b/altair/utils/_importers.py\n--- a/altair/utils/_importers.py\n+++ b/altair/utils/_importers.py\n@@ -7,10 +7,14 @@\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n- if Version(version) < Version(min_version):\n+ embed_version = importlib_version(\"vegafusion-python-embed\")\n+ if version != embed_version or Version(version) < Version(min_version):\n raise RuntimeError(\n- f\"The vegafusion package must be version {min_version} or greater. \"\n- f\"Found version {version}\"\n+ \"The versions of the vegafusion and vegafusion-python-embed packages must match\\n\"\n+ f\"and must be version {min_version} or greater.\\n\"\n+ f\"Found:\\n\"\n+ f\" - vegafusion=={version}\\n\"\n+ f\" - vegafusion-python-embed=={embed_version}\\n\"\n )\n import vegafusion as vf # type: ignore\n", "issue": "Verify versions of both VegaFusion packages\nSee https://github.com/altair-viz/altair/pull/3281#issuecomment-1867599879\r\n\r\nWe should check the version of `vegafusion-python-embed` as well as the version of `vegafusion` since it's possible for these to get out of sync.\r\n\r\n\n", "before_files": [{"content": "from types import ModuleType\nfrom packaging.version import Version\nfrom importlib.metadata import version as importlib_version\n\n\ndef import_vegafusion() -> ModuleType:\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vegafusion package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vegafusion as vf # type: ignore\n\n return vf\n except ImportError as err:\n raise ImportError(\n 'The \"vegafusion\" data transformer and chart.transformed_data feature requires\\n'\n f\"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\\n\"\n \"These can be installed with pip using:\\n\"\n f' pip install \"vegafusion[embed]>={min_version}\"\\n'\n \"Or with conda using:\\n\"\n f' conda install -c conda-forge \"vegafusion-python-embed>={min_version}\" '\n f'\"vegafusion>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef import_vl_convert() -> ModuleType:\n min_version = \"1.1.0\"\n try:\n version = importlib_version(\"vl-convert-python\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vl-convert-python package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vl_convert as vlc\n\n return vlc\n except ImportError as err:\n raise ImportError(\n f\"The vl-convert Vega-Lite compiler and file export feature requires\\n\"\n f\"version {min_version} or greater of the 'vl-convert-python' package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"vl-convert-python>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"vl-convert-python>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef vl_version_for_vl_convert() -> str:\n from ..vegalite import SCHEMA_VERSION\n\n # Compute VlConvert's vl_version string (of the form 'v5_2')\n # from SCHEMA_VERSION (of the form 'v5.2.0')\n return \"_\".join(SCHEMA_VERSION.split(\".\")[:2])\n\n\ndef import_pyarrow_interchange() -> ModuleType:\n min_version = \"11.0.0\"\n try:\n version = importlib_version(\"pyarrow\")\n\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The pyarrow package must be version {min_version} or greater. 
\"\n f\"Found version {version}\"\n )\n import pyarrow.interchange as pi\n\n return pi\n except ImportError as err:\n raise ImportError(\n f\"Usage of the DataFrame Interchange Protocol requires\\n\"\n f\"version {min_version} or greater of the pyarrow package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"pyarrow>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"pyarrow>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef pyarrow_available() -> bool:\n try:\n import_pyarrow_interchange()\n return True\n except ImportError:\n return False\n", "path": "altair/utils/_importers.py"}]}
1,642
253
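The altair change is self-contained enough to lift out and run. A minimal sketch of the same check, with names taken from the diff and the error message condensed:

```python
from importlib.metadata import version as importlib_version
from packaging.version import Version

MIN_VERSION = "1.5.0"

def check_vegafusion_versions() -> None:
    # Both distributions must be installed, must match each other exactly,
    # and must be at least MIN_VERSION -- mirroring the golden diff above.
    version = importlib_version("vegafusion")
    embed_version = importlib_version("vegafusion-python-embed")
    if version != embed_version or Version(version) < Version(MIN_VERSION):
        raise RuntimeError(
            f"vegafusion ({version}) and vegafusion-python-embed "
            f"({embed_version}) must match and be >= {MIN_VERSION}"
        )
```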
gh_patches_debug_4396
rasdani/github-patches
git_diff
oppia__oppia-1465
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> In the rich-text editor, auto-prepend "https://" to links which don't specify a protocol ``` Currently the non-interactive link widget will only accept links that begin with either "http://" or "https://". I propose that whenever a link does not, e.g. "www.google.com" we automatically prepend "http://www.google.com" to the link string that is stored. ``` Original issue reported on code.google.com by `[email protected]` on 24 Aug 2014 at 9:43 </issue> <code> [start of extensions/rich_text_components/Link/Link.py] 1 # coding: utf-8 2 # 3 # Copyright 2014 The Oppia Authors. All Rights Reserved. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, softwar 12 # distributed under the License is distributed on an "AS-IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 from extensions.rich_text_components import base 18 19 20 class Link(base.BaseRichTextComponent): 21 """A rich-text component for displaying links.""" 22 23 name = 'Link' 24 category = 'Basic Input' 25 description = 'A link to a URL.' 26 frontend_name = 'link' 27 tooltip = 'Insert link' 28 29 _customization_arg_specs = [{ 30 'name': 'url', 31 'description': ( 32 'The link URL. It must start with http:// or https://'), 33 'schema': { 34 'type': 'custom', 35 'obj_type': 'SanitizedUrl', 36 }, 37 'default_value': 'https://www.example.com', 38 }, { 39 'name': 'text', 40 'description': ( 41 'The link text. If left blank, the link URL will be used.'), 42 'schema': { 43 'type': 'unicode', 44 }, 45 'default_value': '', 46 }, { 47 'name': 'open_link_in_same_window', 48 'description': 'Open the link in the same window?', 49 'schema': { 50 'type': 'bool' 51 }, 52 'default_value': False, 53 }] 54 [end of extensions/rich_text_components/Link/Link.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/extensions/rich_text_components/Link/Link.py b/extensions/rich_text_components/Link/Link.py --- a/extensions/rich_text_components/Link/Link.py +++ b/extensions/rich_text_components/Link/Link.py @@ -29,7 +29,7 @@ _customization_arg_specs = [{ 'name': 'url', 'description': ( - 'The link URL. It must start with http:// or https://'), + 'The link URL. If no protocol is specified, HTTPS will be used.'), 'schema': { 'type': 'custom', 'obj_type': 'SanitizedUrl',
{"golden_diff": "diff --git a/extensions/rich_text_components/Link/Link.py b/extensions/rich_text_components/Link/Link.py\n--- a/extensions/rich_text_components/Link/Link.py\n+++ b/extensions/rich_text_components/Link/Link.py\n@@ -29,7 +29,7 @@\n _customization_arg_specs = [{\n 'name': 'url',\n 'description': (\n- 'The link URL. It must start with http:// or https://'),\n+ 'The link URL. If no protocol is specified, HTTPS will be used.'),\n 'schema': {\n 'type': 'custom',\n 'obj_type': 'SanitizedUrl',\n", "issue": "In the rich-text editor, auto-prepend \"https://\" to links which don't specify a protocol\n```\nCurrently the non-interactive link widget will only accept links that begin \nwith either \"http://\" or \"https://\". I propose that whenever a link does not, \ne.g. \"www.google.com\" we automatically prepend \"http://www.google.com\" to the \nlink string that is stored.\n```\n\nOriginal issue reported on code.google.com by `[email protected]` on 24 Aug 2014 at 9:43\n\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom extensions.rich_text_components import base\n\n\nclass Link(base.BaseRichTextComponent):\n \"\"\"A rich-text component for displaying links.\"\"\"\n\n name = 'Link'\n category = 'Basic Input'\n description = 'A link to a URL.'\n frontend_name = 'link'\n tooltip = 'Insert link'\n\n _customization_arg_specs = [{\n 'name': 'url',\n 'description': (\n 'The link URL. It must start with http:// or https://'),\n 'schema': {\n 'type': 'custom',\n 'obj_type': 'SanitizedUrl',\n },\n 'default_value': 'https://www.example.com',\n }, {\n 'name': 'text',\n 'description': (\n 'The link text. If left blank, the link URL will be used.'),\n 'schema': {\n 'type': 'unicode',\n },\n 'default_value': '',\n }, {\n 'name': 'open_link_in_same_window',\n 'description': 'Open the link in the same window?',\n 'schema': {\n 'type': 'bool'\n },\n 'default_value': False,\n }]\n", "path": "extensions/rich_text_components/Link/Link.py"}]}
1,153
142
gh_patches_debug_26467
rasdani/github-patches
git_diff
liqd__a4-meinberlin-891
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Import of Bezirksregionen stopped working `$ manage.py import_geodata --gdal-legacy` Leads to a `KeyError`, probably the data format has changed. </issue> <code> [start of meinberlin/apps/maps/management/commands/import_geodata.py] 1 import json 2 import os 3 import subprocess 4 import sys 5 6 from django.core.management.base import BaseCommand 7 8 from meinberlin.apps.maps import models as map_models 9 10 11 class Command(BaseCommand): 12 help = 'Create map presets for berlin GEO-Data' 13 14 def add_arguments(self, parser): 15 parser.add_argument( 16 '--gdal-legacy', 17 action='store_true', 18 dest='gdal_legacy', 19 default=False, 20 help='GDAL version <= 1.10', 21 ) 22 23 def handle(self, *args, **options): 24 self.is_gdal_legacy = options['gdal_legacy'] 25 self._import_districts() 26 self._import_regions() 27 28 def _import_districts(self): 29 category = self._preset_category('Berlin') 30 tmpfile = '/tmp/bezirke.json' 31 url = 'http://fbinter.stadt-berlin.de/fb/' \ 32 'wfs/geometry/senstadt/re_bezirke/' 33 self._download_geodata(tmpfile, url, 'fis:re_bezirke') 34 data = json.load(open(tmpfile, 'r')) 35 for feature in data['features']: 36 district = feature['properties']['spatial_alias'] 37 if not map_models.MapPreset.objects.filter(name=district).exists(): 38 self._create_map_preset(district, feature, category) 39 os.remove(tmpfile) 40 41 def _import_regions(self): 42 url = 'http://fbinter.stadt-berlin.de/fb/' \ 43 'wfs/geometry/senstadt/re_bezirksregion' 44 tmpfile = '/tmp/bezirksregions.json' 45 self._download_geodata(tmpfile, url, 46 'fis:re_bezirksregion') 47 data = json.load(open(tmpfile, 'r')) 48 for feature in data['features']: 49 district = feature['properties']['BEZIRK'] 50 region = feature['properties']['BZR_NAME'] 51 category = self._preset_category(district) 52 if not map_models.MapPreset.objects.filter(name=region).exists(): 53 self._create_map_preset(region, feature, category) 54 os.remove(tmpfile) 55 56 def _preset_category(self, name): 57 category, _ = \ 58 map_models.MapPresetCategory.objects.get_or_create(name=name) 59 return category 60 61 def _create_map_preset(self, name, feature, category): 62 polygon = { 63 'type': 'FeatureCollection', 64 'features': [feature] 65 } 66 map_preset = map_models.MapPreset( 67 name=name, 68 polygon=polygon, 69 category=category 70 ) 71 map_preset.save() 72 73 def _download_geodata(self, filename: str, url: str, layer: str): 74 try: 75 os.remove(filename) 76 except: 77 pass 78 79 src = 'WFS:{}{}'.format( 80 url, 81 '?TYPENAMES=GML2' if self.is_gdal_legacy else '' 82 ) 83 try: 84 print('Trying to download file from {}'.format(url)) 85 subprocess.check_call([ 86 'ogr2ogr', '-s_srs', 'EPSG:25833', '-t_srs', 'WGS84', 87 '-f', 'geoJSON', filename, src, layer 88 ]) 89 except FileNotFoundError as e: 90 print('Make sure ogr2ogr is installed and in user PATH.') 91 sys.exit(e) 92 [end of meinberlin/apps/maps/management/commands/import_geodata.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/maps/management/commands/import_geodata.py b/meinberlin/apps/maps/management/commands/import_geodata.py --- a/meinberlin/apps/maps/management/commands/import_geodata.py +++ b/meinberlin/apps/maps/management/commands/import_geodata.py @@ -40,13 +40,13 @@ def _import_regions(self): url = 'http://fbinter.stadt-berlin.de/fb/' \ - 'wfs/geometry/senstadt/re_bezirksregion' + 'wfs/geometry/senstadt/re_bezirksregion/' tmpfile = '/tmp/bezirksregions.json' self._download_geodata(tmpfile, url, 'fis:re_bezirksregion') data = json.load(open(tmpfile, 'r')) for feature in data['features']: - district = feature['properties']['BEZIRK'] + district = feature['properties']['BEZNAME'] region = feature['properties']['BZR_NAME'] category = self._preset_category(district) if not map_models.MapPreset.objects.filter(name=region).exists(): @@ -78,7 +78,7 @@ src = 'WFS:{}{}'.format( url, - '?TYPENAMES=GML2' if self.is_gdal_legacy else '' + '?VERSION=1.1.0' if self.is_gdal_legacy else '' ) try: print('Trying to download file from {}'.format(url))
{"golden_diff": "diff --git a/meinberlin/apps/maps/management/commands/import_geodata.py b/meinberlin/apps/maps/management/commands/import_geodata.py\n--- a/meinberlin/apps/maps/management/commands/import_geodata.py\n+++ b/meinberlin/apps/maps/management/commands/import_geodata.py\n@@ -40,13 +40,13 @@\n \n def _import_regions(self):\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n- 'wfs/geometry/senstadt/re_bezirksregion'\n+ 'wfs/geometry/senstadt/re_bezirksregion/'\n tmpfile = '/tmp/bezirksregions.json'\n self._download_geodata(tmpfile, url,\n 'fis:re_bezirksregion')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n- district = feature['properties']['BEZIRK']\n+ district = feature['properties']['BEZNAME']\n region = feature['properties']['BZR_NAME']\n category = self._preset_category(district)\n if not map_models.MapPreset.objects.filter(name=region).exists():\n@@ -78,7 +78,7 @@\n \n src = 'WFS:{}{}'.format(\n url,\n- '?TYPENAMES=GML2' if self.is_gdal_legacy else ''\n+ '?VERSION=1.1.0' if self.is_gdal_legacy else ''\n )\n try:\n print('Trying to download file from {}'.format(url))\n", "issue": "Import of Bezirksregionen stopped working\n`$ manage.py import_geodata --gdal-legacy`\r\n\r\nLeads to a `KeyError`, probably the data format has changed.\r\n\n", "before_files": [{"content": "import json\nimport os\nimport subprocess\nimport sys\n\nfrom django.core.management.base import BaseCommand\n\nfrom meinberlin.apps.maps import models as map_models\n\n\nclass Command(BaseCommand):\n help = 'Create map presets for berlin GEO-Data'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--gdal-legacy',\n action='store_true',\n dest='gdal_legacy',\n default=False,\n help='GDAL version <= 1.10',\n )\n\n def handle(self, *args, **options):\n self.is_gdal_legacy = options['gdal_legacy']\n self._import_districts()\n self._import_regions()\n\n def _import_districts(self):\n category = self._preset_category('Berlin')\n tmpfile = '/tmp/bezirke.json'\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n 'wfs/geometry/senstadt/re_bezirke/'\n self._download_geodata(tmpfile, url, 'fis:re_bezirke')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n district = feature['properties']['spatial_alias']\n if not map_models.MapPreset.objects.filter(name=district).exists():\n self._create_map_preset(district, feature, category)\n os.remove(tmpfile)\n\n def _import_regions(self):\n url = 'http://fbinter.stadt-berlin.de/fb/' \\\n 'wfs/geometry/senstadt/re_bezirksregion'\n tmpfile = '/tmp/bezirksregions.json'\n self._download_geodata(tmpfile, url,\n 'fis:re_bezirksregion')\n data = json.load(open(tmpfile, 'r'))\n for feature in data['features']:\n district = feature['properties']['BEZIRK']\n region = feature['properties']['BZR_NAME']\n category = self._preset_category(district)\n if not map_models.MapPreset.objects.filter(name=region).exists():\n self._create_map_preset(region, feature, category)\n os.remove(tmpfile)\n\n def _preset_category(self, name):\n category, _ = \\\n map_models.MapPresetCategory.objects.get_or_create(name=name)\n return category\n\n def _create_map_preset(self, name, feature, category):\n polygon = {\n 'type': 'FeatureCollection',\n 'features': [feature]\n }\n map_preset = map_models.MapPreset(\n name=name,\n polygon=polygon,\n category=category\n )\n map_preset.save()\n\n def _download_geodata(self, filename: str, url: str, layer: str):\n try:\n os.remove(filename)\n except:\n pass\n\n src = 'WFS:{}{}'.format(\n url,\n '?TYPENAMES=GML2' 
if self.is_gdal_legacy else ''\n )\n try:\n print('Trying to download file from {}'.format(url))\n subprocess.check_call([\n 'ogr2ogr', '-s_srs', 'EPSG:25833', '-t_srs', 'WGS84',\n '-f', 'geoJSON', filename, src, layer\n ])\n except FileNotFoundError as e:\n print('Make sure ogr2ogr is installed and in user PATH.')\n sys.exit(e)\n", "path": "meinberlin/apps/maps/management/commands/import_geodata.py"}]}
1,499
342
gh_patches_debug_4242
rasdani/github-patches
git_diff
kivy__python-for-android-1995
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> TestGetSystemPythonExecutable.test_virtualenv test fail The `TestGetSystemPythonExecutable.test_virtualenv` and `TestGetSystemPythonExecutable.test_venv` tests started failing all of a sudden. Error was: ``` ModuleNotFoundError: No module named \'pytoml\'\n' ``` This ca be reproduced in local via: ```sh pytest tests/test_pythonpackage_basic.py::TestGetSystemPythonExecutable::test_virtualenv ``` </issue> <code> [start of setup.py] 1 2 import glob 3 from io import open # for open(..,encoding=...) parameter in python 2 4 from os import walk 5 from os.path import join, dirname, sep 6 import os 7 import re 8 from setuptools import setup, find_packages 9 10 # NOTE: All package data should also be set in MANIFEST.in 11 12 packages = find_packages() 13 14 package_data = {'': ['*.tmpl', 15 '*.patch', ], } 16 17 data_files = [] 18 19 20 21 # must be a single statement since buildozer is currently parsing it, refs: 22 # https://github.com/kivy/buildozer/issues/722 23 install_reqs = [ 24 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six', 25 'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"', 26 'pep517', 'pytoml', 'virtualenv' 27 ] 28 # (pep517, pytoml and virtualenv are used by pythonpackage.py) 29 30 # By specifying every file manually, package_data will be able to 31 # include them in binary distributions. Note that we have to add 32 # everything as a 'pythonforandroid' rule, using '' apparently doesn't 33 # work. 34 def recursively_include(results, directory, patterns): 35 for root, subfolders, files in walk(directory): 36 for fn in files: 37 if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]): 38 continue 39 filename = join(root, fn) 40 directory = 'pythonforandroid' 41 if directory not in results: 42 results[directory] = [] 43 results[directory].append(join(*filename.split(sep)[1:])) 44 45 recursively_include(package_data, 'pythonforandroid/recipes', 46 ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h', 47 '*.mk', '*.jam', ]) 48 recursively_include(package_data, 'pythonforandroid/bootstraps', 49 ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png', 50 '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl', 51 '*.gradle', '.gitkeep', 'gradlew*', '*.jar', "*.patch", ]) 52 recursively_include(package_data, 'pythonforandroid/bootstraps', 53 ['sdl-config', ]) 54 recursively_include(package_data, 'pythonforandroid/bootstraps/webview', 55 ['*.html', ]) 56 recursively_include(package_data, 'pythonforandroid', 57 ['liblink', 'biglink', 'liblink.sh']) 58 59 with open(join(dirname(__file__), 'README.md'), 60 encoding="utf-8", 61 errors="replace", 62 ) as fileh: 63 long_description = fileh.read() 64 65 init_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py') 66 version = None 67 try: 68 with open(init_filen, 69 encoding="utf-8", 70 errors="replace" 71 ) as fileh: 72 lines = fileh.readlines() 73 except IOError: 74 pass 75 else: 76 for line in lines: 77 line = line.strip() 78 if line.startswith('__version__ = '): 79 matches = re.findall(r'["\'].+["\']', line) 80 if matches: 81 version = matches[0].strip("'").strip('"') 82 break 83 if version is None: 84 raise Exception('Error: version could not be loaded from {}'.format(init_filen)) 85 86 setup(name='python-for-android', 87 version=version, 88 description='Android APK packager for Python scripts and apps', 89 long_description=long_description, 90 long_description_content_type='text/markdown', 91 
author='The Kivy team', 92 author_email='[email protected]', 93 url='https://github.com/kivy/python-for-android', 94 license='MIT', 95 install_requires=install_reqs, 96 entry_points={ 97 'console_scripts': [ 98 'python-for-android = pythonforandroid.entrypoints:main', 99 'p4a = pythonforandroid.entrypoints:main', 100 ], 101 'distutils.commands': [ 102 'apk = pythonforandroid.bdistapk:BdistAPK', 103 ], 104 }, 105 classifiers = [ 106 'Development Status :: 5 - Production/Stable', 107 'Intended Audience :: Developers', 108 'License :: OSI Approved :: MIT License', 109 'Operating System :: Microsoft :: Windows', 110 'Operating System :: OS Independent', 111 'Operating System :: POSIX :: Linux', 112 'Operating System :: MacOS :: MacOS X', 113 'Operating System :: Android', 114 'Programming Language :: C', 115 'Programming Language :: Python :: 3', 116 'Topic :: Software Development', 117 'Topic :: Utilities', 118 ], 119 packages=packages, 120 package_data=package_data, 121 ) 122 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -23,7 +23,7 @@ install_reqs = [ 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six', 'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"', - 'pep517', 'pytoml', 'virtualenv' + 'pep517<0.7.0"', 'pytoml', 'virtualenv' ] # (pep517, pytoml and virtualenv are used by pythonpackage.py)
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n install_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n- 'pep517', 'pytoml', 'virtualenv'\n+ 'pep517<0.7.0\"', 'pytoml', 'virtualenv'\n ]\n # (pep517, pytoml and virtualenv are used by pythonpackage.py)\n", "issue": "TestGetSystemPythonExecutable.test_virtualenv test fail\nThe `TestGetSystemPythonExecutable.test_virtualenv` and `TestGetSystemPythonExecutable.test_venv` tests started failing all of a sudden.\r\nError was:\r\n```\r\nModuleNotFoundError: No module named \\'pytoml\\'\\n'\r\n```\r\nThis ca be reproduced in local via:\r\n```sh\r\npytest tests/test_pythonpackage_basic.py::TestGetSystemPythonExecutable::test_virtualenv\r\n```\r\n\r\n\n", "before_files": [{"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517', 'pytoml', 'virtualenv'\n]\n# (pep517, pytoml and virtualenv are used by pythonpackage.py)\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from 
{}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n ],\n },\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}]}
1899
151
gh_patches_debug_36038
rasdani/github-patches
git_diff
scverse__scanpy-260
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `NameError: name 'logg' is not defined` when using `sc.queries.mitochondrial_genes` I just tried ```python import scanpy.api as sc sc.queries.mitochondrial_genes('www.ensembl.org', 'strange_organism') ``` I would expect scanpy complains that it does not know `'strange_organism'`, but I get the error ```python --------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-13-6a41b361ab41> in <module>() 1 import scanpy.api as sc ----> 2 sc.queries.mitochondrial_genes('www.ensembl.org', 'drerio') ~/software/scanpy/scanpy/queries/__init__.py in mitochondrial_genes(host, org) 34 s.add_attribute_to_xml('mgi_symbol') 35 else: ---> 36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True) 37 return None 38 s.add_attribute_to_xml('chromosome_name') NameError: name 'logg' is not defined ``` It seems to me like `queries/__init__.py` misses an `from .. import logging as logg` statement. Would maybe also make sense to show the the message that an organism is not available at verbosity level 1 instead of 4? </issue> <code> [start of scanpy/queries/__init__.py] 1 import pandas as pd 2 3 4 def mitochondrial_genes(host, org): 5 """Mitochondrial gene symbols for specific organism through BioMart. 6 7 Parameters 8 ---------- 9 host : {{'www.ensembl.org', ...}} 10 A valid BioMart host URL. 11 org : {{'hsapiens', 'mmusculus'}} 12 Organism to query. Currently available are human ('hsapiens') and mouse 13 ('mmusculus'). 14 15 Returns 16 ------- 17 A `pd.Index` containing mitochondrial gene symbols. 18 """ 19 try: 20 from bioservices import biomart 21 except ImportError: 22 raise ImportError( 23 'You need to install the `bioservices` module.') 24 from io import StringIO 25 s = biomart.BioMart(host=host) 26 27 # building query 28 s.new_query() 29 if org == 'hsapiens': 30 s.add_dataset_to_xml('hsapiens_gene_ensembl') 31 s.add_attribute_to_xml('hgnc_symbol') 32 elif org == 'mmusculus': 33 s.add_dataset_to_xml('mmusculus_gene_ensembl') 34 s.add_attribute_to_xml('mgi_symbol') 35 else: 36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True) 37 return None 38 s.add_attribute_to_xml('chromosome_name') 39 xml = s.get_xml() 40 41 # parsing mitochondrial gene symbols 42 res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None) 43 res.columns = ['symbol', 'chromosome_name'] 44 res = res.dropna() 45 res = res[res['chromosome_name'] == 'MT'] 46 res = res.set_index('symbol') 47 res = res[~res.index.duplicated(keep='first')] 48 49 return res.index 50 51 52 def gene_coordinates(host, org, gene, chr_exclude=[]): 53 """Retrieve gene coordinates for specific organism through BioMart. 54 Parameters 55 ---------- 56 host : {{'www.ensembl.org', ...}} 57 A valid BioMart host URL. Can be used to control genome build. 58 org : {{'hsapiens', 'mmusculus'}} 59 Organism to query. Currently available are human ('hsapiens') and mouse 60 ('mmusculus'). 61 gene : 62 The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve 63 coordinates. 64 chr_exclude : 65 A list of chromosomes to exclude from query. 66 Returns 67 ------- 68 A `pd.DataFrame` containing gene coordinates for the specified gene symbol. 
69 """ 70 try: 71 from bioservices import biomart 72 except ImportError: 73 raise ImportError( 74 'You need to install the `bioservices` module.') 75 from io import StringIO 76 s = biomart.BioMart(host=host) 77 78 # building query 79 s.new_query() 80 if org == 'hsapiens': 81 s.add_dataset_to_xml('hsapiens_gene_ensembl') 82 s.add_attribute_to_xml('hgnc_symbol') 83 elif org == 'mmusculus': 84 s.add_dataset_to_xml('mmusculus_gene_ensembl') 85 s.add_attribute_to_xml('mgi_symbol') 86 else: 87 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True) 88 return None 89 s.add_attribute_to_xml('chromosome_name') 90 s.add_attribute_to_xml('start_position') 91 s.add_attribute_to_xml('end_position') 92 xml = s.get_xml() 93 94 # parsing gene coordinates 95 res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None) 96 res.columns = ['symbol', 'chromosome_name', 'start', 'end'] 97 res = res.dropna() 98 res = res[~res['chromosome_name'].isin(chr_exclude)] 99 res = res.set_index('symbol') 100 101 return res.loc[[gene], :] 102 [end of scanpy/queries/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scanpy/queries/__init__.py b/scanpy/queries/__init__.py --- a/scanpy/queries/__init__.py +++ b/scanpy/queries/__init__.py @@ -1,4 +1,5 @@ import pandas as pd +from .. import logging as logg def mitochondrial_genes(host, org): @@ -8,9 +9,9 @@ ---------- host : {{'www.ensembl.org', ...}} A valid BioMart host URL. - org : {{'hsapiens', 'mmusculus'}} - Organism to query. Currently available are human ('hsapiens') and mouse - ('mmusculus'). + org : {{'hsapiens', 'mmusculus', 'drerio'}} + Organism to query. Currently available are human ('hsapiens'), mouse + ('mmusculus') and zebrafish ('drerio'). Returns ------- @@ -32,6 +33,9 @@ elif org == 'mmusculus': s.add_dataset_to_xml('mmusculus_gene_ensembl') s.add_attribute_to_xml('mgi_symbol') + elif org == 'drerio': + s.add_dataset_to_xml('drerio_gene_ensembl') + s.add_attribute_to_xml('zfin_id_symbol') else: logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True) return None @@ -55,9 +59,9 @@ ---------- host : {{'www.ensembl.org', ...}} A valid BioMart host URL. Can be used to control genome build. - org : {{'hsapiens', 'mmusculus'}} - Organism to query. Currently available are human ('hsapiens') and mouse - ('mmusculus'). + org : {{'hsapiens', 'mmusculus', 'drerio'}} + Organism to query. Currently available are human ('hsapiens'), mouse + ('mmusculus') and zebrafish ('drerio'). gene : The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve coordinates. @@ -83,6 +87,9 @@ elif org == 'mmusculus': s.add_dataset_to_xml('mmusculus_gene_ensembl') s.add_attribute_to_xml('mgi_symbol') + elif org == 'drerio': + s.add_dataset_to_xml('drerio_gene_ensembl') + s.add_attribute_to_xml('zfin_id_symbol') else: logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True) return None
{"golden_diff": "diff --git a/scanpy/queries/__init__.py b/scanpy/queries/__init__.py\n--- a/scanpy/queries/__init__.py\n+++ b/scanpy/queries/__init__.py\n@@ -1,4 +1,5 @@\n import pandas as pd\n+from .. import logging as logg\n \n \n def mitochondrial_genes(host, org):\n@@ -8,9 +9,9 @@\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL.\n- org : {{'hsapiens', 'mmusculus'}}\n- Organism to query. Currently available are human ('hsapiens') and mouse\n- ('mmusculus').\n+ org : {{'hsapiens', 'mmusculus', 'drerio'}}\n+ Organism to query. Currently available are human ('hsapiens'), mouse\n+ ('mmusculus') and zebrafish ('drerio').\n \n Returns\n -------\n@@ -32,6 +33,9 @@\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n+ elif org == 'drerio':\n+ s.add_dataset_to_xml('drerio_gene_ensembl')\n+ s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n@@ -55,9 +59,9 @@\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL. Can be used to control genome build.\n- org : {{'hsapiens', 'mmusculus'}}\n- Organism to query. Currently available are human ('hsapiens') and mouse\n- ('mmusculus').\n+ org : {{'hsapiens', 'mmusculus', 'drerio'}}\n+ Organism to query. Currently available are human ('hsapiens'), mouse\n+ ('mmusculus') and zebrafish ('drerio').\n gene :\n The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve\n coordinates.\n@@ -83,6 +87,9 @@\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n+ elif org == 'drerio':\n+ s.add_dataset_to_xml('drerio_gene_ensembl')\n+ s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n", "issue": "`NameError: name 'logg' is not defined` when using `sc.queries.mitochondrial_genes`\nI just tried\r\n```python\r\nimport scanpy.api as sc\r\nsc.queries.mitochondrial_genes('www.ensembl.org', 'strange_organism')\r\n```\r\nI would expect scanpy complains that it does not know `'strange_organism'`, but I get the error \r\n```python\r\n---------------------------------------------------------------------------\r\nNameError Traceback (most recent call last)\r\n<ipython-input-13-6a41b361ab41> in <module>()\r\n 1 import scanpy.api as sc\r\n----> 2 sc.queries.mitochondrial_genes('www.ensembl.org', 'drerio')\r\n\r\n~/software/scanpy/scanpy/queries/__init__.py in mitochondrial_genes(host, org)\r\n 34 s.add_attribute_to_xml('mgi_symbol')\r\n 35 else:\r\n---> 36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\r\n 37 return None\r\n 38 s.add_attribute_to_xml('chromosome_name')\r\n\r\nNameError: name 'logg' is not defined\r\n```\r\nIt seems to me like `queries/__init__.py` misses an `from .. import logging as logg` statement.\r\n\r\nWould maybe also make sense to show the the message that an organism is not available at verbosity level 1 instead of 4?\n", "before_files": [{"content": "import pandas as pd\n\n\ndef mitochondrial_genes(host, org):\n \"\"\"Mitochondrial gene symbols for specific organism through BioMart.\n\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL.\n org : {{'hsapiens', 'mmusculus'}}\n Organism to query. 
Currently available are human ('hsapiens') and mouse\n ('mmusculus').\n\n Returns\n -------\n A `pd.Index` containing mitochondrial gene symbols.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n xml = s.get_xml()\n\n # parsing mitochondrial gene symbols\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name']\n res = res.dropna()\n res = res[res['chromosome_name'] == 'MT']\n res = res.set_index('symbol')\n res = res[~res.index.duplicated(keep='first')]\n\n return res.index\n\n\ndef gene_coordinates(host, org, gene, chr_exclude=[]):\n \"\"\"Retrieve gene coordinates for specific organism through BioMart.\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL. Can be used to control genome build.\n org : {{'hsapiens', 'mmusculus'}}\n Organism to query. Currently available are human ('hsapiens') and mouse\n ('mmusculus').\n gene :\n The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve\n coordinates.\n chr_exclude :\n A list of chromosomes to exclude from query.\n Returns\n -------\n A `pd.DataFrame` containing gene coordinates for the specified gene symbol.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n s.add_attribute_to_xml('start_position')\n s.add_attribute_to_xml('end_position')\n xml = s.get_xml()\n\n # parsing gene coordinates\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name', 'start', 'end']\n res = res.dropna()\n res = res[~res['chromosome_name'].isin(chr_exclude)]\n res = res.set_index('symbol')\n\n return res.loc[[gene], :]\n", "path": "scanpy/queries/__init__.py"}]}
1884
614
gh_patches_debug_28469
rasdani/github-patches
git_diff
fossasia__open-event-server-2390
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Show image and square crop option (like in wizard) for speakers and ensure it shows up after import ![screenshot from 2016-08-24 11 30 08](https://cloud.githubusercontent.com/assets/1583873/17925701/48dd03be-69ee-11e6-84e4-c353001ddde1.png) As the above screenshot shows, the image of the speaker does not show up as expected. In the wizard step 1 it is already implemented in that way. Compare: http://open-event-dev.herokuapp.com/events/132/speakers/882/edit/ </issue> <code> [start of app/views/admin/models_views/speakers.py] 1 import json 2 3 from flask.ext.admin import BaseView 4 from flask.ext.restplus import abort 5 from flask_admin import expose 6 from flask.ext import login 7 from flask import request, url_for, redirect, flash 8 from ....helpers.data import delete_from_db, save_to_db 9 from ....helpers.data_getter import DataGetter 10 from ....helpers.storage import upload, UPLOAD_PATHS 11 12 13 def get_speaker_or_throw(speaker_id): 14 session = DataGetter.get_speaker(speaker_id) 15 if not session: 16 abort(404) 17 return session 18 19 20 class SpeakersView(BaseView): 21 22 def is_accessible(self): 23 return login.current_user.is_authenticated 24 25 def _handle_view(self, name, **kwargs): 26 if not self.is_accessible(): 27 return redirect(url_for('admin.login_view', next=request.url)) 28 event = DataGetter.get_event(kwargs['event_id']) 29 if not event.has_session_speakers: 30 return self.render('/gentelella/admin/event/info/enable_module.html', active_page='speakers', title='Speakers', event=event) 31 32 @expose('/') 33 def index_view(self, event_id): 34 speakers = DataGetter.get_speakers(event_id) 35 event = DataGetter.get_event(event_id) 36 return self.render('/gentelella/admin/event/speakers/base_speaker_table.html', 37 speakers=speakers, event_id=event_id, event=event) 38 39 @expose('/<int:speaker_id>/edit/', methods=('GET', 'POST')) 40 def edit_view(self, event_id, speaker_id): 41 speaker = get_speaker_or_throw(speaker_id) 42 event = DataGetter.get_event(event_id) 43 form_elems = DataGetter.get_custom_form_elements(event_id) 44 if not form_elems: 45 flash("Speaker form has been incorrectly configured for this event. 
Editing has been disabled", "danger") 46 return redirect(url_for('.index_view', event_id=event_id)) 47 speaker_form = json.loads(form_elems.speaker_form) 48 if request.method == 'GET': 49 return self.render('/gentelella/admin/event/speakers/edit.html', 50 speaker=speaker, event_id=event_id, 51 event=event, speaker_form=speaker_form) 52 if request.method == 'POST': 53 # set photo 54 if 'photo' in request.files and request.files['photo'].filename != '': 55 speaker_img_file = request.files['photo'] 56 speaker_img = upload( 57 speaker_img_file, 58 UPLOAD_PATHS['speakers']['photo'].format( 59 event_id=int(event_id), id=int(speaker.id) 60 )) 61 speaker.photo = speaker_img 62 # set other fields 63 speaker.name = request.form.get('name', None) 64 speaker.short_biography = request.form.get('short_biography', None) 65 speaker.long_biography = request.form.get('long_biography', None) 66 speaker.email = request.form.get('email', None) 67 speaker.mobile = request.form.get('mobile', None) 68 speaker.website = request.form.get('website', None) 69 speaker.twitter = request.form.get('twitter', None) 70 speaker.facebook = request.form.get('facebook', None) 71 speaker.github = request.form.get('github', None) 72 speaker.linkedin = request.form.get('linkedin', None) 73 speaker.organisation = request.form.get('organisation', None) 74 speaker.featured = True if request.form.get('featured', 'false') == 'true' else False 75 speaker.position = request.form.get('position', None) 76 speaker.country = request.form.get('country', None) 77 save_to_db(speaker, "Speaker has been updated") 78 flash("Speaker has been saved", "success") 79 80 return redirect(url_for('.index_view', event_id=event_id)) 81 82 @expose('/<int:speaker_id>/delete', methods=('GET',)) 83 def delete(self, event_id, speaker_id): 84 speaker = get_speaker_or_throw(speaker_id) 85 delete_from_db(speaker, 'Speaker Rejected') 86 flash("The speaker has been deleted", "danger") 87 return redirect(url_for('.index_view', event_id=event_id)) 88 [end of app/views/admin/models_views/speakers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/views/admin/models_views/speakers.py b/app/views/admin/models_views/speakers.py --- a/app/views/admin/models_views/speakers.py +++ b/app/views/admin/models_views/speakers.py @@ -4,10 +4,11 @@ from flask.ext.restplus import abort from flask_admin import expose from flask.ext import login -from flask import request, url_for, redirect, flash +from flask import request, url_for, redirect, flash, jsonify from ....helpers.data import delete_from_db, save_to_db from ....helpers.data_getter import DataGetter from ....helpers.storage import upload, UPLOAD_PATHS +from app.helpers.helpers import uploaded_file def get_speaker_or_throw(speaker_id): @@ -85,3 +86,23 @@ delete_from_db(speaker, 'Speaker Rejected') flash("The speaker has been deleted", "danger") return redirect(url_for('.index_view', event_id=event_id)) + + @expose('/<int:speaker_id>/photo_upload', methods=('POST',)) + def photo_upload(self, event_id, speaker_id): + speaker = get_speaker_or_throw(speaker_id) + event = DataGetter.get_event(event_id) + photo = request.form['photo'] + if photo: + photo_file = uploaded_file(file_content=photo) + photo = upload( + photo_file, + UPLOAD_PATHS['speakers']['photo'].format( + event_id=int(event_id), id=int(speaker.id) + )) + speaker.photo = photo + save_to_db(speaker) + return jsonify({'status': 'ok', 'photo': photo}) + else: + speaker.photo = None + save_to_db(speaker) + return jsonify({'status': 'Removed'})
{"golden_diff": "diff --git a/app/views/admin/models_views/speakers.py b/app/views/admin/models_views/speakers.py\n--- a/app/views/admin/models_views/speakers.py\n+++ b/app/views/admin/models_views/speakers.py\n@@ -4,10 +4,11 @@\n from flask.ext.restplus import abort\n from flask_admin import expose\n from flask.ext import login\n-from flask import request, url_for, redirect, flash\n+from flask import request, url_for, redirect, flash, jsonify\n from ....helpers.data import delete_from_db, save_to_db\n from ....helpers.data_getter import DataGetter\n from ....helpers.storage import upload, UPLOAD_PATHS\n+from app.helpers.helpers import uploaded_file\n \n \n def get_speaker_or_throw(speaker_id):\n@@ -85,3 +86,23 @@\n delete_from_db(speaker, 'Speaker Rejected')\n flash(\"The speaker has been deleted\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n+\n+ @expose('/<int:speaker_id>/photo_upload', methods=('POST',))\n+ def photo_upload(self, event_id, speaker_id):\n+ speaker = get_speaker_or_throw(speaker_id)\n+ event = DataGetter.get_event(event_id)\n+ photo = request.form['photo']\n+ if photo:\n+ photo_file = uploaded_file(file_content=photo)\n+ photo = upload(\n+ photo_file,\n+ UPLOAD_PATHS['speakers']['photo'].format(\n+ event_id=int(event_id), id=int(speaker.id)\n+ ))\n+ speaker.photo = photo\n+ save_to_db(speaker)\n+ return jsonify({'status': 'ok', 'photo': photo})\n+ else:\n+ speaker.photo = None\n+ save_to_db(speaker)\n+ return jsonify({'status': 'Removed'})\n", "issue": "Show image and square crop option (like in wizard) for speakers and ensure it shows up after import\n![screenshot from 2016-08-24 11 30 08](https://cloud.githubusercontent.com/assets/1583873/17925701/48dd03be-69ee-11e6-84e4-c353001ddde1.png)\n\nAs the above screenshot shows, the image of the speaker does not show up as expected. 
In the wizard step 1 it is already implemented in that way.\n\nCompare: http://open-event-dev.herokuapp.com/events/132/speakers/882/edit/\n\n", "before_files": [{"content": "import json\n\nfrom flask.ext.admin import BaseView\nfrom flask.ext.restplus import abort\nfrom flask_admin import expose\nfrom flask.ext import login\nfrom flask import request, url_for, redirect, flash\nfrom ....helpers.data import delete_from_db, save_to_db\nfrom ....helpers.data_getter import DataGetter\nfrom ....helpers.storage import upload, UPLOAD_PATHS\n\n\ndef get_speaker_or_throw(speaker_id):\n session = DataGetter.get_speaker(speaker_id)\n if not session:\n abort(404)\n return session\n\n\nclass SpeakersView(BaseView):\n\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n return redirect(url_for('admin.login_view', next=request.url))\n event = DataGetter.get_event(kwargs['event_id'])\n if not event.has_session_speakers:\n return self.render('/gentelella/admin/event/info/enable_module.html', active_page='speakers', title='Speakers', event=event)\n\n @expose('/')\n def index_view(self, event_id):\n speakers = DataGetter.get_speakers(event_id)\n event = DataGetter.get_event(event_id)\n return self.render('/gentelella/admin/event/speakers/base_speaker_table.html',\n speakers=speakers, event_id=event_id, event=event)\n\n @expose('/<int:speaker_id>/edit/', methods=('GET', 'POST'))\n def edit_view(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n event = DataGetter.get_event(event_id)\n form_elems = DataGetter.get_custom_form_elements(event_id)\n if not form_elems:\n flash(\"Speaker form has been incorrectly configured for this event. Editing has been disabled\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n speaker_form = json.loads(form_elems.speaker_form)\n if request.method == 'GET':\n return self.render('/gentelella/admin/event/speakers/edit.html',\n speaker=speaker, event_id=event_id,\n event=event, speaker_form=speaker_form)\n if request.method == 'POST':\n # set photo\n if 'photo' in request.files and request.files['photo'].filename != '':\n speaker_img_file = request.files['photo']\n speaker_img = upload(\n speaker_img_file,\n UPLOAD_PATHS['speakers']['photo'].format(\n event_id=int(event_id), id=int(speaker.id)\n ))\n speaker.photo = speaker_img\n # set other fields\n speaker.name = request.form.get('name', None)\n speaker.short_biography = request.form.get('short_biography', None)\n speaker.long_biography = request.form.get('long_biography', None)\n speaker.email = request.form.get('email', None)\n speaker.mobile = request.form.get('mobile', None)\n speaker.website = request.form.get('website', None)\n speaker.twitter = request.form.get('twitter', None)\n speaker.facebook = request.form.get('facebook', None)\n speaker.github = request.form.get('github', None)\n speaker.linkedin = request.form.get('linkedin', None)\n speaker.organisation = request.form.get('organisation', None)\n speaker.featured = True if request.form.get('featured', 'false') == 'true' else False\n speaker.position = request.form.get('position', None)\n speaker.country = request.form.get('country', None)\n save_to_db(speaker, \"Speaker has been updated\")\n flash(\"Speaker has been saved\", \"success\")\n\n return redirect(url_for('.index_view', event_id=event_id))\n\n @expose('/<int:speaker_id>/delete', methods=('GET',))\n def delete(self, event_id, speaker_id):\n speaker = 
get_speaker_or_throw(speaker_id)\n delete_from_db(speaker, 'Speaker Rejected')\n flash(\"The speaker has been deleted\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n", "path": "app/views/admin/models_views/speakers.py"}]}
1731
397
gh_patches_debug_9587
rasdani/github-patches
git_diff
freedomofpress__securedrop-2475
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Session expiring do not display a localized logout message. # Bug ## Description Like #2391, if a source has their session expire, they will not be shown a localized message when they log out. ## Steps to Reproduce Set session expire to 30 seconds. Log in. Set locale to not-english. Wait 30 seconds. Refresh. See no-localized flashed message. ## Expected Behavior The logout message is localized. ## Actual Behavior It is not. </issue> <code> [start of securedrop/source_app/__init__.py] 1 from datetime import datetime, timedelta 2 from flask import (Flask, render_template, flash, Markup, request, g, session, 3 url_for, redirect) 4 from flask_babel import gettext 5 from flask_assets import Environment 6 from flask_wtf.csrf import CSRFProtect 7 from jinja2 import evalcontextfilter 8 from os import path 9 from sqlalchemy.orm.exc import NoResultFound 10 11 import crypto_util 12 import i18n 13 import store 14 import template_filters 15 import version 16 17 from db import Source, db_session 18 from request_that_secures_file_uploads import RequestThatSecuresFileUploads 19 from source_app import main, info, api 20 from source_app.decorators import ignore_static 21 from source_app.utils import logged_in 22 23 24 def create_app(config): 25 app = Flask(__name__, 26 template_folder=config.SOURCE_TEMPLATES_DIR, 27 static_folder=path.join(config.SECUREDROP_ROOT, 'static')) 28 app.request_class = RequestThatSecuresFileUploads 29 app.config.from_object(config.SourceInterfaceFlaskConfig) 30 31 # The default CSRF token expiration is 1 hour. Since large uploads can 32 # take longer than an hour over Tor, we increase the valid window to 24h. 33 app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24 34 CSRFProtect(app) 35 36 assets = Environment(app) 37 app.config['assets'] = assets 38 39 i18n.setup_app(app) 40 41 app.jinja_env.trim_blocks = True 42 app.jinja_env.lstrip_blocks = True 43 app.jinja_env.globals['version'] = version.__version__ 44 if getattr(config, 'CUSTOM_HEADER_IMAGE', None): 45 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE 46 app.jinja_env.globals['use_custom_header_image'] = True 47 else: 48 app.jinja_env.globals['header_image'] = 'logo.png' 49 app.jinja_env.globals['use_custom_header_image'] = False 50 51 app.jinja_env.filters['rel_datetime_format'] = \ 52 template_filters.rel_datetime_format 53 app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br) 54 app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat 55 56 for module in [main, info, api]: 57 app.register_blueprint(module.make_blueprint(config)) 58 59 @app.before_request 60 @ignore_static 61 def check_tor2web(): 62 # ignore_static here so we only flash a single message warning 63 # about Tor2Web, corresponding to the initial page load. 64 if 'X-tor2web' in request.headers: 65 flash(Markup(gettext( 66 '<strong>WARNING:</strong> You appear to be using Tor2Web. ' 67 'This <strong>does not</strong> provide anonymity. 
' 68 '<a href="{url}">Why is this dangerous?</a>') 69 .format(url=url_for('info.tor2web_warning'))), 70 "banner-warning") 71 72 @app.before_request 73 @ignore_static 74 def setup_g(): 75 """Store commonly used values in Flask's special g object""" 76 g.locale = i18n.get_locale() 77 g.text_direction = i18n.get_text_direction(g.locale) 78 g.html_lang = i18n.locale_to_rfc_5646(g.locale) 79 g.locales = i18n.get_locale2name() 80 81 if 'expires' in session and datetime.utcnow() >= session['expires']: 82 session.clear() 83 msg = render_template('session_timeout.html') 84 flash(Markup(msg), "important") 85 86 session['expires'] = datetime.utcnow() + \ 87 timedelta(minutes=getattr(config, 88 'SESSION_EXPIRATION_MINUTES', 89 30)) 90 91 # ignore_static here because `crypto_util.hash_codename` is scrypt 92 # (very time consuming), and we don't need to waste time running if 93 # we're just serving a static resource that won't need to access 94 # these common values. 95 if logged_in(): 96 g.codename = session['codename'] 97 g.filesystem_id = crypto_util.hash_codename(g.codename) 98 try: 99 g.source = Source.query \ 100 .filter(Source.filesystem_id == g.filesystem_id) \ 101 .one() 102 except NoResultFound as e: 103 app.logger.error( 104 "Found no Sources when one was expected: %s" % 105 (e,)) 106 del session['logged_in'] 107 del session['codename'] 108 return redirect(url_for('main.index')) 109 g.loc = store.path(g.filesystem_id) 110 111 @app.teardown_appcontext 112 def shutdown_session(exception=None): 113 """Automatically remove database sessions at the end of the request, or 114 when the application shuts down""" 115 db_session.remove() 116 117 @app.errorhandler(404) 118 def page_not_found(error): 119 return render_template('notfound.html'), 404 120 121 @app.errorhandler(500) 122 def internal_error(error): 123 return render_template('error.html'), 500 124 125 return app 126 [end of securedrop/source_app/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py --- a/securedrop/source_app/__init__.py +++ b/securedrop/source_app/__init__.py @@ -79,8 +79,11 @@ g.locales = i18n.get_locale2name() if 'expires' in session and datetime.utcnow() >= session['expires']: - session.clear() msg = render_template('session_timeout.html') + + # clear the session after we render the message so it's localized + session.clear() + flash(Markup(msg), "important") session['expires'] = datetime.utcnow() + \
{"golden_diff": "diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py\n--- a/securedrop/source_app/__init__.py\n+++ b/securedrop/source_app/__init__.py\n@@ -79,8 +79,11 @@\n g.locales = i18n.get_locale2name()\n \n if 'expires' in session and datetime.utcnow() >= session['expires']:\n- session.clear()\n msg = render_template('session_timeout.html')\n+\n+ # clear the session after we render the message so it's localized\n+ session.clear()\n+\n flash(Markup(msg), \"important\")\n \n session['expires'] = datetime.utcnow() + \\\n", "issue": "Session expiring do not display a localized logout message.\n# Bug\r\n\r\n## Description\r\n\r\nLike #2391, if a source has their session expire, they will not be shown a localized message when they log out.\r\n\r\n## Steps to Reproduce\r\n\r\nSet session expire to 30 seconds. Log in. Set locale to not-english. Wait 30 seconds. Refresh. See no-localized flashed message.\r\n\r\n## Expected Behavior\r\n\r\nThe logout message is localized.\r\n\r\n## Actual Behavior\r\n\r\nIt is not.\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom flask import (Flask, render_template, flash, Markup, request, g, session,\n url_for, redirect)\nfrom flask_babel import gettext\nfrom flask_assets import Environment\nfrom flask_wtf.csrf import CSRFProtect\nfrom jinja2 import evalcontextfilter\nfrom os import path\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport crypto_util\nimport i18n\nimport store\nimport template_filters\nimport version\n\nfrom db import Source, db_session\nfrom request_that_secures_file_uploads import RequestThatSecuresFileUploads\nfrom source_app import main, info, api\nfrom source_app.decorators import ignore_static\nfrom source_app.utils import logged_in\n\n\ndef create_app(config):\n app = Flask(__name__,\n template_folder=config.SOURCE_TEMPLATES_DIR,\n static_folder=path.join(config.SECUREDROP_ROOT, 'static'))\n app.request_class = RequestThatSecuresFileUploads\n app.config.from_object(config.SourceInterfaceFlaskConfig)\n\n # The default CSRF token expiration is 1 hour. Since large uploads can\n # take longer than an hour over Tor, we increase the valid window to 24h.\n app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24\n CSRFProtect(app)\n\n assets = Environment(app)\n app.config['assets'] = assets\n\n i18n.setup_app(app)\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n app.jinja_env.globals['version'] = version.__version__\n if getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\n else:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n app.jinja_env.filters['rel_datetime_format'] = \\\n template_filters.rel_datetime_format\n app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)\n app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat\n\n for module in [main, info, api]:\n app.register_blueprint(module.make_blueprint(config))\n\n @app.before_request\n @ignore_static\n def check_tor2web():\n # ignore_static here so we only flash a single message warning\n # about Tor2Web, corresponding to the initial page load.\n if 'X-tor2web' in request.headers:\n flash(Markup(gettext(\n '<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. 
'\n '<a href=\"{url}\">Why is this dangerous?</a>')\n .format(url=url_for('info.tor2web_warning'))),\n \"banner-warning\")\n\n @app.before_request\n @ignore_static\n def setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n g.locale = i18n.get_locale()\n g.text_direction = i18n.get_text_direction(g.locale)\n g.html_lang = i18n.locale_to_rfc_5646(g.locale)\n g.locales = i18n.get_locale2name()\n\n if 'expires' in session and datetime.utcnow() >= session['expires']:\n session.clear()\n msg = render_template('session_timeout.html')\n flash(Markup(msg), \"important\")\n\n session['expires'] = datetime.utcnow() + \\\n timedelta(minutes=getattr(config,\n 'SESSION_EXPIRATION_MINUTES',\n 30))\n\n # ignore_static here because `crypto_util.hash_codename` is scrypt\n # (very time consuming), and we don't need to waste time running if\n # we're just serving a static resource that won't need to access\n # these common values.\n if logged_in():\n g.codename = session['codename']\n g.filesystem_id = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query \\\n .filter(Source.filesystem_id == g.filesystem_id) \\\n .one()\n except NoResultFound as e:\n app.logger.error(\n \"Found no Sources when one was expected: %s\" %\n (e,))\n del session['logged_in']\n del session['codename']\n return redirect(url_for('main.index'))\n g.loc = store.path(g.filesystem_id)\n\n @app.teardown_appcontext\n def shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n @app.errorhandler(404)\n def page_not_found(error):\n return render_template('notfound.html'), 404\n\n @app.errorhandler(500)\n def internal_error(error):\n return render_template('error.html'), 500\n\n return app\n", "path": "securedrop/source_app/__init__.py"}]}
2016
153
gh_patches_debug_1179
rasdani/github-patches
git_diff
locustio__locust-1395
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update flask version Our minimum required flask version is too old (saw at least one person having an issue https://stackoverflow.com/questions/61969924/typeerror-when-i-run-a-locustfile-py) https://flask.palletsprojects.com/en/1.1.x/changelog/#version-0-12-5 is a minimum, but we should probably go to 1.x right away. I can do the PR </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 -*- 2 import ast 3 import os 4 import re 5 import sys 6 7 from setuptools import find_packages, setup 8 9 ROOT_PATH = os.path.abspath(os.path.dirname(__file__)) 10 11 # parse version from locust/__init__.py 12 _version_re = re.compile(r'__version__\s+=\s+(.*)') 13 _init_file = os.path.join(ROOT_PATH, "locust", "__init__.py") 14 with open(_init_file, 'rb') as f: 15 version = str(ast.literal_eval(_version_re.search( 16 f.read().decode('utf-8')).group(1))) 17 18 setup( 19 name='locust', 20 version=version, 21 install_requires=[ 22 "gevent>=1.5.0", 23 "flask>=0.10.1", 24 "requests>=2.9.1", 25 "msgpack>=0.6.2", 26 "pyzmq>=16.0.2", 27 "geventhttpclient>=1.4.2", 28 "ConfigArgParse>=1.0", 29 "psutil>=5.6.7", 30 "Flask-BasicAuth>=0.2.0" 31 ], 32 test_suite="locust.test", 33 tests_require=[ 34 'cryptography', 35 'mock', 36 'pyquery', 37 ], 38 ) 39 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ version=version, install_requires=[ "gevent>=1.5.0", - "flask>=0.10.1", + "flask>=1.1.2", "requests>=2.9.1", "msgpack>=0.6.2", "pyzmq>=16.0.2",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n- \"flask>=0.10.1\", \n+ \"flask>=1.1.2\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\",\n", "issue": "Update flask version\nOur minimum required flask version is too old (saw at least one person having an issue https://stackoverflow.com/questions/61969924/typeerror-when-i-run-a-locustfile-py)\r\n\r\nhttps://flask.palletsprojects.com/en/1.1.x/changelog/#version-0-12-5 is a minimum, but we should probably go to 1.x right away.\r\n\r\nI can do the PR\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nsetup(\n name='locust',\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n \"flask>=0.10.1\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\", \n \"geventhttpclient>=1.4.2\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\"\n ],\n test_suite=\"locust.test\",\n tests_require=[\n 'cryptography',\n 'mock',\n 'pyquery',\n ], \n)\n", "path": "setup.py"}]}
986
116
gh_patches_debug_17420
rasdani/github-patches
git_diff
pytorch__ignite-2676
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Scheduled workflow failed Oh no, something went wrong in the scheduled workflow **PyTorch version tests with commit 98844bf82b963a429d22b09f650cb0af2023bf20**. Please look into it: https://github.com/pytorch/ignite/actions/runs/2923090334 Feel free to close this if this was just a one-off error. </issue> <code> [start of ignite/metrics/gan/utils.py] 1 from typing import Callable, Optional, Union 2 3 import torch 4 from packaging.version import Version 5 6 from ignite.metrics.metric import Metric 7 8 9 class InceptionModel(torch.nn.Module): 10 r"""Inception Model pre-trained on the ImageNet Dataset. 11 12 Args: 13 return_features: set it to `True` if you want the model to return features from the last pooling 14 layer instead of prediction probabilities. 15 device: specifies which device updates are accumulated on. Setting the 16 metric's device to be the same as your ``update`` arguments ensures the ``update`` method is 17 non-blocking. By default, CPU. 18 """ 19 20 def __init__(self, return_features: bool, device: Union[str, torch.device] = "cpu") -> None: 21 try: 22 from torchvision import models 23 except ImportError: 24 raise RuntimeError("This module requires torchvision to be installed.") 25 super(InceptionModel, self).__init__() 26 self._device = device 27 if Version(torch.__version__) <= Version("1.7.0"): 28 model_kwargs = {"pretrained": True} 29 else: 30 model_kwargs = {"weights": models.Inception_V3_Weights.DEFAULT} 31 32 self.model = models.inception_v3(**model_kwargs).to(self._device) 33 34 if return_features: 35 self.model.fc = torch.nn.Identity() 36 else: 37 self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1)) 38 self.model.eval() 39 40 @torch.no_grad() 41 def forward(self, data: torch.Tensor) -> torch.Tensor: 42 if data.dim() != 4: 43 raise ValueError(f"Inputs should be a tensor of dim 4, got {data.dim()}") 44 if data.shape[1] != 3: 45 raise ValueError(f"Inputs should be a tensor with 3 channels, got {data.shape}") 46 if data.device != torch.device(self._device): 47 data = data.to(self._device) 48 return self.model(data) 49 50 51 class _BaseInceptionMetric(Metric): 52 def __init__( 53 self, 54 num_features: Optional[int], 55 feature_extractor: Optional[torch.nn.Module], 56 output_transform: Callable = lambda x: x, 57 device: Union[str, torch.device] = torch.device("cpu"), 58 ) -> None: 59 60 if num_features is None: 61 raise ValueError("Argument num_features must be provided, if feature_extractor is specified.") 62 63 if feature_extractor is None: 64 feature_extractor = torch.nn.Identity() 65 66 if num_features <= 0: 67 raise ValueError(f"Argument num_features must be greater to zero, got: {num_features}") 68 69 if not isinstance(feature_extractor, torch.nn.Module): 70 raise TypeError( 71 f"Argument feature_extractor must be of type torch.nn.Module, got {type(self._feature_extractor)}" 72 ) 73 74 self._num_features = num_features 75 self._feature_extractor = feature_extractor.to(device) 76 77 super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device) 78 79 def _check_feature_shapes(self, samples: torch.Tensor) -> None: 80 81 if samples.dim() != 2: 82 raise ValueError(f"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}") 83 84 if samples.shape[0] == 0: 85 raise ValueError(f"Batch size should be greater than one, got: {samples.shape[0]}") 86 87 if samples.shape[1] != self._num_features: 88 
raise ValueError( 89 f"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}" 90 ) 91 92 def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor: 93 94 inputs = inputs.detach() 95 96 if inputs.device != torch.device(self._device): 97 inputs = inputs.to(self._device) 98 99 with torch.no_grad(): 100 outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64) 101 self._check_feature_shapes(outputs) 102 103 return outputs 104 [end of ignite/metrics/gan/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/metrics/gan/utils.py b/ignite/metrics/gan/utils.py --- a/ignite/metrics/gan/utils.py +++ b/ignite/metrics/gan/utils.py @@ -19,12 +19,13 @@ def __init__(self, return_features: bool, device: Union[str, torch.device] = "cpu") -> None: try: + import torchvision from torchvision import models except ImportError: raise RuntimeError("This module requires torchvision to be installed.") super(InceptionModel, self).__init__() self._device = device - if Version(torch.__version__) <= Version("1.7.0"): + if Version(torchvision.__version__) < Version("0.13.0"): model_kwargs = {"pretrained": True} else: model_kwargs = {"weights": models.Inception_V3_Weights.DEFAULT}
{"golden_diff": "diff --git a/ignite/metrics/gan/utils.py b/ignite/metrics/gan/utils.py\n--- a/ignite/metrics/gan/utils.py\n+++ b/ignite/metrics/gan/utils.py\n@@ -19,12 +19,13 @@\n \n def __init__(self, return_features: bool, device: Union[str, torch.device] = \"cpu\") -> None:\n try:\n+ import torchvision\n from torchvision import models\n except ImportError:\n raise RuntimeError(\"This module requires torchvision to be installed.\")\n super(InceptionModel, self).__init__()\n self._device = device\n- if Version(torch.__version__) <= Version(\"1.7.0\"):\n+ if Version(torchvision.__version__) < Version(\"0.13.0\"):\n model_kwargs = {\"pretrained\": True}\n else:\n model_kwargs = {\"weights\": models.Inception_V3_Weights.DEFAULT}\n", "issue": "Scheduled workflow failed\nOh no, something went wrong in the scheduled workflow **PyTorch version tests with commit 98844bf82b963a429d22b09f650cb0af2023bf20**.\nPlease look into it:\n\nhttps://github.com/pytorch/ignite/actions/runs/2923090334\n\nFeel free to close this if this was just a one-off error.\n\n", "before_files": [{"content": "from typing import Callable, Optional, Union\n\nimport torch\nfrom packaging.version import Version\n\nfrom ignite.metrics.metric import Metric\n\n\nclass InceptionModel(torch.nn.Module):\n r\"\"\"Inception Model pre-trained on the ImageNet Dataset.\n\n Args:\n return_features: set it to `True` if you want the model to return features from the last pooling\n layer instead of prediction probabilities.\n device: specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. By default, CPU.\n \"\"\"\n\n def __init__(self, return_features: bool, device: Union[str, torch.device] = \"cpu\") -> None:\n try:\n from torchvision import models\n except ImportError:\n raise RuntimeError(\"This module requires torchvision to be installed.\")\n super(InceptionModel, self).__init__()\n self._device = device\n if Version(torch.__version__) <= Version(\"1.7.0\"):\n model_kwargs = {\"pretrained\": True}\n else:\n model_kwargs = {\"weights\": models.Inception_V3_Weights.DEFAULT}\n\n self.model = models.inception_v3(**model_kwargs).to(self._device)\n\n if return_features:\n self.model.fc = torch.nn.Identity()\n else:\n self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1))\n self.model.eval()\n\n @torch.no_grad()\n def forward(self, data: torch.Tensor) -> torch.Tensor:\n if data.dim() != 4:\n raise ValueError(f\"Inputs should be a tensor of dim 4, got {data.dim()}\")\n if data.shape[1] != 3:\n raise ValueError(f\"Inputs should be a tensor with 3 channels, got {data.shape}\")\n if data.device != torch.device(self._device):\n data = data.to(self._device)\n return self.model(data)\n\n\nclass _BaseInceptionMetric(Metric):\n def __init__(\n self,\n num_features: Optional[int],\n feature_extractor: Optional[torch.nn.Module],\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n\n if num_features is None:\n raise ValueError(\"Argument num_features must be provided, if feature_extractor is specified.\")\n\n if feature_extractor is None:\n feature_extractor = torch.nn.Identity()\n\n if num_features <= 0:\n raise ValueError(f\"Argument num_features must be greater to zero, got: {num_features}\")\n\n if not isinstance(feature_extractor, torch.nn.Module):\n raise TypeError(\n f\"Argument feature_extractor must be of type torch.nn.Module, got 
{type(self._feature_extractor)}\"\n )\n\n self._num_features = num_features\n self._feature_extractor = feature_extractor.to(device)\n\n super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device)\n\n def _check_feature_shapes(self, samples: torch.Tensor) -> None:\n\n if samples.dim() != 2:\n raise ValueError(f\"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}\")\n\n if samples.shape[0] == 0:\n raise ValueError(f\"Batch size should be greater than one, got: {samples.shape[0]}\")\n\n if samples.shape[1] != self._num_features:\n raise ValueError(\n f\"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}\"\n )\n\n def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor:\n\n inputs = inputs.detach()\n\n if inputs.device != torch.device(self._device):\n inputs = inputs.to(self._device)\n\n with torch.no_grad():\n outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64)\n self._check_feature_shapes(outputs)\n\n return outputs\n", "path": "ignite/metrics/gan/utils.py"}]}
1699
199
gh_patches_debug_30939
rasdani/github-patches
git_diff
keras-team__keras-nlp-357
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve our continuous testing for model presets Opening an issue to track the changes proposed on https://github.com/keras-team/keras-nlp/pull/357, as it has gotten slightly larger in scope. I would like to propose the following changes to our "network_tests" for presets: - We collocate the preset testing within the model directory, and use test annotations to control how they are run. - We run the smallest available preset (per model) continuously on GCP, so we get some automated coverage for our preset code. - We actually test the output of our smallest available preset (with a relaxed float tolerance), so we can catch code updates that would break our checkpoints. </issue> <code> [start of keras_nlp/conftest.py] 1 # Copyright 2022 The KerasNLP Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import sys 15 16 import pytest 17 18 19 def pytest_addoption(parser): 20 parser.addoption( 21 "--runslow", action="store_true", default=False, help="run slow tests" 22 ) 23 24 25 def pytest_configure(config): 26 config.addinivalue_line("markers", "slow: mark test as slow to run") 27 28 29 def pytest_collection_modifyitems(config, items): 30 if config.getoption("--runslow"): 31 # --runslow given in cli: do not skip slow tests 32 return 33 skip_slow = pytest.mark.skip(reason="need --runslow option to run") 34 skip_xla = pytest.mark.skipif( 35 sys.platform == "darwin", reason="XLA unsupported on MacOS." 36 ) 37 38 for item in items: 39 if "slow" in item.keywords: 40 item.add_marker(skip_slow) 41 if "jit_compile_true" in item.name: 42 item.add_marker(skip_xla) 43 [end of keras_nlp/conftest.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py --- a/keras_nlp/conftest.py +++ b/keras_nlp/conftest.py @@ -18,25 +18,48 @@ def pytest_addoption(parser): parser.addoption( - "--runslow", action="store_true", default=False, help="run slow tests" + "--run_large", + action="store_true", + default=False, + help="run large tests", + ) + parser.addoption( + "--run_extra_large", + action="store_true", + default=False, + help="run extra_large tests", ) def pytest_configure(config): - config.addinivalue_line("markers", "slow: mark test as slow to run") + config.addinivalue_line( + "markers", "large: mark test as being slow or requiring a network" + ) + config.addinivalue_line( + "markers", + "extra_large: mark test as being too large to run continuously", + ) def pytest_collection_modifyitems(config, items): - if config.getoption("--runslow"): - # --runslow given in cli: do not skip slow tests - return - skip_slow = pytest.mark.skip(reason="need --runslow option to run") + run_extra_large_tests = config.getoption("--run_extra_large") + # Run large tests for --run_extra_large or --run_large. + run_large_tests = config.getoption("--run_large") or run_extra_large_tests + + # Messages to annotate skipped tests with. skip_xla = pytest.mark.skipif( sys.platform == "darwin", reason="XLA unsupported on MacOS." ) - + skip_large = pytest.mark.skipif( + not run_large_tests, reason="need --run_large option to run" + ) + skip_extra_large = pytest.mark.skipif( + not run_extra_large_tests, reason="need --run_extra_large option to run" + ) for item in items: - if "slow" in item.keywords: - item.add_marker(skip_slow) if "jit_compile_true" in item.name: item.add_marker(skip_xla) + if "large" in item.keywords: + item.add_marker(skip_large) + if "extra_large" in item.keywords: + item.add_marker(skip_extra_large)
{"golden_diff": "diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py\n--- a/keras_nlp/conftest.py\n+++ b/keras_nlp/conftest.py\n@@ -18,25 +18,48 @@\n \n def pytest_addoption(parser):\n parser.addoption(\n- \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n+ \"--run_large\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"run large tests\",\n+ )\n+ parser.addoption(\n+ \"--run_extra_large\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"run extra_large tests\",\n )\n \n \n def pytest_configure(config):\n- config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n+ config.addinivalue_line(\n+ \"markers\", \"large: mark test as being slow or requiring a network\"\n+ )\n+ config.addinivalue_line(\n+ \"markers\",\n+ \"extra_large: mark test as being too large to run continuously\",\n+ )\n \n \n def pytest_collection_modifyitems(config, items):\n- if config.getoption(\"--runslow\"):\n- # --runslow given in cli: do not skip slow tests\n- return\n- skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n+ run_extra_large_tests = config.getoption(\"--run_extra_large\")\n+ # Run large tests for --run_extra_large or --run_large.\n+ run_large_tests = config.getoption(\"--run_large\") or run_extra_large_tests\n+\n+ # Messages to annotate skipped tests with.\n skip_xla = pytest.mark.skipif(\n sys.platform == \"darwin\", reason=\"XLA unsupported on MacOS.\"\n )\n-\n+ skip_large = pytest.mark.skipif(\n+ not run_large_tests, reason=\"need --run_large option to run\"\n+ )\n+ skip_extra_large = pytest.mark.skipif(\n+ not run_extra_large_tests, reason=\"need --run_extra_large option to run\"\n+ )\n for item in items:\n- if \"slow\" in item.keywords:\n- item.add_marker(skip_slow)\n if \"jit_compile_true\" in item.name:\n item.add_marker(skip_xla)\n+ if \"large\" in item.keywords:\n+ item.add_marker(skip_large)\n+ if \"extra_large\" in item.keywords:\n+ item.add_marker(skip_extra_large)\n", "issue": "Improve our continuous testing for model presets\nOpening an issue to track the changes proposed on https://github.com/keras-team/keras-nlp/pull/357, as it has gotten slightly larger in scope.\r\n\r\nI would like to propose the following changes to our \"network_tests\" for presets:\r\n\r\n - We collocate the preset testing within the model directory, and use test annotations to control how they are run.\r\n - We run the smallest available preset (per model) continuously on GCP, so we get some automated coverage for our preset code.\r\n - We actually test the output of our smallest available preset (with a relaxed float tolerance), so we can catch code updates that would break our checkpoints.\n", "before_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n )\n\n\ndef pytest_configure(config):\n 
config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n\n\ndef pytest_collection_modifyitems(config, items):\n if config.getoption(\"--runslow\"):\n # --runslow given in cli: do not skip slow tests\n return\n skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n skip_xla = pytest.mark.skipif(\n sys.platform == \"darwin\", reason=\"XLA unsupported on MacOS.\"\n )\n\n for item in items:\n if \"slow\" in item.keywords:\n item.add_marker(skip_slow)\n if \"jit_compile_true\" in item.name:\n item.add_marker(skip_xla)\n", "path": "keras_nlp/conftest.py"}]}
1087
541
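The golden diff above splits a single `--runslow` flag into `--run_large` and `--run_extra_large` options with matching markers. A minimal, self-contained sketch of the underlying pytest pattern, reduced to one flag gating one custom marker:

```python
# conftest.py -- one flag, one marker; the real diff adds two tiers.
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--run_large", action="store_true", default=False, help="run large tests"
    )


def pytest_configure(config):
    config.addinivalue_line("markers", "large: slow or network-dependent test")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--run_large"):
        return  # flag given: nothing gets skipped
    skip_large = pytest.mark.skip(reason="need --run_large option to run")
    for item in items:
        if "large" in item.keywords:
            item.add_marker(skip_large)
```

Tests tagged `@pytest.mark.large` are then collected but skipped by default, and run continuously only where the CI job passes the flag.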
gh_patches_debug_59678
rasdani/github-patches
git_diff
mozilla__bugbug-31
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create a classifier to detect bugs that need QA Needed for https://github.com/mozilla/relman-auto-nag/issues/227. To do this, we'll need to collect some labels. We can automatically create some positive labels by getting bugs that have the `qawanted` keyword or that have `qe-verify` flag. We can't automatically create negative labels because we can't be sure that QA was not needed when the keyword/flag was not set. </issue> <code> [start of bugbug/models/qaneeded.py] 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import xgboost 7 from sklearn.feature_extraction import DictVectorizer 8 from sklearn.pipeline import FeatureUnion 9 from sklearn.pipeline import Pipeline 10 11 from bugbug import bug_features 12 from bugbug import labels 13 from bugbug.model import Model 14 from bugbug.utils import DictSelector 15 16 17 class QANeededModel(Model): 18 def __init__(self, lemmatization=False): 19 Model.__init__(self, lemmatization) 20 21 self.classes = labels.get_qa_needed_labels() 22 23 feature_extractors = [ 24 bug_features.has_str(), 25 bug_features.has_regression_range(), 26 bug_features.severity(), 27 bug_features.keywords(), 28 bug_features.is_coverity_issue(), 29 bug_features.has_crash_signature(), 30 bug_features.has_url(), 31 bug_features.has_w3c_url(), 32 bug_features.has_github_url(), 33 bug_features.whiteboard(), 34 bug_features.patches(), 35 bug_features.landings(), 36 bug_features.title(), 37 bug_features.comments(), 38 ] 39 40 self.extraction_pipeline = Pipeline([ 41 ('bug_extractor', bug_features.BugExtractor(feature_extractors)), 42 ('union', FeatureUnion( 43 transformer_list=[ 44 ('data', Pipeline([ 45 ('selector', DictSelector(key='data')), 46 ('vect', DictVectorizer()), 47 ])), 48 49 ('title', Pipeline([ 50 ('selector', DictSelector(key='title')), 51 ('tfidf', self.text_vectorizer(stop_words='english')), 52 ])), 53 54 ('comments', Pipeline([ 55 ('selector', DictSelector(key='comments')), 56 ('tfidf', self.text_vectorizer(stop_words='english')), 57 ])), 58 ], 59 )), 60 ]) 61 62 self.clf = xgboost.XGBClassifier(n_jobs=16) 63 [end of bugbug/models/qaneeded.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py --- a/bugbug/models/qaneeded.py +++ b/bugbug/models/qaneeded.py @@ -24,7 +24,7 @@ bug_features.has_str(), bug_features.has_regression_range(), bug_features.severity(), - bug_features.keywords(), + bug_features.keywords(set(['qawanted'])), bug_features.is_coverity_issue(), bug_features.has_crash_signature(), bug_features.has_url(),
{"golden_diff": "diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py\n--- a/bugbug/models/qaneeded.py\n+++ b/bugbug/models/qaneeded.py\n@@ -24,7 +24,7 @@\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n- bug_features.keywords(),\n+ bug_features.keywords(set(['qawanted'])),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n", "issue": "Create a classifier to detect bugs that need QA\nNeeded for https://github.com/mozilla/relman-auto-nag/issues/227.\r\n\r\nTo do this, we'll need to collect some labels.\r\nWe can automatically create some positive labels by getting bugs that have the `qawanted` keyword or that have `qe-verify` flag.\r\nWe can't automatically create negative labels because we can't be sure that QA was not needed when the keyword/flag was not set.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import labels\nfrom bugbug.model import Model\nfrom bugbug.utils import DictSelector\n\n\nclass QANeededModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.classes = labels.get_qa_needed_labels()\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.comments(),\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors)),\n ('union', FeatureUnion(\n transformer_list=[\n ('data', Pipeline([\n ('selector', DictSelector(key='data')),\n ('vect', DictVectorizer()),\n ])),\n\n ('title', Pipeline([\n ('selector', DictSelector(key='title')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n\n ('comments', Pipeline([\n ('selector', DictSelector(key='comments')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n ],\n )),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n", "path": "bugbug/models/qaneeded.py"}]}
1171
116
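The one-line fix passes `set(['qawanted'])` into `bug_features.keywords(...)`. The record does not show that extractor's implementation; since the positive labels are derived from the `qawanted` keyword, the argument presumably tells the extractor which keywords to drop so the label cannot leak into the features. A hypothetical sketch of that idea, not bugbug's actual class:

```python
class KeywordsFeature:
    """Toy bug-keyword extractor with an ignore set (illustrative only)."""

    def __init__(self, to_ignore=frozenset()):
        # Keywords used to *derive* the label (here 'qawanted') must not be
        # fed back in as features, or the model simply memorizes the label.
        self.to_ignore = set(to_ignore)

    def __call__(self, bug):
        return {
            f"keyword_{k}": True
            for k in bug.get("keywords", [])
            if k not in self.to_ignore
        }


extractor = KeywordsFeature(to_ignore={"qawanted"})
print(extractor({"keywords": ["qawanted", "crash"]}))  # {'keyword_crash': True}
```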
gh_patches_debug_9063
rasdani/github-patches
git_diff
pypa__virtualenv-1886
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `virtualenv --version` prints spurious error as of 20.0.24 **Issue** When running `virtualenv --version`, a logger error is printed to stderr, though the return code is still 0. **Environment** Tested with Python 3.7 and 3.8, virtualenvs managed with pipenv Ubuntu 18.04 on WSL ``` $ rm-rf tmp && mkdir tmp && cd tmp $ pipenv install "virtualenv==20.0.23" $ pipenv run virtualenv --version virtualenv 20.0.23 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py $ rm-rf tmp && mkdir tmp && cd tmp $ pipenv install "virtualenv==20.0.24" $ pipenv run virtualenv --version virtualenv 20.0.24 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py ERROR:root:SystemExit: 0 $ pipenv run virtualenv --version > /dev/null ERROR:root:SystemExit: 0 $ echo $? 0 ``` Nothing else is printed with `-vvv --with-traceback` </issue> <code> [start of src/virtualenv/__main__.py] 1 from __future__ import absolute_import, print_function, unicode_literals 2 3 import logging 4 import os 5 import sys 6 from datetime import datetime 7 8 9 def run(args=None, options=None): 10 start = datetime.now() 11 from virtualenv.util.error import ProcessCallFailed 12 from virtualenv.run import cli_run 13 14 if args is None: 15 args = sys.argv[1:] 16 try: 17 session = cli_run(args, options) 18 logging.warning(LogSession(session, start)) 19 except ProcessCallFailed as exception: 20 print("subprocess call failed for {} with code {}".format(exception.cmd, exception.code)) 21 print(exception.out, file=sys.stdout, end="") 22 print(exception.err, file=sys.stderr, end="") 23 raise SystemExit(exception.code) 24 25 26 class LogSession(object): 27 def __init__(self, session, start): 28 self.session = session 29 self.start = start 30 31 def __str__(self): 32 from virtualenv.util.six import ensure_text 33 34 spec = self.session.creator.interpreter.spec 35 elapsed = (datetime.now() - self.start).total_seconds() * 1000 36 lines = [ 37 "created virtual environment {} in {:.0f}ms".format(spec, elapsed), 38 " creator {}".format(ensure_text(str(self.session.creator))), 39 ] 40 if self.session.seeder.enabled: 41 lines += ( 42 " seeder {}".format(ensure_text(str(self.session.seeder))), 43 " added seed packages: {}".format( 44 ", ".join( 45 sorted( 46 "==".join(i.stem.split("-")) 47 for i in self.session.creator.purelib.iterdir() 48 if i.suffix == ".dist-info" 49 ), 50 ), 51 ), 52 ) 53 if self.session.activators: 54 lines.append(" activators {}".format(",".join(i.__class__.__name__ for i in self.session.activators))) 55 return os.linesep.join(lines) 56 57 58 def run_with_catch(args=None): 59 from virtualenv.config.cli.parser import VirtualEnvOptions 60 61 options = VirtualEnvOptions() 62 try: 63 run(args, options) 64 except (KeyboardInterrupt, SystemExit, Exception) as exception: 65 try: 66 if getattr(options, "with_traceback", False): 67 raise 68 else: 69 logging.error("%s: %s", type(exception).__name__, exception) 70 code = exception.code if isinstance(exception, SystemExit) else 1 71 sys.exit(code) 72 finally: 73 logging.shutdown() # force flush of log messages before the trace is printed 74 75 76 if __name__ == "__main__": # pragma: no cov 77 run_with_catch() # pragma: no cov 78 [end of src/virtualenv/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/virtualenv/__main__.py b/src/virtualenv/__main__.py --- a/src/virtualenv/__main__.py +++ b/src/virtualenv/__main__.py @@ -66,7 +66,8 @@ if getattr(options, "with_traceback", False): raise else: - logging.error("%s: %s", type(exception).__name__, exception) + if not (isinstance(exception, SystemExit) and exception.code == 0): + logging.error("%s: %s", type(exception).__name__, exception) code = exception.code if isinstance(exception, SystemExit) else 1 sys.exit(code) finally:
{"golden_diff": "diff --git a/src/virtualenv/__main__.py b/src/virtualenv/__main__.py\n--- a/src/virtualenv/__main__.py\n+++ b/src/virtualenv/__main__.py\n@@ -66,7 +66,8 @@\n if getattr(options, \"with_traceback\", False):\n raise\n else:\n- logging.error(\"%s: %s\", type(exception).__name__, exception)\n+ if not (isinstance(exception, SystemExit) and exception.code == 0):\n+ logging.error(\"%s: %s\", type(exception).__name__, exception)\n code = exception.code if isinstance(exception, SystemExit) else 1\n sys.exit(code)\n finally:\n", "issue": "`virtualenv --version` prints spurious error as of 20.0.24\n**Issue**\r\n\r\nWhen running `virtualenv --version`, a logger error is printed to stderr, though the return code is still 0.\r\n\r\n**Environment**\r\n\r\nTested with Python 3.7 and 3.8, virtualenvs managed with pipenv\r\nUbuntu 18.04 on WSL\r\n\r\n```\r\n$ rm-rf tmp && mkdir tmp && cd tmp\r\n$ pipenv install \"virtualenv==20.0.23\"\r\n$ pipenv run virtualenv --version\r\nvirtualenv 20.0.23 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py\r\n\r\n$ rm-rf tmp && mkdir tmp && cd tmp\r\n$ pipenv install \"virtualenv==20.0.24\"\r\n$ pipenv run virtualenv --version\r\nvirtualenv 20.0.24 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py\r\nERROR:root:SystemExit: 0\r\n$ pipenv run virtualenv --version > /dev/null\r\nERROR:root:SystemExit: 0\r\n$ echo $?\r\n0\r\n```\r\n\r\nNothing else is printed with `-vvv --with-traceback`\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, unicode_literals\n\nimport logging\nimport os\nimport sys\nfrom datetime import datetime\n\n\ndef run(args=None, options=None):\n start = datetime.now()\n from virtualenv.util.error import ProcessCallFailed\n from virtualenv.run import cli_run\n\n if args is None:\n args = sys.argv[1:]\n try:\n session = cli_run(args, options)\n logging.warning(LogSession(session, start))\n except ProcessCallFailed as exception:\n print(\"subprocess call failed for {} with code {}\".format(exception.cmd, exception.code))\n print(exception.out, file=sys.stdout, end=\"\")\n print(exception.err, file=sys.stderr, end=\"\")\n raise SystemExit(exception.code)\n\n\nclass LogSession(object):\n def __init__(self, session, start):\n self.session = session\n self.start = start\n\n def __str__(self):\n from virtualenv.util.six import ensure_text\n\n spec = self.session.creator.interpreter.spec\n elapsed = (datetime.now() - self.start).total_seconds() * 1000\n lines = [\n \"created virtual environment {} in {:.0f}ms\".format(spec, elapsed),\n \" creator {}\".format(ensure_text(str(self.session.creator))),\n ]\n if self.session.seeder.enabled:\n lines += (\n \" seeder {}\".format(ensure_text(str(self.session.seeder))),\n \" added seed packages: {}\".format(\n \", \".join(\n sorted(\n \"==\".join(i.stem.split(\"-\"))\n for i in self.session.creator.purelib.iterdir()\n if i.suffix == \".dist-info\"\n ),\n ),\n ),\n )\n if self.session.activators:\n lines.append(\" activators {}\".format(\",\".join(i.__class__.__name__ for i in self.session.activators)))\n return os.linesep.join(lines)\n\n\ndef run_with_catch(args=None):\n from virtualenv.config.cli.parser import VirtualEnvOptions\n\n options = VirtualEnvOptions()\n try:\n run(args, options)\n except (KeyboardInterrupt, SystemExit, Exception) as exception:\n try:\n if getattr(options, \"with_traceback\", False):\n raise\n else:\n logging.error(\"%s: %s\", type(exception).__name__, exception)\n code = exception.code if isinstance(exception, 
SystemExit) else 1\n sys.exit(code)\n finally:\n logging.shutdown() # force flush of log messages before the trace is printed\n\n\nif __name__ == \"__main__\": # pragma: no cov\n run_with_catch() # pragma: no cov\n", "path": "src/virtualenv/__main__.py"}]}
1530
152
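The fix above logs the caught exception only when it is not a clean `SystemExit(0)`. A runnable reduction of that pattern, with `run()` standing in for virtualenv's real entry point:

```python
import logging
import sys


def run():
    raise SystemExit(0)  # stand-in for e.g. the `--version` code path


def run_with_catch():
    try:
        run()
    except (KeyboardInterrupt, SystemExit, Exception) as exception:
        # SystemExit(0) is a normal exit, not an error: stay quiet.
        if not (isinstance(exception, SystemExit) and exception.code == 0):
            logging.error("%s: %s", type(exception).__name__, exception)
        code = exception.code if isinstance(exception, SystemExit) else 1
        sys.exit(code)


run_with_catch()  # exits 0 without printing "ERROR:root:SystemExit: 0"
```

Note that `SystemExit` is not a subclass of `Exception` in Python 3, which is why it must be named explicitly in the `except` tuple.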
gh_patches_debug_10902
rasdani/github-patches
git_diff
google__flax-362
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Pooling: passing "sequence of `n` `(low, high)` integer pairs" resulting in TypeError Trying to pass a tuple or list of tuples to a pool operation's padding parameter gives out the following errors: `TypeError: Unknown padding type: (1, 1).` `TypeError : unhashable type: 'list' ` Sample code for reproducing the bug: ```python3 from flax import nn from jax import random class FlaxModel(nn.Module): def apply(self, x): x = nn.max_pool(x, (3, 3), strides=(2, 2), padding=[(1, 1), (1, 1)]) return x rng = random.PRNGKey(0) model, _ = FlaxModel.init_by_shape(rng, [(1, 100, 100, 1)]) ``` </issue> <code> [start of flax/nn/pooling.py] 1 # Copyright 2020 The Flax Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Pooling modules.""" 16 17 from jax import lax 18 import jax.numpy as jnp 19 20 import numpy as onp 21 22 23 def pool(inputs, init, reduce_fn, window_shape, strides, padding): 24 """Helper function to define pooling functions. 25 26 Pooling functions are implemented using the ReduceWindow XLA op. 27 NOTE: Be aware that pooling is not generally differentiable. 28 That means providing a reduce_fn that is differentiable does not imply 29 that pool is differentiable. 30 31 Args: 32 inputs: input data with dimensions (batch, window dims..., features). 33 init: the initial value for the reduction 34 reduce_fn: a reduce function of the form `(T, T) -> T`. 35 window_shape: a shape tuple defining the window to reduce over. 36 strides: a sequence of `n` integers, representing the inter-window 37 strides. 38 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence 39 of `n` `(low, high)` integer pairs that give the padding to apply before 40 and after each spatial dimension. 41 Returns: 42 The output of the reduction for each window slice. 43 """ 44 strides = strides or (1,) * len(window_shape) 45 strides = (1,) + strides + (1,) 46 dims = (1,) + window_shape + (1,) 47 return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding) 48 49 50 def avg_pool(inputs, window_shape, strides=None, padding="VALID"): 51 """Pools the input by taking the average over a window. 52 53 Args: 54 inputs: input data with dimensions (batch, window dims..., features). 55 window_shape: a shape tuple defining the window to reduce over. 56 strides: a sequence of `n` integers, representing the inter-window 57 strides (default: `(1, ..., 1)`). 58 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence 59 of `n` `(low, high)` integer pairs that give the padding to apply before 60 and after each spatial dimension (default: `'VALID'`). 61 Returns: 62 The average for each window slice. 63 """ 64 y = pool(inputs, 0., lax.add, window_shape, strides, padding) 65 y = y / onp.prod(window_shape) 66 return y 67 68 69 def max_pool(inputs, window_shape, strides=None, padding="VALID"): 70 """Pools the input by taking the maximum of a window slice. 
71 72 Args: 73 inputs: input data with dimensions (batch, window dims..., features). 74 window_shape: a shape tuple defining the window to reduce over. 75 strides: a sequence of `n` integers, representing the inter-window 76 strides (default: `(1, ..., 1)`). 77 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence 78 of `n` `(low, high)` integer pairs that give the padding to apply before 79 and after each spatial dimension (default: `'VALID'`). 80 Returns: 81 The maximum for each window slice. 82 """ 83 y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding) 84 return y 85 [end of flax/nn/pooling.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/flax/nn/pooling.py b/flax/nn/pooling.py --- a/flax/nn/pooling.py +++ b/flax/nn/pooling.py @@ -44,6 +44,14 @@ strides = strides or (1,) * len(window_shape) strides = (1,) + strides + (1,) dims = (1,) + window_shape + (1,) + if not isinstance(padding, str): + padding = tuple(map(tuple, padding)) + assert(len(padding) == len(window_shape)), ( + f"padding {padding} must specify pads for same number of dims as " + f"window_shape {window_shape}") + assert(all([len(x) == 2 for x in padding])), ( + f"each entry in padding {padding} must be length 2") + padding = ((0,0),) + padding + ((0,0),) return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
{"golden_diff": "diff --git a/flax/nn/pooling.py b/flax/nn/pooling.py\n--- a/flax/nn/pooling.py\n+++ b/flax/nn/pooling.py\n@@ -44,6 +44,14 @@\n strides = strides or (1,) * len(window_shape)\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n+ if not isinstance(padding, str):\n+ padding = tuple(map(tuple, padding))\n+ assert(len(padding) == len(window_shape)), (\n+ f\"padding {padding} must specify pads for same number of dims as \"\n+ f\"window_shape {window_shape}\")\n+ assert(all([len(x) == 2 for x in padding])), (\n+ f\"each entry in padding {padding} must be length 2\")\n+ padding = ((0,0),) + padding + ((0,0),)\n return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n", "issue": "Pooling: passing \"sequence of `n` `(low, high)` integer pairs\" resulting in TypeError\nTrying to pass a tuple or list of tuples to a pool operation's padding parameter gives out the following errors: \r\n`TypeError: Unknown padding type: (1, 1).`\r\n`TypeError : unhashable type: 'list' `\r\n\r\n\r\nSample code for reproducing the bug:\r\n```python3\r\nfrom flax import nn\r\nfrom jax import random\r\n\r\nclass FlaxModel(nn.Module):\r\n def apply(self, x):\r\n x = nn.max_pool(x, (3, 3), strides=(2, 2), padding=[(1, 1), (1, 1)])\r\n return x\r\n\r\nrng = random.PRNGKey(0)\r\nmodel, _ = FlaxModel.init_by_shape(rng, [(1, 100, 100, 1)])\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pooling modules.\"\"\"\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as onp\n\n\ndef pool(inputs, init, reduce_fn, window_shape, strides, padding):\n \"\"\"Helper function to define pooling functions.\n\n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n That means providing a reduce_fn that is differentiable does not imply\n that pool is differentiable.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n init: the initial value for the reduction\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides.\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n strides = strides or (1,) * len(window_shape)\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n\n\ndef avg_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the average over a window.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: 
a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The average for each window slice.\n \"\"\"\n y = pool(inputs, 0., lax.add, window_shape, strides, padding)\n y = y / onp.prod(window_shape)\n return y\n\n\ndef max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the maximum of a window slice.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The maximum for each window slice.\n \"\"\"\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y\n", "path": "flax/nn/pooling.py"}]}
1708
226
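The fix normalizes sequence-style padding into nested tuples (lists are unhashable, which explains the second traceback in the issue) and pads the batch and feature dimensions with `(0, 0)`. A pure-Python sketch of that normalization step, with an assumed helper name:

```python
def normalize_padding(padding, window_shape):
    """Turn list-of-pairs padding into the tuple form lax.reduce_window accepts."""
    if isinstance(padding, str):
        return padding  # 'SAME' / 'VALID' pass through unchanged
    padding = tuple(map(tuple, padding))  # lists are unhashable; tuples are fine
    assert len(padding) == len(window_shape), (
        f"padding {padding} must give pads for each of {len(window_shape)} window dims"
    )
    assert all(len(p) == 2 for p in padding), "each entry must be a (low, high) pair"
    # Batch and feature dimensions are never padded.
    return ((0, 0),) + padding + ((0, 0),)


print(normalize_padding([(1, 1), (1, 1)], (3, 3)))
# ((0, 0), (1, 1), (1, 1), (0, 0))
```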
gh_patches_debug_18242
rasdani/github-patches
git_diff
Mailu__Mailu-1542
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dovecot does not use redis, so it should be removed from start script In core/dovecot/start.py REDIS_ADDRESS is resolved but redis is not used on dovecot. It should be removed from the script. </issue> <code> [start of core/dovecot/start.py] 1 #!/usr/bin/python3 2 3 import os 4 import glob 5 import multiprocessing 6 import logging as log 7 import sys 8 9 from podop import run_server 10 from socrate import system, conf 11 12 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING")) 13 14 def start_podop(): 15 os.setuid(8) 16 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§" 17 run_server(0, "dovecot", "/tmp/podop.socket", [ 18 ("quota", "url", url ), 19 ("auth", "url", url), 20 ("sieve", "url", url), 21 ]) 22 23 # Actual startup script 24 25 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front") 26 os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis") 27 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin") 28 os.environ["ANTISPAM_WEBUI_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_WEBUI", "antispam:11334") 29 if os.environ["WEBMAIL"] != "none": 30 os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail") 31 32 for dovecot_file in glob.glob("/conf/*.conf"): 33 conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file))) 34 35 os.makedirs("/conf/bin", exist_ok=True) 36 for script_file in glob.glob("/conf/*.script"): 37 out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script','')) 38 conf.jinja(script_file, os.environ, out_file) 39 os.chmod(out_file, 0o555) 40 41 # Run Podop, then postfix 42 multiprocessing.Process(target=start_podop).start() 43 os.system("chown mail:mail /mail") 44 os.system("chown -R mail:mail /var/lib/dovecot /conf") 45 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"]) 46 [end of core/dovecot/start.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/dovecot/start.py b/core/dovecot/start.py --- a/core/dovecot/start.py +++ b/core/dovecot/start.py @@ -21,13 +21,9 @@ ]) # Actual startup script - os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front") -os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis") os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin") os.environ["ANTISPAM_WEBUI_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_WEBUI", "antispam:11334") -if os.environ["WEBMAIL"] != "none": - os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail") for dovecot_file in glob.glob("/conf/*.conf"): conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
{"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -21,13 +21,9 @@\n ])\n \n # Actual startup script\n-\n os.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\n-os.environ[\"REDIS_ADDRESS\"] = system.get_host_address_from_environment(\"REDIS\", \"redis\")\n os.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\n os.environ[\"ANTISPAM_WEBUI_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_WEBUI\", \"antispam:11334\")\n-if os.environ[\"WEBMAIL\"] != \"none\":\n- os.environ[\"WEBMAIL_ADDRESS\"] = system.get_host_address_from_environment(\"WEBMAIL\", \"webmail\")\n \n for dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n", "issue": "Dovecot does not use redis, so it should be removed from start script \nIn core/dovecot/start.py REDIS_ADDRESS is resolved but redis is not used on dovecot. It should be removed from the script.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(8)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\n\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"REDIS_ADDRESS\"] = system.get_host_address_from_environment(\"REDIS\", \"redis\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_WEBUI_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_WEBUI\", \"antispam:11334\")\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.get_host_address_from_environment(\"WEBMAIL\", \"webmail\")\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\nos.makedirs(\"/conf/bin\", exist_ok=True)\nfor script_file in glob.glob(\"/conf/*.script\"):\n out_file = os.path.join(\"/conf/bin/\", os.path.basename(script_file).replace('.script',''))\n conf.jinja(script_file, os.environ, out_file)\n os.chmod(out_file, 0o555)\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}]}
1144
231
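The golden diff simply deletes the unused `REDIS_ADDRESS` and `WEBMAIL_ADDRESS` lookups. As a sketch of the underlying idea (resolve only the services a container actually talks to), here is a hypothetical helper; Mailu's real code resolves hosts via `socrate.system`, not a plain environment lookup:

```python
import os


def resolve_required(services):
    # Hypothetical: map env var name -> default host, resolving only what
    # this container uses (the real helper also performs DNS resolution).
    return {
        name + "_ADDRESS": os.environ.get(name, default_host)
        for name, default_host in services.items()
    }


# Dovecot talks to front, admin and antispam; redis is not on the list.
os.environ.update(resolve_required({
    "FRONT": "front",
    "ADMIN": "admin",
    "ANTISPAM_WEBUI": "antispam:11334",
}))
print(os.environ["ADMIN_ADDRESS"])  # "admin" unless ADMIN is set
```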
gh_patches_debug_17431
rasdani/github-patches
git_diff
translate__pootle-5736
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> update_stores' last updated date doesn't tickle up to project overview/language list When updating against templates, the /projects/projectname/ listing doesn't reflect the **last update**, **unless** the update affected a file in the **toplevel** dir. Within a language overview (/lang/projectname), changes deep in a directory hierarchy will also affect the parent directory's last-change date. using pootle 2.8.0b5 (TDF) screenshots to clarify. overview lists last update as e.g. 3 weeks ago: ![bildschirmfoto von 2016-12-22 16 40 21](https://cloud.githubusercontent.com/assets/477936/21430954/acb2d028-c865-11e6-84ec-0784563abba2.png) drilling down to the language reveals that the files in xmlsecurity actually had been updated only 8 hours ago (in fact xmlsecurity/uiconfig/ui.po) ![bildschirmfoto von 2016-12-22 16 41 26](https://cloud.githubusercontent.com/assets/477936/21431071/27d3ab2e-c866-11e6-8350-4690fe89cc03.png) (also sorting by the last updated is not working properly, goes from 10months to 8 hours, to 3weeks…) </issue> <code> [start of pootle/apps/pootle_data/project_data.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 from .utils import RelatedStoresDataTool, RelatedTPsDataTool 10 11 12 class ProjectDataTool(RelatedTPsDataTool): 13 """Retrieves aggregate stats for a Project""" 14 15 cache_key_name = "project" 16 17 def filter_data(self, qs): 18 return qs.filter(tp__project=self.context) 19 20 21 class ProjectResourceDataTool(RelatedStoresDataTool): 22 group_by = ("store__translation_project__language__code", ) 23 cache_key_name = "project_resource" 24 25 @property 26 def project_path(self): 27 return ( 28 "/%s%s" 29 % (self.project_code, self.tp_path)) 30 31 @property 32 def tp_path(self): 33 return ( 34 "/%s%s" 35 % (self.dir_path, 36 self.filename)) 37 38 def filter_data(self, qs): 39 return ( 40 qs.filter(store__translation_project__project__code=self.project_code) 41 .filter(store__tp_path__startswith=self.tp_path)) 42 43 @property 44 def context_name(self): 45 return "/projects%s" % self.project_path 46 47 48 class ProjectSetDataTool(RelatedTPsDataTool): 49 group_by = ("tp__project__code", ) 50 cache_key_name = "projects" 51 52 def get_root_child_path(self, child): 53 return child[self.group_by[0]] 54 55 @property 56 def context_name(self): 57 return "ALL" 58 [end of pootle/apps/pootle_data/project_data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/apps/pootle_data/project_data.py b/pootle/apps/pootle_data/project_data.py --- a/pootle/apps/pootle_data/project_data.py +++ b/pootle/apps/pootle_data/project_data.py @@ -6,6 +6,8 @@ # or later license. See the LICENSE file for a copy of the license and the # AUTHORS file for copyright and authorship information. +from pootle.core.delegate import revision + from .utils import RelatedStoresDataTool, RelatedTPsDataTool @@ -17,6 +19,11 @@ def filter_data(self, qs): return qs.filter(tp__project=self.context) + @property + def rev_cache_key(self): + return revision.get( + self.context.__class__)(self.context.directory).get(key="stats") + class ProjectResourceDataTool(RelatedStoresDataTool): group_by = ("store__translation_project__language__code", )
{"golden_diff": "diff --git a/pootle/apps/pootle_data/project_data.py b/pootle/apps/pootle_data/project_data.py\n--- a/pootle/apps/pootle_data/project_data.py\n+++ b/pootle/apps/pootle_data/project_data.py\n@@ -6,6 +6,8 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+from pootle.core.delegate import revision\n+\n from .utils import RelatedStoresDataTool, RelatedTPsDataTool\n \n \n@@ -17,6 +19,11 @@\n def filter_data(self, qs):\n return qs.filter(tp__project=self.context)\n \n+ @property\n+ def rev_cache_key(self):\n+ return revision.get(\n+ self.context.__class__)(self.context.directory).get(key=\"stats\")\n+\n \n class ProjectResourceDataTool(RelatedStoresDataTool):\n group_by = (\"store__translation_project__language__code\", )\n", "issue": "update_stores' last updated date doesn't tickle up to project overview/language list\nWhen updating against templates, the /projects/projectname/ listing doesn't reflect the **last update**, **unless** the update affected a file in the **toplevel** dir.\r\n\r\nWithin a language overview (/lang/projectname), changes deep in a directory hierarchy will also affect the parent directory's last-change date.\r\n\r\nusing pootle 2.8.0b5 (TDF)\r\n\r\nscreenshots to clarify. overview lists last update as e.g. 3 weeks ago:\r\n![bildschirmfoto von 2016-12-22 16 40 21](https://cloud.githubusercontent.com/assets/477936/21430954/acb2d028-c865-11e6-84ec-0784563abba2.png)\r\n\r\ndrilling down to the language reveals that the files in xmlsecurity actually had been updated only 8 hours ago (in fact xmlsecurity/uiconfig/ui.po)\r\n![bildschirmfoto von 2016-12-22 16 41 26](https://cloud.githubusercontent.com/assets/477936/21431071/27d3ab2e-c866-11e6-8350-4690fe89cc03.png)\r\n\r\n(also sorting by the last updated is not working properly, goes from 10months to 8 hours, to 3weeks\u2026) \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom .utils import RelatedStoresDataTool, RelatedTPsDataTool\n\n\nclass ProjectDataTool(RelatedTPsDataTool):\n \"\"\"Retrieves aggregate stats for a Project\"\"\"\n\n cache_key_name = \"project\"\n\n def filter_data(self, qs):\n return qs.filter(tp__project=self.context)\n\n\nclass ProjectResourceDataTool(RelatedStoresDataTool):\n group_by = (\"store__translation_project__language__code\", )\n cache_key_name = \"project_resource\"\n\n @property\n def project_path(self):\n return (\n \"/%s%s\"\n % (self.project_code, self.tp_path))\n\n @property\n def tp_path(self):\n return (\n \"/%s%s\"\n % (self.dir_path,\n self.filename))\n\n def filter_data(self, qs):\n return (\n qs.filter(store__translation_project__project__code=self.project_code)\n .filter(store__tp_path__startswith=self.tp_path))\n\n @property\n def context_name(self):\n return \"/projects%s\" % self.project_path\n\n\nclass ProjectSetDataTool(RelatedTPsDataTool):\n group_by = (\"tp__project__code\", )\n cache_key_name = \"projects\"\n\n def get_root_child_path(self, child):\n return child[self.group_by[0]]\n\n @property\n def context_name(self):\n return \"ALL\"\n", "path": "pootle/apps/pootle_data/project_data.py"}]}
1369
220
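The fix adds a `rev_cache_key` built from a revision delegate, so cached project stats go stale when anything underneath changes. A toy model of revision-scoped caching that bumps every ancestor directory on a deep change (illustrative only; pootle wires this through its `revision` delegate):

```python
class StatsCache:
    """Revision-scoped cache: any deep change bumps every ancestor, so
    cached aggregates (like last-updated) go stale all the way up."""

    def __init__(self):
        self._revisions = {}  # directory path -> int revision
        self._cache = {}      # (path, revision) -> computed stats

    def bump(self, path):
        while path:  # walk up to the root
            self._revisions[path] = self._revisions.get(path, 0) + 1
            path = path.rpartition("/")[0]

    def get_stats(self, path, compute):
        key = (path, self._revisions.get(path, 0))
        if key not in self._cache:
            self._cache[key] = compute()
        return self._cache[key]


cache = StatsCache()
cache.get_stats("/proj", lambda: {"last_updated": "3 weeks ago"})
cache.bump("/proj/xmlsecurity/uiconfig")  # a deep update...
cache.get_stats("/proj", lambda: {"last_updated": "8 hours ago"})  # ...recomputes
```

This matches the symptom in the issue: without ancestor invalidation, a change to `xmlsecurity/uiconfig/ui.po` never refreshes the project-level listing.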
gh_patches_debug_38407
rasdani/github-patches
git_diff
wagtail__wagtail-556
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Search: Make update_index update all backends Currently, it only updates the default backend. It should update all search backends. </issue> <code> [start of wagtail/wagtailsearch/management/commands/update_index.py] 1 from django.core.management.base import BaseCommand 2 from django.db import models 3 4 from wagtail.wagtailsearch import Indexed, get_search_backend 5 6 7 class Command(BaseCommand): 8 def handle(self, **options): 9 # Print info 10 self.stdout.write("Getting object list") 11 12 # Get list of indexed models 13 indexed_models = [model for model in models.get_models() if issubclass(model, Indexed)] 14 15 # Object set 16 object_set = {} 17 18 # Add all objects to object set and detect any duplicates 19 # Duplicates are caused when both a model and a derived model are indexed 20 # Eg, if BlogPost inherits from Page and both of these models are indexed 21 # If we were to add all objects from both models into the index, all the BlogPosts will have two entries 22 for model in indexed_models: 23 # Get toplevel content type 24 toplevel_content_type = model.indexed_get_toplevel_content_type() 25 26 # Loop through objects 27 for obj in model.get_indexed_objects(): 28 # Get key for this object 29 key = toplevel_content_type + ':' + str(obj.pk) 30 31 # Check if this key already exists 32 if key in object_set: 33 # Conflict, work out who should get this space 34 # The object with the longest content type string gets the space 35 # Eg, "wagtailcore.Page-myapp.BlogPost" kicks out "wagtailcore.Page" 36 if len(obj.indexed_get_content_type()) > len(object_set[key].indexed_get_content_type()): 37 # Take the spot 38 object_set[key] = obj 39 else: 40 # Space free, take it 41 object_set[key] = obj 42 43 # Search backend 44 if 'backend' in options: 45 s = options['backend'] 46 else: 47 s = get_search_backend() 48 49 # Reset the index 50 self.stdout.write("Reseting index") 51 s.reset_index() 52 53 # Add types 54 self.stdout.write("Adding types") 55 for model in indexed_models: 56 s.add_type(model) 57 58 # Add objects to index 59 self.stdout.write("Adding objects") 60 for result in s.add_bulk(object_set.values()): 61 self.stdout.write(result[0] + ' ' + str(result[1])) 62 63 # Refresh index 64 self.stdout.write("Refreshing index") 65 s.refresh_index() 66 [end of wagtail/wagtailsearch/management/commands/update_index.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailsearch/management/commands/update_index.py b/wagtail/wagtailsearch/management/commands/update_index.py --- a/wagtail/wagtailsearch/management/commands/update_index.py +++ b/wagtail/wagtailsearch/management/commands/update_index.py @@ -1,11 +1,22 @@ +from optparse import make_option + from django.core.management.base import BaseCommand from django.db import models +from django.conf import settings from wagtail.wagtailsearch import Indexed, get_search_backend +def get_search_backends(): + if hasattr(settings, 'WAGTAILSEARCH_BACKENDS'): + for backend in settings.WAGTAILSEARCH_BACKENDS.keys(): + yield backend, get_search_backend(backend) + else: + yield 'default', get_search_backend('default') + + class Command(BaseCommand): - def handle(self, **options): + def get_object_list(self): # Print info self.stdout.write("Getting object list") @@ -40,26 +51,51 @@ # Space free, take it object_set[key] = obj - # Search backend - if 'backend' in options: - s = options['backend'] - else: - s = get_search_backend() + return indexed_models, object_set.values() + + def update_backend(self, backend, models, object_list, backend_name=''): + # Print info + self.stdout.write("Updating backend: " + backend_name) + + # Get backend + if backend is None: + backend = get_search_backend(backend_name) # Reset the index - self.stdout.write("Reseting index") - s.reset_index() + self.stdout.write(backend_name + ": Reseting index") + backend.reset_index() # Add types - self.stdout.write("Adding types") - for model in indexed_models: - s.add_type(model) + self.stdout.write(backend_name + ": Adding types") + for model in models: + backend.add_type(model) # Add objects to index - self.stdout.write("Adding objects") - for result in s.add_bulk(object_set.values()): + self.stdout.write(backend_name + ": Adding objects") + for result in backend.add_bulk(object_list): self.stdout.write(result[0] + ' ' + str(result[1])) # Refresh index - self.stdout.write("Refreshing index") - s.refresh_index() + self.stdout.write(backend_name + ": Refreshing index") + backend.refresh_index() + + option_list = BaseCommand.option_list + ( + make_option('--backend', + action='store', + dest='backend_name', + default=False, + help="Specify a backend to update", + ), + ) + + def handle(self, **options): + # Get object list + models, object_list = self.get_object_list() + + # Update backends + if 'backend_name' in options: + backend = dict(get_search_backends())[options['backend_name']] + self.update_backend(backend, models, object_list, backend_name=options['backend_name']) + else: + for backend_name, backend in get_search_backends(): + self.update_backend(backend, models, object_list, backend_name=backend_name)
{"golden_diff": "diff --git a/wagtail/wagtailsearch/management/commands/update_index.py b/wagtail/wagtailsearch/management/commands/update_index.py\n--- a/wagtail/wagtailsearch/management/commands/update_index.py\n+++ b/wagtail/wagtailsearch/management/commands/update_index.py\n@@ -1,11 +1,22 @@\n+from optparse import make_option\n+\n from django.core.management.base import BaseCommand\n from django.db import models\n+from django.conf import settings\n \n from wagtail.wagtailsearch import Indexed, get_search_backend\n \n \n+def get_search_backends():\n+ if hasattr(settings, 'WAGTAILSEARCH_BACKENDS'):\n+ for backend in settings.WAGTAILSEARCH_BACKENDS.keys():\n+ yield backend, get_search_backend(backend)\n+ else:\n+ yield 'default', get_search_backend('default')\n+\n+\n class Command(BaseCommand):\n- def handle(self, **options):\n+ def get_object_list(self):\n # Print info\n self.stdout.write(\"Getting object list\")\n \n@@ -40,26 +51,51 @@\n # Space free, take it\n object_set[key] = obj\n \n- # Search backend\n- if 'backend' in options:\n- s = options['backend']\n- else:\n- s = get_search_backend()\n+ return indexed_models, object_set.values()\n+\n+ def update_backend(self, backend, models, object_list, backend_name=''):\n+ # Print info\n+ self.stdout.write(\"Updating backend: \" + backend_name)\n+\n+ # Get backend\n+ if backend is None:\n+ backend = get_search_backend(backend_name)\n \n # Reset the index\n- self.stdout.write(\"Reseting index\")\n- s.reset_index()\n+ self.stdout.write(backend_name + \": Reseting index\")\n+ backend.reset_index()\n \n # Add types\n- self.stdout.write(\"Adding types\")\n- for model in indexed_models:\n- s.add_type(model)\n+ self.stdout.write(backend_name + \": Adding types\")\n+ for model in models:\n+ backend.add_type(model)\n \n # Add objects to index\n- self.stdout.write(\"Adding objects\")\n- for result in s.add_bulk(object_set.values()):\n+ self.stdout.write(backend_name + \": Adding objects\")\n+ for result in backend.add_bulk(object_list):\n self.stdout.write(result[0] + ' ' + str(result[1]))\n \n # Refresh index\n- self.stdout.write(\"Refreshing index\")\n- s.refresh_index()\n+ self.stdout.write(backend_name + \": Refreshing index\")\n+ backend.refresh_index()\n+\n+ option_list = BaseCommand.option_list + (\n+ make_option('--backend',\n+ action='store',\n+ dest='backend_name',\n+ default=False,\n+ help=\"Specify a backend to update\",\n+ ),\n+ )\n+\n+ def handle(self, **options):\n+ # Get object list\n+ models, object_list = self.get_object_list()\n+\n+ # Update backends\n+ if 'backend_name' in options:\n+ backend = dict(get_search_backends())[options['backend_name']]\n+ self.update_backend(backend, models, object_list, backend_name=options['backend_name'])\n+ else:\n+ for backend_name, backend in get_search_backends():\n+ self.update_backend(backend, models, object_list, backend_name=backend_name)\n", "issue": "Search: Make update_index update all backends\nCurrently, it only updates the default backend. 
It should update all search backends.\n\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom django.db import models\n\nfrom wagtail.wagtailsearch import Indexed, get_search_backend\n\n\nclass Command(BaseCommand):\n def handle(self, **options):\n # Print info\n self.stdout.write(\"Getting object list\")\n\n # Get list of indexed models\n indexed_models = [model for model in models.get_models() if issubclass(model, Indexed)]\n\n # Object set\n object_set = {}\n\n # Add all objects to object set and detect any duplicates\n # Duplicates are caused when both a model and a derived model are indexed\n # Eg, if BlogPost inherits from Page and both of these models are indexed\n # If we were to add all objects from both models into the index, all the BlogPosts will have two entries\n for model in indexed_models:\n # Get toplevel content type\n toplevel_content_type = model.indexed_get_toplevel_content_type()\n\n # Loop through objects\n for obj in model.get_indexed_objects():\n # Get key for this object\n key = toplevel_content_type + ':' + str(obj.pk)\n\n # Check if this key already exists\n if key in object_set:\n # Conflict, work out who should get this space\n # The object with the longest content type string gets the space\n # Eg, \"wagtailcore.Page-myapp.BlogPost\" kicks out \"wagtailcore.Page\"\n if len(obj.indexed_get_content_type()) > len(object_set[key].indexed_get_content_type()):\n # Take the spot\n object_set[key] = obj\n else:\n # Space free, take it\n object_set[key] = obj\n\n # Search backend\n if 'backend' in options:\n s = options['backend']\n else:\n s = get_search_backend()\n\n # Reset the index\n self.stdout.write(\"Reseting index\")\n s.reset_index()\n\n # Add types\n self.stdout.write(\"Adding types\")\n for model in indexed_models:\n s.add_type(model)\n\n # Add objects to index\n self.stdout.write(\"Adding objects\")\n for result in s.add_bulk(object_set.values()):\n self.stdout.write(result[0] + ' ' + str(result[1]))\n\n # Refresh index\n self.stdout.write(\"Refreshing index\")\n s.refresh_index()\n", "path": "wagtail/wagtailsearch/management/commands/update_index.py"}]}
1208
748
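The core of the update_index fix above is the settings-driven backend enumeration; stripped of Django and Wagtail, the pattern looks like the sketch below (SETTINGS and the factory are illustrative stand-ins, not the project's real objects):

SETTINGS = {"WAGTAILSEARCH_BACKENDS": {"default": {}, "elasticsearch": {}}}

def get_search_backend(name):
    # Placeholder factory; the real project returns a configured backend object.
    return "<backend: %s>" % name

def get_search_backends(settings=SETTINGS):
    # Yield (name, backend) for every configured backend, or just "default"
    # when the setting is absent -- mirroring the generator in the patch.
    names = settings.get("WAGTAILSEARCH_BACKENDS") or {"default": {}}
    for name in names:
        yield name, get_search_backend(name)

for name, backend in get_search_backends():
    print(name, backend)  # each configured backend gets its own rebuild pass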
gh_patches_debug_12742
rasdani/github-patches
git_diff
ocadotechnology__codeforlife-portal-782
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Speak to legal team about updating our T&Cs for GDPR </issue> <code> [start of portal/admin.py] 1 # -*- coding: utf-8 -*- 2 # Code for Life 3 # 4 # Copyright (C) 2018, Ocado Innovation Limited 5 # 6 # This program is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU Affero General Public License as 8 # published by the Free Software Foundation, either version 3 of the 9 # License, or (at your option) any later version. 10 # 11 # This program is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU Affero General Public License for more details. 15 # 16 # You should have received a copy of the GNU Affero General Public License 17 # along with this program. If not, see <http://www.gnu.org/licenses/>. 18 # 19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence 20 # 21 # This licence does not grant any right, title or interest in any “Ocado” logos, 22 # trade names or the trademark “Ocado” or any other trademarks or domain names 23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other 24 # distinctive brand features of “Ocado” as may be secured from time to time. You 25 # must not distribute any modification of this program using the trademark 26 # “Ocado” or claim any affiliation or association with Ocado or its employees. 27 # 28 # You are not authorised to use the name Ocado (or any of its trade names) or 29 # the names of any author or contributor in advertising or for publicity purposes 30 # pertaining to the distribution of this program, without the prior written 31 # authorisation of Ocado. 32 # 33 # Any propagation, distribution or conveyance of this program must include this 34 # copyright notice and these terms. You must not misrepresent the origins of this 35 # program; modified versions of the program must be marked as such and not 36 # identified as the original program. 
37 from django.contrib import admin 38 from django.contrib.auth.models import User 39 from django.contrib.auth.admin import UserAdmin 40 41 42 from portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification 43 44 45 class ClassAdmin(admin.ModelAdmin): 46 search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name'] 47 list_filter = ['teacher'] 48 readonly_fields = ['teacher'] 49 50 51 class SchoolAdmin(admin.ModelAdmin): 52 search_fields = ['name', 'country', 'postcode', 'town'] 53 list_filter = ['postcode', 'country'] 54 55 56 class StudentAdmin(admin.ModelAdmin): 57 search_fields = ['new_user__first_name', 'new_user__last_name'] 58 list_filter = ['class_field', 'class_field__teacher'] 59 readonly_fields = ['user', 'new_user'] 60 raw_id_fields = ['class_field', 'pending_class_request'] 61 62 63 class TeacherAdmin(admin.ModelAdmin): 64 search_fields = ['new_user__first_name', 'new_user__last_name'] 65 list_filter = ['school'] 66 readonly_fields = ['user', 'new_user'] 67 raw_id_fields = ['school', 'pending_join_request'] 68 69 70 class UserProfileAdmin(admin.ModelAdmin): 71 search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined'] 72 list_filter = ['user__date_joined'] 73 list_display = ['user', 'joined_recently'] 74 readonly_fields = ['user'] 75 76 77 class EmailVerificationAdmin(admin.ModelAdmin): 78 search_fields = ['new_user'] 79 80 81 UserAdmin.list_display += ('date_joined',) 82 UserAdmin.list_filter += ('date_joined',) 83 84 85 admin.site.register(Class, ClassAdmin) 86 admin.site.register(Student, StudentAdmin) 87 admin.site.register(Guardian) 88 admin.site.register(Teacher, TeacherAdmin) 89 admin.site.register(School, SchoolAdmin) 90 admin.site.unregister(User) 91 admin.site.register(User, UserAdmin) 92 admin.site.register(UserProfile, UserProfileAdmin) 93 admin.site.register(FrontPageNews) 94 admin.site.register(EmailVerification, EmailVerificationAdmin) 95 [end of portal/admin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/portal/admin.py b/portal/admin.py --- a/portal/admin.py +++ b/portal/admin.py @@ -68,14 +68,14 @@ class UserProfileAdmin(admin.ModelAdmin): - search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined'] + search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined'] list_filter = ['user__date_joined'] list_display = ['user', 'joined_recently'] readonly_fields = ['user'] class EmailVerificationAdmin(admin.ModelAdmin): - search_fields = ['new_user'] + search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined'] UserAdmin.list_display += ('date_joined',)
{"golden_diff": "diff --git a/portal/admin.py b/portal/admin.py\n--- a/portal/admin.py\n+++ b/portal/admin.py\n@@ -68,14 +68,14 @@\n \n \n class UserProfileAdmin(admin.ModelAdmin):\n- search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n+ search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n readonly_fields = ['user']\n \n \n class EmailVerificationAdmin(admin.ModelAdmin):\n- search_fields = ['new_user']\n+ search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']\n \n \n UserAdmin.list_display += ('date_joined',)\n", "issue": "Speak to legal team about updating our T&Cs for GDPR\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2018, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nfrom portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification\n\n\nclass ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n readonly_fields = ['teacher']\n\n\nclass SchoolAdmin(admin.ModelAdmin):\n search_fields = ['name', 'country', 'postcode', 'town']\n list_filter = ['postcode', 'country']\n\n\nclass StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['class_field', 'pending_class_request']\n\n\nclass TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['school', 'pending_join_request']\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n readonly_fields = ['user']\n\n\nclass EmailVerificationAdmin(admin.ModelAdmin):\n search_fields = ['new_user']\n\n\nUserAdmin.list_display += ('date_joined',)\nUserAdmin.list_filter += ('date_joined',)\n\n\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Guardian)\nadmin.site.register(Teacher, TeacherAdmin)\nadmin.site.register(School, SchoolAdmin)\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(FrontPageNews)\nadmin.site.register(EmailVerification, EmailVerificationAdmin)\n", "path": "portal/admin.py"}]}
1579
194
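The one-line golden diff above works because Django resolves every search_fields entry as an ORM lookup path split on double underscores: "new_username" names no real model field, while "user__username" follows the foreign key. A toy resolver — an analogy, not Django internals — makes the traversal and the failure mode concrete:

class User:
    def __init__(self, username):
        self.username = username

class UserProfile:
    def __init__(self, user):
        self.user = user

def resolve(obj, lookup):
    # Mimics how an "a__b__c" lookup walks relations one hop at a time.
    for part in lookup.split("__"):
        obj = getattr(obj, part)  # AttributeError here ~ Django's FieldError
    return obj

profile = UserProfile(User("alice"))
print(resolve(profile, "user__username"))   # -> 'alice'
# resolve(profile, "new_username") raises: the path names no real attribute,
# which is exactly why the admin search broke before the patch.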
gh_patches_debug_22097
rasdani/github-patches
git_diff
svthalia__concrexit-2199
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add filter/display of members-only value to document admin ### Is your feature request related to a problem? Please describe. It is not really issue to see which documents are marked as members only. And it is impossible to easily get a list with documents that have a true/false value. ### Describe the solution you'd like I'd like to see more information about the documents in the admin page so that I do not have to open the detail page. ### Motivation Easier to manage these files. ### Describe alternatives you've considered The only alternative is not doing this. ### Additional context #2084 could have been prevented. </issue> <code> [start of website/documents/admin.py] 1 """Registers admin interfaces for the documents module.""" 2 from django.contrib import admin 3 from django.contrib.admin import ModelAdmin 4 from django.utils.translation import gettext_lazy as _ 5 6 from documents import forms 7 from documents.models import ( 8 AnnualDocument, 9 AssociationDocument, 10 EventDocument, 11 GeneralMeeting, 12 Minutes, 13 MiscellaneousDocument, 14 ) 15 from documents.services import is_owner 16 17 18 class MinutesInline(admin.StackedInline): 19 """Inline for minutes of a general meeting.""" 20 21 model = Minutes 22 form = forms.MinutesForm 23 extra = 0 24 25 26 @admin.register(GeneralMeeting) 27 class GeneralMeetingAdmin(ModelAdmin): 28 """Manage the general meetings.""" 29 30 form = forms.GeneralMeetingForm 31 inlines = [ 32 MinutesInline, 33 ] 34 list_filter = ("datetime",) 35 36 37 class LectureYearFilter(admin.SimpleListFilter): 38 """Filter the memberships on those started or ended in a lecture year.""" 39 40 title = _("lecture year") 41 parameter_name = "lecture_year" 42 43 def lookups(self, request, model_admin): 44 if AnnualDocument.objects.count() > 0: 45 first_year = AnnualDocument.objects.order_by("year").first().year 46 last_year = AnnualDocument.objects.order_by("year").last().year 47 48 return [ 49 (year, f"{year}-{year + 1}") 50 for year in range(last_year, first_year - 1, -1) 51 ] 52 return [] 53 54 def queryset(self, request, queryset): 55 if not self.value(): 56 return queryset 57 58 year = int(self.value()) 59 60 return queryset.filter(year=year) 61 62 63 @admin.register(AnnualDocument) 64 class AnnualDocumentAdmin(ModelAdmin): 65 """Manage the annual documents.""" 66 67 form = forms.AnnualDocumentForm 68 list_filter = ( 69 LectureYearFilter, 70 "created", 71 "last_updated", 72 ) 73 74 75 @admin.register(AssociationDocument) 76 class AssociationDocumentAdmin(ModelAdmin): 77 """Manage the association documents.""" 78 79 form = forms.AssociationDocumentForm 80 list_filter = ( 81 "created", 82 "last_updated", 83 ) 84 85 86 @admin.register(EventDocument) 87 class EventDocumentAdmin(ModelAdmin): 88 """Manage the event documents.""" 89 90 form = forms.EventDocumentForm 91 list_filter = ( 92 "created", 93 "last_updated", 94 ) 95 96 def has_change_permission(self, request, obj=None): 97 """Only allow access to the change form if the user is an owner.""" 98 if obj is not None and not is_owner(request.member, obj): 99 return False 100 return super().has_change_permission(request, obj) 101 102 def has_delete_permission(self, request, obj=None): 103 """Only allow delete access if the user is an owner.""" 104 if obj is not None and not is_owner(request.member, obj): 105 return False 106 return super().has_delete_permission(request, obj) 107 108 109 
@admin.register(MiscellaneousDocument) 110 class MiscellaneousDocumentAdmin(ModelAdmin): 111 """Manage the miscellaneous documents.""" 112 113 form = forms.MiscellaneousDocumentForm 114 list_filter = ( 115 "created", 116 "last_updated", 117 ) 118 [end of website/documents/admin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/documents/admin.py b/website/documents/admin.py --- a/website/documents/admin.py +++ b/website/documents/admin.py @@ -69,6 +69,11 @@ LectureYearFilter, "created", "last_updated", + "members_only", + ) + list_display = ( + "__str__", + "members_only", ) @@ -80,6 +85,11 @@ list_filter = ( "created", "last_updated", + "members_only", + ) + list_display = ( + "__str__", + "members_only", ) @@ -91,6 +101,11 @@ list_filter = ( "created", "last_updated", + "members_only", + ) + list_display = ( + "__str__", + "members_only", ) def has_change_permission(self, request, obj=None): @@ -114,4 +129,9 @@ list_filter = ( "created", "last_updated", + "members_only", + ) + list_display = ( + "__str__", + "members_only", )
{"golden_diff": "diff --git a/website/documents/admin.py b/website/documents/admin.py\n--- a/website/documents/admin.py\n+++ b/website/documents/admin.py\n@@ -69,6 +69,11 @@\n LectureYearFilter,\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n \n \n@@ -80,6 +85,11 @@\n list_filter = (\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n \n \n@@ -91,6 +101,11 @@\n list_filter = (\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n \n def has_change_permission(self, request, obj=None):\n@@ -114,4 +129,9 @@\n list_filter = (\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n", "issue": "Add filter/display of members-only value to document admin\n### Is your feature request related to a problem? Please describe.\r\nIt is not really issue to see which documents are marked as members only. And it is impossible to easily get a list with documents that have a true/false value.\r\n\r\n### Describe the solution you'd like\r\nI'd like to see more information about the documents in the admin page so that I do not have to open the detail page.\r\n\r\n### Motivation\r\nEasier to manage these files.\r\n\r\n### Describe alternatives you've considered\r\nThe only alternative is not doing this.\r\n\r\n### Additional context\r\n#2084 could have been prevented.\r\n\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the documents module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\nfrom django.utils.translation import gettext_lazy as _\n\nfrom documents import forms\nfrom documents.models import (\n AnnualDocument,\n AssociationDocument,\n EventDocument,\n GeneralMeeting,\n Minutes,\n MiscellaneousDocument,\n)\nfrom documents.services import is_owner\n\n\nclass MinutesInline(admin.StackedInline):\n \"\"\"Inline for minutes of a general meeting.\"\"\"\n\n model = Minutes\n form = forms.MinutesForm\n extra = 0\n\n\[email protected](GeneralMeeting)\nclass GeneralMeetingAdmin(ModelAdmin):\n \"\"\"Manage the general meetings.\"\"\"\n\n form = forms.GeneralMeetingForm\n inlines = [\n MinutesInline,\n ]\n list_filter = (\"datetime\",)\n\n\nclass LectureYearFilter(admin.SimpleListFilter):\n \"\"\"Filter the memberships on those started or ended in a lecture year.\"\"\"\n\n title = _(\"lecture year\")\n parameter_name = \"lecture_year\"\n\n def lookups(self, request, model_admin):\n if AnnualDocument.objects.count() > 0:\n first_year = AnnualDocument.objects.order_by(\"year\").first().year\n last_year = AnnualDocument.objects.order_by(\"year\").last().year\n\n return [\n (year, f\"{year}-{year + 1}\")\n for year in range(last_year, first_year - 1, -1)\n ]\n return []\n\n def queryset(self, request, queryset):\n if not self.value():\n return queryset\n\n year = int(self.value())\n\n return queryset.filter(year=year)\n\n\[email protected](AnnualDocument)\nclass AnnualDocumentAdmin(ModelAdmin):\n \"\"\"Manage the annual documents.\"\"\"\n\n form = forms.AnnualDocumentForm\n list_filter = (\n LectureYearFilter,\n \"created\",\n \"last_updated\",\n )\n\n\[email protected](AssociationDocument)\nclass AssociationDocumentAdmin(ModelAdmin):\n \"\"\"Manage the association documents.\"\"\"\n\n form = forms.AssociationDocumentForm\n list_filter = (\n \"created\",\n \"last_updated\",\n 
)\n\n\[email protected](EventDocument)\nclass EventDocumentAdmin(ModelAdmin):\n \"\"\"Manage the event documents.\"\"\"\n\n form = forms.EventDocumentForm\n list_filter = (\n \"created\",\n \"last_updated\",\n )\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Only allow access to the change form if the user is an owner.\"\"\"\n if obj is not None and not is_owner(request.member, obj):\n return False\n return super().has_change_permission(request, obj)\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Only allow delete access if the user is an owner.\"\"\"\n if obj is not None and not is_owner(request.member, obj):\n return False\n return super().has_delete_permission(request, obj)\n\n\[email protected](MiscellaneousDocument)\nclass MiscellaneousDocumentAdmin(ModelAdmin):\n \"\"\"Manage the miscellaneous documents.\"\"\"\n\n form = forms.MiscellaneousDocumentForm\n list_filter = (\n \"created\",\n \"last_updated\",\n )\n", "path": "website/documents/admin.py"}]}
1570
274
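Since the patch above pastes the same two options into four admin classes, one conceivable follow-up (an idea only, not part of the merged diff) is hoisting them into a mixin; Django reads list_display and list_filter as plain class attributes, so ordinary inheritance is enough:

class MembersOnlyAdminMixin:
    # Shared admin options for the document admins; Django's ModelAdmin
    # reads these as plain class attributes, so an ordinary mixin suffices.
    list_filter = ("created", "last_updated", "members_only")
    list_display = ("__str__", "members_only")

class FakeModelAdmin:                      # stand-in for django's ModelAdmin
    list_filter = ()
    list_display = ("__str__",)

class AnnualDocumentAdmin(MembersOnlyAdminMixin, FakeModelAdmin):
    pass

# MRO picks the mixin's attributes over the base's defaults.
assert AnnualDocumentAdmin.list_filter == ("created", "last_updated", "members_only")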
gh_patches_debug_8038
rasdani/github-patches
git_diff
microsoft__botbuilder-python-302
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> NumberPrompt doesn't accept retry value ## Version v4.5 ## Describe the bug When you send an invalid number to a `NumberPrompt`, it sends out a retry prompt. When attempting to send a 2nd response after being reprompted, you get a timeout error. ## To Reproduce 1. Create a `NumberPrompt` object 2. When it prompts you for a number, send in a non-numeric value (e.g. `"hello"`) * this will trigger a retry prompt (e.g. `"You must enter a number."`) 3. Try sending in another value--no matter what type of value, you get a timeout error ![image](https://user-images.githubusercontent.com/35248895/62598200-7000fd00-b89d-11e9-9b02-cc04beb609d4.png) ![image](https://user-images.githubusercontent.com/35248895/62598223-8444fa00-b89d-11e9-918b-8578efd179ac.png) ## Expected behavior To be able to send in a 2nd value when reprompted ## Additional context ```python async def test_number_prompt_retry(self): async def exec_test(turn_context: TurnContext) -> None: dialog_context: DialogContext = await dialogs.create_context(turn_context) results: DialogTurnResult = await dialog_context.continue_dialog() if results.status == DialogTurnStatus.Empty: options = PromptOptions( prompt=Activity(type=ActivityTypes.message, text="Enter a number."), retry_prompt=Activity( type=ActivityTypes.message, text="You must enter a number." ), ) await dialog_context.prompt("NumberPrompt", options) elif results.status == DialogTurnStatus.Complete: number_result = results.result await turn_context.send_activity( MessageFactory.text(f"Bot received the number '{number_result}'.") ) await convo_state.save_changes(turn_context) adapter = TestAdapter(exec_test) convo_state = ConversationState(MemoryStorage()) dialog_state = convo_state.create_property("dialogState") dialogs = DialogSet(dialog_state) number_prompt = NumberPrompt( dialog_id="NumberPrompt", validator=None, default_locale=Culture.English ) dialogs.add(number_prompt) step1 = await adapter.send("hello") step2 = await step1.assert_reply("Enter a number.") # TODO: something is breaking in the validators or retry prompt # where it does not accept the 2nd answer after reprompting the user # for another value step3 = await step2.send("hello") step4 = await step3.assert_reply("You must enter a number.") step5 = await step4.send("64") await step5.assert_reply("Bot received the number '64'.") ``` [bug] </issue> <code> [start of libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 
3 4 from typing import Callable, Dict 5 6 from recognizers_number import recognize_number 7 from recognizers_text import Culture, ModelResult 8 from babel.numbers import parse_decimal 9 10 from botbuilder.core.turn_context import TurnContext 11 from botbuilder.schema import ActivityTypes 12 13 from .prompt import Prompt, PromptValidatorContext 14 from .prompt_options import PromptOptions 15 from .prompt_recognizer_result import PromptRecognizerResult 16 17 18 class NumberPrompt(Prompt): 19 # TODO: PromptValidator needs to be fixed 20 # Does not accept answer as intended (times out) 21 def __init__( 22 self, 23 dialog_id: str, 24 validator: Callable[[PromptValidatorContext], bool] = None, 25 default_locale: str = None, 26 ): 27 super(NumberPrompt, self).__init__(dialog_id, validator) 28 self.default_locale = default_locale 29 30 async def on_prompt( 31 self, 32 turn_context: TurnContext, 33 state: Dict[str, object], 34 options: PromptOptions, 35 is_retry: bool, 36 ): 37 if not turn_context: 38 raise TypeError("NumberPrompt.on_prompt(): turn_context cannot be None.") 39 if not options: 40 raise TypeError("NumberPrompt.on_prompt(): options cannot be None.") 41 42 if is_retry and options.retry_prompt is not None: 43 turn_context.send_activity(options.retry_prompt) 44 elif options.prompt is not None: 45 await turn_context.send_activity(options.prompt) 46 47 async def on_recognize( 48 self, 49 turn_context: TurnContext, 50 state: Dict[str, object], 51 options: PromptOptions, 52 ) -> PromptRecognizerResult: 53 if not turn_context: 54 raise TypeError("NumberPrompt.on_recognize(): turn_context cannot be None.") 55 56 result = PromptRecognizerResult() 57 if turn_context.activity.type == ActivityTypes.message: 58 message = turn_context.activity 59 culture = self._get_culture(turn_context) 60 results: [ModelResult] = recognize_number(message.text, culture) 61 62 if results: 63 result.succeeded = True 64 result.value = parse_decimal( 65 results[0].resolution["value"], locale=culture.replace("-", "_") 66 ) 67 68 return result 69 70 def _get_culture(self, turn_context: TurnContext): 71 culture = ( 72 turn_context.activity.locale 73 if turn_context.activity.locale 74 else self.default_locale 75 ) 76 77 if not culture: 78 culture = Culture.English 79 80 return culture 81 [end of libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py @@ -40,7 +40,7 @@ raise TypeError("NumberPrompt.on_prompt(): options cannot be None.") if is_retry and options.retry_prompt is not None: - turn_context.send_activity(options.retry_prompt) + await turn_context.send_activity(options.retry_prompt) elif options.prompt is not None: await turn_context.send_activity(options.prompt)
{"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n@@ -40,7 +40,7 @@\n raise TypeError(\"NumberPrompt.on_prompt(): options cannot be None.\")\n \n if is_retry and options.retry_prompt is not None:\n- turn_context.send_activity(options.retry_prompt)\n+ await turn_context.send_activity(options.retry_prompt)\n elif options.prompt is not None:\n await turn_context.send_activity(options.prompt)\n", "issue": "NumberPrompt doesn't accept retry value\n## Version\r\nv4.5\r\n\r\n## Describe the bug\r\nWhen you send an invalid number to a `NumberPrompt`, it sends out a retry prompt.\r\nWhen attempting to send a 2nd response after being reprompted, you get a timeout error.\r\n\r\n\r\n\r\n## To Reproduce\r\n1. Create a `NumberPrompt` object\r\n2. When it prompts you for a number, send in a non-numeric value (e.g. `\"hello\"`)\r\n * this will trigger a retry prompt (e.g. `\"You must enter a number.\"`)\r\n3. Try sending in another value--no matter what type of value, you get a timeout error\r\n\r\n![image](https://user-images.githubusercontent.com/35248895/62598200-7000fd00-b89d-11e9-9b02-cc04beb609d4.png)\r\n\r\n![image](https://user-images.githubusercontent.com/35248895/62598223-8444fa00-b89d-11e9-918b-8578efd179ac.png)\r\n\r\n\r\n\r\n\r\n## Expected behavior\r\nTo be able to send in a 2nd value when reprompted\r\n\r\n## Additional context\r\n```python\r\nasync def test_number_prompt_retry(self):\r\n async def exec_test(turn_context: TurnContext) -> None:\r\n dialog_context: DialogContext = await dialogs.create_context(turn_context)\r\n\r\n results: DialogTurnResult = await dialog_context.continue_dialog()\r\n\r\n if results.status == DialogTurnStatus.Empty:\r\n options = PromptOptions(\r\n prompt=Activity(type=ActivityTypes.message, text=\"Enter a number.\"),\r\n retry_prompt=Activity(\r\n type=ActivityTypes.message, text=\"You must enter a number.\"\r\n ),\r\n )\r\n await dialog_context.prompt(\"NumberPrompt\", options)\r\n elif results.status == DialogTurnStatus.Complete:\r\n number_result = results.result\r\n await turn_context.send_activity(\r\n MessageFactory.text(f\"Bot received the number '{number_result}'.\")\r\n )\r\n\r\n await convo_state.save_changes(turn_context)\r\n\r\n adapter = TestAdapter(exec_test)\r\n\r\n convo_state = ConversationState(MemoryStorage())\r\n dialog_state = convo_state.create_property(\"dialogState\")\r\n dialogs = DialogSet(dialog_state)\r\n number_prompt = NumberPrompt(\r\n dialog_id=\"NumberPrompt\", validator=None, default_locale=Culture.English\r\n )\r\n dialogs.add(number_prompt)\r\n\r\n step1 = await adapter.send(\"hello\")\r\n step2 = await step1.assert_reply(\"Enter a number.\")\r\n # TODO: something is breaking in the validators or retry prompt\r\n # where it does not accept the 2nd answer after reprompting the user\r\n # for another value\r\n step3 = await step2.send(\"hello\")\r\n step4 = await step3.assert_reply(\"You must enter a number.\")\r\n step5 = await step4.send(\"64\")\r\n await step5.assert_reply(\"Bot received the number '64'.\")\r\n```\r\n\r\n[bug]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import Callable, Dict\n\nfrom recognizers_number import recognize_number\nfrom recognizers_text import Culture, ModelResult\nfrom babel.numbers import parse_decimal\n\nfrom botbuilder.core.turn_context import TurnContext\nfrom botbuilder.schema import ActivityTypes\n\nfrom .prompt import Prompt, PromptValidatorContext\nfrom .prompt_options import PromptOptions\nfrom .prompt_recognizer_result import PromptRecognizerResult\n\n\nclass NumberPrompt(Prompt):\n # TODO: PromptValidator needs to be fixed\n # Does not accept answer as intended (times out)\n def __init__(\n self,\n dialog_id: str,\n validator: Callable[[PromptValidatorContext], bool] = None,\n default_locale: str = None,\n ):\n super(NumberPrompt, self).__init__(dialog_id, validator)\n self.default_locale = default_locale\n\n async def on_prompt(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n is_retry: bool,\n ):\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_prompt(): turn_context cannot be None.\")\n if not options:\n raise TypeError(\"NumberPrompt.on_prompt(): options cannot be None.\")\n\n if is_retry and options.retry_prompt is not None:\n turn_context.send_activity(options.retry_prompt)\n elif options.prompt is not None:\n await turn_context.send_activity(options.prompt)\n\n async def on_recognize(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n ) -> PromptRecognizerResult:\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_recognize(): turn_context cannot be None.\")\n\n result = PromptRecognizerResult()\n if turn_context.activity.type == ActivityTypes.message:\n message = turn_context.activity\n culture = self._get_culture(turn_context)\n results: [ModelResult] = recognize_number(message.text, culture)\n\n if results:\n result.succeeded = True\n result.value = parse_decimal(\n results[0].resolution[\"value\"], locale=culture.replace(\"-\", \"_\")\n )\n\n return result\n\n def _get_culture(self, turn_context: TurnContext):\n culture = (\n turn_context.activity.locale\n if turn_context.activity.locale\n else self.default_locale\n )\n\n if not culture:\n culture = Culture.English\n\n return culture\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py"}]}
1899
162
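The golden diff above is a single added await, but the failure mode deserves spelling out: calling a coroutine function without await only creates a coroutine object and discards it, so the retry activity is never sent and the test adapter times out waiting for a reply. A minimal reproduction with a toy send_activity (not the SDK's):

import asyncio

async def send_activity(text):
    print("sent:", text)

async def buggy_retry():
    send_activity("You must enter a number.")   # coroutine created, never run
                                                # (Python warns: never awaited)

async def fixed_retry():
    await send_activity("You must enter a number.")  # actually executes

asyncio.run(buggy_retry())   # prints nothing
asyncio.run(fixed_retry())   # prints the retry prompt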
gh_patches_debug_24268
rasdani/github-patches
git_diff
dmlc__gluon-nlp-832
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ATIS/SNIPS datasets and GLUE datasets don't appear in the website API doc http://gluon-nlp.mxnet.io/api/modules/data.html does not show the details of ATISDataset/SNIPSDataset and GlueCoLA, GlueSST2, GlueSTSB, GlueQQP, GlueRTE, GlueMNLI, GlueQNLI, GlueWNLI </issue> <code> [start of src/gluonnlp/data/__init__.py] 1 # coding: utf-8 2 3 # Licensed to the Apache Software Foundation (ASF) under one 4 # or more contributor license agreements. See the NOTICE file 5 # distributed with this work for additional information 6 # regarding copyright ownership. The ASF licenses this file 7 # to you under the Apache License, Version 2.0 (the 8 # "License"); you may not use this file except in compliance 9 # with the License. You may obtain a copy of the License at 10 # 11 # http://www.apache.org/licenses/LICENSE-2.0 12 # 13 # Unless required by applicable law or agreed to in writing, 14 # software distributed under the License is distributed on an 15 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 # KIND, either express or implied. See the License for the 17 # specific language governing permissions and limitations 18 # under the License. 19 20 # pylint: disable=wildcard-import 21 """This module includes common utilities such as data readers and counter.""" 22 23 from . import (batchify, candidate_sampler, conll, corpora, dataloader, 24 dataset, question_answering, registry, sampler, sentiment, 25 stream, transforms, translation, utils, 26 word_embedding_evaluation, intent_slot) 27 from .candidate_sampler import * 28 from .conll import * 29 from .glue import * 30 from .corpora import * 31 from .dataloader import * 32 from .dataset import * 33 from .question_answering import * 34 from .registry import * 35 from .sampler import * 36 from .sentiment import * 37 from .stream import * 38 from .transforms import * 39 from .translation import * 40 from .utils import * 41 from .word_embedding_evaluation import * 42 from .intent_slot import * 43 44 __all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__ 45 + dataset.__all__ + corpora.__all__ + sentiment.__all__ + 46 word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ + 47 translation.__all__ + registry.__all__ + question_answering.__all__ 48 + dataloader.__all__ + candidate_sampler.__all__) 49 [end of src/gluonnlp/data/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/gluonnlp/data/__init__.py b/src/gluonnlp/data/__init__.py --- a/src/gluonnlp/data/__init__.py +++ b/src/gluonnlp/data/__init__.py @@ -23,7 +23,7 @@ from . import (batchify, candidate_sampler, conll, corpora, dataloader, dataset, question_answering, registry, sampler, sentiment, stream, transforms, translation, utils, - word_embedding_evaluation, intent_slot) + word_embedding_evaluation, intent_slot, glue) from .candidate_sampler import * from .conll import * from .glue import * @@ -42,7 +42,8 @@ from .intent_slot import * __all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__ - + dataset.__all__ + corpora.__all__ + sentiment.__all__ + - word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ + - translation.__all__ + registry.__all__ + question_answering.__all__ - + dataloader.__all__ + candidate_sampler.__all__) + + dataset.__all__ + corpora.__all__ + sentiment.__all__ + + word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ + + translation.__all__ + registry.__all__ + question_answering.__all__ + + dataloader.__all__ + candidate_sampler.__all__ + intent_slot.__all__ + + glue.__all__)
{"golden_diff": "diff --git a/src/gluonnlp/data/__init__.py b/src/gluonnlp/data/__init__.py\n--- a/src/gluonnlp/data/__init__.py\n+++ b/src/gluonnlp/data/__init__.py\n@@ -23,7 +23,7 @@\n from . import (batchify, candidate_sampler, conll, corpora, dataloader,\n dataset, question_answering, registry, sampler, sentiment,\n stream, transforms, translation, utils,\n- word_embedding_evaluation, intent_slot)\n+ word_embedding_evaluation, intent_slot, glue)\n from .candidate_sampler import *\n from .conll import *\n from .glue import *\n@@ -42,7 +42,8 @@\n from .intent_slot import *\n \n __all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__\n- + dataset.__all__ + corpora.__all__ + sentiment.__all__ +\n- word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ +\n- translation.__all__ + registry.__all__ + question_answering.__all__\n- + dataloader.__all__ + candidate_sampler.__all__)\n+ + dataset.__all__ + corpora.__all__ + sentiment.__all__\n+ + word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__\n+ + translation.__all__ + registry.__all__ + question_answering.__all__\n+ + dataloader.__all__ + candidate_sampler.__all__ + intent_slot.__all__\n+ + glue.__all__)\n", "issue": "ATIS/SNIPS datasets and GLUE datasets don't appear in the website API doc \nhttp://gluon-nlp.mxnet.io/api/modules/data.html\r\n\r\ndoes not show the details of ATISDataset/SNIPSDataset and GlueCoLA, GlueSST2, GlueSTSB, GlueQQP, GlueRTE, GlueMNLI, GlueQNLI, GlueWNLI\r\n\n", "before_files": [{"content": "# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=wildcard-import\n\"\"\"This module includes common utilities such as data readers and counter.\"\"\"\n\nfrom . import (batchify, candidate_sampler, conll, corpora, dataloader,\n dataset, question_answering, registry, sampler, sentiment,\n stream, transforms, translation, utils,\n word_embedding_evaluation, intent_slot)\nfrom .candidate_sampler import *\nfrom .conll import *\nfrom .glue import *\nfrom .corpora import *\nfrom .dataloader import *\nfrom .dataset import *\nfrom .question_answering import *\nfrom .registry import *\nfrom .sampler import *\nfrom .sentiment import *\nfrom .stream import *\nfrom .transforms import *\nfrom .translation import *\nfrom .utils import *\nfrom .word_embedding_evaluation import *\nfrom .intent_slot import *\n\n__all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__\n + dataset.__all__ + corpora.__all__ + sentiment.__all__ +\n word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ +\n translation.__all__ + registry.__all__ + question_answering.__all__\n + dataloader.__all__ + candidate_sampler.__all__)\n", "path": "src/gluonnlp/data/__init__.py"}]}
1170
348
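The documentation bug above is purely a re-export problem: Sphinx and `from package import *` both walk the package-level __all__, so a submodule left out of the concatenation never surfaces in the API docs even though its classes import fine. The shape of the fix, with throwaway stand-in modules:

import types

# Stand-ins for the glue and intent_slot submodules (illustrative only).
glue = types.ModuleType("glue")
glue.__all__ = ["GlueCoLA", "GlueSST2", "GlueQNLI"]
intent_slot = types.ModuleType("intent_slot")
intent_slot.__all__ = ["ATISDataset", "SNIPSDataset"]

# The package-level rule from the patch: every public submodule contributes
# its __all__, so star-imports and autodoc see the same set of names.
__all__ = glue.__all__ + intent_slot.__all__
print(__all__)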
gh_patches_debug_29639
rasdani/github-patches
git_diff
frappe__frappe-2519
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Move app installation to background Long installs timeout the installation of the app and leads to broken installs. </issue> <code> [start of frappe/desk/page/applications/applications.py] 1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors 2 # MIT License. See license.txt 3 4 from __future__ import unicode_literals 5 import frappe 6 import frappe.utils 7 import frappe.installer 8 import frappe.sessions 9 import subprocess 10 import os 11 import json 12 from frappe import _ 13 from distutils.spawn import find_executable 14 15 @frappe.whitelist() 16 def get_app_list(): 17 """Get list of all apps with properties, installed, category from hooks and 18 `frappe/data/app_listing/` if an entry exists""" 19 out = {} 20 installed = frappe.get_installed_apps() 21 for app in frappe.get_all_apps(True): 22 app_hooks = frappe.get_hooks(app_name=app) 23 24 if app not in installed and app_hooks.get('hide_in_installer'): 25 continue 26 27 out[app] = {} 28 for key in ("app_name", "app_title", "app_description", "app_icon", 29 "app_publisher", "app_version", "app_url", "app_color"): 30 val = app_hooks.get(key) or [] 31 out[app][key] = val[0] if len(val) else "" 32 33 if app in installed: 34 out[app]["installed"] = 1 35 36 for app_from_list in get_app_listing().values(): 37 if app_from_list.app_name in out: 38 out[app_from_list.app_name].update(app_from_list) 39 else: 40 if not frappe.conf.disallow_app_listing: 41 out[app_from_list.app_name] = app_from_list 42 43 return out 44 45 def get_app_listing(): 46 """Get apps listed in `frappe/data/app_listing/`""" 47 apps_listing_dir = os.path.join(os.path.dirname(frappe.__file__), 'data', 'app_listing') 48 out = {} 49 for app in os.listdir(apps_listing_dir): 50 if app.endswith(".json"): 51 with open(os.path.join(apps_listing_dir, app)) as f: 52 out[app[:-5]] = frappe._dict(json.load(f)) 53 return out 54 55 @frappe.whitelist() 56 def install_app(name): 57 """Install app, if app is not installed in local environment, install it via git url in 58 `frappe/data/app_listing/`""" 59 frappe.only_for("System Manager") 60 61 if name not in frappe.get_all_apps(True): 62 if not frappe.conf.disallow_app_listing: 63 get_app(name) 64 frappe.cache().delete_value(["app_hooks"]) 65 # reload sys.path 66 import site 67 reload(site) 68 else: 69 # will only come via direct API 70 frappe.throw("Listing app not allowed") 71 72 app_hooks = frappe.get_hooks(app_name=name) 73 if app_hooks.get('hide_in_installer'): 74 frappe.throw(_("You cannot install this app")) 75 76 frappe.publish_realtime("install_app_progress", {"status": _("Installing App {0}").format(name)}, 77 user=frappe.session.user) 78 79 frappe.installer.install_app(name) 80 81 frappe.publish_realtime("install_app_progress", {"status": _("{0} Installed").format(name)}, 82 user=frappe.session.user) 83 84 def get_app(name): 85 """Get app using git clone and install it in bench environment""" 86 app_listing = get_app_listing() 87 if name not in app_listing: 88 frappe.throw(_("Unknown app {0}").format(name)) 89 raise frappe.ValidationError 90 91 frappe.publish_realtime("install_app_progress", {"status": _("Downloading App {0}").format(name)}, 92 user=frappe.session.user) 93 94 args = [find_executable('bench'), 'get-app', name, app_listing[name]['repo_url']] 95 96 try: 97 subprocess.check_call(args, cwd=frappe.utils.get_bench_path(), 98 stderr=subprocess.STDOUT) 99 return "okay" 100 except 
subprocess.CalledProcessError as e: 101 frappe.msgprint("<b>" + " ".join(args) + "</b>") 102 frappe.msgprint(e.output) 103 return e.output 104 [end of frappe/desk/page/applications/applications.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/frappe/desk/page/applications/applications.py b/frappe/desk/page/applications/applications.py --- a/frappe/desk/page/applications/applications.py +++ b/frappe/desk/page/applications/applications.py @@ -11,6 +11,7 @@ import json from frappe import _ from distutils.spawn import find_executable +from frappe.utils.background_jobs import enqueue @frappe.whitelist() def get_app_list(): @@ -73,6 +74,12 @@ if app_hooks.get('hide_in_installer'): frappe.throw(_("You cannot install this app")) + enqueue('frappe.desk.page.applications.applications.start_install', name=name) + + frappe.msgprint(_('Queued for install')) + + +def start_install(name): frappe.publish_realtime("install_app_progress", {"status": _("Installing App {0}").format(name)}, user=frappe.session.user) @@ -81,6 +88,20 @@ frappe.publish_realtime("install_app_progress", {"status": _("{0} Installed").format(name)}, user=frappe.session.user) [email protected]() +def remove_app(name): + """Remove installed app""" + frappe.only_for("System Manager") + + if name in frappe.get_installed_apps(): + enqueue('frappe.desk.page.applications.applications.start_remove', name=name) + + frappe.msgprint(_('Queued for backup and removing {0}').format(frappe.bold(name))) + +def start_remove(name): + frappe.installer.remove_app(app_name=name, yes=True) + frappe.publish_realtime('msgprint', _('App {0} removed').format(frappe.bold(name))) + def get_app(name): """Get app using git clone and install it in bench environment""" app_listing = get_app_listing()
{"golden_diff": "diff --git a/frappe/desk/page/applications/applications.py b/frappe/desk/page/applications/applications.py\n--- a/frappe/desk/page/applications/applications.py\n+++ b/frappe/desk/page/applications/applications.py\n@@ -11,6 +11,7 @@\n import json\n from frappe import _\n from distutils.spawn import find_executable\n+from frappe.utils.background_jobs import enqueue\n \n @frappe.whitelist()\n def get_app_list():\n@@ -73,6 +74,12 @@\n \tif app_hooks.get('hide_in_installer'):\n \t\tfrappe.throw(_(\"You cannot install this app\"))\n \n+\tenqueue('frappe.desk.page.applications.applications.start_install', name=name)\n+\n+\tfrappe.msgprint(_('Queued for install'))\n+\n+\n+def start_install(name):\n \tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Installing App {0}\").format(name)},\n \t\tuser=frappe.session.user)\n \n@@ -81,6 +88,20 @@\n \tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"{0} Installed\").format(name)},\n \t\tuser=frappe.session.user)\n \[email protected]()\n+def remove_app(name):\n+\t\"\"\"Remove installed app\"\"\"\n+\tfrappe.only_for(\"System Manager\")\n+\n+\tif name in frappe.get_installed_apps():\n+\t\tenqueue('frappe.desk.page.applications.applications.start_remove', name=name)\n+\n+\tfrappe.msgprint(_('Queued for backup and removing {0}').format(frappe.bold(name)))\n+\n+def start_remove(name):\n+\tfrappe.installer.remove_app(app_name=name, yes=True)\n+\tfrappe.publish_realtime('msgprint', _('App {0} removed').format(frappe.bold(name)))\n+\n def get_app(name):\n \t\"\"\"Get app using git clone and install it in bench environment\"\"\"\n \tapp_listing = get_app_listing()\n", "issue": "Move app installation to background\nLong installs timeout the installation of the app and leads to broken installs.\n\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. 
See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport frappe.utils\nimport frappe.installer\nimport frappe.sessions\nimport subprocess\nimport os\nimport json\nfrom frappe import _\nfrom distutils.spawn import find_executable\n\[email protected]()\ndef get_app_list():\n\t\"\"\"Get list of all apps with properties, installed, category from hooks and\n\t`frappe/data/app_listing/` if an entry exists\"\"\"\n\tout = {}\n\tinstalled = frappe.get_installed_apps()\n\tfor app in frappe.get_all_apps(True):\n\t\tapp_hooks = frappe.get_hooks(app_name=app)\n\n\t\tif app not in installed and app_hooks.get('hide_in_installer'):\n\t\t\tcontinue\n\n\t\tout[app] = {}\n\t\tfor key in (\"app_name\", \"app_title\", \"app_description\", \"app_icon\",\n\t\t\t\"app_publisher\", \"app_version\", \"app_url\", \"app_color\"):\n\t\t\t val = app_hooks.get(key) or []\n\t\t\t out[app][key] = val[0] if len(val) else \"\"\n\n\t\tif app in installed:\n\t\t\tout[app][\"installed\"] = 1\n\n\tfor app_from_list in get_app_listing().values():\n\t\tif app_from_list.app_name in out:\n\t\t\tout[app_from_list.app_name].update(app_from_list)\n\t\telse:\n\t\t\tif not frappe.conf.disallow_app_listing:\n\t\t\t\tout[app_from_list.app_name] = app_from_list\n\n\treturn out\n\ndef get_app_listing():\n\t\"\"\"Get apps listed in `frappe/data/app_listing/`\"\"\"\n\tapps_listing_dir = os.path.join(os.path.dirname(frappe.__file__), 'data', 'app_listing')\n\tout = {}\n\tfor app in os.listdir(apps_listing_dir):\n\t\tif app.endswith(\".json\"):\n\t\t\twith open(os.path.join(apps_listing_dir, app)) as f:\n\t\t\t\tout[app[:-5]] = frappe._dict(json.load(f))\n\treturn out\n\[email protected]()\ndef install_app(name):\n\t\"\"\"Install app, if app is not installed in local environment, install it via git url in\n\t`frappe/data/app_listing/`\"\"\"\n\tfrappe.only_for(\"System Manager\")\n\n\tif name not in frappe.get_all_apps(True):\n\t\tif not frappe.conf.disallow_app_listing:\n\t\t\tget_app(name)\n\t\t\tfrappe.cache().delete_value([\"app_hooks\"])\n\t\t\t# reload sys.path\n\t\t\timport site\n\t\t\treload(site)\n\t\telse:\n\t\t\t# will only come via direct API\n\t\t\tfrappe.throw(\"Listing app not allowed\")\n\n\tapp_hooks = frappe.get_hooks(app_name=name)\n\tif app_hooks.get('hide_in_installer'):\n\t\tfrappe.throw(_(\"You cannot install this app\"))\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Installing App {0}\").format(name)},\n\t\tuser=frappe.session.user)\n\n\tfrappe.installer.install_app(name)\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"{0} Installed\").format(name)},\n\t\tuser=frappe.session.user)\n\ndef get_app(name):\n\t\"\"\"Get app using git clone and install it in bench environment\"\"\"\n\tapp_listing = get_app_listing()\n\tif name not in app_listing:\n\t\tfrappe.throw(_(\"Unknown app {0}\").format(name))\n\t\traise frappe.ValidationError\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Downloading App {0}\").format(name)},\n\t\tuser=frappe.session.user)\n\n\targs = [find_executable('bench'), 'get-app', name, app_listing[name]['repo_url']]\n\n\ttry:\n\t\tsubprocess.check_call(args, cwd=frappe.utils.get_bench_path(),\n\t\t\tstderr=subprocess.STDOUT)\n\t\treturn \"okay\"\n\texcept subprocess.CalledProcessError as e:\n\t\tfrappe.msgprint(\"<b>\" + \" \".join(args) + \"</b>\")\n\t\tfrappe.msgprint(e.output)\n\t\treturn e.output\n", "path": "frappe/desk/page/applications/applications.py"}]}
1663
422
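The patch above follows the usual long-task recipe: the whitelisted handler validates, enqueues, and returns within the request timeout while a worker performs the slow install (frappe's enqueue is RQ-backed; everything below is a generic stand-in, not frappe code):

import queue
import threading

jobs = queue.Queue()

def worker():
    while True:
        func, kwargs = jobs.get()
        func(**kwargs)            # the long-running work happens off-request
        jobs.task_done()

threading.Thread(target=worker, daemon=True).start()

def start_install(name):
    # Stand-in for frappe.installer.install_app plus progress publishing.
    print("installing %s ... done" % name)

def install_app(name):
    # Request handler: queue the job and return immediately, so the HTTP
    # response goes out long before the install finishes.
    jobs.put((start_install, {"name": name}))
    return "Queued for install"

print(install_app("some_app"))
jobs.join()                       # demo only: let the worker finish printing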
gh_patches_debug_1748
rasdani/github-patches
git_diff
MycroftAI__mycroft-core-750
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Typing error in recognize_google() methode In mycroft/stt/\_\_init\_\_.py line 74 : Replacing mistyped 's' parameter by self.lang fixed the problem. </issue> <code> [start of mycroft/stt/__init__.py] 1 # Copyright 2016 Mycroft AI, Inc. 2 # 3 # This file is part of Mycroft Core. 4 # 5 # Mycroft Core is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # Mycroft Core is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>. 17 from abc import ABCMeta, abstractmethod 18 19 from speech_recognition import Recognizer 20 21 from mycroft.api import STTApi 22 from mycroft.configuration import ConfigurationManager 23 from mycroft.util.log import getLogger 24 25 __author__ = "jdorleans" 26 27 LOG = getLogger("STT") 28 29 30 class STT(object): 31 __metaclass__ = ABCMeta 32 33 def __init__(self): 34 config_core = ConfigurationManager.get() 35 self.lang = str(self.init_language(config_core)) 36 config_stt = config_core.get("stt", {}) 37 self.config = config_stt.get(config_stt.get("module"), {}) 38 self.credential = self.config.get("credential", {}) 39 self.recognizer = Recognizer() 40 41 @staticmethod 42 def init_language(config_core): 43 langs = config_core.get("lang", "en-US").split("-") 44 return langs[0].lower() + "-" + langs[1].upper() 45 46 @abstractmethod 47 def execute(self, audio, language=None): 48 pass 49 50 51 class TokenSTT(STT): 52 __metaclass__ = ABCMeta 53 54 def __init__(self): 55 super(TokenSTT, self).__init__() 56 self.token = str(self.credential.get("token")) 57 58 59 class BasicSTT(STT): 60 __metaclass__ = ABCMeta 61 62 def __init__(self): 63 super(BasicSTT, self).__init__() 64 self.username = str(self.credential.get("username")) 65 self.password = str(self.credential.get("password")) 66 67 68 class GoogleSTT(TokenSTT): 69 def __init__(self): 70 super(GoogleSTT, self).__init__() 71 72 def execute(self, audio, language=None): 73 self.lang = language or self.lang 74 return self.recognizer.recognize_google(audio, self.token, s) 75 76 77 class WITSTT(TokenSTT): 78 def __init__(self): 79 super(WITSTT, self).__init__() 80 81 def execute(self, audio, language=None): 82 LOG.warn("WITSTT language should be configured at wit.ai settings.") 83 return self.recognizer.recognize_wit(audio, self.token) 84 85 86 class IBMSTT(BasicSTT): 87 def __init__(self): 88 super(IBMSTT, self).__init__() 89 90 def execute(self, audio, language=None): 91 self.lang = language or self.lang 92 return self.recognizer.recognize_ibm(audio, self.username, 93 self.password, self.lang) 94 95 96 class MycroftSTT(STT): 97 def __init__(self): 98 super(MycroftSTT, self).__init__() 99 self.api = STTApi() 100 101 def execute(self, audio, language=None): 102 self.lang = language or self.lang 103 return self.api.stt(audio.get_flac_data(), self.lang, 1)[0] 104 105 106 class STTFactory(object): 107 CLASSES = { 108 "mycroft": MycroftSTT, 109 "google": GoogleSTT, 110 "wit": WITSTT, 111 "ibm": IBMSTT 112 } 113 114 
@staticmethod 115 def create(): 116 config = ConfigurationManager.get().get("stt", {}) 117 module = config.get("module", "mycroft") 118 clazz = STTFactory.CLASSES.get(module) 119 return clazz() 120 [end of mycroft/stt/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mycroft/stt/__init__.py b/mycroft/stt/__init__.py --- a/mycroft/stt/__init__.py +++ b/mycroft/stt/__init__.py @@ -71,7 +71,7 @@ def execute(self, audio, language=None): self.lang = language or self.lang - return self.recognizer.recognize_google(audio, self.token, s) + return self.recognizer.recognize_google(audio, self.token, self.lang) class WITSTT(TokenSTT):
{"golden_diff": "diff --git a/mycroft/stt/__init__.py b/mycroft/stt/__init__.py\n--- a/mycroft/stt/__init__.py\n+++ b/mycroft/stt/__init__.py\n@@ -71,7 +71,7 @@\n \n def execute(self, audio, language=None):\n self.lang = language or self.lang\n- return self.recognizer.recognize_google(audio, self.token, s)\n+ return self.recognizer.recognize_google(audio, self.token, self.lang)\n \n \n class WITSTT(TokenSTT):\n", "issue": "Typing error in recognize_google() methode\nIn mycroft/stt/\\_\\_init\\_\\_.py line 74 :\r\nReplacing mistyped 's' parameter by self.lang fixed the problem.\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\nfrom abc import ABCMeta, abstractmethod\n\nfrom speech_recognition import Recognizer\n\nfrom mycroft.api import STTApi\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.util.log import getLogger\n\n__author__ = \"jdorleans\"\n\nLOG = getLogger(\"STT\")\n\n\nclass STT(object):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n config_core = ConfigurationManager.get()\n self.lang = str(self.init_language(config_core))\n config_stt = config_core.get(\"stt\", {})\n self.config = config_stt.get(config_stt.get(\"module\"), {})\n self.credential = self.config.get(\"credential\", {})\n self.recognizer = Recognizer()\n\n @staticmethod\n def init_language(config_core):\n langs = config_core.get(\"lang\", \"en-US\").split(\"-\")\n return langs[0].lower() + \"-\" + langs[1].upper()\n\n @abstractmethod\n def execute(self, audio, language=None):\n pass\n\n\nclass TokenSTT(STT):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super(TokenSTT, self).__init__()\n self.token = str(self.credential.get(\"token\"))\n\n\nclass BasicSTT(STT):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super(BasicSTT, self).__init__()\n self.username = str(self.credential.get(\"username\"))\n self.password = str(self.credential.get(\"password\"))\n\n\nclass GoogleSTT(TokenSTT):\n def __init__(self):\n super(GoogleSTT, self).__init__()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return self.recognizer.recognize_google(audio, self.token, s)\n\n\nclass WITSTT(TokenSTT):\n def __init__(self):\n super(WITSTT, self).__init__()\n\n def execute(self, audio, language=None):\n LOG.warn(\"WITSTT language should be configured at wit.ai settings.\")\n return self.recognizer.recognize_wit(audio, self.token)\n\n\nclass IBMSTT(BasicSTT):\n def __init__(self):\n super(IBMSTT, self).__init__()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return self.recognizer.recognize_ibm(audio, self.username,\n self.password, self.lang)\n\n\nclass MycroftSTT(STT):\n def __init__(self):\n super(MycroftSTT, self).__init__()\n self.api = STTApi()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return 
self.api.stt(audio.get_flac_data(), self.lang, 1)[0]\n\n\nclass STTFactory(object):\n CLASSES = {\n \"mycroft\": MycroftSTT,\n \"google\": GoogleSTT,\n \"wit\": WITSTT,\n \"ibm\": IBMSTT\n }\n\n @staticmethod\n def create():\n config = ConfigurationManager.get().get(\"stt\", {})\n module = config.get(\"module\", \"mycroft\")\n clazz = STTFactory.CLASSES.get(module)\n return clazz()\n", "path": "mycroft/stt/__init__.py"}]}
1,716
124
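A minimal runnable sketch of the corrected call in the record above. The `RecognizerStub` class is a hypothetical stand-in for `speech_recognition.Recognizer` (so the example runs without that dependency); per the golden diff, the real `recognize_google` takes the API key and language in the same positions.

```python
class RecognizerStub:
    """Hypothetical stand-in for speech_recognition.Recognizer."""
    def recognize_google(self, audio, key=None, language="en-US"):
        # The real method posts `audio` to Google's STT API; this stub
        # just echoes its inputs so the call pattern can be exercised.
        return "decoded %r (key=%r, lang=%r)" % (audio, key, language)

class GoogleSTTFixed:
    def __init__(self, token="dummy-token", lang="en-US"):
        self.token = token          # credential, as in TokenSTT
        self.lang = lang            # configured language, as in STT
        self.recognizer = RecognizerStub()

    def execute(self, audio, language=None):
        self.lang = language or self.lang
        # Fixed line: `self.lang` replaces the undefined name `s`,
        # which raised NameError on the first call.
        return self.recognizer.recognize_google(audio, self.token, self.lang)

print(GoogleSTTFixed().execute(b"raw-audio", language="fr-FR"))
```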
gh_patches_debug_18654
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-430
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Move dev dependencies from setup.py to Pipfile The dev extra dependencies in setup.py are not required anymore, because we don't install it with setuptools for local development. Instead, we could move all dependencies in the `extras_require`-section to the Pipfile, which would have the advantage to be able to install new dev dependencies with `pipenv install <new-dependency>`. Move dev dependencies from setup.py to Pipfile The dev extra dependencies in setup.py are not required anymore, because we don't install it with setuptools for local development. Instead, we could move all dependencies in the `extras_require`-section to the Pipfile, which would have the advantage to be able to install new dev dependencies with `pipenv install <new-dependency>`. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 """ Setup.py """ 3 4 import os 5 import sys 6 7 from setuptools import find_packages, setup 8 9 # Add source directory to PATH variable to enable import of version number 10 sys.path.append(os.path.abspath('src')) 11 # pylint: disable=wrong-import-position 12 from backend.settings import VERSION 13 14 setup( 15 name='integreat_cms', 16 version=VERSION, 17 packages=find_packages('src'), 18 package_dir={'': 'src'}, 19 include_package_data=True, 20 scripts=['src/integreat-cms-cli'], 21 data_files=[ 22 (f'lib/integreat-{root}', [os.path.join(root, f) for f in files]) 23 for root, _, files in os.walk('src/cms/templates/') 24 ] + [ 25 (f'lib/integreat-{root}', [os.path.join(root, f) for f in files]) 26 for root, _, files in os.walk('src/cms/static/') 27 ] + [ 28 ('usr/lib/systemd/system/', ['systemd/[email protected]']) 29 ], 30 install_requires=[ 31 'cffi', 32 'Django~=2.2.13', 33 'django-cors-headers', 34 'django-filer', 35 'django-mptt', 36 'django-widget-tweaks', 37 'idna', 38 'lxml', 39 'psycopg2-binary', 40 'python-dateutil', 41 'requests', 42 'rules', 43 'six', 44 'webauthn', 45 ], 46 extras_require={ 47 'dev': [ 48 'django-compressor', 49 'django-compressor-toolkit', 50 'packaging', 51 'pylint', 52 'pylint-django', 53 'pylint_runner', 54 'sphinx', 55 'sphinxcontrib-django', 56 'sphinx_rtd_theme', 57 'coverage', 58 'django_coverage_plugin', 59 ] 60 }, 61 author='Integreat App Project', 62 author_email='[email protected]', 63 description='Content Management System for the Integreat App', 64 license='GPL-2.0-or-later', 65 keywords='Django Integreat CMS', 66 url='http://github.com/Integreat/', 67 classifiers=[ 68 'Development Status :: 5 - Production/Stable', 69 'Intended Audience :: Developers', 70 'Programming Language :: Python :: 3.7', 71 ] 72 ) 73 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ from backend.settings import VERSION setup( - name='integreat_cms', + name='integreat-cms', version=VERSION, packages=find_packages('src'), package_dir={'': 'src'}, @@ -43,21 +43,6 @@ 'six', 'webauthn', ], - extras_require={ - 'dev': [ - 'django-compressor', - 'django-compressor-toolkit', - 'packaging', - 'pylint', - 'pylint-django', - 'pylint_runner', - 'sphinx', - 'sphinxcontrib-django', - 'sphinx_rtd_theme', - 'coverage', - 'django_coverage_plugin', - ] - }, author='Integreat App Project', author_email='[email protected]', description='Content Management System for the Integreat App',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n from backend.settings import VERSION\n \n setup(\n- name='integreat_cms',\n+ name='integreat-cms',\n version=VERSION,\n packages=find_packages('src'),\n package_dir={'': 'src'},\n@@ -43,21 +43,6 @@\n 'six',\n 'webauthn',\n ],\n- extras_require={\n- 'dev': [\n- 'django-compressor',\n- 'django-compressor-toolkit',\n- 'packaging',\n- 'pylint',\n- 'pylint-django',\n- 'pylint_runner',\n- 'sphinx',\n- 'sphinxcontrib-django',\n- 'sphinx_rtd_theme',\n- 'coverage',\n- 'django_coverage_plugin',\n- ]\n- },\n author='Integreat App Project',\n author_email='[email protected]',\n description='Content Management System for the Integreat App',\n", "issue": "Move dev dependencies from setup.py to Pipfile\nThe dev extra dependencies in setup.py are not required anymore, because we don't install it with setuptools for local development. Instead, we could move all dependencies in the `extras_require`-section to the Pipfile, which would have the advantage to be able to install new dev dependencies with `pipenv install <new-dependency>`.\nMove dev dependencies from setup.py to Pipfile\nThe dev extra dependencies in setup.py are not required anymore, because we don't install it with setuptools for local development. Instead, we could move all dependencies in the `extras_require`-section to the Pipfile, which would have the advantage to be able to install new dev dependencies with `pipenv install <new-dependency>`.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\" Setup.py \"\"\"\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\n# Add source directory to PATH variable to enable import of version number\nsys.path.append(os.path.abspath('src'))\n# pylint: disable=wrong-import-position\nfrom backend.settings import VERSION\n\nsetup(\n name='integreat_cms',\n version=VERSION,\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n scripts=['src/integreat-cms-cli'],\n data_files=[\n (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])\n for root, _, files in os.walk('src/cms/templates/')\n ] + [\n (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])\n for root, _, files in os.walk('src/cms/static/')\n ] + [\n ('usr/lib/systemd/system/', ['systemd/[email protected]'])\n ],\n install_requires=[\n 'cffi',\n 'Django~=2.2.13',\n 'django-cors-headers',\n 'django-filer',\n 'django-mptt',\n 'django-widget-tweaks',\n 'idna',\n 'lxml',\n 'psycopg2-binary',\n 'python-dateutil',\n 'requests',\n 'rules',\n 'six',\n 'webauthn',\n ],\n extras_require={\n 'dev': [\n 'django-compressor',\n 'django-compressor-toolkit',\n 'packaging',\n 'pylint',\n 'pylint-django',\n 'pylint_runner',\n 'sphinx',\n 'sphinxcontrib-django',\n 'sphinx_rtd_theme',\n 'coverage',\n 'django_coverage_plugin',\n ]\n },\n author='Integreat App Project',\n author_email='[email protected]',\n description='Content Management System for the Integreat App',\n license='GPL-2.0-or-later',\n keywords='Django Integreat CMS',\n url='http://github.com/Integreat/',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.7',\n ]\n)\n", "path": "setup.py"}]}
1,329
236
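A sketch of the other half of the migration the diff above implies: the removed `extras_require["dev"]` list reappearing as a Pipfile `[dev-packages]` section. The helper below is hypothetical (the project would hand-edit its Pipfile, and exact version pins may differ), but it shows the one-to-one mapping:

```python
# Hypothetical one-off helper: print a Pipfile [dev-packages] section
# equivalent to the extras_require["dev"] list removed by the diff.
DEV_DEPS = [
    "django-compressor", "django-compressor-toolkit", "packaging",
    "pylint", "pylint-django", "pylint_runner", "sphinx",
    "sphinxcontrib-django", "sphinx_rtd_theme", "coverage",
    "django_coverage_plugin",
]

print("[dev-packages]")
for name in DEV_DEPS:
    print('%s = "*"' % name)   # "*" = unpinned, Pipenv's default
```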
gh_patches_debug_9950
rasdani/github-patches
git_diff
ManimCommunity__manim-684
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add (opengraph) metadata to documentation Previews to links to the documentation are currently not available due to missing opengraph metadata. Also, a description meta tag should be added. </issue> <code> [start of docs/source/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 # -- Path setup -------------------------------------------------------------- 8 9 # If extensions (or modules to document with autodoc) are in another directory, 10 # add these directories to sys.path here. If the directory is relative to the 11 # documentation root, use os.path.abspath to make it absolute, like shown here. 12 13 import os 14 import subprocess 15 import sys 16 from distutils.sysconfig import get_python_lib 17 from pathlib import Path 18 19 sys.path.insert(0, os.path.abspath(".")) 20 21 22 if os.environ.get("READTHEDOCS") == "True": 23 site_path = get_python_lib() 24 # bindings for pangocffi, cairocffi, pangocairocffi need to be generated 25 subprocess.run(["python", "pangocffi/ffi_build.py"], cwd=site_path) 26 subprocess.run(["python", "cairocffi/ffi_build.py"], cwd=site_path) 27 subprocess.run(["python", "pangocairocffi/ffi_build.py"], cwd=site_path) 28 # we need to add ffmpeg to the path 29 ffmpeg_path = os.path.join(site_path, "imageio_ffmpeg", "binaries") 30 # the included binary is named ffmpeg-linux..., create a symlink 31 [ffmpeg_bin] = [ 32 file for file in os.listdir(ffmpeg_path) if file.startswith("ffmpeg-") 33 ] 34 os.symlink( 35 os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, "ffmpeg") 36 ) 37 os.environ["PATH"] += os.pathsep + ffmpeg_path 38 39 40 # -- Project information ----------------------------------------------------- 41 42 project = "Manim" 43 copyright = "2020, The Manim Community Dev Team" 44 author = "The Manim Community Dev Team" 45 46 47 # -- General configuration --------------------------------------------------- 48 49 # Add any Sphinx extension module names here, as strings. They can be 50 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 51 # ones. 52 extensions = [ 53 "sphinx.ext.autodoc", 54 "recommonmark", 55 "sphinx_copybutton", 56 "sphinx.ext.napoleon", 57 "sphinx.ext.autosummary", 58 "sphinx.ext.doctest", 59 "manim_directive", 60 ] 61 62 # Automatically generate stub pages when using the .. autosummary directive 63 autosummary_generate = True 64 65 # controls whether functions documented by the autofunction directive 66 # appear with their full module names 67 add_module_names = False 68 69 # Add any paths that contain templates here, relative to this directory. 70 templates_path = ["_templates"] 71 72 # List of patterns, relative to source directory, that match files and 73 # directories to ignore when looking for source files. 74 # This pattern also affects html_static_path and html_extra_path. 75 exclude_patterns = [] 76 77 78 # -- Options for HTML output ------------------------------------------------- 79 80 # The theme to use for HTML and HTML Help pages. See the documentation for 81 # a list of builtin themes. 
82 # 83 import guzzle_sphinx_theme 84 85 html_theme_path = guzzle_sphinx_theme.html_theme_path() 86 html_theme = "guzzle_sphinx_theme" 87 html_favicon = str(Path("_static/favicon.ico")) 88 89 # There's a standing issue with Sphinx's new-style sidebars. This is a 90 # workaround. Taken from 91 # https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826 92 html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]} 93 94 # Register the theme as an extension to generate a sitemap.xml 95 extensions.append("guzzle_sphinx_theme") 96 97 # Add any paths that contain custom static files (such as style sheets) here, 98 # relative to this directory. They are copied after the builtin static files, 99 # so a file named "default.css" will overwrite the builtin "default.css". 100 html_static_path = ["_static"] 101 102 # This specifies any additional css files that will override the theme's 103 html_css_files = ["custom.css"] 104 [end of docs/source/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -56,6 +56,7 @@ "sphinx.ext.napoleon", "sphinx.ext.autosummary", "sphinx.ext.doctest", + "sphinxext.opengraph", "manim_directive", ] @@ -101,3 +102,8 @@ # This specifies any additional css files that will override the theme's html_css_files = ["custom.css"] + +# opengraph settings +ogp_image = "https://www.manim.community/logo.png" +ogp_site_name = "Manim Community | Documentation" +ogp_site_url = "https://docs.manim.community/"
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -56,6 +56,7 @@\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n+ \"sphinxext.opengraph\",\n \"manim_directive\",\n ]\n \n@@ -101,3 +102,8 @@\n \n # This specifies any additional css files that will override the theme's\n html_css_files = [\"custom.css\"]\n+\n+# opengraph settings\n+ogp_image = \"https://www.manim.community/logo.png\"\n+ogp_site_name = \"Manim Community | Documentation\"\n+ogp_site_url = \"https://docs.manim.community/\"\n", "issue": "Add (opengraph) metadata to documentation\nPreviews to links to the documentation are currently not available due to missing opengraph metadata.\r\n\r\nAlso, a description meta tag should be added.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport subprocess\nimport sys\nfrom distutils.sysconfig import get_python_lib\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\nif os.environ.get(\"READTHEDOCS\") == \"True\":\n site_path = get_python_lib()\n # bindings for pangocffi, cairocffi, pangocairocffi need to be generated\n subprocess.run([\"python\", \"pangocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"cairocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"pangocairocffi/ffi_build.py\"], cwd=site_path)\n # we need to add ffmpeg to the path\n ffmpeg_path = os.path.join(site_path, \"imageio_ffmpeg\", \"binaries\")\n # the included binary is named ffmpeg-linux..., create a symlink\n [ffmpeg_bin] = [\n file for file in os.listdir(ffmpeg_path) if file.startswith(\"ffmpeg-\")\n ]\n os.symlink(\n os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, \"ffmpeg\")\n )\n os.environ[\"PATH\"] += os.pathsep + ffmpeg_path\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"recommonmark\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"manim_directive\",\n]\n\n# Automatically generate stub pages when using the .. 
autosummary directive\nautosummary_generate = True\n\n# controls whether functions documented by the autofunction directive\n# appear with their full module names\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# There's a standing issue with Sphinx's new-style sidebars. This is a\n# workaround. Taken from\n# https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826\nhtml_sidebars = {\"**\": [\"logo-text.html\", \"globaltoc.html\", \"searchbox.html\"]}\n\n# Register the theme as an extension to generate a sitemap.xml\nextensions.append(\"guzzle_sphinx_theme\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n", "path": "docs/source/conf.py"}]}
1,671
172
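Since a Sphinx `conf.py` is plain Python, the fix above can be read as the fragment below; the extension and `ogp_*` option names come straight from the golden diff, and the `sphinxext-opengraph` package is assumed to be installed alongside the docs requirements.

```python
# conf.py fragment: enable Open Graph metadata for link previews.
extensions = [
    "sphinx.ext.autodoc",
    "sphinxext.opengraph",  # renders <meta property="og:..."> tags
]

ogp_site_url = "https://docs.manim.community/"         # canonical base URL
ogp_site_name = "Manim Community | Documentation"      # og:site_name
ogp_image = "https://www.manim.community/logo.png"     # og:image preview
```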
gh_patches_debug_11488
rasdani/github-patches
git_diff
pytorch__vision-355
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> utils.save_image fails when passing list of images utils.save_image fails when passing in a list of images, as the code tries to call .cpu on the list. Passing in a list should be possible according to the function's documentation. </issue> <code> [start of torchvision/utils.py] 1 import torch 2 import math 3 irange = range 4 5 6 def make_grid(tensor, nrow=8, padding=2, 7 normalize=False, range=None, scale_each=False, pad_value=0): 8 """Make a grid of images. 9 10 Args: 11 tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W) 12 or a list of images all of the same size. 13 nrow (int, optional): Number of images displayed in each row of the grid. 14 The Final grid size is (B / nrow, nrow). Default is 8. 15 padding (int, optional): amount of padding. Default is 2. 16 normalize (bool, optional): If True, shift the image to the range (0, 1), 17 by subtracting the minimum and dividing by the maximum pixel value. 18 range (tuple, optional): tuple (min, max) where min and max are numbers, 19 then these numbers are used to normalize the image. By default, min and max 20 are computed from the tensor. 21 scale_each (bool, optional): If True, scale each image in the batch of 22 images separately rather than the (min, max) over all images. 23 pad_value (float, optional): Value for the padded pixels. 24 25 Example: 26 See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_ 27 28 """ 29 if not (torch.is_tensor(tensor) or 30 (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): 31 raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor))) 32 33 # if list of tensors, convert to a 4D mini-batch Tensor 34 if isinstance(tensor, list): 35 tensor = torch.stack(tensor, dim=0) 36 37 if tensor.dim() == 2: # single image H x W 38 tensor = tensor.view(1, tensor.size(0), tensor.size(1)) 39 if tensor.dim() == 3: # single image 40 if tensor.size(0) == 1: # if single-channel, convert to 3-channel 41 tensor = torch.cat((tensor, tensor, tensor), 0) 42 return tensor 43 if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images 44 tensor = torch.cat((tensor, tensor, tensor), 1) 45 46 if normalize is True: 47 tensor = tensor.clone() # avoid modifying tensor in-place 48 if range is not None: 49 assert isinstance(range, tuple), \ 50 "range has to be a tuple (min, max) if specified. 
min and max are numbers" 51 52 def norm_ip(img, min, max): 53 img.clamp_(min=min, max=max) 54 img.add_(-min).div_(max - min) 55 56 def norm_range(t, range): 57 if range is not None: 58 norm_ip(t, range[0], range[1]) 59 else: 60 norm_ip(t, t.min(), t.max()) 61 62 if scale_each is True: 63 for t in tensor: # loop over mini-batch dimension 64 norm_range(t, range) 65 else: 66 norm_range(tensor, range) 67 68 # make the mini-batch of images into a grid 69 nmaps = tensor.size(0) 70 xmaps = min(nrow, nmaps) 71 ymaps = int(math.ceil(float(nmaps) / xmaps)) 72 height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding) 73 grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value) 74 k = 0 75 for y in irange(ymaps): 76 for x in irange(xmaps): 77 if k >= nmaps: 78 break 79 grid.narrow(1, y * height + padding, height - padding)\ 80 .narrow(2, x * width + padding, width - padding)\ 81 .copy_(tensor[k]) 82 k = k + 1 83 return grid 84 85 86 def save_image(tensor, filename, nrow=8, padding=2, 87 normalize=False, range=None, scale_each=False, pad_value=0): 88 """Save a given Tensor into an image file. 89 90 Args: 91 tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, 92 saves the tensor as a grid of images by calling ``make_grid``. 93 **kwargs: Other arguments are documented in ``make_grid``. 94 """ 95 from PIL import Image 96 tensor = tensor.cpu() 97 grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value, 98 normalize=normalize, range=range, scale_each=scale_each) 99 ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy() 100 im = Image.fromarray(ndarr) 101 im.save(filename) 102 [end of torchvision/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/utils.py b/torchvision/utils.py --- a/torchvision/utils.py +++ b/torchvision/utils.py @@ -93,9 +93,8 @@ **kwargs: Other arguments are documented in ``make_grid``. """ from PIL import Image - tensor = tensor.cpu() grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value, normalize=normalize, range=range, scale_each=scale_each) - ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy() + ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy() im = Image.fromarray(ndarr) im.save(filename)
{"golden_diff": "diff --git a/torchvision/utils.py b/torchvision/utils.py\n--- a/torchvision/utils.py\n+++ b/torchvision/utils.py\n@@ -93,9 +93,8 @@\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n- tensor = tensor.cpu()\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n- ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()\n+ ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "issue": "utils.save_image fails when passing list of images\nutils.save_image fails when passing in a list of images, as the code tries to call .cpu on the list. \r\nPassing in a list should be possible according to the function's documentation.\n", "before_files": [{"content": "import torch\nimport math\nirange = range\n\n\ndef make_grid(tensor, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The Final grid size is (B / nrow, nrow). Default is 8.\n padding (int, optional): amount of padding. Default is 2.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by subtracting the minimum and dividing by the maximum pixel value.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If True, scale each image in the batch of\n images separately rather than the (min, max) over all images.\n pad_value (float, optional): Value for the padded pixels.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.view(1, tensor.size(0), tensor.size(1))\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n return tensor\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. 
min and max are numbers\"\n\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min)\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, t.min(), t.max())\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(tensor, filename, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n tensor = tensor.cpu()\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "path": "torchvision/utils.py"}]}
1,889
199
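A small before/after sketch of the failure mode in the record above, assuming `torch` is available. The pre-fix code called `.cpu()` on the raw input, which a plain Python list does not have; the patched flow builds the grid first (the record's `make_grid` stacks list inputs itself), so `.cpu()` runs on a tensor.

```python
import torch

imgs = [torch.rand(3, 8, 8) for _ in range(4)]  # list of same-sized images

# Pre-fix behaviour: save_image began with `tensor = tensor.cpu()`.
try:
    imgs.cpu()
except AttributeError as exc:
    print("pre-fix failure:", exc)  # lists have no .cpu()

# Post-fix flow: stack first (what make_grid does for list inputs),
# then move the finished grid to CPU for conversion to a numpy array.
grid = torch.stack(imgs, dim=0)
ndarr = grid.mul(255).clamp(0, 255).byte().cpu().numpy()
print("post-fix array shape:", ndarr.shape)
```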
gh_patches_debug_35355
rasdani/github-patches
git_diff
scikit-image__scikit-image-2134
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `min_size` is not strictly conformed in the implementation of felzenszwalb ## Description With `min_size` specified, there're still some segments with sizes that less than it. I don't know if it is an inherent flaw of the algorithm. ## Way to reproduce ``` >>> I = skimage.io.imread('dragonbaby.jpg') >>> fz = felzenszwalb(I, scale=300, sigma=0.8, min_size=80) >>> (fz==9).sum() 1 ``` ![dragonbaby](https://cloud.githubusercontent.com/assets/7010007/14789864/39cd4a56-0ad4-11e6-88a4-235ebdd1e2fd.jpg) </issue> <code> [start of skimage/segmentation/_felzenszwalb.py] 1 import numpy as np 2 3 from .._shared.utils import warn 4 from ._felzenszwalb_cy import _felzenszwalb_grey 5 6 7 def felzenszwalb(image, scale=1, sigma=0.8, min_size=20): 8 """Computes Felsenszwalb's efficient graph based image segmentation. 9 10 Produces an oversegmentation of a multichannel (i.e. RGB) image 11 using a fast, minimum spanning tree based clustering on the image grid. 12 The parameter ``scale`` sets an observation level. Higher scale means 13 less and larger segments. ``sigma`` is the diameter of a Gaussian kernel, 14 used for smoothing the image prior to segmentation. 15 16 The number of produced segments as well as their size can only be 17 controlled indirectly through ``scale``. Segment size within an image can 18 vary greatly depending on local contrast. 19 20 For RGB images, the algorithm computes a separate segmentation for each 21 channel and then combines these. The combined segmentation is the 22 intersection of the separate segmentations on the color channels. 23 24 Parameters 25 ---------- 26 image : (width, height, 3) or (width, height) ndarray 27 Input image. 28 scale : float 29 Free parameter. Higher means larger clusters. 30 sigma : float 31 Width of Gaussian kernel used in preprocessing. 32 min_size : int 33 Minimum component size. Enforced using postprocessing. 34 35 Returns 36 ------- 37 segment_mask : (width, height) ndarray 38 Integer mask indicating segment labels. 39 40 References 41 ---------- 42 .. [1] Efficient graph-based image segmentation, Felzenszwalb, P.F. and 43 Huttenlocher, D.P. International Journal of Computer Vision, 2004 44 45 Examples 46 -------- 47 >>> from skimage.segmentation import felzenszwalb 48 >>> from skimage.data import coffee 49 >>> img = coffee() 50 >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5) 51 """ 52 53 if image.ndim == 2: 54 # assume single channel image 55 return _felzenszwalb_grey(image, scale=scale, sigma=sigma, 56 min_size=min_size) 57 58 elif image.ndim != 3: 59 raise ValueError("Felzenswalb segmentation can only operate on RGB and" 60 " grey images, but input array of ndim %d given." 61 % image.ndim) 62 63 # assume we got 2d image with multiple channels 64 n_channels = image.shape[2] 65 if n_channels != 3: 66 warn("Got image with %d channels. Is that really what you" 67 " wanted?" 
% image.shape[2]) 68 segmentations = [] 69 # compute quickshift for each channel 70 for c in range(n_channels): 71 channel = np.ascontiguousarray(image[:, :, c]) 72 s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma, 73 min_size=min_size) 74 segmentations.append(s) 75 76 # put pixels in same segment only if in the same segment in all images 77 # we do this by combining the channels to one number 78 n0 = segmentations[0].max() + 1 79 n1 = segmentations[1].max() + 1 80 segmentation = (segmentations[0] + segmentations[1] * n0 81 + segmentations[2] * n0 * n1) 82 # make segment labels consecutive numbers starting at 0 83 labels = np.unique(segmentation, return_inverse=True)[1] 84 return labels.reshape(image.shape[:2]) 85 [end of skimage/segmentation/_felzenszwalb.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/segmentation/_felzenszwalb.py b/skimage/segmentation/_felzenszwalb.py --- a/skimage/segmentation/_felzenszwalb.py +++ b/skimage/segmentation/_felzenszwalb.py @@ -1,7 +1,7 @@ import numpy as np from .._shared.utils import warn -from ._felzenszwalb_cy import _felzenszwalb_grey +from ._felzenszwalb_cy import _felzenszwalb_cython def felzenszwalb(image, scale=1, sigma=0.8, min_size=20): @@ -17,9 +17,8 @@ controlled indirectly through ``scale``. Segment size within an image can vary greatly depending on local contrast. - For RGB images, the algorithm computes a separate segmentation for each - channel and then combines these. The combined segmentation is the - intersection of the separate segmentations on the color channels. + For RGB images, the algorithm uses the euclidean distance between pixels in + color space. Parameters ---------- @@ -50,35 +49,6 @@ >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5) """ - if image.ndim == 2: - # assume single channel image - return _felzenszwalb_grey(image, scale=scale, sigma=sigma, - min_size=min_size) - - elif image.ndim != 3: - raise ValueError("Felzenswalb segmentation can only operate on RGB and" - " grey images, but input array of ndim %d given." - % image.ndim) - - # assume we got 2d image with multiple channels - n_channels = image.shape[2] - if n_channels != 3: - warn("Got image with %d channels. Is that really what you" - " wanted?" % image.shape[2]) - segmentations = [] - # compute quickshift for each channel - for c in range(n_channels): - channel = np.ascontiguousarray(image[:, :, c]) - s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma, - min_size=min_size) - segmentations.append(s) - - # put pixels in same segment only if in the same segment in all images - # we do this by combining the channels to one number - n0 = segmentations[0].max() + 1 - n1 = segmentations[1].max() + 1 - segmentation = (segmentations[0] + segmentations[1] * n0 - + segmentations[2] * n0 * n1) - # make segment labels consecutive numbers starting at 0 - labels = np.unique(segmentation, return_inverse=True)[1] - return labels.reshape(image.shape[:2]) + image = np.atleast_3d(image) + return _felzenszwalb_cython(image, scale=scale, sigma=sigma, + min_size=min_size)
{"golden_diff": "diff --git a/skimage/segmentation/_felzenszwalb.py b/skimage/segmentation/_felzenszwalb.py\n--- a/skimage/segmentation/_felzenszwalb.py\n+++ b/skimage/segmentation/_felzenszwalb.py\n@@ -1,7 +1,7 @@\n import numpy as np\n \n from .._shared.utils import warn\n-from ._felzenszwalb_cy import _felzenszwalb_grey\n+from ._felzenszwalb_cy import _felzenszwalb_cython\n \n \n def felzenszwalb(image, scale=1, sigma=0.8, min_size=20):\n@@ -17,9 +17,8 @@\n controlled indirectly through ``scale``. Segment size within an image can\n vary greatly depending on local contrast.\n \n- For RGB images, the algorithm computes a separate segmentation for each\n- channel and then combines these. The combined segmentation is the\n- intersection of the separate segmentations on the color channels.\n+ For RGB images, the algorithm uses the euclidean distance between pixels in\n+ color space.\n \n Parameters\n ----------\n@@ -50,35 +49,6 @@\n >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)\n \"\"\"\n \n- if image.ndim == 2:\n- # assume single channel image\n- return _felzenszwalb_grey(image, scale=scale, sigma=sigma,\n- min_size=min_size)\n-\n- elif image.ndim != 3:\n- raise ValueError(\"Felzenswalb segmentation can only operate on RGB and\"\n- \" grey images, but input array of ndim %d given.\"\n- % image.ndim)\n-\n- # assume we got 2d image with multiple channels\n- n_channels = image.shape[2]\n- if n_channels != 3:\n- warn(\"Got image with %d channels. Is that really what you\"\n- \" wanted?\" % image.shape[2])\n- segmentations = []\n- # compute quickshift for each channel\n- for c in range(n_channels):\n- channel = np.ascontiguousarray(image[:, :, c])\n- s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,\n- min_size=min_size)\n- segmentations.append(s)\n-\n- # put pixels in same segment only if in the same segment in all images\n- # we do this by combining the channels to one number\n- n0 = segmentations[0].max() + 1\n- n1 = segmentations[1].max() + 1\n- segmentation = (segmentations[0] + segmentations[1] * n0\n- + segmentations[2] * n0 * n1)\n- # make segment labels consecutive numbers starting at 0\n- labels = np.unique(segmentation, return_inverse=True)[1]\n- return labels.reshape(image.shape[:2])\n+ image = np.atleast_3d(image)\n+ return _felzenszwalb_cython(image, scale=scale, sigma=sigma,\n+ min_size=min_size)\n", "issue": "`min_size` is not strictly conformed in the implementation of felzenszwalb\n## Description\n\nWith `min_size` specified, there're still some segments with sizes that less than it. I don't know if it is an inherent flaw of the algorithm.\n## Way to reproduce\n\n```\n>>> I = skimage.io.imread('dragonbaby.jpg')\n>>> fz = felzenszwalb(I, scale=300, sigma=0.8, min_size=80)\n>>> (fz==9).sum()\n1\n```\n\n![dragonbaby](https://cloud.githubusercontent.com/assets/7010007/14789864/39cd4a56-0ad4-11e6-88a4-235ebdd1e2fd.jpg)\n\n", "before_files": [{"content": "import numpy as np\n\nfrom .._shared.utils import warn\nfrom ._felzenszwalb_cy import _felzenszwalb_grey\n\n\ndef felzenszwalb(image, scale=1, sigma=0.8, min_size=20):\n \"\"\"Computes Felsenszwalb's efficient graph based image segmentation.\n\n Produces an oversegmentation of a multichannel (i.e. RGB) image\n using a fast, minimum spanning tree based clustering on the image grid.\n The parameter ``scale`` sets an observation level. Higher scale means\n less and larger segments. 
``sigma`` is the diameter of a Gaussian kernel,\n used for smoothing the image prior to segmentation.\n\n The number of produced segments as well as their size can only be\n controlled indirectly through ``scale``. Segment size within an image can\n vary greatly depending on local contrast.\n\n For RGB images, the algorithm computes a separate segmentation for each\n channel and then combines these. The combined segmentation is the\n intersection of the separate segmentations on the color channels.\n\n Parameters\n ----------\n image : (width, height, 3) or (width, height) ndarray\n Input image.\n scale : float\n Free parameter. Higher means larger clusters.\n sigma : float\n Width of Gaussian kernel used in preprocessing.\n min_size : int\n Minimum component size. Enforced using postprocessing.\n\n Returns\n -------\n segment_mask : (width, height) ndarray\n Integer mask indicating segment labels.\n\n References\n ----------\n .. [1] Efficient graph-based image segmentation, Felzenszwalb, P.F. and\n Huttenlocher, D.P. International Journal of Computer Vision, 2004\n\n Examples\n --------\n >>> from skimage.segmentation import felzenszwalb\n >>> from skimage.data import coffee\n >>> img = coffee()\n >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)\n \"\"\"\n\n if image.ndim == 2:\n # assume single channel image\n return _felzenszwalb_grey(image, scale=scale, sigma=sigma,\n min_size=min_size)\n\n elif image.ndim != 3:\n raise ValueError(\"Felzenswalb segmentation can only operate on RGB and\"\n \" grey images, but input array of ndim %d given.\"\n % image.ndim)\n\n # assume we got 2d image with multiple channels\n n_channels = image.shape[2]\n if n_channels != 3:\n warn(\"Got image with %d channels. Is that really what you\"\n \" wanted?\" % image.shape[2])\n segmentations = []\n # compute quickshift for each channel\n for c in range(n_channels):\n channel = np.ascontiguousarray(image[:, :, c])\n s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,\n min_size=min_size)\n segmentations.append(s)\n\n # put pixels in same segment only if in the same segment in all images\n # we do this by combining the channels to one number\n n0 = segmentations[0].max() + 1\n n1 = segmentations[1].max() + 1\n segmentation = (segmentations[0] + segmentations[1] * n0\n + segmentations[2] * n0 * n1)\n # make segment labels consecutive numbers starting at 0\n labels = np.unique(segmentation, return_inverse=True)[1]\n return labels.reshape(image.shape[:2])\n", "path": "skimage/segmentation/_felzenszwalb.py"}]}
1,670
712
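A quick check of the behaviour the issue reports, assuming a scikit-image build that includes the diff above (which replaces the per-channel intersection with a single segmentation using euclidean colour distance). With the intersection gone, the `min_size` postprocessing acts on the final labels, so the smallest segment should now respect it:

```python
import numpy as np
from skimage.data import coffee
from skimage.segmentation import felzenszwalb

seg = felzenszwalb(coffee(), scale=300, sigma=0.8, min_size=80)
_, counts = np.unique(seg, return_counts=True)
print("smallest segment size:", counts.min())  # expected: >= 80 after the fix
```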
gh_patches_debug_248
rasdani/github-patches
git_diff
statsmodels__statsmodels-3976
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The compat modules should use absolute imports The [statsmodels.compat.collections](https://github.com/statsmodels/statsmodels/blob/a88830efc3a99cfbe0ebc9fbfd77820fe748fc59/statsmodels/compat/collections.py#L7) imports the namesake standard library module without requesting absolute imports. While it seems to work in many cases, it causes a problem to packages that override `__import__`. See enlnt/pyq#18. Please consider adding ```python from __future__ import absolute_import ``` to the compat modules. </issue> <code> [start of statsmodels/compat/collections.py] 1 '''backported compatibility functions for Python's collections 2 3 ''' 4 5 try: 6 #python >= 2.7 7 from collections import OrderedDict 8 except ImportError: 9 #http://code.activestate.com/recipes/576693/ 10 #author: Raymond Hettinger 11 from .ordereddict import OrderedDict 12 13 try: 14 #python >= 2.7 15 from collections import Counter 16 except ImportError: 17 #http://code.activestate.com/recipes/576611/ 18 #author: Raymond Hettinger 19 from .counter import Counter 20 [end of statsmodels/compat/collections.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/statsmodels/compat/collections.py b/statsmodels/compat/collections.py --- a/statsmodels/compat/collections.py +++ b/statsmodels/compat/collections.py @@ -1,6 +1,7 @@ '''backported compatibility functions for Python's collections ''' +from __future__ import absolute_import try: #python >= 2.7
{"golden_diff": "diff --git a/statsmodels/compat/collections.py b/statsmodels/compat/collections.py\n--- a/statsmodels/compat/collections.py\n+++ b/statsmodels/compat/collections.py\n@@ -1,6 +1,7 @@\n '''backported compatibility functions for Python's collections\n \n '''\n+from __future__ import absolute_import\n \n try:\n #python >= 2.7\n", "issue": "The compat modules should use absolute imports\nThe [statsmodels.compat.collections](https://github.com/statsmodels/statsmodels/blob/a88830efc3a99cfbe0ebc9fbfd77820fe748fc59/statsmodels/compat/collections.py#L7) imports the namesake standard library module without requesting absolute imports. While it seems to work in many cases, it causes a problem to packages that override `__import__`. See enlnt/pyq#18.\r\n\r\nPlease consider adding\r\n\r\n```python\r\nfrom __future__ import absolute_import\r\n```\r\nto the compat modules.\n", "before_files": [{"content": "'''backported compatibility functions for Python's collections\n\n'''\n\ntry:\n #python >= 2.7\n from collections import OrderedDict\nexcept ImportError:\n #http://code.activestate.com/recipes/576693/\n #author: Raymond Hettinger\n from .ordereddict import OrderedDict\n\ntry:\n #python >= 2.7\n from collections import Counter\nexcept ImportError:\n #http://code.activestate.com/recipes/576611/\n #author: Raymond Hettinger\n from .counter import Counter\n", "path": "statsmodels/compat/collections.py"}]}
829
82
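A short sketch of what the one-line fix changes. Under Python 2's implicit relative imports, a bare `import collections` inside `statsmodels/compat/` could resolve to the package's own `collections.py`, and packages that override `__import__` (as in the linked pyq issue) tripped over that ambiguity. The future import forces stdlib resolution and is a harmless no-op on Python 3:

```python
from __future__ import absolute_import  # no-op on Python 3, decisive on Python 2

# With absolute imports in force, these names unambiguously come from the
# standard library, even when this file lives next to a local collections.py.
from collections import Counter, OrderedDict

print(OrderedDict([("a", 1), ("b", 2)]))
print(Counter("statsmodels"))
```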
gh_patches_debug_22807
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-907
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Double in mapping thrown E7001 error *cfn-lint version: cfn-lint 0.20.1* *Description of issue.* When a mapping value is a double (ex. 1.1) it returns the error `E7001:Mapping [map] has invalid property at [property]` Examples: With double value: ![image](https://user-images.githubusercontent.com/2495780/57731384-887bc580-7668-11e9-998c-f983a1029716.png) Changed to Int: ![image](https://user-images.githubusercontent.com/2495780/57731469-b19c5600-7668-11e9-9521-20eadd2885a4.png) Example CFT: [environment.yaml.txt](https://github.com/aws-cloudformation/cfn-python-lint/files/3179852/environment.yaml.txt) </issue> <code> [start of src/cfnlint/rules/mappings/Configuration.py] 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 """ 17 import six 18 from cfnlint import CloudFormationLintRule 19 from cfnlint import RuleMatch 20 21 22 class Configuration(CloudFormationLintRule): 23 """Check if Mappings are configured correctly""" 24 id = 'E7001' 25 shortdesc = 'Mappings are appropriately configured' 26 description = 'Check if Mappings are properly configured' 27 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html' 28 tags = ['mappings'] 29 30 def match(self, cfn): 31 """Check CloudFormation Parameters""" 32 33 matches = [] 34 35 mappings = cfn.template.get('Mappings', {}) 36 if mappings: 37 for mapname, mapobj in mappings.items(): 38 if not isinstance(mapobj, dict): 39 message = 'Mapping {0} has invalid property' 40 matches.append(RuleMatch( 41 ['Mappings', mapname], 42 message.format(mapname) 43 )) 44 else: 45 for firstkey in mapobj: 46 firstkeyobj = mapobj[firstkey] 47 if not isinstance(firstkeyobj, dict): 48 message = 'Mapping {0} has invalid property at {1}' 49 matches.append(RuleMatch( 50 ['Mappings', mapname, firstkey], 51 message.format(mapname, firstkeyobj) 52 )) 53 else: 54 for secondkey in firstkeyobj: 55 if not isinstance( 56 firstkeyobj[secondkey], 57 (six.string_types, list, six.integer_types)): 58 message = 'Mapping {0} has invalid property at {1}' 59 matches.append(RuleMatch( 60 ['Mappings', mapname, firstkey, secondkey], 61 message.format(mapname, secondkey) 62 )) 63 64 return matches 65 [end of src/cfnlint/rules/mappings/Configuration.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/mappings/Configuration.py b/src/cfnlint/rules/mappings/Configuration.py --- a/src/cfnlint/rules/mappings/Configuration.py +++ b/src/cfnlint/rules/mappings/Configuration.py @@ -32,6 +32,8 @@ matches = [] + valid_map_types = (six.string_types, list, six.integer_types, float) + mappings = cfn.template.get('Mappings', {}) if mappings: for mapname, mapobj in mappings.items(): @@ -53,8 +55,7 @@ else: for secondkey in firstkeyobj: if not isinstance( - firstkeyobj[secondkey], - (six.string_types, list, six.integer_types)): + firstkeyobj[secondkey], valid_map_types): message = 'Mapping {0} has invalid property at {1}' matches.append(RuleMatch( ['Mappings', mapname, firstkey, secondkey],
{"golden_diff": "diff --git a/src/cfnlint/rules/mappings/Configuration.py b/src/cfnlint/rules/mappings/Configuration.py\n--- a/src/cfnlint/rules/mappings/Configuration.py\n+++ b/src/cfnlint/rules/mappings/Configuration.py\n@@ -32,6 +32,8 @@\n \n matches = []\n \n+ valid_map_types = (six.string_types, list, six.integer_types, float)\n+\n mappings = cfn.template.get('Mappings', {})\n if mappings:\n for mapname, mapobj in mappings.items():\n@@ -53,8 +55,7 @@\n else:\n for secondkey in firstkeyobj:\n if not isinstance(\n- firstkeyobj[secondkey],\n- (six.string_types, list, six.integer_types)):\n+ firstkeyobj[secondkey], valid_map_types):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n", "issue": "Double in mapping thrown E7001 error\n*cfn-lint version: cfn-lint 0.20.1*\r\n\r\n*Description of issue.*\r\nWhen a mapping value is a double (ex. 1.1) it returns the error `E7001:Mapping [map] has invalid property at [property]`\r\n\r\nExamples:\r\nWith double value:\r\n![image](https://user-images.githubusercontent.com/2495780/57731384-887bc580-7668-11e9-998c-f983a1029716.png)\r\n\r\nChanged to Int:\r\n![image](https://user-images.githubusercontent.com/2495780/57731469-b19c5600-7668-11e9-9521-20eadd2885a4.png)\r\n\r\nExample CFT: [environment.yaml.txt](https://github.com/aws-cloudformation/cfn-python-lint/files/3179852/environment.yaml.txt)\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Mappings are configured correctly\"\"\"\n id = 'E7001'\n shortdesc = 'Mappings are appropriately configured'\n description = 'Check if Mappings are properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'\n tags = ['mappings']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n mappings = cfn.template.get('Mappings', {})\n if mappings:\n for mapname, mapobj in mappings.items():\n if not isinstance(mapobj, dict):\n message = 'Mapping {0} has invalid property'\n matches.append(RuleMatch(\n ['Mappings', mapname],\n message.format(mapname)\n ))\n else:\n for firstkey in mapobj:\n firstkeyobj = mapobj[firstkey]\n if not isinstance(firstkeyobj, dict):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey],\n message.format(mapname, firstkeyobj)\n ))\n else:\n for secondkey in firstkeyobj:\n if not isinstance(\n firstkeyobj[secondkey],\n (six.string_types, list, six.integer_types)):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n message.format(mapname, secondkey)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/Configuration.py"}]}
1,479
216
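The fix above comes down to one type-check detail: a YAML loader parses `1.1` as a Python `float`, and `float` was missing from the tuple of accepted mapping value types, so the rule flagged a perfectly valid value. A standalone sketch of the check before and after the patch (not the real cfn-lint rule class; assumes `six` is installed):

```python
import six

# These tuples mirror the rule's isinstance() check; isinstance accepts
# nested tuples of types, so mixing six's tuples with plain types is fine.
valid_before = (six.string_types, list, six.integer_types)
valid_after = (six.string_types, list, six.integer_types, float)

for value in ["m5.large", 42, 1.1]:
    print(value, isinstance(value, valid_before), isinstance(value, valid_after))
# 1.1 -> False under the old tuple (spurious E7001), True once float is added.
```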
gh_patches_debug_29592
rasdani/github-patches
git_diff
e-valuation__EvaP-1484
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Locked questionnaires failing in editor form #1445 introduced locked questionnaires. However, they are not dealt with correctly in the evaluation editor form. When initially opening the form, the locked questionnaires are correctly selected but are not handled correctly when saving the form. Steps to reproduce: 1. As manager, assign a locked questionnaire as the only general questionnaire for an evaluation. 2. Enable the evaluation for editor review. 3. As editor, open the evaluation form and try to save it. Saving will fail with an error for the field "General questionnaires" ("This field is required."). The locked questionnaire should count as a selected questionnaire and the form should be saved. A test should be added for this use case. </issue> <code> [start of evap/contributor/forms.py] 1 from datetime import datetime, timedelta 2 import logging 3 4 from django import forms 5 from django.conf import settings 6 from django.db.models import Q 7 from django.forms.widgets import CheckboxSelectMultiple 8 from django.utils.translation import gettext_lazy as _ 9 from evap.evaluation.forms import UserModelMultipleChoiceField, UserModelChoiceField 10 from evap.evaluation.models import Course, Evaluation, Questionnaire, UserProfile 11 from evap.evaluation.tools import date_to_datetime 12 from evap.staff.forms import ContributionForm 13 14 logger = logging.getLogger(__name__) 15 16 17 class EvaluationForm(forms.ModelForm): 18 general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_("General questionnaires")) 19 course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput()) 20 name_de_field = forms.CharField(label=_("Name (German)"), disabled=True, required=False) 21 name_en_field = forms.CharField(label=_("Name (English)"), disabled=True, required=False) 22 23 class Meta: 24 model = Evaluation 25 fields = ('name_de_field', 'name_en_field', 'vote_start_datetime', 'vote_end_date', 'general_questionnaires', 'course') 26 27 def __init__(self, *args, **kwargs): 28 super().__init__(*args, **kwargs) 29 30 self.fields['name_de_field'].initial = self.instance.full_name_de 31 self.fields['name_en_field'].initial = self.instance.full_name_en 32 33 self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter( 34 Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.instance)).distinct() 35 36 self.fields['vote_start_datetime'].localize = True 37 self.fields['vote_end_date'].localize = True 38 39 if self.instance.general_contribution: 40 self.fields['general_questionnaires'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()] 41 42 if not self.instance.allow_editors_to_edit: 43 for field in self._meta.fields: 44 self.fields[field].disabled = True 45 46 def clean(self): 47 super().clean() 48 49 vote_start_datetime = self.cleaned_data.get('vote_start_datetime') 50 vote_end_date = self.cleaned_data.get('vote_end_date') 51 if vote_start_datetime and vote_end_date: 52 if vote_start_datetime.date() > vote_end_date: 53 self.add_error("vote_start_datetime", "") 54 self.add_error("vote_end_date", _("The first day of evaluation must be before the last one.")) 55 56 def clean_vote_end_date(self): 57 vote_end_date = self.cleaned_data.get('vote_end_date') 58 59 # The actual deadline is 
EVALUATION_END_OFFSET_HOURS:00 AM of the day after vote_end_date. 60 # Therefore an evaluation date 24h + EVALUATION_END_OFFSET_HOURS in the past would technically still be in the future. 61 if vote_end_date and date_to_datetime(vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) < datetime.now(): 62 raise forms.ValidationError(_("The last day of evaluation must be in the future.")) 63 return vote_end_date 64 65 def clean_general_questionnaires(self): 66 # Ensure all locked questionnaires still have the same status (included or not) 67 locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True) 68 69 not_locked = [q for q in self.cleaned_data.get('general_questionnaires') if q not in locked_qs] 70 locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)] 71 72 return not_locked + locked 73 74 def save(self, *args, **kw): 75 evaluation = super().save(*args, **kw) 76 evaluation.general_contribution.questionnaires.set(self.cleaned_data.get('general_questionnaires')) 77 return evaluation 78 79 80 class EditorContributionForm(ContributionForm): 81 def __init__(self, *args, **kwargs): 82 super().__init__(*args, **kwargs) 83 84 existing_contributor_pk = self.instance.contributor.pk if self.instance.contributor else None 85 86 self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter( 87 Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct() 88 self.fields['contributor'].queryset = UserProfile.objects.filter( 89 (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk) 90 ) 91 92 93 class DelegatesForm(forms.ModelForm): 94 delegates = UserModelMultipleChoiceField(queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True), 95 required=False) 96 97 class Meta: 98 model = UserProfile 99 fields = ('delegates',) 100 field_classes = { 101 'delegates': UserModelMultipleChoiceField, 102 } 103 104 def __init__(self, *args, **kwargs): 105 super().__init__(*args, **kwargs) 106 107 def save(self, *args, **kw): 108 super().save(*args, **kw) 109 logger.info('User "{}" edited the settings.'.format(self.instance.email)) 110 111 112 class DelegateSelectionForm(forms.Form): 113 delegate_to = UserModelChoiceField(label=_("Delegate to"), 114 queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True)) 115 [end of evap/contributor/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py --- a/evap/contributor/forms.py +++ b/evap/contributor/forms.py @@ -15,7 +15,7 @@ class EvaluationForm(forms.ModelForm): - general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_("General questionnaires")) + general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, required=False, widget=CheckboxSelectMultiple, label=_("General questionnaires")) course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput()) name_de_field = forms.CharField(label=_("Name (German)"), disabled=True, required=False) name_en_field = forms.CharField(label=_("Name (English)"), disabled=True, required=False) @@ -64,10 +64,14 @@ def clean_general_questionnaires(self): # Ensure all locked questionnaires still have the same status (included or not) - locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True) + not_locked = [] + if self.cleaned_data.get('general_questionnaires'): + not_locked = list(self.cleaned_data.get('general_questionnaires').filter(is_locked=False)) - not_locked = [q for q in self.cleaned_data.get('general_questionnaires') if q not in locked_qs] - locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)] + locked = list(self.instance.general_contribution.questionnaires.filter(is_locked=True)) + + if not not_locked + locked: + self.add_error("general_questionnaires", _("At least one questionnaire must be selected.")) return not_locked + locked
{"golden_diff": "diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py\n--- a/evap/contributor/forms.py\n+++ b/evap/contributor/forms.py\n@@ -15,7 +15,7 @@\n \n \n class EvaluationForm(forms.ModelForm):\n- general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n+ general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, required=False, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n name_de_field = forms.CharField(label=_(\"Name (German)\"), disabled=True, required=False)\n name_en_field = forms.CharField(label=_(\"Name (English)\"), disabled=True, required=False)\n@@ -64,10 +64,14 @@\n \n def clean_general_questionnaires(self):\n # Ensure all locked questionnaires still have the same status (included or not)\n- locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True)\n+ not_locked = []\n+ if self.cleaned_data.get('general_questionnaires'):\n+ not_locked = list(self.cleaned_data.get('general_questionnaires').filter(is_locked=False))\n \n- not_locked = [q for q in self.cleaned_data.get('general_questionnaires') if q not in locked_qs]\n- locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)]\n+ locked = list(self.instance.general_contribution.questionnaires.filter(is_locked=True))\n+\n+ if not not_locked + locked:\n+ self.add_error(\"general_questionnaires\", _(\"At least one questionnaire must be selected.\"))\n \n return not_locked + locked\n", "issue": "Locked questionnaires failing in editor form\n#1445 introduced locked questionnaires. However, they are not dealt with correctly in the evaluation editor form. When initially opening the form, the locked questionnaires are correctly selected but are not handled correctly when saving the form.\r\n\r\nSteps to reproduce:\r\n1. As manager, assign a locked questionnaire as the only general questionnaire for an evaluation.\r\n2. Enable the evaluation for editor review.\r\n3. As editor, open the evaluation form and try to save it. 
Saving will fail with an error for the field \"General questionnaires\" (\"This field is required.\").\r\n\r\nThe locked questionnaire should count as a selected questionnaire and the form should be saved.\r\nA test should be added for this use case.\n", "before_files": [{"content": "from datetime import datetime, timedelta\nimport logging\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.forms.widgets import CheckboxSelectMultiple\nfrom django.utils.translation import gettext_lazy as _\nfrom evap.evaluation.forms import UserModelMultipleChoiceField, UserModelChoiceField\nfrom evap.evaluation.models import Course, Evaluation, Questionnaire, UserProfile\nfrom evap.evaluation.tools import date_to_datetime\nfrom evap.staff.forms import ContributionForm\n\nlogger = logging.getLogger(__name__)\n\n\nclass EvaluationForm(forms.ModelForm):\n general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n name_de_field = forms.CharField(label=_(\"Name (German)\"), disabled=True, required=False)\n name_en_field = forms.CharField(label=_(\"Name (English)\"), disabled=True, required=False)\n\n class Meta:\n model = Evaluation\n fields = ('name_de_field', 'name_en_field', 'vote_start_datetime', 'vote_end_date', 'general_questionnaires', 'course')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['name_de_field'].initial = self.instance.full_name_de\n self.fields['name_en_field'].initial = self.instance.full_name_en\n\n self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter(\n Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.instance)).distinct()\n\n self.fields['vote_start_datetime'].localize = True\n self.fields['vote_end_date'].localize = True\n\n if self.instance.general_contribution:\n self.fields['general_questionnaires'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]\n\n if not self.instance.allow_editors_to_edit:\n for field in self._meta.fields:\n self.fields[field].disabled = True\n\n def clean(self):\n super().clean()\n\n vote_start_datetime = self.cleaned_data.get('vote_start_datetime')\n vote_end_date = self.cleaned_data.get('vote_end_date')\n if vote_start_datetime and vote_end_date:\n if vote_start_datetime.date() > vote_end_date:\n self.add_error(\"vote_start_datetime\", \"\")\n self.add_error(\"vote_end_date\", _(\"The first day of evaluation must be before the last one.\"))\n\n def clean_vote_end_date(self):\n vote_end_date = self.cleaned_data.get('vote_end_date')\n\n # The actual deadline is EVALUATION_END_OFFSET_HOURS:00 AM of the day after vote_end_date.\n # Therefore an evaluation date 24h + EVALUATION_END_OFFSET_HOURS in the past would technically still be in the future.\n if vote_end_date and date_to_datetime(vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) < datetime.now():\n raise forms.ValidationError(_(\"The last day of evaluation must be in the future.\"))\n return vote_end_date\n\n def clean_general_questionnaires(self):\n # Ensure all locked questionnaires still have the same status (included or not)\n locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True)\n\n not_locked = [q for q in 
self.cleaned_data.get('general_questionnaires') if q not in locked_qs]\n locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)]\n\n return not_locked + locked\n\n def save(self, *args, **kw):\n evaluation = super().save(*args, **kw)\n evaluation.general_contribution.questionnaires.set(self.cleaned_data.get('general_questionnaires'))\n return evaluation\n\n\nclass EditorContributionForm(ContributionForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n existing_contributor_pk = self.instance.contributor.pk if self.instance.contributor else None\n\n self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter(\n Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct()\n self.fields['contributor'].queryset = UserProfile.objects.filter(\n (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk)\n )\n\n\nclass DelegatesForm(forms.ModelForm):\n delegates = UserModelMultipleChoiceField(queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True),\n required=False)\n\n class Meta:\n model = UserProfile\n fields = ('delegates',)\n field_classes = {\n 'delegates': UserModelMultipleChoiceField,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n logger.info('User \"{}\" edited the settings.'.format(self.instance.email))\n\n\nclass DelegateSelectionForm(forms.Form):\n delegate_to = UserModelChoiceField(label=_(\"Delegate to\"),\n queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True))\n", "path": "evap/contributor/forms.py"}]}
2,047
385
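The diff fixes this by making the form field `required=False` and moving the "at least one questionnaire" check into `clean_general_questionnaires`, where the locked questionnaires are unioned back in. A sketch of the regression test the issue asks for; the `baker` fixtures and field values are illustrative placeholders, not EvaP's actual test setup:

```python
from django.test import TestCase
from model_bakery import baker

from evap.contributor.forms import EvaluationForm
from evap.evaluation.models import Evaluation, Questionnaire


class EditorEvaluationFormTest(TestCase):
    def test_locked_questionnaire_counts_as_selected(self):
        locked = baker.make(Questionnaire, is_locked=True,
                            visibility=Questionnaire.Visibility.EDITORS)
        evaluation = baker.make(Evaluation)
        evaluation.general_contribution.questionnaires.set([locked])

        form = EvaluationForm(instance=evaluation, data={
            "vote_start_datetime": evaluation.vote_start_datetime,
            "vote_end_date": evaluation.vote_end_date,
            # Editors cannot toggle locked questionnaires, so the POST
            # data legitimately contains none at all.
            "general_questionnaires": [],
        })
        # Before the fix this failed with "This field is required."
        self.assertTrue(form.is_valid(), form.errors)
```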
gh_patches_debug_25270
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-2635
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests [DOC]: the sphinx theme is too old ### 📚 The doc issue As stated in #2579 , we want to use Read the Docs to host our documentation. In this way, tutorials and API documentations will be visited from a single entry. This issue will mainly discuss the appearance of the RTD website. Ideally, we should use Tailwind for style consistency. However, it can take some time to implement a tailwind-based theme, therefore, we should use an existing theme which looks more modern first. </issue> <code> [start of docs/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 # -- Path setup -------------------------------------------------------------- 8 9 import datetime 10 # If extensions (or modules to document with autodoc) are in another directory, 11 # add these directories to sys.path here. If the directory is relative to the 12 # documentation root, use os.path.abspath to make it absolute, like shown here. 13 # 14 import os 15 import sys 16 17 sys.path.insert(0, os.path.abspath('..')) 18 19 # -- Project information ----------------------------------------------------- 20 21 project = 'Colossal-AI' 22 copyright = f'{datetime.datetime.now().year}, HPC-AI Tech' 23 author = 'HPC-AI Technology Inc.' 24 25 # The full version, including alpha/beta/rc tags 26 release = '0.0.1' 27 28 29 # -- General configuration --------------------------------------------------- 30 31 # Add any Sphinx extension module names here, as strings. They can be 32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 # ones. 34 extensions = [ 35 'sphinx.ext.autodoc', 36 'sphinx.ext.mathjax', 37 'sphinx.ext.napoleon', 38 'sphinx.ext.linkcode', 39 'myst_parser', 40 ] 41 42 # Disable docstring inheritance 43 autodoc_inherit_docstrings = False 44 45 # Disable displaying type annotations, these can be very verbose 46 autodoc_typehints = 'none' 47 48 # Enable overriding of function signatures in the first line of the docstring. 49 autodoc_docstring_signature = True 50 autodoc_default_options = { 51 'member-order': 'bysource', 52 } 53 54 # Add any paths that contain templates here, relative to this directory. 55 templates_path = ['_templates'] 56 57 # List of patterns, relative to source directory, that match files and 58 # directories to ignore when looking for source files. 59 # This pattern also affects html_static_path and html_extra_path. 60 exclude_patterns = ['.build', 'Thumbs.db', '.DS_Store'] 61 62 # -- Options for HTML output ------------------------------------------------- 63 64 # The theme to use for HTML and HTML Help pages. See the documentation for 65 # a list of builtin themes. 66 # 67 html_theme = 'sphinx_rtd_theme' 68 html_show_sourcelink = False 69 html_theme_options = { 70 'navigation_depth': 3, 71 } 72 73 html_context = { 74 'display_github': False, 75 'github_user': 'hpcaitech', 76 'github_repo': 'ColossalAI', 77 # 'github_version': 'master/docs/', 78 } 79 80 # Add any paths that contain custom static files (such as style sheets) here, 81 # relative to this directory. They are copied after the builtin static files, 82 # so a file named "default.css" will overwrite the builtin "default.css". 
83 html_static_path = ['_static'] 84 85 html_css_files = [ 86 'css/rtd_theme.css', 87 ] 88 89 # -- Extension configuration ------------------------------------------------- 90 source_suffix = ['.rst', '.md', '.MD'] 91 92 import inspect 93 import colossalai 94 def linkcode_resolve(domain, info): 95 """ 96 Determine the URL corresponding to Python object 97 """ 98 if domain != 'py': 99 return None 100 101 modname = info['module'] 102 fullname = info['fullname'] 103 104 submod = sys.modules.get(modname) 105 if submod is None: 106 return None 107 108 obj = submod 109 for part in fullname.split('.'): 110 try: 111 obj = getattr(obj, part) 112 except Exception: 113 return None 114 115 try: 116 fn = inspect.getsourcefile(obj) 117 except Exception: 118 fn = None 119 if not fn: 120 return None 121 122 try: 123 source, lineno = inspect.findsource(obj) 124 except Exception: 125 lineno = None 126 127 if lineno: 128 linespec = "#L%d" % (lineno + 1) 129 else: 130 linespec = "" 131 132 fn = os.path.relpath(fn, start=os.path.dirname(colossalai.__file__)) 133 134 github = "https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/{}{}" 135 return github.format(fn, linespec) 136 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -23,8 +23,7 @@ author = 'HPC-AI Technology Inc.' # The full version, including alpha/beta/rc tags -release = '0.0.1' - +# release = '0.0.1' # -- General configuration --------------------------------------------------- @@ -64,14 +63,14 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' +html_theme = 'sphinx_book_theme' html_show_sourcelink = False html_theme_options = { 'navigation_depth': 3, } html_context = { - 'display_github': False, + 'display_github': True, 'github_user': 'hpcaitech', 'github_repo': 'ColossalAI', # 'github_version': 'master/docs/', @@ -90,7 +89,10 @@ source_suffix = ['.rst', '.md', '.MD'] import inspect + import colossalai + + def linkcode_resolve(domain, info): """ Determine the URL corresponding to Python object
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -23,8 +23,7 @@\n author = 'HPC-AI Technology Inc.'\n \n # The full version, including alpha/beta/rc tags\n-release = '0.0.1'\n-\n+# release = '0.0.1'\n \n # -- General configuration ---------------------------------------------------\n \n@@ -64,14 +63,14 @@\n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n #\n-html_theme = 'sphinx_rtd_theme'\n+html_theme = 'sphinx_book_theme'\n html_show_sourcelink = False\n html_theme_options = {\n 'navigation_depth': 3,\n }\n \n html_context = {\n- 'display_github': False,\n+ 'display_github': True,\n 'github_user': 'hpcaitech',\n 'github_repo': 'ColossalAI',\n # 'github_version': 'master/docs/',\n@@ -90,7 +89,10 @@\n source_suffix = ['.rst', '.md', '.MD']\n \n import inspect\n+\n import colossalai\n+\n+\n def linkcode_resolve(domain, info):\n \"\"\"\n Determine the URL corresponding to Python object\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[DOC]: the sphinx theme is too old\n### \ud83d\udcda The doc issue\n\nAs stated in #2579 , we want to use Read the Docs to host our documentation. In this way, tutorials and API documentations will be visited from a single entry. This issue will mainly discuss the appearance of the RTD website. Ideally, we should use Tailwind for style consistency. However, it can take some time to implement a tailwind-based theme, therefore, we should use an existing theme which looks more modern first.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport datetime\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Colossal-AI'\ncopyright = f'{datetime.datetime.now().year}, HPC-AI Tech'\nauthor = 'HPC-AI Technology Inc.'\n\n# The full version, including alpha/beta/rc tags\nrelease = '0.0.1'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.linkcode',\n 'myst_parser',\n]\n\n# Disable docstring inheritance\nautodoc_inherit_docstrings = False\n\n# Disable displaying type annotations, these can be very verbose\nautodoc_typehints = 'none'\n\n# Enable overriding of function signatures in the first line of the docstring.\nautodoc_docstring_signature = True\nautodoc_default_options = {\n 'member-order': 'bysource',\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['.build', 'Thumbs.db', '.DS_Store']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_show_sourcelink = False\nhtml_theme_options = {\n 'navigation_depth': 3,\n}\n\nhtml_context = {\n 'display_github': False,\n 'github_user': 'hpcaitech',\n 'github_repo': 'ColossalAI',\n # 'github_version': 'master/docs/',\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n 'css/rtd_theme.css',\n]\n\n# -- Extension configuration -------------------------------------------------\nsource_suffix = ['.rst', '.md', '.MD']\n\nimport inspect\nimport colossalai\ndef linkcode_resolve(domain, info):\n \"\"\"\n Determine the URL corresponding to Python object\n \"\"\"\n if domain != 'py':\n return None\n\n modname = info['module']\n fullname = info['fullname']\n\n submod = sys.modules.get(modname)\n if submod is None:\n return None\n\n obj = submod\n for part in fullname.split('.'):\n try:\n obj = getattr(obj, part)\n except Exception:\n return None\n\n try:\n fn = inspect.getsourcefile(obj)\n except Exception:\n fn = None\n if not fn:\n return None\n\n try:\n source, lineno = inspect.findsource(obj)\n except Exception:\n lineno = None\n\n if lineno:\n linespec = \"#L%d\" % (lineno + 1)\n else:\n linespec = \"\"\n\n fn = os.path.relpath(fn, start=os.path.dirname(colossalai.__file__))\n\n github = \"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/{}{}\"\n return github.format(fn, linespec)\n", "path": "docs/conf.py"}]}
1,864
280
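The change itself is a one-line theme swap (plus re-enabling the GitHub link), but the diff cannot show that the new theme has to be installed and configured through its own option names. A `conf.py` sketch, assuming `pip install sphinx-book-theme` and using option names from sphinx-book-theme's documentation (`repository_url`, `use_repository_button`):

```python
# conf.py excerpt; requires the sphinx-book-theme package.
html_theme = "sphinx_book_theme"
html_theme_options = {
    # book-theme's replacement for the rtd-style "display_github" banner:
    "repository_url": "https://github.com/hpcaitech/ColossalAI",
    "use_repository_button": True,
}
```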
gh_patches_debug_25916
rasdani/github-patches
git_diff
nf-core__tools-381
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> problem with nfcore_cache.sqlite within /tmp Hi all, I think will be a nice idea to have the nfcore_cache.sqlite within a subfolder in tmp because if two users use the program at the same time the privileges will prevent to use the tool. For example I cannot even use nf-core --help Luca </issue> <code> [start of nf_core/utils.py] 1 #!/usr/bin/env python 2 """ 3 Common utility functions for the nf-core python package. 4 """ 5 6 import datetime 7 import json 8 import logging 9 import os 10 import subprocess 11 import tempfile 12 13 def fetch_wf_config(wf_path, wf=None): 14 """Uses Nextflow to retrieve the the configuration variables 15 from a Nextflow workflow. 16 17 Args: 18 wf_path (str): Nextflow workflow file system path. 19 20 Returns: 21 dict: Workflow configuration settings. 22 """ 23 24 config = dict() 25 cache_fn = None 26 cache_basedir = None 27 cache_path = None 28 29 # Build a cache directory if we can 30 if os.path.isdir(os.path.join(os.getenv("HOME"), '.nextflow')): 31 cache_basedir = os.path.join(os.getenv("HOME"), '.nextflow', 'nf-core') 32 if not os.path.isdir(cache_basedir): 33 os.mkdir(cache_basedir) 34 35 # If we're given a workflow object with a commit, see if we have a cached copy 36 if cache_basedir and wf and wf.full_name and wf.commit_sha: 37 cache_fn = '{}-{}.json'.format(wf.full_name.replace(os.path.sep, '-'), wf.commit_sha) 38 cache_path = os.path.join(cache_basedir, cache_fn) 39 if os.path.isfile(cache_path): 40 logging.debug("Found a config cache, loading: {}".format(cache_path)) 41 with open(cache_path, 'r') as fh: 42 config = json.load(fh) 43 return config 44 45 46 # Call `nextflow config` and pipe stderr to /dev/null 47 try: 48 with open(os.devnull, 'w') as devnull: 49 nfconfig_raw = subprocess.check_output(['nextflow', 'config', '-flat', wf_path], stderr=devnull) 50 except OSError as e: 51 if e.errno == os.errno.ENOENT: 52 raise AssertionError("It looks like Nextflow is not installed. It is required for most nf-core functions.") 53 except subprocess.CalledProcessError as e: 54 raise AssertionError("`nextflow config` returned non-zero error code: %s,\n %s", e.returncode, e.output) 55 else: 56 for l in nfconfig_raw.splitlines(): 57 ul = l.decode('utf-8') 58 k, v = ul.split(' = ', 1) 59 config[k] = v 60 61 # If we can, save a cached copy 62 if cache_path: 63 logging.debug("Saving config cache: {}".format(cache_path)) 64 with open(cache_path, 'w') as fh: 65 json.dump(config, fh, indent=4) 66 67 return config 68 69 70 def setup_requests_cachedir(): 71 """Sets up local caching for faster remote HTTP requests. 72 73 Caching directory will be generated by tempfile.gettempdir() under 74 a nfcore_cache subdir. 75 """ 76 # Only import it if we need it 77 import requests_cache 78 79 cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache') 80 if not os.path.exists(cachedir): 81 os.mkdir(cachedir) 82 requests_cache.install_cache( 83 os.path.join(cachedir, 'nfcore_cache'), 84 expire_after=datetime.timedelta(hours=1), 85 backend='sqlite', 86 ) 87 # Make world-writeable so that multi-user installations work 88 os.chmod(cachedir, 0o777) 89 os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777) 90 [end of nf_core/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nf_core/utils.py b/nf_core/utils.py --- a/nf_core/utils.py +++ b/nf_core/utils.py @@ -8,7 +8,6 @@ import logging import os import subprocess -import tempfile def fetch_wf_config(wf_path, wf=None): """Uses Nextflow to retrieve the the configuration variables @@ -70,20 +69,18 @@ def setup_requests_cachedir(): """Sets up local caching for faster remote HTTP requests. - Caching directory will be generated by tempfile.gettempdir() under - a nfcore_cache subdir. + Caching directory will be set up in the user's home directory under + a .nfcore_cache subdir. """ # Only import it if we need it import requests_cache + - cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache') + cachedir = os.path.join(os.getenv("HOME"), os.path.join('.nfcore', 'cache')) if not os.path.exists(cachedir): - os.mkdir(cachedir) + os.makedirs(cachedir) requests_cache.install_cache( - os.path.join(cachedir, 'nfcore_cache'), + os.path.join(cachedir, 'github_info'), expire_after=datetime.timedelta(hours=1), backend='sqlite', ) - # Make world-writeable so that multi-user installations work - os.chmod(cachedir, 0o777) - os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)
{"golden_diff": "diff --git a/nf_core/utils.py b/nf_core/utils.py\n--- a/nf_core/utils.py\n+++ b/nf_core/utils.py\n@@ -8,7 +8,6 @@\n import logging\n import os\n import subprocess\n-import tempfile\n \n def fetch_wf_config(wf_path, wf=None):\n \"\"\"Uses Nextflow to retrieve the the configuration variables\n@@ -70,20 +69,18 @@\n def setup_requests_cachedir():\n \"\"\"Sets up local caching for faster remote HTTP requests.\n \n- Caching directory will be generated by tempfile.gettempdir() under\n- a nfcore_cache subdir.\n+ Caching directory will be set up in the user's home directory under\n+ a .nfcore_cache subdir.\n \"\"\"\n # Only import it if we need it\n import requests_cache\n+ \n \n- cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache')\n+ cachedir = os.path.join(os.getenv(\"HOME\"), os.path.join('.nfcore', 'cache'))\n if not os.path.exists(cachedir):\n- os.mkdir(cachedir)\n+ os.makedirs(cachedir)\n requests_cache.install_cache(\n- os.path.join(cachedir, 'nfcore_cache'),\n+ os.path.join(cachedir, 'github_info'),\n expire_after=datetime.timedelta(hours=1),\n backend='sqlite',\n )\n- # Make world-writeable so that multi-user installations work\n- os.chmod(cachedir, 0o777)\n- os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)\n", "issue": "problem with nfcore_cache.sqlite within /tmp\nHi all,\r\nI think will be a nice idea to have the nfcore_cache.sqlite within a subfolder in tmp because if two users use the program at the same time the privileges will prevent to use the tool.\r\n\r\nFor example I cannot even use nf-core --help \r\n\r\nLuca\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nCommon utility functions for the nf-core python package.\n\"\"\"\n\nimport datetime\nimport json\nimport logging\nimport os\nimport subprocess\nimport tempfile\n\ndef fetch_wf_config(wf_path, wf=None):\n \"\"\"Uses Nextflow to retrieve the the configuration variables\n from a Nextflow workflow.\n\n Args:\n wf_path (str): Nextflow workflow file system path.\n\n Returns:\n dict: Workflow configuration settings.\n \"\"\"\n\n config = dict()\n cache_fn = None\n cache_basedir = None\n cache_path = None\n\n # Build a cache directory if we can\n if os.path.isdir(os.path.join(os.getenv(\"HOME\"), '.nextflow')):\n cache_basedir = os.path.join(os.getenv(\"HOME\"), '.nextflow', 'nf-core')\n if not os.path.isdir(cache_basedir):\n os.mkdir(cache_basedir)\n\n # If we're given a workflow object with a commit, see if we have a cached copy\n if cache_basedir and wf and wf.full_name and wf.commit_sha:\n cache_fn = '{}-{}.json'.format(wf.full_name.replace(os.path.sep, '-'), wf.commit_sha)\n cache_path = os.path.join(cache_basedir, cache_fn)\n if os.path.isfile(cache_path):\n logging.debug(\"Found a config cache, loading: {}\".format(cache_path))\n with open(cache_path, 'r') as fh:\n config = json.load(fh)\n return config\n\n\n # Call `nextflow config` and pipe stderr to /dev/null\n try:\n with open(os.devnull, 'w') as devnull:\n nfconfig_raw = subprocess.check_output(['nextflow', 'config', '-flat', wf_path], stderr=devnull)\n except OSError as e:\n if e.errno == os.errno.ENOENT:\n raise AssertionError(\"It looks like Nextflow is not installed. 
It is required for most nf-core functions.\")\n except subprocess.CalledProcessError as e:\n raise AssertionError(\"`nextflow config` returned non-zero error code: %s,\\n %s\", e.returncode, e.output)\n else:\n for l in nfconfig_raw.splitlines():\n ul = l.decode('utf-8')\n k, v = ul.split(' = ', 1)\n config[k] = v\n\n # If we can, save a cached copy\n if cache_path:\n logging.debug(\"Saving config cache: {}\".format(cache_path))\n with open(cache_path, 'w') as fh:\n json.dump(config, fh, indent=4)\n\n return config\n\n\ndef setup_requests_cachedir():\n \"\"\"Sets up local caching for faster remote HTTP requests.\n\n Caching directory will be generated by tempfile.gettempdir() under\n a nfcore_cache subdir.\n \"\"\"\n # Only import it if we need it\n import requests_cache\n\n cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache')\n if not os.path.exists(cachedir):\n os.mkdir(cachedir)\n requests_cache.install_cache(\n os.path.join(cachedir, 'nfcore_cache'),\n expire_after=datetime.timedelta(hours=1),\n backend='sqlite',\n )\n # Make world-writeable so that multi-user installations work\n os.chmod(cachedir, 0o777)\n os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)\n", "path": "nf_core/utils.py"}]}
1,511
353
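The accepted fix avoids the clash by giving every account its own cache path instead of one shared, world-writable sqlite file under `/tmp`. A minimal sketch of the per-user variant, mirroring the patched `setup_requests_cachedir` (assumes `requests_cache` is installed):

```python
import datetime
import os

import requests_cache

# $HOME differs per account, so two users can no longer collide on the
# same sqlite file the way they did with a fixed path in /tmp.
cachedir = os.path.join(os.getenv("HOME"), ".nfcore", "cache")
os.makedirs(cachedir, exist_ok=True)  # create parents; no-op if present
requests_cache.install_cache(
    os.path.join(cachedir, "github_info"),
    expire_after=datetime.timedelta(hours=1),
    backend="sqlite",
)
```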
gh_patches_debug_26270
rasdani/github-patches
git_diff
e-valuation__EvaP-2036
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Translations in Javascript and Typescript When writing Javascript and Typescript in separate, non-HTML files, we can't use the Django template functions `trans`, `blocktrans`, etc. anymore. We have worked around this by putting translated strings into the DOM and accessing them via Javascript then. Instead of doing this, we want to have a unified approach where the use-site can just write `trans("The server is not responding.")` or so. There are two possible approaches: 1. DIY: We have a function `trans(english: string, to: Language = window.LANGUAGE): string` with `type Language = "English" | "German"`. This function looks up the string in a global dictionary (for example `window.translationDictionary` or so). I am not sure what it should do if the string is not present, probably return the English string and emit a warning? This dictionary would be defined in a script tag in a HTML file, something like (possibly with an implementation that doesn't repeat the strings a little less): ```html <script type="text/javascript"> window.translationDictionary = { "de": { {% language 'de' %} "The server is not responding": "{% trans 'The server is not responding' %}", {% endlanguage %} } }; </script> ``` 2. Use Django's builtin functionality: There is a builtin way that configures an extra endpoint to make all translations available (https://docs.djangoproject.com/en/4.2/topics/i18n/translation/#internationalization-in-javascript-code). A plus is that it also supports `ngettext` and so on. It seems like it can also detect all strings used in translations, but the setup may be a bit tricky with Typescript thrown into the mix. I think I prefer the first approach, but maybe we encounter difficulties with it or decide that we will need `ngettext` etc. in the future and go with the Django versions directly. </issue> <code> [start of evap/development/management/commands/translate.py] 1 from django.core.management import call_command 2 from django.core.management.base import BaseCommand 3 4 5 class Command(BaseCommand): 6 args = "" 7 help = 'Execute "makemessages --locale=de --ignore=node_modules/*"' 8 9 def handle(self, *args, **options): 10 self.stdout.write('Executing "manage.py makemessages --locale=de --ignore=node_modules/*"') 11 call_command("makemessages", "--locale=de", "--ignore=node_modules/*") 12 [end of evap/development/management/commands/translate.py] [start of evap/urls.py] 1 import django.contrib.auth.views 2 from django.conf import settings 3 from django.urls import include, path 4 5 urlpatterns = [ 6 path("", include('evap.evaluation.urls')), 7 path("staff/", include('evap.staff.urls')), 8 path("results/", include('evap.results.urls')), 9 path("student/", include('evap.student.urls')), 10 path("contributor/", include('evap.contributor.urls')), 11 path("rewards/", include('evap.rewards.urls')), 12 path("grades/", include('evap.grades.urls')), 13 14 path("logout", django.contrib.auth.views.LogoutView.as_view(next_page="/"), name="django-auth-logout"), 15 path("oidc/", include('mozilla_django_oidc.urls')), 16 ] 17 18 if settings.DEBUG: 19 urlpatterns += [path('development/', include('evap.development.urls'))] 20 21 if settings.ENABLE_DEBUG_TOOLBAR: 22 # pylint does not correctly evaluate this if, so it will raise an import-error on 23 # GitHub actions and a useless-suppression on a vagrant setup. Ignore both cases. 
24 import debug_toolbar # pylint: disable=import-error, useless-suppression 25 urlpatterns += [path('__debug__/', include(debug_toolbar.urls))] 26 [end of evap/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/development/management/commands/translate.py b/evap/development/management/commands/translate.py --- a/evap/development/management/commands/translate.py +++ b/evap/development/management/commands/translate.py @@ -9,3 +9,11 @@ def handle(self, *args, **options): self.stdout.write('Executing "manage.py makemessages --locale=de --ignore=node_modules/*"') call_command("makemessages", "--locale=de", "--ignore=node_modules/*") + call_command( + "makemessages", + "--domain=djangojs", + "--extension=js,ts", + "--locale=de", + "--ignore=node_modules/*", + "--ignore=evap/static/js/*.min.js", + ) diff --git a/evap/urls.py b/evap/urls.py --- a/evap/urls.py +++ b/evap/urls.py @@ -1,6 +1,9 @@ import django.contrib.auth.views from django.conf import settings from django.urls import include, path +from django.views.i18n import JavaScriptCatalog + +from evap.middleware import no_login_required urlpatterns = [ path("", include('evap.evaluation.urls')), @@ -13,6 +16,8 @@ path("logout", django.contrib.auth.views.LogoutView.as_view(next_page="/"), name="django-auth-logout"), path("oidc/", include('mozilla_django_oidc.urls')), + + path("catalog.js", no_login_required(JavaScriptCatalog.as_view()), name="javascript-catalog"), ] if settings.DEBUG:
{"golden_diff": "diff --git a/evap/development/management/commands/translate.py b/evap/development/management/commands/translate.py\n--- a/evap/development/management/commands/translate.py\n+++ b/evap/development/management/commands/translate.py\n@@ -9,3 +9,11 @@\n def handle(self, *args, **options):\n self.stdout.write('Executing \"manage.py makemessages --locale=de --ignore=node_modules/*\"')\n call_command(\"makemessages\", \"--locale=de\", \"--ignore=node_modules/*\")\n+ call_command(\n+ \"makemessages\",\n+ \"--domain=djangojs\",\n+ \"--extension=js,ts\",\n+ \"--locale=de\",\n+ \"--ignore=node_modules/*\",\n+ \"--ignore=evap/static/js/*.min.js\",\n+ )\ndiff --git a/evap/urls.py b/evap/urls.py\n--- a/evap/urls.py\n+++ b/evap/urls.py\n@@ -1,6 +1,9 @@\n import django.contrib.auth.views\n from django.conf import settings\n from django.urls import include, path\n+from django.views.i18n import JavaScriptCatalog\n+\n+from evap.middleware import no_login_required\n \n urlpatterns = [\n path(\"\", include('evap.evaluation.urls')),\n@@ -13,6 +16,8 @@\n \n path(\"logout\", django.contrib.auth.views.LogoutView.as_view(next_page=\"/\"), name=\"django-auth-logout\"),\n path(\"oidc/\", include('mozilla_django_oidc.urls')),\n+\n+ path(\"catalog.js\", no_login_required(JavaScriptCatalog.as_view()), name=\"javascript-catalog\"),\n ]\n \n if settings.DEBUG:\n", "issue": "Translations in Javascript and Typescript\nWhen writing Javascript and Typescript in separate, non-HTML files, we can't use the Django template functions `trans`, `blocktrans`, etc. anymore. We have worked around this by putting translated strings into the DOM and accessing them via Javascript then.\r\n\r\nInstead of doing this, we want to have a unified approach where the use-site can just write `trans(\"The server is not responding.\")` or so. There are two possible approaches:\r\n\r\n1. DIY: We have a function `trans(english: string, to: Language = window.LANGUAGE): string` with `type Language = \"English\" | \"German\"`. This function looks up the string in a global dictionary (for example `window.translationDictionary` or so). I am not sure what it should do if the string is not present, probably return the English string and emit a warning? This dictionary would be defined in a script tag in a HTML file, something like (possibly with an implementation that doesn't repeat the strings a little less):\r\n```html\r\n<script type=\"text/javascript\">\r\n window.translationDictionary = {\r\n \"de\": {\r\n {% language 'de' %}\r\n \"The server is not responding\": \"{% trans 'The server is not responding' %}\",\r\n {% endlanguage %}\r\n }\r\n };\r\n</script>\r\n```\r\n2. Use Django's builtin functionality: There is a builtin way that configures an extra endpoint to make all translations available (https://docs.djangoproject.com/en/4.2/topics/i18n/translation/#internationalization-in-javascript-code). A plus is that it also supports `ngettext` and so on. It seems like it can also detect all strings used in translations, but the setup may be a bit tricky with Typescript thrown into the mix.\r\n\r\nI think I prefer the first approach, but maybe we encounter difficulties with it or decide that we will need `ngettext` etc. 
in the future and go with the Django versions directly.\n", "before_files": [{"content": "from django.core.management import call_command\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n args = \"\"\n help = 'Execute \"makemessages --locale=de --ignore=node_modules/*\"'\n\n def handle(self, *args, **options):\n self.stdout.write('Executing \"manage.py makemessages --locale=de --ignore=node_modules/*\"')\n call_command(\"makemessages\", \"--locale=de\", \"--ignore=node_modules/*\")\n", "path": "evap/development/management/commands/translate.py"}, {"content": "import django.contrib.auth.views\nfrom django.conf import settings\nfrom django.urls import include, path\n\nurlpatterns = [\n path(\"\", include('evap.evaluation.urls')),\n path(\"staff/\", include('evap.staff.urls')),\n path(\"results/\", include('evap.results.urls')),\n path(\"student/\", include('evap.student.urls')),\n path(\"contributor/\", include('evap.contributor.urls')),\n path(\"rewards/\", include('evap.rewards.urls')),\n path(\"grades/\", include('evap.grades.urls')),\n\n path(\"logout\", django.contrib.auth.views.LogoutView.as_view(next_page=\"/\"), name=\"django-auth-logout\"),\n path(\"oidc/\", include('mozilla_django_oidc.urls')),\n]\n\nif settings.DEBUG:\n urlpatterns += [path('development/', include('evap.development.urls'))]\n\n if settings.ENABLE_DEBUG_TOOLBAR:\n # pylint does not correctly evaluate this if, so it will raise an import-error on\n # GitHub actions and a useless-suppression on a vagrant setup. Ignore both cases.\n import debug_toolbar # pylint: disable=import-error, useless-suppression\n urlpatterns += [path('__debug__/', include(debug_toolbar.urls))]\n", "path": "evap/urls.py"}]}
1,387
370
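Approach 2 from the issue won out: the diff mounts Django's stock `JavaScriptCatalog` view at `/catalog.js` (wrapped in EvaP's `no_login_required`) and extends `makemessages` with the `djangojs` domain so strings in `.js`/`.ts` files are collected. A smoke-test sketch for the new endpoint; the exact assertion is a guess at the catalog template's contents:

```python
from django.test import TestCase


class JavaScriptCatalogSmokeTest(TestCase):
    def test_catalog_is_served_without_login(self):
        # no_login_required should make the endpoint reachable anonymously.
        response = self.client.get("/catalog.js")
        self.assertEqual(response.status_code, 200)
        # The served script defines gettext()/ngettext() for frontend code.
        self.assertIn(b"gettext", response.content)
```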
gh_patches_debug_1560
rasdani/github-patches
git_diff
NVIDIA__TransformerEngine-813
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `warnings.simplefilter('default')` in global scope causes excessive DeprecationWarnings https://github.com/NVIDIA/TransformerEngine/blob/f85553ea369da15fd726ab279818e415be48a228/transformer_engine/common/utils.py#L9 Importing the `transformer_engine.common.utils` resets the warning filters to default settings using `warnings.simplefilter('default')` in the global scope. This results in the console being flooded with DeprecationWarnings, which are normally ignored by Python by default. Would it be possible to move setting the warning filter config to a more controlled scope in this module? </issue> <code> [start of transformer_engine/common/utils.py] 1 # Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 2 # 3 # See LICENSE for license information. 4 """The utilities for Transformer Engine""" 5 import inspect 6 import warnings 7 from enum import Enum 8 9 warnings.simplefilter('default') 10 11 12 class DeprecatedEnum: # pylint: disable=too-few-public-methods 13 """DeprecatedEnum""" 14 15 def __init__(self, enum_cls, msg): 16 self.enum_cls = enum_cls 17 self.msg = msg 18 19 def __iter__(self): 20 return iter(list(self.enum_cls.__members__.values())) 21 22 def __getattr__(self, name): 23 if name in self.enum_cls.__members__: 24 warnings.warn(self.msg, DeprecationWarning) 25 return self.enum_cls.__members__[name] 26 raise AttributeError(f"{self.enum_cls} does not contain {name}") 27 28 29 def deprecate_wrapper(obj, msg): 30 """Deprecate wrapper""" 31 if inspect.isclass(obj): 32 if issubclass(obj, Enum): 33 return DeprecatedEnum(obj, msg) 34 35 class DeprecatedCls(obj): # pylint: disable=too-few-public-methods 36 """DeprecatedCls""" 37 38 def __init__(self, *args, **kwargs): 39 warnings.warn(msg, DeprecationWarning) 40 super().__init__(*args, **kwargs) 41 42 return DeprecatedCls 43 44 if inspect.isfunction(obj): 45 46 def deprecated(*args, **kwargs): 47 warnings.warn(msg, DeprecationWarning) 48 return obj(*args, **kwargs) 49 50 return deprecated 51 52 raise NotImplementedError( 53 f"deprecate_cls_wrapper only support Class and Function, but got {type(obj)}.") 54 [end of transformer_engine/common/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/transformer_engine/common/utils.py b/transformer_engine/common/utils.py --- a/transformer_engine/common/utils.py +++ b/transformer_engine/common/utils.py @@ -6,7 +6,8 @@ import warnings from enum import Enum -warnings.simplefilter('default') +warnings.filterwarnings( + "module", category=DeprecationWarning, module="transformer_engine.common.utils") class DeprecatedEnum: # pylint: disable=too-few-public-methods
{"golden_diff": "diff --git a/transformer_engine/common/utils.py b/transformer_engine/common/utils.py\n--- a/transformer_engine/common/utils.py\n+++ b/transformer_engine/common/utils.py\n@@ -6,7 +6,8 @@\n import warnings\n from enum import Enum\n \n-warnings.simplefilter('default')\n+warnings.filterwarnings(\n+ \"module\", category=DeprecationWarning, module=\"transformer_engine.common.utils\")\n \n \n class DeprecatedEnum: # pylint: disable=too-few-public-methods\n", "issue": "`warnings.simplefilter('default')` in global scope causes excessive DeprecationWarnings\nhttps://github.com/NVIDIA/TransformerEngine/blob/f85553ea369da15fd726ab279818e415be48a228/transformer_engine/common/utils.py#L9\r\n\r\nImporting the `transformer_engine.common.utils` resets the warning filters to default settings using `warnings.simplefilter('default')` in the global scope. This results in the console being flooded with DeprecationWarnings, which are normally ignored by Python by default.\r\n\r\nWould it be possible to move setting the warning filter config to a more controlled scope in this module?\n", "before_files": [{"content": "# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# See LICENSE for license information.\n\"\"\"The utilities for Transformer Engine\"\"\"\nimport inspect\nimport warnings\nfrom enum import Enum\n\nwarnings.simplefilter('default')\n\n\nclass DeprecatedEnum: # pylint: disable=too-few-public-methods\n \"\"\"DeprecatedEnum\"\"\"\n\n def __init__(self, enum_cls, msg):\n self.enum_cls = enum_cls\n self.msg = msg\n\n def __iter__(self):\n return iter(list(self.enum_cls.__members__.values()))\n\n def __getattr__(self, name):\n if name in self.enum_cls.__members__:\n warnings.warn(self.msg, DeprecationWarning)\n return self.enum_cls.__members__[name]\n raise AttributeError(f\"{self.enum_cls} does not contain {name}\")\n\n\ndef deprecate_wrapper(obj, msg):\n \"\"\"Deprecate wrapper\"\"\"\n if inspect.isclass(obj):\n if issubclass(obj, Enum):\n return DeprecatedEnum(obj, msg)\n\n class DeprecatedCls(obj): # pylint: disable=too-few-public-methods\n \"\"\"DeprecatedCls\"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(msg, DeprecationWarning)\n super().__init__(*args, **kwargs)\n\n return DeprecatedCls\n\n if inspect.isfunction(obj):\n\n def deprecated(*args, **kwargs):\n warnings.warn(msg, DeprecationWarning)\n return obj(*args, **kwargs)\n\n return deprecated\n\n raise NotImplementedError(\n f\"deprecate_cls_wrapper only support Class and Function, but got {type(obj)}.\")\n", "path": "transformer_engine/common/utils.py"}]}
1,144
109
gh_patches_debug_76
rasdani/github-patches
git_diff
streamlit__streamlit-2570
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> URL markup does not get generated as a link # Summary URLs used to generate an anchor tag automatically in markup. Now it does not # Steps to reproduce Code snippet: ``` st.write(f""" As always, thank you to [all our contributors](https://github.com/streamlit/streamlit/graphs/contributors) who help make Streamlit awesome! --- ### Connect With Us - We can be found at https://streamlit.io and https://twitter.com/streamlit - Come by [the forums](https://discuss.streamlit.io/c/official-announcements/6) if you'd like to ask questions, post awesome apps, or just say hi! """) ``` ## Expected behavior: [0.73](https://share.streamlit.io/streamlit/release-demos/0.73/0.73/streamlit_app.py) ![image](https://user-images.githubusercontent.com/24946400/103850694-fb278900-5075-11eb-8052-1d8fa9a639a7.png) ## Actual behavior: [0.74](https://share.streamlit.io/streamlit/release-demos/0.74/0.74/streamlit_app.py) ![image](https://user-images.githubusercontent.com/24946400/103850623-b8fe4780-5075-11eb-9592-689366dcd06c.png) ## Is this a regression? Yes as of 0.74 </issue> <code> [start of lib/setup.py] 1 import os 2 import setuptools 3 import sys 4 5 from setuptools.command.install import install 6 7 try: 8 from pipenv.project import Project 9 from pipenv.utils import convert_deps_to_pip 10 except: 11 exit_msg = ( 12 "pipenv is required to package Streamlit. Please install pipenv and try again" 13 ) 14 sys.exit(exit_msg) 15 16 VERSION = "0.74.0" # PEP-440 17 18 NAME = "streamlit" 19 20 DESCRIPTION = "The fastest way to build data apps in Python" 21 22 LONG_DESCRIPTION = ( 23 "Streamlit's open-source app framework is the easiest way " 24 "for data scientists and machine learning engineers to " 25 "create beautiful, performant apps in only a few hours! " 26 "All in pure Python. All for free." 27 ) 28 29 pipfile = Project(chdir=False).parsed_pipfile 30 31 packages = pipfile["packages"].copy() 32 requirements = convert_deps_to_pip(packages, r=False) 33 34 35 class VerifyVersionCommand(install): 36 """Custom command to verify that the git tag matches our version""" 37 38 description = "verify that the git tag matches our version" 39 40 def run(self): 41 tag = os.getenv("CIRCLE_TAG") 42 43 if tag != VERSION: 44 info = "Git tag: {0} does not match the version of this app: {1}".format( 45 tag, VERSION 46 ) 47 sys.exit(info) 48 49 50 setuptools.setup( 51 name=NAME, 52 version=VERSION, 53 description=DESCRIPTION, 54 long_description=LONG_DESCRIPTION, 55 url="https://streamlit.io", 56 author="Streamlit Inc", 57 author_email="[email protected]", 58 python_requires=">=3.6", 59 license="Apache 2", 60 packages=setuptools.find_packages(exclude=["tests", "tests.*"]), 61 # Requirements 62 install_requires=requirements, 63 zip_safe=False, # install source files not egg 64 include_package_data=True, # copy html and friends 65 entry_points={"console_scripts": ["streamlit = streamlit.cli:main"]}, 66 # For Windows so that streamlit * commands work ie. 67 # - streamlit version 68 # - streamlit hello 69 scripts=["bin/streamlit.cmd"], 70 cmdclass={ 71 "verify": VerifyVersionCommand, 72 }, 73 ) 74 [end of lib/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/setup.py b/lib/setup.py --- a/lib/setup.py +++ b/lib/setup.py @@ -13,7 +13,7 @@ ) sys.exit(exit_msg) -VERSION = "0.74.0" # PEP-440 +VERSION = "0.74.1" # PEP-440 NAME = "streamlit"
{"golden_diff": "diff --git a/lib/setup.py b/lib/setup.py\n--- a/lib/setup.py\n+++ b/lib/setup.py\n@@ -13,7 +13,7 @@\n )\n sys.exit(exit_msg)\n \n-VERSION = \"0.74.0\" # PEP-440\n+VERSION = \"0.74.1\" # PEP-440\n \n NAME = \"streamlit\"\n", "issue": "URL markup does not get generated as a link\n# Summary\r\nURLs used to generate an anchor tag automatically in markup. Now it does not\r\n\r\n\r\n# Steps to reproduce\r\nCode snippet:\r\n\r\n```\r\nst.write(f\"\"\"\r\n As always, thank you to [all our contributors](https://github.com/streamlit/streamlit/graphs/contributors) who help make Streamlit awesome!\r\n\r\n ---\r\n\r\n ### Connect With Us\r\n\r\n - We can be found at https://streamlit.io and https://twitter.com/streamlit\r\n - Come by\r\n [the forums](https://discuss.streamlit.io/c/official-announcements/6) if you'd like to ask questions,\r\n post awesome apps, or just say hi!\r\n \"\"\")\r\n```\r\n\r\n## Expected behavior:\r\n[0.73](https://share.streamlit.io/streamlit/release-demos/0.73/0.73/streamlit_app.py)\r\n![image](https://user-images.githubusercontent.com/24946400/103850694-fb278900-5075-11eb-8052-1d8fa9a639a7.png)\r\n\r\n\r\n## Actual behavior:\r\n[0.74](https://share.streamlit.io/streamlit/release-demos/0.74/0.74/streamlit_app.py)\r\n![image](https://user-images.githubusercontent.com/24946400/103850623-b8fe4780-5075-11eb-9592-689366dcd06c.png)\r\n\r\n\r\n## Is this a regression?\r\nYes as of 0.74\r\n\n", "before_files": [{"content": "import os\nimport setuptools\nimport sys\n\nfrom setuptools.command.install import install\n\ntry:\n from pipenv.project import Project\n from pipenv.utils import convert_deps_to_pip\nexcept:\n exit_msg = (\n \"pipenv is required to package Streamlit. Please install pipenv and try again\"\n )\n sys.exit(exit_msg)\n\nVERSION = \"0.74.0\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. All for free.\"\n)\n\npipfile = Project(chdir=False).parsed_pipfile\n\npackages = pipfile[\"packages\"].copy()\nrequirements = convert_deps_to_pip(packages, r=False)\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n license=\"Apache 2\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=requirements,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}]}
1,535
92
gh_patches_debug_37257
rasdani/github-patches
git_diff
svthalia__concrexit-3722
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Lock admin panel behind 2FA ### What? <!-- A clear and concise high-level description of what you want to happen. --> lock the admin panel behind the 2FA functionality ### Why? <!-- A clear and concise motivation why we should consider implementing this. --> Admin panel has sensitive data so it should be protected. So requiring 2FA makes sense. ### How? <!-- Optionally some guidance, ideas, context. --> Probably nice to have a decorator to be able to lock other things of the site behind 2FA in the future. </issue> <code> [start of website/thaliawebsite/admin.py] 1 """Settings for the admin site.""" 2 from django.contrib import admin 3 from django.utils.translation import gettext_lazy as _ 4 5 admin.site.site_header = _("Thalia administration") 6 admin.site.site_title = _("Thalia") 7 [end of website/thaliawebsite/admin.py] [start of website/thaliawebsite/views.py] 1 """General views for the website.""" 2 3 from django.contrib.admin.views.decorators import staff_member_required 4 from django.contrib.auth.views import LogoutView as BaseLogoutView 5 from django.contrib.auth.views import PasswordResetView 6 from django.core.exceptions import PermissionDenied 7 from django.http import HttpResponse, HttpResponseForbidden 8 from django.shortcuts import redirect 9 from django.utils.decorators import method_decorator 10 from django.views.generic import ListView, TemplateView 11 from django.views.generic.base import View 12 13 from django_ratelimit.decorators import ratelimit 14 from two_factor.views import LoginView 15 16 17 class IndexView(TemplateView): 18 template_name = "index.html" 19 20 21 @method_decorator(staff_member_required, "dispatch") 22 class TestCrashView(View): 23 """Test view to intentionally crash to test the error handling.""" 24 25 def dispatch(self, request, *args, **kwargs) -> HttpResponse: 26 if not request.user.is_superuser: 27 return HttpResponseForbidden("This is not for you") 28 raise Exception("Test exception") 29 30 31 class PagedView(ListView): 32 """A ListView with automatic pagination.""" 33 34 def get_context_data(self, **kwargs) -> dict: 35 context = super().get_context_data(**kwargs) 36 page = context["page_obj"].number 37 paginator = context["paginator"] 38 39 # Show the two pages before and after the current page 40 page_range_start = max(1, page - 2) 41 page_range_stop = min(page + 3, paginator.num_pages + 1) 42 43 # Add extra pages if we show less than 5 pages 44 page_range_start = min(page_range_start, page_range_stop - 5) 45 page_range_start = max(1, page_range_start) 46 47 # Add extra pages if we still show less than 5 pages 48 page_range_stop = max(page_range_stop, page_range_start + 5) 49 page_range_stop = min(page_range_stop, paginator.num_pages + 1) 50 51 page_range = range(page_range_start, page_range_stop) 52 53 querydict = self.request.GET.copy() 54 55 if "page" in querydict: 56 del querydict["page"] 57 58 context.update( 59 { 60 "page_range": page_range, 61 "base_url": f"{self.request.path}?{querydict.urlencode()}&" 62 if querydict 63 else f"{self.request.path}?", 64 } 65 ) 66 67 return context 68 69 70 class RateLimitedPasswordResetView(PasswordResetView): 71 @method_decorator(ratelimit(key="ip", rate="5/h")) 72 def post(self, request, *args, **kwargs): 73 return super().post(request, *args, **kwargs) 74 75 76 class RateLimitedLoginView(LoginView): 77 @method_decorator(ratelimit(key="ip", rate="30/h")) 78 @method_decorator(ratelimit(key="post:username", rate="30/h")) 79 def post(self, request, *args, **kwargs): 80 return super().post(request, *args, **kwargs) 81 82 83 class LogoutView(BaseLogoutView): 84 # Allow GET logout still (this was deprecated in Django 5.0). 85 http_method_names = ["get", "post", "options"] 86 87 def get(self, request, *args, **kwargs): 88 return self.post(request, *args, **kwargs) 89 90 91 def rate_limited_view(request, *args, **kwargs): 92 return HttpResponse("You are rate limited", status=429) 93 94 95 def admin_unauthorized_view(request): 96 if not request.member: 97 url = "/user/account/login" 98 args = request.META.get("QUERY_STRING", "") 99 if args: 100 url = f"{url}?{args}" 101 return redirect(url) 102 elif not request.member.is_staff and not request.member.is_superuser: 103 raise PermissionDenied("You are not allowed to access the administration page.") 104 else: 105 return redirect(request.GET.get("next", "/")) 106 [end of website/thaliawebsite/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/thaliawebsite/admin.py b/website/thaliawebsite/admin.py --- a/website/thaliawebsite/admin.py +++ b/website/thaliawebsite/admin.py @@ -1,6 +1,17 @@ """Settings for the admin site.""" + +from django.conf import settings from django.contrib import admin from django.utils.translation import gettext_lazy as _ -admin.site.site_header = _("Thalia administration") -admin.site.site_title = _("Thalia") +from django_otp import user_has_device + + +class ThaliaAdminSite(admin.AdminSite): + site_header = _("Thalia administration") + site_title = _("Thalia") + + def has_permission(self, request): + return super().has_permission(request) and ( + settings.DEBUG or user_has_device(request.user) + ) diff --git a/website/thaliawebsite/views.py b/website/thaliawebsite/views.py --- a/website/thaliawebsite/views.py +++ b/website/thaliawebsite/views.py @@ -1,5 +1,6 @@ """General views for the website.""" +from django.contrib import messages from django.contrib.admin.views.decorators import staff_member_required from django.contrib.auth.views import LogoutView as BaseLogoutView from django.contrib.auth.views import PasswordResetView @@ -10,6 +11,7 @@ from django.views.generic import ListView, TemplateView from django.views.generic.base import View +from django_otp import user_has_device from django_ratelimit.decorators import ratelimit from two_factor.views import LoginView @@ -58,9 +60,11 @@ context.update( { "page_range": page_range, - "base_url": f"{self.request.path}?{querydict.urlencode()}&" - if querydict - else f"{self.request.path}?", + "base_url": ( + f"{self.request.path}?{querydict.urlencode()}&" + if querydict + else f"{self.request.path}?" + ), } ) @@ -101,5 +105,11 @@ return redirect(url) elif not request.member.is_staff and not request.member.is_superuser: raise PermissionDenied("You are not allowed to access the administration page.") + elif not user_has_device(request.member): + messages.error( + request, + "You need to set up two-factor authentication to access the administration page.", + ) + return redirect("two_factor:setup") else: return redirect(request.GET.get("next", "/"))
{"golden_diff": "diff --git a/website/thaliawebsite/admin.py b/website/thaliawebsite/admin.py\n--- a/website/thaliawebsite/admin.py\n+++ b/website/thaliawebsite/admin.py\n@@ -1,6 +1,17 @@\n \"\"\"Settings for the admin site.\"\"\"\n+\n+from django.conf import settings\n from django.contrib import admin\n from django.utils.translation import gettext_lazy as _\n \n-admin.site.site_header = _(\"Thalia administration\")\n-admin.site.site_title = _(\"Thalia\")\n+from django_otp import user_has_device\n+\n+\n+class ThaliaAdminSite(admin.AdminSite):\n+ site_header = _(\"Thalia administration\")\n+ site_title = _(\"Thalia\")\n+\n+ def has_permission(self, request):\n+ return super().has_permission(request) and (\n+ settings.DEBUG or user_has_device(request.user)\n+ )\ndiff --git a/website/thaliawebsite/views.py b/website/thaliawebsite/views.py\n--- a/website/thaliawebsite/views.py\n+++ b/website/thaliawebsite/views.py\n@@ -1,5 +1,6 @@\n \"\"\"General views for the website.\"\"\"\n \n+from django.contrib import messages\n from django.contrib.admin.views.decorators import staff_member_required\n from django.contrib.auth.views import LogoutView as BaseLogoutView\n from django.contrib.auth.views import PasswordResetView\n@@ -10,6 +11,7 @@\n from django.views.generic import ListView, TemplateView\n from django.views.generic.base import View\n \n+from django_otp import user_has_device\n from django_ratelimit.decorators import ratelimit\n from two_factor.views import LoginView\n \n@@ -58,9 +60,11 @@\n context.update(\n {\n \"page_range\": page_range,\n- \"base_url\": f\"{self.request.path}?{querydict.urlencode()}&\"\n- if querydict\n- else f\"{self.request.path}?\",\n+ \"base_url\": (\n+ f\"{self.request.path}?{querydict.urlencode()}&\"\n+ if querydict\n+ else f\"{self.request.path}?\"\n+ ),\n }\n )\n \n@@ -101,5 +105,11 @@\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n+ elif not user_has_device(request.member):\n+ messages.error(\n+ request,\n+ \"You need to set up two-factor authentication to access the administration page.\",\n+ )\n+ return redirect(\"two_factor:setup\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "issue": "Lock admin panel behind 2FA\n### What?\r\n<!-- A clear and concise high-level description of what you want to happen. -->\r\nlock the admin panel behind the 2FA functionality\r\n\r\n### Why?\r\n<!-- A clear and concise motivation why we should consider implementing this. -->\r\nAdmin panel has sensitive data so it should be protected. So requiring 2FA makes sense.\r\n\r\n### How?\r\n<!-- Optionally some guidance, ideas, context. 
-->\r\nProbably nice to have a decorator to be able to lock other things of the site behind 2FA in the future.\r\n\r\n\n", "before_files": [{"content": "\"\"\"Settings for the admin site.\"\"\"\nfrom django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\n\nadmin.site.site_header = _(\"Thalia administration\")\nadmin.site.site_title = _(\"Thalia\")\n", "path": "website/thaliawebsite/admin.py"}, {"content": "\"\"\"General views for the website.\"\"\"\n\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.views import LogoutView as BaseLogoutView\nfrom django.contrib.auth.views import PasswordResetView\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, TemplateView\nfrom django.views.generic.base import View\n\nfrom django_ratelimit.decorators import ratelimit\nfrom two_factor.views import LoginView\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n\n@method_decorator(staff_member_required, \"dispatch\")\nclass TestCrashView(View):\n \"\"\"Test view to intentionally crash to test the error handling.\"\"\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if not request.user.is_superuser:\n return HttpResponseForbidden(\"This is not for you\")\n raise Exception(\"Test exception\")\n\n\nclass PagedView(ListView):\n \"\"\"A ListView with automatic pagination.\"\"\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n\n # Show the two pages before and after the current page\n page_range_start = max(1, page - 2)\n page_range_stop = min(page + 3, paginator.num_pages + 1)\n\n # Add extra pages if we show less than 5 pages\n page_range_start = min(page_range_start, page_range_stop - 5)\n page_range_start = max(1, page_range_start)\n\n # Add extra pages if we still show less than 5 pages\n page_range_stop = max(page_range_stop, page_range_start + 5)\n page_range_stop = min(page_range_stop, paginator.num_pages + 1)\n\n page_range = range(page_range_start, page_range_stop)\n\n querydict = self.request.GET.copy()\n\n if \"page\" in querydict:\n del querydict[\"page\"]\n\n context.update(\n {\n \"page_range\": page_range,\n \"base_url\": f\"{self.request.path}?{querydict.urlencode()}&\"\n if querydict\n else f\"{self.request.path}?\",\n }\n )\n\n return context\n\n\nclass RateLimitedPasswordResetView(PasswordResetView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"5/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass RateLimitedLoginView(LoginView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"30/h\"))\n @method_decorator(ratelimit(key=\"post:username\", rate=\"30/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass LogoutView(BaseLogoutView):\n # Allow GET logout still (this was deprecated in Django 5.0).\n http_method_names = [\"get\", \"post\", \"options\"]\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\ndef rate_limited_view(request, *args, **kwargs):\n return HttpResponse(\"You are rate limited\", status=429)\n\n\ndef admin_unauthorized_view(request):\n if not request.member:\n url = 
\"/user/account/login\"\n args = request.META.get(\"QUERY_STRING\", \"\")\n if args:\n url = f\"{url}?{args}\"\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "path": "website/thaliawebsite/views.py"}]}
1,755
566
gh_patches_debug_170
rasdani/github-patches
git_diff
pydantic__pydantic-4418
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> V1.10 release To do/decide: * [x] #2557 - **merged** * [x] #2745 - needs some tweaks, but we need to decide if it's a good idea before V2 * [x] #2190 - **deferred** * [x] cherry pick stuff from v1.9 branch, maybe just history #4350 * [x] #3346 * [x] #3593 - **deferred** * [x] #3946 * [x] #4028 - **API will change in v2** * [x] #4354 * [x] #4216 * [x] #4191 * [x] #3941 - revert or fix * [x] #4339 * [x] #4356 </issue> <code> [start of pydantic/version.py] 1 __all__ = 'compiled', 'VERSION', 'version_info' 2 3 VERSION = '1.9.2' 4 5 try: 6 import cython # type: ignore 7 except ImportError: 8 compiled: bool = False 9 else: # pragma: no cover 10 try: 11 compiled = cython.compiled 12 except AttributeError: 13 compiled = False 14 15 16 def version_info() -> str: 17 import platform 18 import sys 19 from importlib import import_module 20 from pathlib import Path 21 22 optional_deps = [] 23 for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'): 24 try: 25 import_module(p.replace('-', '_')) 26 except ImportError: 27 continue 28 optional_deps.append(p) 29 30 info = { 31 'pydantic version': VERSION, 32 'pydantic compiled': compiled, 33 'install path': Path(__file__).resolve().parent, 34 'python version': sys.version, 35 'platform': platform.platform(), 36 'optional deps. installed': optional_deps, 37 } 38 return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items()) 39 [end of pydantic/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pydantic/version.py b/pydantic/version.py --- a/pydantic/version.py +++ b/pydantic/version.py @@ -1,6 +1,6 @@ __all__ = 'compiled', 'VERSION', 'version_info' -VERSION = '1.9.2' +VERSION = '1.10.0a1' try: import cython # type: ignore
{"golden_diff": "diff --git a/pydantic/version.py b/pydantic/version.py\n--- a/pydantic/version.py\n+++ b/pydantic/version.py\n@@ -1,6 +1,6 @@\n __all__ = 'compiled', 'VERSION', 'version_info'\n \n-VERSION = '1.9.2'\n+VERSION = '1.10.0a1'\n \n try:\n import cython # type: ignore\n", "issue": "V1.10 release\nTo do/decide:\r\n* [x] #2557 - **merged**\r\n* [x] #2745 - needs some tweaks, but we need to decide if it's a good idea before V2\r\n* [x] #2190 - **deferred**\r\n* [x] cherry pick stuff from v1.9 branch, maybe just history #4350\r\n* [x] #3346\r\n* [x] #3593 - **deferred**\r\n* [x] #3946\r\n* [x] #4028 - **API will change in v2**\r\n* [x] #4354\r\n* [x] #4216\r\n* [x] #4191\r\n* [x] #3941 - revert or fix\r\n* [x] #4339\r\n* [x] #4356\n", "before_files": [{"content": "__all__ = 'compiled', 'VERSION', 'version_info'\n\nVERSION = '1.9.2'\n\ntry:\n import cython # type: ignore\nexcept ImportError:\n compiled: bool = False\nelse: # pragma: no cover\n try:\n compiled = cython.compiled\n except AttributeError:\n compiled = False\n\n\ndef version_info() -> str:\n import platform\n import sys\n from importlib import import_module\n from pathlib import Path\n\n optional_deps = []\n for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'):\n try:\n import_module(p.replace('-', '_'))\n except ImportError:\n continue\n optional_deps.append(p)\n\n info = {\n 'pydantic version': VERSION,\n 'pydantic compiled': compiled,\n 'install path': Path(__file__).resolve().parent,\n 'python version': sys.version,\n 'platform': platform.platform(),\n 'optional deps. installed': optional_deps,\n }\n return '\\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\\n', ' ')) for k, v in info.items())\n", "path": "pydantic/version.py"}]}
1,069
94
gh_patches_debug_554
rasdani/github-patches
git_diff
scikit-image__scikit-image-353
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Please add info how to run the skimage unit tests at the end of the installation instructions I couldn't find instructions how to run the skimage unit tests. First I tried ``` python -c 'import skimage; skimage.test() ``` which ran 287 tests and gave 16 errors, all the same: ``` ImportError: cannot import name BytesIO ``` Then I tried ``` nosetests --exe skimage ``` which ran 490 tests, no error. Full output is here: https://gist.github.com/3832077 Apparently it is important to not use `skimage.test()`, but `nosetests` instead? Could you please add this info somewhere, the first place I would have looked is at the end of http://skimage.org/docs/dev/install.html ( or make "nosetests" or "run tests" in the sphinx search find the appropriate command to run). Thanks! </issue> <code> [start of skimage/__init__.py] 1 """Image Processing SciKit (Toolbox for SciPy) 2 3 ``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image 4 processing and computer vision. 5 6 The main package of ``skimage`` only provides a few utilities for converting 7 between image data types; for most features, you need to import one of the 8 following subpackages: 9 10 Subpackages 11 ----------- 12 color 13 Color space conversion. 14 data 15 Test images and example data. 16 draw 17 Image drawing primitives (lines, text, etc.). 18 exposure 19 Image intensity adjustment (e.g., histogram equalization). 20 feature 21 Feature detection (e.g. texture analysis, corners, etc.). 22 filter 23 Sharpening, edge finding, denoising, etc. 24 graph 25 Graph-theoretic operations, e.g. dynamic programming (shortest paths). 26 io 27 Reading, saving, and displaying images and video. 28 measure 29 Measurement of image properties, e.g., similarity and contours. 30 morphology 31 Morphological operations, e.g. opening or skeletonization. 32 segmentation 33 Splitting an image into self-similar regions. 34 transform 35 Geometric and other transforms, e.g. rotation or the Radon transform. 36 util 37 Generic utilities. 38 39 Utility Functions 40 ----------------- 41 get_log 42 Returns the ``skimage`` log. Use this to print debug output. 43 img_as_float 44 Convert an image to floating point format, with values in [0, 1]. 45 img_as_uint 46 Convert an image to unsigned integer format, with values in [0, 65535]. 47 img_as_int 48 Convert an image to signed integer format, with values in [-32768, 32767]. 49 img_as_ubyte 50 Convert an image to unsigned byte format, with values in [0, 255]. 51 52 """ 53 54 import os.path as _osp 55 56 pkg_dir = _osp.abspath(_osp.dirname(__file__)) 57 data_dir = _osp.join(pkg_dir, 'data') 58 59 try: 60 from .version import version as __version__ 61 except ImportError: 62 __version__ = "unbuilt-dev" 63 64 65 def _setup_test(verbose=False): 66 import functools 67 68 args = ['', '--exe', '-w', pkg_dir] 69 if verbose: 70 args.extend(['-v', '-s']) 71 72 try: 73 import nose as _nose 74 except ImportError: 75 def broken_test_func(): 76 """This would invoke the skimage test suite, but nose couldn't be 77 imported so the test suite can not run. 78 """ 79 raise ImportError("Could not load nose. Unit tests not available.") 80 return broken_test_func 81 else: 82 f = functools.partial(_nose.run, 'skimage', argv=args) 83 f.__doc__ = 'Invoke the skimage test suite.' 84 return f 85 86 87 test = _setup_test() 88 test_verbose = _setup_test(verbose=True) 89 90 91 def get_log(name=None): 92 """Return a console logger. 93 94 Output may be sent to the logger using the `debug`, `info`, `warning`, 95 `error` and `critical` methods. 96 97 Parameters 98 ---------- 99 name : str 100 Name of the log. 101 102 References 103 ---------- 104 .. [1] Logging facility for Python, 105 http://docs.python.org/library/logging.html 106 107 """ 108 import logging 109 110 if name is None: 111 name = 'skimage' 112 else: 113 name = 'skimage.' + name 114 115 log = logging.getLogger(name) 116 return log 117 118 119 def _setup_log(): 120 """Configure root logger. 121 122 """ 123 import logging 124 import sys 125 126 formatter = logging.Formatter( 127 '%(name)s: %(levelname)s: %(message)s' 128 ) 129 130 try: 131 handler = logging.StreamHandler(stream=sys.stdout) 132 except TypeError: 133 handler = logging.StreamHandler(strm=sys.stdout) 134 handler.setFormatter(formatter) 135 136 log = get_log() 137 log.addHandler(handler) 138 log.setLevel(logging.WARNING) 139 log.propagate = False 140 141 _setup_log() 142 143 from .util.dtype import * 144 [end of skimage/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/__init__.py b/skimage/__init__.py --- a/skimage/__init__.py +++ b/skimage/__init__.py @@ -65,7 +65,7 @@ def _setup_test(verbose=False): import functools - args = ['', '--exe', '-w', pkg_dir] + args = ['', pkg_dir, '--exe'] if verbose: args.extend(['-v', '-s'])
{"golden_diff": "diff --git a/skimage/__init__.py b/skimage/__init__.py\n--- a/skimage/__init__.py\n+++ b/skimage/__init__.py\n@@ -65,7 +65,7 @@\n def _setup_test(verbose=False):\n import functools\n \n- args = ['', '--exe', '-w', pkg_dir]\n+ args = ['', pkg_dir, '--exe']\n if verbose:\n args.extend(['-v', '-s'])\n", "issue": "Please add info how to run the skimage unit tests at the end of the installation instructions\nI couldn't find instructions how to run the skimage unit tests.\n\nFirst I tried\n\n```\npython -c 'import skimage; skimage.test()\n```\n\nwhich ran 287 tests and gave 16 errors, all the same:\n\n```\nImportError: cannot import name BytesIO\n```\n\nThen I tried\n\n```\nnosetests --exe skimage\n```\n\nwhich ran 490 tests, no error.\n\nFull output is here: https://gist.github.com/3832077\n\nApparently it is important to not use `skimage.test()`, but `nosetests` instead?\nCould you please add this info somewhere, the first place I would have looked is at the end of http://skimage.org/docs/dev/install.html ( or make \"nosetests\" or \"run tests\" in the sphinx search find the appropriate command to run).\n\nThanks!\n\n", "before_files": [{"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Image drawing primitives (lines, text, etc.).\nexposure\n Image intensity adjustment (e.g., histogram equalization).\nfeature\n Feature detection (e.g. texture analysis, corners, etc.).\nfilter\n Sharpening, edge finding, denoising, etc.\ngraph\n Graph-theoretic operations, e.g. dynamic programming (shortest paths).\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g. opening or skeletonization.\nsegmentation\n Splitting an image into self-similar regions.\ntransform\n Geometric and other transforms, e.g. rotation or the Radon transform.\nutil\n Generic utilities.\n\nUtility Functions\n-----------------\nget_log\n Returns the ``skimage`` log. Use this to print debug output.\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\n\n\ndef _setup_test(verbose=False):\n import functools\n\n args = ['', '--exe', '-w', pkg_dir]\n if verbose:\n args.extend(['-v', '-s'])\n\n try:\n import nose as _nose\n except ImportError:\n def broken_test_func():\n \"\"\"This would invoke the skimage test suite, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Unit tests not available.\")\n return broken_test_func\n else:\n f = functools.partial(_nose.run, 'skimage', argv=args)\n f.__doc__ = 'Invoke the skimage test suite.'\n return f\n\n\ntest = _setup_test()\ntest_verbose = _setup_test(verbose=True)\n\n\ndef get_log(name=None):\n \"\"\"Return a console logger.\n\n Output may be sent to the logger using the `debug`, `info`, `warning`,\n `error` and `critical` methods.\n\n Parameters\n ----------\n name : str\n Name of the log.\n\n References\n ----------\n .. [1] Logging facility for Python,\n http://docs.python.org/library/logging.html\n\n \"\"\"\n import logging\n\n if name is None:\n name = 'skimage'\n else:\n name = 'skimage.' + name\n\n log = logging.getLogger(name)\n return log\n\n\ndef _setup_log():\n \"\"\"Configure root logger.\n\n \"\"\"\n import logging\n import sys\n\n formatter = logging.Formatter(\n '%(name)s: %(levelname)s: %(message)s'\n )\n\n try:\n handler = logging.StreamHandler(stream=sys.stdout)\n except TypeError:\n handler = logging.StreamHandler(strm=sys.stdout)\n handler.setFormatter(formatter)\n\n log = get_log()\n log.addHandler(handler)\n log.setLevel(logging.WARNING)\n log.propagate = False\n\n_setup_log()\n\nfrom .util.dtype import *\n", "path": "skimage/__init__.py"}]}
1,934
103
gh_patches_debug_36057
rasdani/github-patches
git_diff
PennyLaneAI__pennylane-1581
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Change order of technical description and list of functions in documentation Three modules, [`kernels`](https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html), [`grouping`](https://pennylane.readthedocs.io/en/latest/code/qml_grouping.html), and [`qaoa`](https://pennylane.readthedocs.io/en/latest/code/qml_qaoa.html) have their module documentation ordered such that there is first a lengthy description of the theory, and the actual list of functions comes after. We should update the docs of these modules so that the functions appear *first*, and the technical details come afterwards (as was recently discussed in #1160). This will improve readability of the documentation and make it easier to find the details of a desired function. </issue> <code> [start of pennylane/qaoa/__init__.py] 1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 r""" 15 This module contains functionality to construct QAOA workflows in PennyLane. 16 """ 17 18 from .mixers import * 19 from .cost import * 20 from .layers import * 21 import pennylane.qaoa.cycle 22 [end of pennylane/qaoa/__init__.py] [start of pennylane/kernels/__init__.py] 1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 r""" 15 This subpackage defines functions that relate to quantum kernel methods. 16 On one hand this includes functions to call a quantum kernel systematically 17 on training and test datasets to obtain the *kernel matrix*. 18 On the other hand it provides postprocessing methods for those kernel 19 matrices which can be used to mitigate device noise and sampling errors. 20 21 Given a kernel 22 23 .. math :: 24 25 k: \mathbb{R}^d \times \mathbb{R}^d \to \mathbb{R}, \quad 26 (x_1, x_2)\mapsto k(x_1, x_2) 27 28 the kernel matrix of :math:`k` on a training dataset 29 :math:`\{(x_1, y_1),\cdots (x_n, y_n)\}` with :math:`x_i\in\mathbb{R}^d` 30 and :math:`y_i\in\{-1, 1\}` is defined as 31 32 .. math :: 33 34 K_{ij} = k(x_i, x_j). 35 36 For valid kernels, this is a real symmetric positive semi-definite matrix. 37 We also define the *ideal kernel matrix* for the training dataset which 38 perfectly predicts whether two points have identical labels or not: 39 40 .. math :: 41 42 K^\ast_{ij} = y_i y_j 43 44 We can measure the similarity between :math:`K` and :math:`K^\ast`, 45 through the *kernel polarity* which can be expressed as the Frobenius inner 46 product between the two matrices: 47 48 .. math :: 49 50 \operatorname{P}(k) = \langle K^\ast, K \rangle_F = \sum_{i,j=1}^n y_i y_j k(x_i, x_j) 51 52 Additionally, there is the *kernel-target alignment*, which is the normalized 53 counterpart to the kernel polarity: 54 55 .. math :: 56 57 \operatorname{TA}(k) &= \frac{P(k)}{\lVert K^\ast \rVert_F\;\lVert K \rVert_F}\\ 58 \lVert K\rVert_F &= \sqrt{\sum_{i,j=1}^n k(x_i, x_j)^2}\\ 59 \lVert K^\ast\rVert_F &= \sqrt{\sum_{i,j=1}^n (y_iy_j)^2} 60 61 For datasets with different numbers of training points per class the labels are rescaled 62 by the number of datapoints in the respective class to avoid that kernel polarity and 63 kernel-target alignment are dominated by the properties of the kernel for just a single class. 64 65 Given a callable kernel function, all these quantities can readily be computed 66 using the methods in this module. 67 """ 68 from .cost_functions import ( 69 polarity, 70 target_alignment, 71 ) 72 from .postprocessing import ( 73 threshold_matrix, 74 displace_matrix, 75 flip_matrix, 76 closest_psd_matrix, 77 mitigate_depolarizing_noise, 78 ) 79 from .utils import ( 80 kernel_matrix, 81 square_kernel_matrix, 82 ) 83 [end of pennylane/kernels/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pennylane/kernels/__init__.py b/pennylane/kernels/__init__.py --- a/pennylane/kernels/__init__.py +++ b/pennylane/kernels/__init__.py @@ -13,58 +13,8 @@ # limitations under the License. r""" This subpackage defines functions that relate to quantum kernel methods. -On one hand this includes functions to call a quantum kernel systematically -on training and test datasets to obtain the *kernel matrix*. -On the other hand it provides postprocessing methods for those kernel -matrices which can be used to mitigate device noise and sampling errors. - -Given a kernel - -.. math :: - - k: \mathbb{R}^d \times \mathbb{R}^d \to \mathbb{R}, \quad - (x_1, x_2)\mapsto k(x_1, x_2) - -the kernel matrix of :math:`k` on a training dataset -:math:`\{(x_1, y_1),\cdots (x_n, y_n)\}` with :math:`x_i\in\mathbb{R}^d` -and :math:`y_i\in\{-1, 1\}` is defined as - -.. math :: - - K_{ij} = k(x_i, x_j). - -For valid kernels, this is a real symmetric positive semi-definite matrix. -We also define the *ideal kernel matrix* for the training dataset which -perfectly predicts whether two points have identical labels or not: - -.. math :: - - K^\ast_{ij} = y_i y_j - -We can measure the similarity between :math:`K` and :math:`K^\ast`, -through the *kernel polarity* which can be expressed as the Frobenius inner -product between the two matrices: - -.. math :: - - \operatorname{P}(k) = \langle K^\ast, K \rangle_F = \sum_{i,j=1}^n y_i y_j k(x_i, x_j) - -Additionally, there is the *kernel-target alignment*, which is the normalized -counterpart to the kernel polarity: - -.. math :: - - \operatorname{TA}(k) &= \frac{P(k)}{\lVert K^\ast \rVert_F\;\lVert K \rVert_F}\\ - \lVert K\rVert_F &= \sqrt{\sum_{i,j=1}^n k(x_i, x_j)^2}\\ - \lVert K^\ast\rVert_F &= \sqrt{\sum_{i,j=1}^n (y_iy_j)^2} - -For datasets with different numbers of training points per class the labels are rescaled -by the number of datapoints in the respective class to avoid that kernel polarity and -kernel-target alignment are dominated by the properties of the kernel for just a single class. - -Given a callable kernel function, all these quantities can readily be computed -using the methods in this module. """ + from .cost_functions import ( polarity, target_alignment, diff --git a/pennylane/qaoa/__init__.py b/pennylane/qaoa/__init__.py --- a/pennylane/qaoa/__init__.py +++ b/pennylane/qaoa/__init__.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. r""" -This module contains functionality to construct QAOA workflows in PennyLane. +This module provides a collection of methods that help in the construction of +QAOA workflows. """ +import pennylane.qaoa.cycle from .mixers import * from .cost import * from .layers import * -import pennylane.qaoa.cycle
{"golden_diff": "diff --git a/pennylane/kernels/__init__.py b/pennylane/kernels/__init__.py\n--- a/pennylane/kernels/__init__.py\n+++ b/pennylane/kernels/__init__.py\n@@ -13,58 +13,8 @@\n # limitations under the License.\n r\"\"\"\n This subpackage defines functions that relate to quantum kernel methods.\n-On one hand this includes functions to call a quantum kernel systematically\n-on training and test datasets to obtain the *kernel matrix*.\n-On the other hand it provides postprocessing methods for those kernel\n-matrices which can be used to mitigate device noise and sampling errors.\n-\n-Given a kernel\n-\n-.. math ::\n-\n- k: \\mathbb{R}^d \\times \\mathbb{R}^d \\to \\mathbb{R}, \\quad\n- (x_1, x_2)\\mapsto k(x_1, x_2)\n-\n-the kernel matrix of :math:`k` on a training dataset\n-:math:`\\{(x_1, y_1),\\cdots (x_n, y_n)\\}` with :math:`x_i\\in\\mathbb{R}^d`\n-and :math:`y_i\\in\\{-1, 1\\}` is defined as\n-\n-.. math ::\n-\n- K_{ij} = k(x_i, x_j).\n-\n-For valid kernels, this is a real symmetric positive semi-definite matrix.\n-We also define the *ideal kernel matrix* for the training dataset which\n-perfectly predicts whether two points have identical labels or not:\n-\n-.. math ::\n-\n- K^\\ast_{ij} = y_i y_j\n-\n-We can measure the similarity between :math:`K` and :math:`K^\\ast`,\n-through the *kernel polarity* which can be expressed as the Frobenius inner\n-product between the two matrices:\n-\n-.. math ::\n-\n- \\operatorname{P}(k) = \\langle K^\\ast, K \\rangle_F = \\sum_{i,j=1}^n y_i y_j k(x_i, x_j)\n-\n-Additionally, there is the *kernel-target alignment*, which is the normalized\n-counterpart to the kernel polarity:\n-\n-.. math ::\n-\n- \\operatorname{TA}(k) &= \\frac{P(k)}{\\lVert K^\\ast \\rVert_F\\;\\lVert K \\rVert_F}\\\\\n- \\lVert K\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n k(x_i, x_j)^2}\\\\\n- \\lVert K^\\ast\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n (y_iy_j)^2}\n-\n-For datasets with different numbers of training points per class the labels are rescaled\n-by the number of datapoints in the respective class to avoid that kernel polarity and\n-kernel-target alignment are dominated by the properties of the kernel for just a single class.\n-\n-Given a callable kernel function, all these quantities can readily be computed\n-using the methods in this module.\n \"\"\"\n+\n from .cost_functions import (\n polarity,\n target_alignment,\ndiff --git a/pennylane/qaoa/__init__.py b/pennylane/qaoa/__init__.py\n--- a/pennylane/qaoa/__init__.py\n+++ b/pennylane/qaoa/__init__.py\n@@ -12,10 +12,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n r\"\"\"\n-This module contains functionality to construct QAOA workflows in PennyLane.\n+This module provides a collection of methods that help in the construction of\n+QAOA workflows.\n \"\"\"\n \n+import pennylane.qaoa.cycle\n from .mixers import *\n from .cost import *\n from .layers import *\n-import pennylane.qaoa.cycle\n", "issue": "Change order of technical description and list of functions in documentation\nThree modules, [`kernels`](https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html), [`grouping`](https://pennylane.readthedocs.io/en/latest/code/qml_grouping.html), and [`qaoa`](https://pennylane.readthedocs.io/en/latest/code/qml_qaoa.html) have their module documentation ordered such that there is first a lengthy description of the theory, and the actual list of functions comes after. 
We should update the docs of these modules so that the functions appear *first*, and the technical details come afterwards (as was recently discussed in #1160). This will improve readability of the documentation and make it easier to find the details of a desired function.\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis module contains functionality to construct QAOA workflows in PennyLane.\n\"\"\"\n\nfrom .mixers import *\nfrom .cost import *\nfrom .layers import *\nimport pennylane.qaoa.cycle\n", "path": "pennylane/qaoa/__init__.py"}, {"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis subpackage defines functions that relate to quantum kernel methods.\nOn one hand this includes functions to call a quantum kernel systematically\non training and test datasets to obtain the *kernel matrix*.\nOn the other hand it provides postprocessing methods for those kernel\nmatrices which can be used to mitigate device noise and sampling errors.\n\nGiven a kernel\n\n.. math ::\n\n k: \\mathbb{R}^d \\times \\mathbb{R}^d \\to \\mathbb{R}, \\quad\n (x_1, x_2)\\mapsto k(x_1, x_2)\n\nthe kernel matrix of :math:`k` on a training dataset\n:math:`\\{(x_1, y_1),\\cdots (x_n, y_n)\\}` with :math:`x_i\\in\\mathbb{R}^d`\nand :math:`y_i\\in\\{-1, 1\\}` is defined as\n\n.. math ::\n\n K_{ij} = k(x_i, x_j).\n\nFor valid kernels, this is a real symmetric positive semi-definite matrix.\nWe also define the *ideal kernel matrix* for the training dataset which\nperfectly predicts whether two points have identical labels or not:\n\n.. math ::\n\n K^\\ast_{ij} = y_i y_j\n\nWe can measure the similarity between :math:`K` and :math:`K^\\ast`,\nthrough the *kernel polarity* which can be expressed as the Frobenius inner\nproduct between the two matrices:\n\n.. math ::\n\n \\operatorname{P}(k) = \\langle K^\\ast, K \\rangle_F = \\sum_{i,j=1}^n y_i y_j k(x_i, x_j)\n\nAdditionally, there is the *kernel-target alignment*, which is the normalized\ncounterpart to the kernel polarity:\n\n.. 
math ::\n\n \\operatorname{TA}(k) &= \\frac{P(k)}{\\lVert K^\\ast \\rVert_F\\;\\lVert K \\rVert_F}\\\\\n \\lVert K\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n k(x_i, x_j)^2}\\\\\n \\lVert K^\\ast\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n (y_iy_j)^2}\n\nFor datasets with different numbers of training points per class the labels are rescaled\nby the number of datapoints in the respective class to avoid that kernel polarity and\nkernel-target alignment are dominated by the properties of the kernel for just a single class.\n\nGiven a callable kernel function, all these quantities can readily be computed\nusing the methods in this module.\n\"\"\"\nfrom .cost_functions import (\n polarity,\n target_alignment,\n)\nfrom .postprocessing import (\n threshold_matrix,\n displace_matrix,\n flip_matrix,\n closest_psd_matrix,\n mitigate_depolarizing_noise,\n)\nfrom .utils import (\n kernel_matrix,\n square_kernel_matrix,\n)\n", "path": "pennylane/kernels/__init__.py"}]}
1,888
849
gh_patches_debug_34754
rasdani/github-patches
git_diff
akvo__akvo-rsr-1531
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add organisation filter for maps API resources </issue> <code> [start of akvo/rest/views/project_update_location.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 from akvo.rsr.models import ProjectUpdateLocation 9 from ..serializers import ProjectUpdateLocationSerializer, MapProjectUpdateLocationSerializer 10 from ..viewsets import BaseRSRViewSet 11 12 13 class ProjectUpdateLocationViewSet(BaseRSRViewSet): 14 """ 15 API endpoint that allows organisation locations to be viewed or edited. 16 """ 17 queryset = ProjectUpdateLocation.objects.all() 18 serializer_class = ProjectUpdateLocationSerializer 19 20 21 class MapProjectUpdateLocationViewSet(BaseRSRViewSet): 22 23 """Returns a resource tailored for generating a map of update locations. 24 25 Allowed parameters are: 26 limit (default 100 / max 500), and 27 location_target__project (filter on project ID) 28 """ 29 30 filter_fields = ('location_target__project', ) 31 max_paginate_by = 500 32 paginate_by = 100 33 queryset = ProjectUpdateLocation.objects.select_related( 34 'location_target', 35 'location_target__project').only( 36 'id', 'latitude', 'longitude', 37 'location_target__id', 'location_target__project', 'location_target__title', 38 'location_target__photo', 'location_target__video') 39 serializer_class = MapProjectUpdateLocationSerializer 40 [end of akvo/rest/views/project_update_location.py] [start of akvo/rest/views/organisation_location.py] 1 # -*- coding: utf-8 -*- 2 """Akvo RSR is covered by the GNU Affero General Public License. 3 See more details in the license.txt file located at the root folder of the Akvo RSR module. 4 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 5 """ 6 7 from akvo.rsr.models import OrganisationLocation 8 from ..serializers import OrganisationLocationSerializer, MapOrganisationLocationSerializer 9 from ..viewsets import BaseRSRViewSet 10 11 12 class OrganisationLocationViewSet(BaseRSRViewSet): 13 """ 14 API endpoint that allows organisation locations to be viewed or edited. 15 """ 16 queryset = OrganisationLocation.objects.all() 17 serializer_class = OrganisationLocationSerializer 18 19 20 class MapOrganisationLocationViewSet(BaseRSRViewSet): 21 22 """Returns a resource tailored for generating a map of organisation locations. 23 24 Allowed parameters are: 25 limit (default 100 / max 500), 26 location_target (filter on organisation ID), and 27 country (filter on country ID) 28 """ 29 30 filter_fields = ('location_target', 'country') 31 max_paginate_by = 500 32 paginate_by = 100 33 queryset = OrganisationLocation.objects.select_related( 34 'location_target', 'country').only( 35 'id', 'latitude', 'longitude', 36 'location_target__id', 'location_target__name', 37 'location_target__logo', 38 'country') 39 serializer_class = MapOrganisationLocationSerializer 40 [end of akvo/rest/views/organisation_location.py] [start of akvo/rest/views/project_location.py] 1 # -*- coding: utf-8 -*- 2 """Akvo RSR is covered by the GNU Affero General Public License. 3 See more details in the license.txt file located at the root folder of the Akvo RSR module. 
4 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 5 """ 6 7 from akvo.rsr.models import ProjectLocation 8 from ..serializers import ProjectLocationSerializer, MapProjectLocationSerializer 9 from ..viewsets import BaseRSRViewSet 10 11 12 class ProjectLocationViewSet(BaseRSRViewSet): 13 """ 14 """ 15 queryset = ProjectLocation.objects.all() 16 serializer_class = ProjectLocationSerializer 17 filter_fields = ('location_target', 'country', ) 18 19 20 class MapProjectLocationViewSet(BaseRSRViewSet): 21 22 """Returns a resource tailored for generating a map of project locations. 23 24 Allowed parameters are: 25 limit (default 100 / max 500), 26 location_target (filter on project ID), and 27 country (filter on country ID) 28 """ 29 30 filter_fields = ('location_target', 'country') 31 max_paginate_by = 500 32 paginate_by = 100 33 queryset = ProjectLocation.objects.select_related( 34 'location_target', 'country').only( 35 'id', 'latitude', 'longitude', 36 'location_target__id', 'location_target__title', 37 'location_target__current_image', 38 'country') 39 serializer_class = MapProjectLocationSerializer 40 [end of akvo/rest/views/project_location.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/views/organisation_location.py b/akvo/rest/views/organisation_location.py --- a/akvo/rest/views/organisation_location.py +++ b/akvo/rest/views/organisation_location.py @@ -22,9 +22,9 @@ """Returns a resource tailored for generating a map of organisation locations. Allowed parameters are: - limit (default 100 / max 500), - location_target (filter on organisation ID), and - country (filter on country ID) + __limit__ (default 100 / max 500), + __location_target__ (filter on organisation ID), and + __country__ (filter on country ID) """ filter_fields = ('location_target', 'country') diff --git a/akvo/rest/views/project_location.py b/akvo/rest/views/project_location.py --- a/akvo/rest/views/project_location.py +++ b/akvo/rest/views/project_location.py @@ -22,12 +22,17 @@ """Returns a resource tailored for generating a map of project locations. Allowed parameters are: - limit (default 100 / max 500), - location_target (filter on project ID), and - country (filter on country ID) + __limit__ (default 100 / max 500), + __location_target__ (filter on project ID), + __location_target\__partners__ (filter on organisation ID), and + __country__ (filter on country ID) """ - filter_fields = ('location_target', 'country') + filter_fields = ( + 'location_target', + 'location_target__partners', + 'country' + ) max_paginate_by = 500 paginate_by = 100 queryset = ProjectLocation.objects.select_related( diff --git a/akvo/rest/views/project_update_location.py b/akvo/rest/views/project_update_location.py --- a/akvo/rest/views/project_update_location.py +++ b/akvo/rest/views/project_update_location.py @@ -23,11 +23,18 @@ """Returns a resource tailored for generating a map of update locations. Allowed parameters are: - limit (default 100 / max 500), and - location_target__project (filter on project ID) + __limit__ (default 100 / max 500), + __location_target\__project__ (filter on project ID), + __location_target\__project\__partners__ + (filter on organisation ID of the projects' organisations), + __location_target\__user\__employers__ (filter on organisation ID of the users' organisations) """ - filter_fields = ('location_target__project', ) + filter_fields = ( + 'location_target__project', + 'location_target__project__partners', + 'location_target__user__employers' + ) max_paginate_by = 500 paginate_by = 100 queryset = ProjectUpdateLocation.objects.select_related(
{"golden_diff": "diff --git a/akvo/rest/views/organisation_location.py b/akvo/rest/views/organisation_location.py\n--- a/akvo/rest/views/organisation_location.py\n+++ b/akvo/rest/views/organisation_location.py\n@@ -22,9 +22,9 @@\n \"\"\"Returns a resource tailored for generating a map of organisation locations.\n \n Allowed parameters are:\n- limit (default 100 / max 500),\n- location_target (filter on organisation ID), and\n- country (filter on country ID)\n+ __limit__ (default 100 / max 500),\n+ __location_target__ (filter on organisation ID), and\n+ __country__ (filter on country ID)\n \"\"\"\n \n filter_fields = ('location_target', 'country')\ndiff --git a/akvo/rest/views/project_location.py b/akvo/rest/views/project_location.py\n--- a/akvo/rest/views/project_location.py\n+++ b/akvo/rest/views/project_location.py\n@@ -22,12 +22,17 @@\n \"\"\"Returns a resource tailored for generating a map of project locations.\n \n Allowed parameters are:\n- limit (default 100 / max 500),\n- location_target (filter on project ID), and\n- country (filter on country ID)\n+ __limit__ (default 100 / max 500),\n+ __location_target__ (filter on project ID),\n+ __location_target\\__partners__ (filter on organisation ID), and\n+ __country__ (filter on country ID)\n \"\"\"\n \n- filter_fields = ('location_target', 'country')\n+ filter_fields = (\n+ 'location_target',\n+ 'location_target__partners',\n+ 'country'\n+ )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectLocation.objects.select_related(\ndiff --git a/akvo/rest/views/project_update_location.py b/akvo/rest/views/project_update_location.py\n--- a/akvo/rest/views/project_update_location.py\n+++ b/akvo/rest/views/project_update_location.py\n@@ -23,11 +23,18 @@\n \"\"\"Returns a resource tailored for generating a map of update locations.\n \n Allowed parameters are:\n- limit (default 100 / max 500), and\n- location_target__project (filter on project ID)\n+ __limit__ (default 100 / max 500),\n+ __location_target\\__project__ (filter on project ID),\n+ __location_target\\__project\\__partners__\n+ (filter on organisation ID of the projects' organisations),\n+ __location_target\\__user\\__employers__ (filter on organisation ID of the users' organisations)\n \"\"\"\n \n- filter_fields = ('location_target__project', )\n+ filter_fields = (\n+ 'location_target__project',\n+ 'location_target__project__partners',\n+ 'location_target__user__employers'\n+ )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectUpdateLocation.objects.select_related(\n", "issue": "Add organisation filter for maps API resources\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import ProjectUpdateLocation\nfrom ..serializers import ProjectUpdateLocationSerializer, MapProjectUpdateLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ProjectUpdateLocationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisation locations to be viewed or edited.\n \"\"\"\n queryset = ProjectUpdateLocation.objects.all()\n serializer_class = ProjectUpdateLocationSerializer\n\n\nclass MapProjectUpdateLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of update locations.\n\n Allowed parameters are:\n limit (default 100 / 
max 500), and\n location_target__project (filter on project ID)\n \"\"\"\n\n filter_fields = ('location_target__project', )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectUpdateLocation.objects.select_related(\n 'location_target',\n 'location_target__project').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__project', 'location_target__title',\n 'location_target__photo', 'location_target__video')\n serializer_class = MapProjectUpdateLocationSerializer\n", "path": "akvo/rest/views/project_update_location.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import OrganisationLocation\nfrom ..serializers import OrganisationLocationSerializer, MapOrganisationLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass OrganisationLocationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisation locations to be viewed or edited.\n \"\"\"\n queryset = OrganisationLocation.objects.all()\n serializer_class = OrganisationLocationSerializer\n\n\nclass MapOrganisationLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of organisation locations.\n\n Allowed parameters are:\n limit (default 100 / max 500),\n location_target (filter on organisation ID), and\n country (filter on country ID)\n \"\"\"\n\n filter_fields = ('location_target', 'country')\n max_paginate_by = 500\n paginate_by = 100\n queryset = OrganisationLocation.objects.select_related(\n 'location_target', 'country').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__name',\n 'location_target__logo',\n 'country')\n serializer_class = MapOrganisationLocationSerializer\n", "path": "akvo/rest/views/organisation_location.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import ProjectLocation\nfrom ..serializers import ProjectLocationSerializer, MapProjectLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ProjectLocationViewSet(BaseRSRViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectLocation.objects.all()\n serializer_class = ProjectLocationSerializer\n filter_fields = ('location_target', 'country', )\n\n\nclass MapProjectLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of project locations.\n\n Allowed parameters are:\n limit (default 100 / max 500),\n location_target (filter on project ID), and\n country (filter on country ID)\n \"\"\"\n\n filter_fields = ('location_target', 'country')\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectLocation.objects.select_related(\n 'location_target', 'country').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__title',\n 'location_target__current_image',\n 'country')\n serializer_class = MapProjectLocationSerializer\n", "path": "akvo/rest/views/project_location.py"}]}
1,752
697
gh_patches_debug_10696
rasdani/github-patches
git_diff
Kinto__kinto-1138
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Enforce the permission endpoint when the admin plugin is included. Enforce the permission endpoint when the admin plugin is included. </issue> <code> [start of kinto/__init__.py] 1 import pkg_resources 2 import logging 3 4 import kinto.core 5 from pyramid.config import Configurator 6 from pyramid.settings import asbool 7 from pyramid.security import Authenticated, Everyone 8 9 from kinto.authorization import RouteFactory 10 11 12 # Module version, as defined in PEP-0396. 13 __version__ = pkg_resources.get_distribution(__package__).version 14 15 # Implemented HTTP API Version 16 HTTP_API_VERSION = '1.16' 17 18 # Main kinto logger 19 logger = logging.getLogger(__name__) 20 21 22 DEFAULT_SETTINGS = { 23 'flush_endpoint_enabled': False, 24 'retry_after_seconds': 3, 25 'cache_backend': 'kinto.core.cache.memory', 26 'permission_backend': 'kinto.core.permission.memory', 27 'storage_backend': 'kinto.core.storage.memory', 28 'project_docs': 'https://kinto.readthedocs.io/', 29 'bucket_create_principals': Authenticated, 30 'permissions_read_principals': Everyone, 31 'multiauth.authorization_policy': ( 32 'kinto.authorization.AuthorizationPolicy'), 33 'experimental_collection_schema_validation': False, 34 'experimental_permissions_endpoint': False, 35 'http_api_version': HTTP_API_VERSION, 36 'bucket_id_generator': 'kinto.views.NameGenerator', 37 'collection_id_generator': 'kinto.views.NameGenerator', 38 'group_id_generator': 'kinto.views.NameGenerator', 39 'record_id_generator': 'kinto.views.RelaxedUUID' 40 } 41 42 43 def main(global_config, config=None, **settings): 44 if not config: 45 config = Configurator(settings=settings, root_factory=RouteFactory) 46 47 # Force project name, since it determines settings prefix. 48 config.add_settings({'kinto.project_name': 'kinto'}) 49 50 kinto.core.initialize(config, 51 version=__version__, 52 default_settings=DEFAULT_SETTINGS) 53 54 settings = config.get_settings() 55 56 # Expose capability 57 schema_enabled = asbool( 58 settings['experimental_collection_schema_validation'] 59 ) 60 if schema_enabled: 61 config.add_api_capability( 62 "schema", 63 description="Validates collection records with JSON schemas.", 64 url="https://kinto.readthedocs.io/en/latest/api/1.x/" 65 "collections.html#collection-json-schema") 66 67 # Scan Kinto views. 68 kwargs = {} 69 70 flush_enabled = asbool(settings['flush_endpoint_enabled']) 71 if flush_enabled: 72 config.add_api_capability( 73 "flush_endpoint", 74 description="The __flush__ endpoint can be used to remove all " 75 "data from all backends.", 76 url="https://kinto.readthedocs.io/en/latest/configuration/" 77 "settings.html#activating-the-flush-endpoint") 78 else: 79 kwargs['ignore'] = ['kinto.views.flush'] 80 81 # Permissions endpoint enabled if permission backend is setup. 
82 permissions_endpoint_enabled = ( 83 asbool(settings['experimental_permissions_endpoint']) and 84 hasattr(config.registry, 'permission')) 85 if permissions_endpoint_enabled: 86 config.add_api_capability( 87 "permissions_endpoint", 88 description="The permissions endpoint can be used to list all " 89 "user objects permissions.", 90 url="https://kinto.readthedocs.io/en/latest/configuration/" 91 "settings.html#activating-the-permissions-endpoint") 92 else: 93 kwargs.setdefault('ignore', []).append('kinto.views.permissions') 94 95 config.scan("kinto.views", **kwargs) 96 97 app = config.make_wsgi_app() 98 99 # Install middleware (no-op if disabled) 100 return kinto.core.install_middlewares(app, settings) 101 [end of kinto/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/__init__.py b/kinto/__init__.py --- a/kinto/__init__.py +++ b/kinto/__init__.py @@ -79,8 +79,9 @@ kwargs['ignore'] = ['kinto.views.flush'] # Permissions endpoint enabled if permission backend is setup. + is_admin_enabled = 'kinto.plugins.admin' in settings['includes'] permissions_endpoint_enabled = ( - asbool(settings['experimental_permissions_endpoint']) and + (is_admin_enabled or asbool(settings['experimental_permissions_endpoint'])) and hasattr(config.registry, 'permission')) if permissions_endpoint_enabled: config.add_api_capability(
{"golden_diff": "diff --git a/kinto/__init__.py b/kinto/__init__.py\n--- a/kinto/__init__.py\n+++ b/kinto/__init__.py\n@@ -79,8 +79,9 @@\n kwargs['ignore'] = ['kinto.views.flush']\n \n # Permissions endpoint enabled if permission backend is setup.\n+ is_admin_enabled = 'kinto.plugins.admin' in settings['includes']\n permissions_endpoint_enabled = (\n- asbool(settings['experimental_permissions_endpoint']) and\n+ (is_admin_enabled or asbool(settings['experimental_permissions_endpoint'])) and\n hasattr(config.registry, 'permission'))\n if permissions_endpoint_enabled:\n config.add_api_capability(\n", "issue": "Enforce the permission endpoint when the admin plugin is included.\n\nEnforce the permission endpoint when the admin plugin is included.\n\n", "before_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated, Everyone\n\nfrom kinto.authorization import RouteFactory\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.16'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'permissions_read_principals': Everyone,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': False,\n 'experimental_permissions_endpoint': False,\n 'http_api_version': HTTP_API_VERSION,\n 'bucket_id_generator': 'kinto.views.NameGenerator',\n 'collection_id_generator': 'kinto.views.NameGenerator',\n 'group_id_generator': 'kinto.views.NameGenerator',\n 'record_id_generator': 'kinto.views.RelaxedUUID'\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n\n flush_enabled = asbool(settings['flush_endpoint_enabled'])\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = ['kinto.views.flush']\n\n # Permissions endpoint enabled if permission backend is setup.\n permissions_endpoint_enabled = (\n asbool(settings['experimental_permissions_endpoint']) and\n hasattr(config.registry, 'permission'))\n if permissions_endpoint_enabled:\n config.add_api_capability(\n 
\"permissions_endpoint\",\n description=\"The permissions endpoint can be used to list all \"\n \"user objects permissions.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-permissions-endpoint\")\n else:\n kwargs.setdefault('ignore', []).append('kinto.views.permissions')\n\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (no-op if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}]}
1,490
145
gh_patches_debug_31308
rasdani/github-patches
git_diff
dask__distributed-4984
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Drop down tile to reveal "secret" dashboards
We're accumulating a lot of _secret_ dashboard pages https://github.com/dask/distributed/blob/c2557938e6c4175534031cba5ca5ac9d2cdc95f7/distributed/dashboard/scheduler.py#L82-L119
although most are not easily accessible from the UI. Most of the pages are not useful for the ordinary user and are only relevant for specific edge cases or debugging. Hence, it makes sense that they are not promoted as a top-level dashboard page.

However, at least for debugging purposes, I would really appreciate it if these pages were a bit easier to navigate. In particular I'm looking for a way which doesn't require me to know the exact endpoint for an individual plot and type it into my browser.

I would propose to add a drop down menu / button which can be used to browse all _hidden_ dashboard pages.

Disclaimer: I can't implement this. I barely know what bokeh is. </issue> <code> [start of distributed/dashboard/scheduler.py] 1 from urllib.parse import urljoin 2 3 from tornado import web 4 from tornado.ioloop import IOLoop 5 6 try: 7     import numpy as np 8 except ImportError: 9     np = False 10 11 from .components.nvml import gpu_doc  # noqa: 1708 12 from .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc 13 from .components.scheduler import ( 14     AggregateAction, 15     BandwidthTypes, 16     BandwidthWorkers, 17     ComputePerKey, 18     CurrentLoad, 19     MemoryByKey, 20     NBytes, 21     NBytesCluster, 22     Occupancy, 23     SystemMonitor, 24     TaskGraph, 25     TaskGroupGraph, 26     TaskProgress, 27     TaskStream, 28     WorkerTable, 29     events_doc, 30     graph_doc, 31     individual_doc, 32     individual_profile_doc, 33     individual_profile_server_doc, 34     profile_doc, 35     profile_server_doc, 36     status_doc, 37     stealing_doc, 38     systemmonitor_doc, 39     tasks_doc, 40     tg_graph_doc, 41     workers_doc, 42 ) 43 from .core import BokehApplication 44 from .worker import counters_doc 45 46 template_variables = { 47     "pages": [ 48         "status", 49         "workers", 50         "tasks", 51         "system", 52         "profile", 53         "graph", 54         "groups", 55         "info", 56     ] 57 } 58 59 if NVML_ENABLED: 60     template_variables["pages"].insert(4, "gpu") 61 62 63 def connect(application, http_server, scheduler, prefix=""): 64     bokeh_app = BokehApplication( 65         applications, scheduler, prefix=prefix, template_variables=template_variables 66     ) 67     application.add_application(bokeh_app) 68     bokeh_app.initialize(IOLoop.current()) 69 70     bokeh_app.add_handlers( 71         r".*", 72         [ 73             ( 74                 r"/", 75                 web.RedirectHandler, 76                 {"url": urljoin((prefix or "").strip("/") + "/", r"status")}, 77             ) 78         ], 79     ) 80 81 82 applications = { 83     "/system": systemmonitor_doc, 84     "/stealing": stealing_doc, 85     "/workers": workers_doc, 86     "/events": events_doc, 87     "/counters": counters_doc, 88     "/tasks": tasks_doc, 89     "/status": status_doc, 90     "/profile": profile_doc, 91     "/profile-server": profile_server_doc, 92     "/graph": graph_doc, 93     "/groups": tg_graph_doc, 94     "/gpu": gpu_doc, 95     "/individual-task-stream": individual_doc( 96         TaskStream, 100, n_rectangles=1000, clear_interval="10s" 97     ), 98     "/individual-progress": individual_doc(TaskProgress, 100, height=160), 99     "/individual-graph": individual_doc(TaskGraph, 200), 100     "/individual-groups": individual_doc(TaskGroupGraph, 200), 101     "/individual-nbytes": individual_doc(NBytes, 100), 102     "/individual-nbytes-cluster": individual_doc(NBytesCluster, 100), 103     "/individual-cpu": individual_doc(CurrentLoad, 100, fig_attr="cpu_figure"), 104 
"/individual-nprocessing": individual_doc( 105 CurrentLoad, 100, fig_attr="processing_figure" 106 ), 107 "/individual-occupancy": individual_doc(Occupancy, 100), 108 "/individual-workers": individual_doc(WorkerTable, 500), 109 "/individual-bandwidth-types": individual_doc(BandwidthTypes, 500), 110 "/individual-bandwidth-workers": individual_doc(BandwidthWorkers, 500), 111 "/individual-memory-by-key": individual_doc(MemoryByKey, 500), 112 "/individual-compute-time-per-key": individual_doc(ComputePerKey, 500), 113 "/individual-aggregate-time-per-action": individual_doc(AggregateAction, 500), 114 "/individual-scheduler-system": individual_doc(SystemMonitor, 500), 115 "/individual-profile": individual_profile_doc, 116 "/individual-profile-server": individual_profile_server_doc, 117 "/individual-gpu-memory": gpu_memory_doc, 118 "/individual-gpu-utilization": gpu_utilization_doc, 119 } 120 [end of distributed/dashboard/scheduler.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/distributed/dashboard/scheduler.py b/distributed/dashboard/scheduler.py --- a/distributed/dashboard/scheduler.py +++ b/distributed/dashboard/scheduler.py @@ -43,42 +43,6 @@ from .core import BokehApplication from .worker import counters_doc -template_variables = { - "pages": [ - "status", - "workers", - "tasks", - "system", - "profile", - "graph", - "groups", - "info", - ] -} - -if NVML_ENABLED: - template_variables["pages"].insert(4, "gpu") - - -def connect(application, http_server, scheduler, prefix=""): - bokeh_app = BokehApplication( - applications, scheduler, prefix=prefix, template_variables=template_variables - ) - application.add_application(bokeh_app) - bokeh_app.initialize(IOLoop.current()) - - bokeh_app.add_handlers( - r".*", - [ - ( - r"/", - web.RedirectHandler, - {"url": urljoin((prefix or "").strip("/") + "/", r"status")}, - ) - ], - ) - - applications = { "/system": systemmonitor_doc, "/stealing": stealing_doc, @@ -117,3 +81,40 @@ "/individual-gpu-memory": gpu_memory_doc, "/individual-gpu-utilization": gpu_utilization_doc, } + + +template_variables = { + "pages": [ + "status", + "workers", + "tasks", + "system", + "profile", + "graph", + "groups", + "info", + ], + "plots": [x.replace("/", "") for x in applications if "individual" in x], +} + +if NVML_ENABLED: + template_variables["pages"].insert(4, "gpu") + + +def connect(application, http_server, scheduler, prefix=""): + bokeh_app = BokehApplication( + applications, scheduler, prefix=prefix, template_variables=template_variables + ) + application.add_application(bokeh_app) + bokeh_app.initialize(IOLoop.current()) + + bokeh_app.add_handlers( + r".*", + [ + ( + r"/", + web.RedirectHandler, + {"url": urljoin((prefix or "").strip("/") + "/", r"status")}, + ) + ], + )
{"golden_diff": "diff --git a/distributed/dashboard/scheduler.py b/distributed/dashboard/scheduler.py\n--- a/distributed/dashboard/scheduler.py\n+++ b/distributed/dashboard/scheduler.py\n@@ -43,42 +43,6 @@\n from .core import BokehApplication\n from .worker import counters_doc\n \n-template_variables = {\n- \"pages\": [\n- \"status\",\n- \"workers\",\n- \"tasks\",\n- \"system\",\n- \"profile\",\n- \"graph\",\n- \"groups\",\n- \"info\",\n- ]\n-}\n-\n-if NVML_ENABLED:\n- template_variables[\"pages\"].insert(4, \"gpu\")\n-\n-\n-def connect(application, http_server, scheduler, prefix=\"\"):\n- bokeh_app = BokehApplication(\n- applications, scheduler, prefix=prefix, template_variables=template_variables\n- )\n- application.add_application(bokeh_app)\n- bokeh_app.initialize(IOLoop.current())\n-\n- bokeh_app.add_handlers(\n- r\".*\",\n- [\n- (\n- r\"/\",\n- web.RedirectHandler,\n- {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n- )\n- ],\n- )\n-\n-\n applications = {\n \"/system\": systemmonitor_doc,\n \"/stealing\": stealing_doc,\n@@ -117,3 +81,40 @@\n \"/individual-gpu-memory\": gpu_memory_doc,\n \"/individual-gpu-utilization\": gpu_utilization_doc,\n }\n+\n+\n+template_variables = {\n+ \"pages\": [\n+ \"status\",\n+ \"workers\",\n+ \"tasks\",\n+ \"system\",\n+ \"profile\",\n+ \"graph\",\n+ \"groups\",\n+ \"info\",\n+ ],\n+ \"plots\": [x.replace(\"/\", \"\") for x in applications if \"individual\" in x],\n+}\n+\n+if NVML_ENABLED:\n+ template_variables[\"pages\"].insert(4, \"gpu\")\n+\n+\n+def connect(application, http_server, scheduler, prefix=\"\"):\n+ bokeh_app = BokehApplication(\n+ applications, scheduler, prefix=prefix, template_variables=template_variables\n+ )\n+ application.add_application(bokeh_app)\n+ bokeh_app.initialize(IOLoop.current())\n+\n+ bokeh_app.add_handlers(\n+ r\".*\",\n+ [\n+ (\n+ r\"/\",\n+ web.RedirectHandler,\n+ {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n+ )\n+ ],\n+ )\n", "issue": "Drop down tile to reveal \"secret\" dashboards\nWe're accumulating a lot of _secret_ dashboard pages https://github.com/dask/distributed/blob/c2557938e6c4175534031cba5ca5ac9d2cdc95f7/distributed/dashboard/scheduler.py#L82-L119\r\nalthough most are not easily accessible from the UI. Most of the pages are not useful for the ordinary user and are only relevant for specific edge cases or debugging. hence, it makes sense that they are not promoted as a top-level dashboard page.\r\n\r\nHowever, at least for debugging purposes, I would really appreciate if these pages were a bit easier to navigate. In particular I'm looking for a way which doesn't require me to know the exact endpoint for an individual plot and requires me to type it into my browser.\r\n\r\nI would propose to add a drop down menu / button which can be used to browse all _hidden_ dashboard pages.\r\n\r\nDisclaimer: I can't implement this. 
I barely know what bokeh is.\n", "before_files": [{"content": "from urllib.parse import urljoin\n\nfrom tornado import web\nfrom tornado.ioloop import IOLoop\n\ntry:\n import numpy as np\nexcept ImportError:\n np = False\n\nfrom .components.nvml import gpu_doc # noqa: 1708\nfrom .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc\nfrom .components.scheduler import (\n AggregateAction,\n BandwidthTypes,\n BandwidthWorkers,\n ComputePerKey,\n CurrentLoad,\n MemoryByKey,\n NBytes,\n NBytesCluster,\n Occupancy,\n SystemMonitor,\n TaskGraph,\n TaskGroupGraph,\n TaskProgress,\n TaskStream,\n WorkerTable,\n events_doc,\n graph_doc,\n individual_doc,\n individual_profile_doc,\n individual_profile_server_doc,\n profile_doc,\n profile_server_doc,\n status_doc,\n stealing_doc,\n systemmonitor_doc,\n tasks_doc,\n tg_graph_doc,\n workers_doc,\n)\nfrom .core import BokehApplication\nfrom .worker import counters_doc\n\ntemplate_variables = {\n \"pages\": [\n \"status\",\n \"workers\",\n \"tasks\",\n \"system\",\n \"profile\",\n \"graph\",\n \"groups\",\n \"info\",\n ]\n}\n\nif NVML_ENABLED:\n template_variables[\"pages\"].insert(4, \"gpu\")\n\n\ndef connect(application, http_server, scheduler, prefix=\"\"):\n bokeh_app = BokehApplication(\n applications, scheduler, prefix=prefix, template_variables=template_variables\n )\n application.add_application(bokeh_app)\n bokeh_app.initialize(IOLoop.current())\n\n bokeh_app.add_handlers(\n r\".*\",\n [\n (\n r\"/\",\n web.RedirectHandler,\n {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n )\n ],\n )\n\n\napplications = {\n \"/system\": systemmonitor_doc,\n \"/stealing\": stealing_doc,\n \"/workers\": workers_doc,\n \"/events\": events_doc,\n \"/counters\": counters_doc,\n \"/tasks\": tasks_doc,\n \"/status\": status_doc,\n \"/profile\": profile_doc,\n \"/profile-server\": profile_server_doc,\n \"/graph\": graph_doc,\n \"/groups\": tg_graph_doc,\n \"/gpu\": gpu_doc,\n \"/individual-task-stream\": individual_doc(\n TaskStream, 100, n_rectangles=1000, clear_interval=\"10s\"\n ),\n \"/individual-progress\": individual_doc(TaskProgress, 100, height=160),\n \"/individual-graph\": individual_doc(TaskGraph, 200),\n \"/individual-groups\": individual_doc(TaskGroupGraph, 200),\n \"/individual-nbytes\": individual_doc(NBytes, 100),\n \"/individual-nbytes-cluster\": individual_doc(NBytesCluster, 100),\n \"/individual-cpu\": individual_doc(CurrentLoad, 100, fig_attr=\"cpu_figure\"),\n \"/individual-nprocessing\": individual_doc(\n CurrentLoad, 100, fig_attr=\"processing_figure\"\n ),\n \"/individual-occupancy\": individual_doc(Occupancy, 100),\n \"/individual-workers\": individual_doc(WorkerTable, 500),\n \"/individual-bandwidth-types\": individual_doc(BandwidthTypes, 500),\n \"/individual-bandwidth-workers\": individual_doc(BandwidthWorkers, 500),\n \"/individual-memory-by-key\": individual_doc(MemoryByKey, 500),\n \"/individual-compute-time-per-key\": individual_doc(ComputePerKey, 500),\n \"/individual-aggregate-time-per-action\": individual_doc(AggregateAction, 500),\n \"/individual-scheduler-system\": individual_doc(SystemMonitor, 500),\n \"/individual-profile\": individual_profile_doc,\n \"/individual-profile-server\": individual_profile_server_doc,\n \"/individual-gpu-memory\": gpu_memory_doc,\n \"/individual-gpu-utilization\": gpu_utilization_doc,\n}\n", "path": "distributed/dashboard/scheduler.py"}]}
1,869
549
gh_patches_debug_28060
rasdani/github-patches
git_diff
dynaconf__dynaconf-131
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> YAML.load without a loader is deprecated for security purposes We've started seeing the following warning: ``` lib/python3.6/site-packages/dynaconf/loaders/base.py:95: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details. ``` See here: https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation </issue> <code> [start of dynaconf/loaders/yaml_loader.py] 1 # coding: utf-8 2 import io 3 from pathlib import Path 4 from dynaconf import default_settings 5 from dynaconf.loaders.base import BaseLoader 6 from dynaconf.constants import YAML_EXTENSIONS 7 from dynaconf.utils import object_merge 8 try: 9 import yaml 10 except ImportError as e: # pragma: no cover 11 yaml = None 12 13 14 def load(obj, env=None, silent=True, key=None, filename=None): 15 """ 16 Reads and loads in to "obj" a single key or all keys from source file. 17 18 :param obj: the settings instance 19 :param env: settings current env default='development' 20 :param silent: if errors should raise 21 :param key: if defined load a single key, else load all in env 22 :param filename: Optional custom filename to load 23 :return: None 24 """ 25 if yaml is None: # pragma: no cover 26 BaseLoader.warn_not_installed(obj, 'yaml') 27 return 28 29 loader = BaseLoader( 30 obj=obj, 31 env=env, 32 identifier='yaml', 33 extensions=YAML_EXTENSIONS, 34 file_reader=yaml.load, 35 string_reader=yaml.load 36 ) 37 loader.load(filename=filename, key=key, silent=silent) 38 39 40 def write(settings_path, settings_data, merge=True): 41 """Write data to a settings file. 42 43 :param settings_path: the filepath 44 :param settings_data: a dictionary with data 45 :param merge: boolean if existing file should be merged with new data 46 """ 47 settings_path = Path(settings_path) 48 if settings_path.exists() and merge: # pragma: no cover 49 object_merge( 50 yaml.load( 51 io.open( 52 str(settings_path), 53 encoding=default_settings.ENCODING_FOR_DYNACONF 54 ) 55 ), 56 settings_data 57 ) 58 59 yaml.dump( 60 settings_data, 61 io.open( 62 str(settings_path), 'w', 63 encoding=default_settings.ENCODING_FOR_DYNACONF 64 ) 65 ) 66 [end of dynaconf/loaders/yaml_loader.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py --- a/dynaconf/loaders/yaml_loader.py +++ b/dynaconf/loaders/yaml_loader.py @@ -1,10 +1,13 @@ # coding: utf-8 import io +import os from pathlib import Path +from warnings import warn from dynaconf import default_settings from dynaconf.loaders.base import BaseLoader from dynaconf.constants import YAML_EXTENSIONS from dynaconf.utils import object_merge + try: import yaml except ImportError as e: # pragma: no cover @@ -26,13 +29,25 @@ BaseLoader.warn_not_installed(obj, 'yaml') return + # Resolve the loaders + # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation + # Possible values are `safe_load, full_load, unsafe_load, load` + yaml_loader_name = os.environ.get('YAML_LOADER_FOR_DYNACONF', 'full_load') + yaml_reader = getattr(yaml, yaml_loader_name, yaml.load) + if yaml_reader.__name__ == 'unsafe_load': # pragma: no cover + warn( + "yaml.unsafe_load is deprecated." + " Please read https://msg.pyyaml.org/load for full details." + " Try to use full_load or safe_load." + ) + loader = BaseLoader( obj=obj, env=env, identifier='yaml', extensions=YAML_EXTENSIONS, - file_reader=yaml.load, - string_reader=yaml.load + file_reader=yaml_reader, + string_reader=yaml_reader ) loader.load(filename=filename, key=key, silent=silent)
{"golden_diff": "diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py\n--- a/dynaconf/loaders/yaml_loader.py\n+++ b/dynaconf/loaders/yaml_loader.py\n@@ -1,10 +1,13 @@\n # coding: utf-8\n import io\n+import os\n from pathlib import Path\n+from warnings import warn\n from dynaconf import default_settings\n from dynaconf.loaders.base import BaseLoader\n from dynaconf.constants import YAML_EXTENSIONS\n from dynaconf.utils import object_merge\n+\n try:\n import yaml\n except ImportError as e: # pragma: no cover\n@@ -26,13 +29,25 @@\n BaseLoader.warn_not_installed(obj, 'yaml')\n return\n \n+ # Resolve the loaders\n+ # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n+ # Possible values are `safe_load, full_load, unsafe_load, load`\n+ yaml_loader_name = os.environ.get('YAML_LOADER_FOR_DYNACONF', 'full_load')\n+ yaml_reader = getattr(yaml, yaml_loader_name, yaml.load)\n+ if yaml_reader.__name__ == 'unsafe_load': # pragma: no cover\n+ warn(\n+ \"yaml.unsafe_load is deprecated.\"\n+ \" Please read https://msg.pyyaml.org/load for full details.\"\n+ \" Try to use full_load or safe_load.\"\n+ )\n+\n loader = BaseLoader(\n obj=obj,\n env=env,\n identifier='yaml',\n extensions=YAML_EXTENSIONS,\n- file_reader=yaml.load,\n- string_reader=yaml.load\n+ file_reader=yaml_reader,\n+ string_reader=yaml_reader\n )\n loader.load(filename=filename, key=key, silent=silent)\n", "issue": "YAML.load without a loader is deprecated for security purposes\nWe've started seeing the following warning:\r\n```\r\nlib/python3.6/site-packages/dynaconf/loaders/base.py:95: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.\r\n```\r\n\r\nSee here: https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n", "before_files": [{"content": "# coding: utf-8\nimport io\nfrom pathlib import Path\nfrom dynaconf import default_settings\nfrom dynaconf.loaders.base import BaseLoader\nfrom dynaconf.constants import YAML_EXTENSIONS\nfrom dynaconf.utils import object_merge\ntry:\n import yaml\nexcept ImportError as e: # pragma: no cover\n yaml = None\n\n\ndef load(obj, env=None, silent=True, key=None, filename=None):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n\n :param obj: the settings instance\n :param env: settings current env default='development'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :param filename: Optional custom filename to load\n :return: None\n \"\"\"\n if yaml is None: # pragma: no cover\n BaseLoader.warn_not_installed(obj, 'yaml')\n return\n\n loader = BaseLoader(\n obj=obj,\n env=env,\n identifier='yaml',\n extensions=YAML_EXTENSIONS,\n file_reader=yaml.load,\n string_reader=yaml.load\n )\n loader.load(filename=filename, key=key, silent=silent)\n\n\ndef write(settings_path, settings_data, merge=True):\n \"\"\"Write data to a settings file.\n\n :param settings_path: the filepath\n :param settings_data: a dictionary with data\n :param merge: boolean if existing file should be merged with new data\n \"\"\"\n settings_path = Path(settings_path)\n if settings_path.exists() and merge: # pragma: no cover\n object_merge(\n yaml.load(\n io.open(\n str(settings_path),\n encoding=default_settings.ENCODING_FOR_DYNACONF\n )\n ),\n settings_data\n )\n\n yaml.dump(\n settings_data,\n io.open(\n str(settings_path), 'w',\n 
encoding=default_settings.ENCODING_FOR_DYNACONF\n )\n )\n", "path": "dynaconf/loaders/yaml_loader.py"}]}
1,206
402
gh_patches_debug_41208
rasdani/github-patches
git_diff
akvo__akvo-rsr-3751
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Audit log disaggregation categories and labels </issue> <code> [start of akvo/rest/views/indicator_dimension_name.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 from akvo.rsr.models import IndicatorDimensionName 9 10 from ..serializers import IndicatorDimensionNameSerializer 11 from ..viewsets import PublicProjectViewSet 12 13 14 class IndicatorDimensionNameViewSet(PublicProjectViewSet): 15 """ 16 """ 17 queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values') 18 serializer_class = IndicatorDimensionNameSerializer 19 project_relation = 'project__' 20 [end of akvo/rest/views/indicator_dimension_name.py] [start of akvo/rest/views/indicator_dimension_value.py] 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 from akvo.rsr.models import IndicatorDimensionValue 9 10 from ..serializers import IndicatorDimensionValueSerializer 11 from ..viewsets import PublicProjectViewSet 12 13 14 class IndicatorDimensionValueViewSet(PublicProjectViewSet): 15 """ 16 """ 17 queryset = IndicatorDimensionValue.objects.all() 18 serializer_class = IndicatorDimensionValueSerializer 19 project_relation = 'name__project__' 20 [end of akvo/rest/views/indicator_dimension_value.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rest/views/indicator_dimension_name.py b/akvo/rest/views/indicator_dimension_name.py --- a/akvo/rest/views/indicator_dimension_name.py +++ b/akvo/rest/views/indicator_dimension_name.py @@ -5,6 +5,8 @@ # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. +from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION +from django.contrib.contenttypes.models import ContentType from akvo.rsr.models import IndicatorDimensionName from ..serializers import IndicatorDimensionNameSerializer @@ -17,3 +19,31 @@ queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values') serializer_class = IndicatorDimensionNameSerializer project_relation = 'project__' + + def create(self, request, *args, **kwargs): + response = super(IndicatorDimensionNameViewSet, self).create(request, *args, **kwargs) + self._log_action(ADDITION, response.data, str(request.data)) + return response + + def update(self, request, *args, **kwargs): + response = super(IndicatorDimensionNameViewSet, self).update(request, *args, **kwargs) + self._log_action(CHANGE, response.data, str(request.data)) + return response + + def destroy(self, request, *args, **kwargs): + instance = self.get_object() + data = {'id': instance.id, 'name': instance.name} + response = super(IndicatorDimensionNameViewSet, self).destroy(request, *args, **kwargs) + self._log_action(DELETION, data) + return response + + def _log_action(self, action_flag, instance, message=''): + user = self.request.user + LogEntry.objects.log_action( + user_id=user.pk, + content_type_id=ContentType.objects.get_for_model(IndicatorDimensionName).pk, + object_id=instance['id'], + object_repr=str(instance), + action_flag=action_flag, + change_message=message + ) diff --git a/akvo/rest/views/indicator_dimension_value.py b/akvo/rest/views/indicator_dimension_value.py --- a/akvo/rest/views/indicator_dimension_value.py +++ b/akvo/rest/views/indicator_dimension_value.py @@ -5,6 +5,8 @@ # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 
+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION +from django.contrib.contenttypes.models import ContentType from akvo.rsr.models import IndicatorDimensionValue from ..serializers import IndicatorDimensionValueSerializer @@ -17,3 +19,31 @@ queryset = IndicatorDimensionValue.objects.all() serializer_class = IndicatorDimensionValueSerializer project_relation = 'name__project__' + + def create(self, request, *args, **kwargs): + response = super(IndicatorDimensionValueViewSet, self).create(request, *args, **kwargs) + self._log_action(ADDITION, response.data, str(request.data)) + return response + + def update(self, request, *args, **kwargs): + response = super(IndicatorDimensionValueViewSet, self).update(request, *args, **kwargs) + self._log_action(CHANGE, response.data, str(request.data)) + return response + + def destroy(self, request, *args, **kwargs): + instance = self.get_object() + data = {'id': instance.id, 'value': instance.value} + response = super(IndicatorDimensionValueViewSet, self).destroy(request, *args, **kwargs) + self._log_action(DELETION, data) + return response + + def _log_action(self, action_flag, instance, message=''): + user = self.request.user + LogEntry.objects.log_action( + user_id=user.pk, + content_type_id=ContentType.objects.get_for_model(IndicatorDimensionValue).pk, + object_id=instance['id'], + object_repr=str(instance), + action_flag=action_flag, + change_message=message + )
{"golden_diff": "diff --git a/akvo/rest/views/indicator_dimension_name.py b/akvo/rest/views/indicator_dimension_name.py\n--- a/akvo/rest/views/indicator_dimension_name.py\n+++ b/akvo/rest/views/indicator_dimension_name.py\n@@ -5,6 +5,8 @@\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \n \n+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\n+from django.contrib.contenttypes.models import ContentType\n from akvo.rsr.models import IndicatorDimensionName\n \n from ..serializers import IndicatorDimensionNameSerializer\n@@ -17,3 +19,31 @@\n queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')\n serializer_class = IndicatorDimensionNameSerializer\n project_relation = 'project__'\n+\n+ def create(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionNameViewSet, self).create(request, *args, **kwargs)\n+ self._log_action(ADDITION, response.data, str(request.data))\n+ return response\n+\n+ def update(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionNameViewSet, self).update(request, *args, **kwargs)\n+ self._log_action(CHANGE, response.data, str(request.data))\n+ return response\n+\n+ def destroy(self, request, *args, **kwargs):\n+ instance = self.get_object()\n+ data = {'id': instance.id, 'name': instance.name}\n+ response = super(IndicatorDimensionNameViewSet, self).destroy(request, *args, **kwargs)\n+ self._log_action(DELETION, data)\n+ return response\n+\n+ def _log_action(self, action_flag, instance, message=''):\n+ user = self.request.user\n+ LogEntry.objects.log_action(\n+ user_id=user.pk,\n+ content_type_id=ContentType.objects.get_for_model(IndicatorDimensionName).pk,\n+ object_id=instance['id'],\n+ object_repr=str(instance),\n+ action_flag=action_flag,\n+ change_message=message\n+ )\ndiff --git a/akvo/rest/views/indicator_dimension_value.py b/akvo/rest/views/indicator_dimension_value.py\n--- a/akvo/rest/views/indicator_dimension_value.py\n+++ b/akvo/rest/views/indicator_dimension_value.py\n@@ -5,6 +5,8 @@\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \n \n+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\n+from django.contrib.contenttypes.models import ContentType\n from akvo.rsr.models import IndicatorDimensionValue\n \n from ..serializers import IndicatorDimensionValueSerializer\n@@ -17,3 +19,31 @@\n queryset = IndicatorDimensionValue.objects.all()\n serializer_class = IndicatorDimensionValueSerializer\n project_relation = 'name__project__'\n+\n+ def create(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionValueViewSet, self).create(request, *args, **kwargs)\n+ self._log_action(ADDITION, response.data, str(request.data))\n+ return response\n+\n+ def update(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionValueViewSet, self).update(request, *args, **kwargs)\n+ self._log_action(CHANGE, response.data, str(request.data))\n+ return response\n+\n+ def destroy(self, request, *args, **kwargs):\n+ instance = self.get_object()\n+ data = {'id': instance.id, 'value': instance.value}\n+ response = super(IndicatorDimensionValueViewSet, self).destroy(request, *args, **kwargs)\n+ self._log_action(DELETION, data)\n+ return response\n+\n+ def _log_action(self, action_flag, instance, message=''):\n+ user = self.request.user\n+ LogEntry.objects.log_action(\n+ user_id=user.pk,\n+ 
content_type_id=ContentType.objects.get_for_model(IndicatorDimensionValue).pk,\n+ object_id=instance['id'],\n+ object_repr=str(instance),\n+ action_flag=action_flag,\n+ change_message=message\n+ )\n", "issue": "Audit log disaggregation categories and labels\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimensionName\n\nfrom ..serializers import IndicatorDimensionNameSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionNameViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')\n serializer_class = IndicatorDimensionNameSerializer\n project_relation = 'project__'\n", "path": "akvo/rest/views/indicator_dimension_name.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimensionValue\n\nfrom ..serializers import IndicatorDimensionValueSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionValueViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimensionValue.objects.all()\n serializer_class = IndicatorDimensionValueSerializer\n project_relation = 'name__project__'\n", "path": "akvo/rest/views/indicator_dimension_value.py"}]}
932
926
gh_patches_debug_61680
rasdani/github-patches
git_diff
joke2k__faker-48
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Capital O missing an umlaut Hello, I noticed in faker/Providers/De_de/internet.py in the _to_ascii method, the capital O is missing an umlaut. It should be: ('Ö', 'Oe') Currently: replacements = ( ('ä', 'ae'), ('Ä', 'Ae'), ('ö', 'oe'), ('O', 'Oe'), ('ü', 'ue'), ('Ü', 'Ue'), ('ß', 'ss') </issue> <code> [start of faker/providers/de_DE/internet.py] 1 # coding=utf-8 2 from __future__ import unicode_literals 3 from ..internet import Provider as InternetProvider 4 5 import re 6 7 8 class Provider(InternetProvider): 9 10 free_email_domains = ( 11 'web.de', 'gmail.com', 'hotmail.de', 'yahoo.de', 'googlemail.com', 12 'aol.de', 'gmx.de' 13 ) 14 tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de') 15 16 @staticmethod 17 def _to_ascii(string): 18 replacements = ( 19 ('ä', 'ae'), ('Ä', 'Ae'), 20 ('ö', 'oe'), ('O', 'Oe'), 21 ('ü', 'ue'), ('Ü', 'Ue'), 22 ('ß', 'ss') 23 ) 24 for search, replace in replacements: 25 string = string.replace(search, replace) 26 27 return string 28 29 def user_name(self): 30 pattern = self.random_element(self.user_name_formats) 31 return self._to_ascii( 32 self.bothify(self.generator.parse(pattern) 33 ).lower()) 34 35 def domain_word(self): 36 company = self.generator.format('company') 37 company_elements = company.split(' ') 38 company = self._to_ascii(company_elements.pop(0)) 39 return re.sub(r'\W', '', company).lower() 40 [end of faker/providers/de_DE/internet.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/providers/de_DE/internet.py b/faker/providers/de_DE/internet.py --- a/faker/providers/de_DE/internet.py +++ b/faker/providers/de_DE/internet.py @@ -17,7 +17,7 @@ def _to_ascii(string): replacements = ( ('ä', 'ae'), ('Ä', 'Ae'), - ('ö', 'oe'), ('O', 'Oe'), + ('ö', 'oe'), ('Ö', 'Oe'), ('ü', 'ue'), ('Ü', 'Ue'), ('ß', 'ss') )
{"golden_diff": "diff --git a/faker/providers/de_DE/internet.py b/faker/providers/de_DE/internet.py\n--- a/faker/providers/de_DE/internet.py\n+++ b/faker/providers/de_DE/internet.py\n@@ -17,7 +17,7 @@\n def _to_ascii(string):\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n- ('\u00f6', 'oe'), ('O', 'Oe'),\n+ ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss')\n )\n", "issue": "Capital O missing an umlaut\nHello, I noticed in faker/Providers/De_de/internet.py in the _to_ascii method, the capital O is missing an umlaut. \n\nIt should be: ('\u00d6', 'Oe') \n\nCurrently:\nreplacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('O', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss')\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom ..internet import Provider as InternetProvider\n\nimport re\n\n\nclass Provider(InternetProvider):\n\n free_email_domains = (\n 'web.de', 'gmail.com', 'hotmail.de', 'yahoo.de', 'googlemail.com',\n 'aol.de', 'gmx.de'\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de')\n\n @staticmethod\n def _to_ascii(string):\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('O', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss')\n )\n for search, replace in replacements:\n string = string.replace(search, replace)\n\n return string\n\n def user_name(self):\n pattern = self.random_element(self.user_name_formats)\n return self._to_ascii(\n self.bothify(self.generator.parse(pattern)\n ).lower())\n\n def domain_word(self):\n company = self.generator.format('company')\n company_elements = company.split(' ')\n company = self._to_ascii(company_elements.pop(0))\n return re.sub(r'\\W', '', company).lower()\n", "path": "faker/providers/de_DE/internet.py"}]}
1001
135
gh_patches_debug_5954
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-2609
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Issues with installation process that connects an existing DB - [x] Tester Marius reports (server credentials in Upwork) - [ ] It seems that even if you select existing database, it still tries to start a docker container for the database, creating a conflict? - [x] Tester Mohammad reports an error as well ([details here](https://docs.google.com/document/d/15m9eZFocAsU1V9inLKxC6i_KQxMdu28snRrBPOrf5Hk/edit)) </issue> <code> [start of db/install.py] 1 from sqlalchemy import text 2 from sqlalchemy.exc import OperationalError 3 4 from db import engine 5 from db.types import install 6 7 8 def install_mathesar( 9 database_name, username, password, hostname, port, skip_confirm 10 ): 11 """Create database and install Mathesar on it.""" 12 user_db_engine = engine.create_future_engine( 13 username, password, hostname, database_name, port, 14 connect_args={"connect_timeout": 10} 15 ) 16 try: 17 user_db_engine.connect() 18 print(f"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...") 19 install.install_mathesar_on_database(user_db_engine) 20 user_db_engine.dispose() 21 except OperationalError: 22 database_created = _create_database( 23 database_name=database_name, 24 hostname=hostname, 25 username=username, 26 password=password, 27 port=port, 28 skip_confirm=skip_confirm 29 ) 30 if database_created: 31 print(f"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...") 32 install.install_mathesar_on_database(user_db_engine) 33 user_db_engine.dispose() 34 else: 35 print(f"Skipping installing on DB with key {database_name}.") 36 37 38 def _create_database(database_name, hostname, username, password, port, skip_confirm=True): 39 if skip_confirm is True: 40 create_database = "y" 41 else: 42 create_database = input( 43 f"Create a new Database called {database_name}? (y/n) > " 44 ) 45 if create_database.lower() in ["y", "yes"]: 46 # We need to connect to an existing database inorder to create a new Database. 47 # So we use the default Database `postgres` that comes with postgres. 48 # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible) 49 root_database = "postgres" 50 root_db_engine = engine.create_future_engine( 51 username, password, hostname, root_database, port, 52 connect_args={"connect_timeout": 10} 53 ) 54 with root_db_engine.connect() as conn: 55 conn.execution_options(isolation_level="AUTOCOMMIT") 56 conn.execute(text(f"CREATE DATABASE {database_name}")) 57 root_db_engine.dispose() 58 print(f"Created DB is {database_name}.") 59 return True 60 else: 61 print(f"Database {database_name} not created!") 62 return False 63 [end of db/install.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/db/install.py b/db/install.py --- a/db/install.py +++ b/db/install.py @@ -53,7 +53,7 @@ ) with root_db_engine.connect() as conn: conn.execution_options(isolation_level="AUTOCOMMIT") - conn.execute(text(f"CREATE DATABASE {database_name}")) + conn.execute(text(f'CREATE DATABASE "{database_name}"')) root_db_engine.dispose() print(f"Created DB is {database_name}.") return True
{"golden_diff": "diff --git a/db/install.py b/db/install.py\n--- a/db/install.py\n+++ b/db/install.py\n@@ -53,7 +53,7 @@\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n- conn.execute(text(f\"CREATE DATABASE {database_name}\"))\n+ conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n", "issue": "Issues with installation process that connects an existing DB\n- [x] Tester Marius reports (server credentials in Upwork)\r\n - [ ] It seems that even if you select existing database, it still tries to start a docker container for the database, creating a conflict?\r\n- [x] Tester Mohammad reports an error as well ([details here](https://docs.google.com/document/d/15m9eZFocAsU1V9inLKxC6i_KQxMdu28snRrBPOrf5Hk/edit))\n", "before_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.types import install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"CREATE DATABASE {database_name}\"))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}]}
1261
112
gh_patches_debug_31675
rasdani/github-patches
git_diff
pyload__pyload-1369
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Uplea plugin out of date Hi, any download from uplea.com fails: pyLoad reports success on downloading but actually only the HTML page giving acces to download is downloaded... </issue> <code> [start of module/plugins/hoster/UpleaCom.py] 1 # -*- coding: utf-8 -*- 2 3 import re 4 5 from urlparse import urljoin 6 7 from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo 8 9 10 class UpleaCom(XFSHoster): 11 __name__ = "UpleaCom" 12 __type__ = "hoster" 13 __version__ = "0.06" 14 15 __pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}' 16 17 __description__ = """Uplea.com hoster plugin""" 18 __license__ = "GPLv3" 19 __authors__ = [("Redleon", None)] 20 21 22 NAME_PATTERN = r'class="agmd size18">(?P<N>.+?)<' 23 SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_])</span>' 24 25 OFFLINE_PATTERN = r'>You followed an invalid or expired link' 26 27 LINK_PATTERN = r'"(http?://\w+\.uplea\.com/anonym/.*?)"' 28 29 WAIT_PATTERN = r'timeText:([\d.]+),' 30 STEP_PATTERN = r'<a href="(/step/.+)">' 31 32 33 def setup(self): 34 self.multiDL = False 35 self.chunkLimit = 1 36 self.resumeDownload = True 37 38 39 def handleFree(self, pyfile): 40 m = re.search(self.STEP_PATTERN, self.html) 41 if m is None: 42 self.error(_("STEP_PATTERN not found")) 43 44 self.html = self.load(urljoin("http://uplea.com/", m.group(1))) 45 46 m = re.search(self.WAIT_PATTERN, self.html) 47 if m: 48 self.wait(m.group(1), True) 49 self.retry() 50 51 m = re.search(self.LINK_PATTERN, self.html) 52 if m is None: 53 self.error(_("LINK_PATTERN not found")) 54 55 self.link = m.group(1) 56 self.wait(15) 57 58 59 getInfo = create_getInfo(UpleaCom) 60 [end of module/plugins/hoster/UpleaCom.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/module/plugins/hoster/UpleaCom.py b/module/plugins/hoster/UpleaCom.py --- a/module/plugins/hoster/UpleaCom.py +++ b/module/plugins/hoster/UpleaCom.py @@ -10,23 +10,26 @@ class UpleaCom(XFSHoster): __name__ = "UpleaCom" __type__ = "hoster" - __version__ = "0.06" + __version__ = "0.07" __pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}' __description__ = """Uplea.com hoster plugin""" __license__ = "GPLv3" - __authors__ = [("Redleon", None)] + __authors__ = [("Redleon", None), + ("GammaC0de", None)] NAME_PATTERN = r'class="agmd size18">(?P<N>.+?)<' - SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_])</span>' + SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_]+?)</span>' + SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')] OFFLINE_PATTERN = r'>You followed an invalid or expired link' + PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file' - LINK_PATTERN = r'"(http?://\w+\.uplea\.com/anonym/.*?)"' + LINK_PATTERN = r'"(https?://\w+\.uplea\.com/anonym/.*?)"' - WAIT_PATTERN = r'timeText:([\d.]+),' + WAIT_PATTERN = r'timeText: ?([\d.]+),' STEP_PATTERN = r'<a href="(/step/.+)">' @@ -45,9 +48,14 @@ m = re.search(self.WAIT_PATTERN, self.html) if m: + self.logDebug(_("Waiting %s seconds") % m.group(1)) self.wait(m.group(1), True) self.retry() + m = re.search(self.PREMIUM_PATTERN, self.html) + if m: + self.error(_("This URL requires a premium account")) + m = re.search(self.LINK_PATTERN, self.html) if m is None: self.error(_("LINK_PATTERN not found"))
{"golden_diff": "diff --git a/module/plugins/hoster/UpleaCom.py b/module/plugins/hoster/UpleaCom.py\n--- a/module/plugins/hoster/UpleaCom.py\n+++ b/module/plugins/hoster/UpleaCom.py\n@@ -10,23 +10,26 @@\n class UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n- __version__ = \"0.06\"\n+ __version__ = \"0.07\"\n \n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n \n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n- __authors__ = [(\"Redleon\", None)]\n+ __authors__ = [(\"Redleon\", None),\n+ (\"GammaC0de\", None)]\n \n \n NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n- SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_])</span>'\n+ SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+?)</span>'\n+ SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]\n \n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n+ PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'\n \n- LINK_PATTERN = r'\"(http?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n+ LINK_PATTERN = r'\"(https?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n \n- WAIT_PATTERN = r'timeText:([\\d.]+),'\n+ WAIT_PATTERN = r'timeText: ?([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n \n \n@@ -45,9 +48,14 @@\n \n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n+ self.logDebug(_(\"Waiting %s seconds\") % m.group(1))\n self.wait(m.group(1), True)\n self.retry()\n \n+ m = re.search(self.PREMIUM_PATTERN, self.html)\n+ if m:\n+ self.error(_(\"This URL requires a premium account\"))\n+\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n", "issue": "Uplea plugin out of date\nHi,\nany download from uplea.com fails:\npyLoad reports success on downloading but actually only the HTML page giving acces to download is downloaded...\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom urlparse import urljoin\n\nfrom module.plugins.internal.XFSHoster import XFSHoster, create_getInfo\n\n\nclass UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n __version__ = \"0.06\"\n\n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n\n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Redleon\", None)]\n\n\n NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_])</span>'\n\n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n\n LINK_PATTERN = r'\"(http?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n\n WAIT_PATTERN = r'timeText:([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n\n\n def setup(self):\n self.multiDL = False\n self.chunkLimit = 1\n self.resumeDownload = True\n\n\n def handleFree(self, pyfile):\n m = re.search(self.STEP_PATTERN, self.html)\n if m is None:\n self.error(_(\"STEP_PATTERN not found\"))\n\n self.html = self.load(urljoin(\"http://uplea.com/\", m.group(1)))\n\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.wait(m.group(1), True)\n self.retry()\n\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n\n self.link = m.group(1)\n self.wait(15)\n\n\ngetInfo = create_getInfo(UpleaCom)\n", "path": "module/plugins/hoster/UpleaCom.py"}]}
1145
583
gh_patches_debug_1520
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-316
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Wendy's e.g. https://locations.wendys.com/jamestown-ny-3438 </issue> <code> [start of locations/spiders/wendys.py] 1 import scrapy 2 import re 3 import json 4 from locations.items import GeojsonPointItem 5 6 DAY_MAPPING = { 7 'Monday': 'Mo', 8 'Tuesday': 'Tu', 9 'Wednesday': 'We', 10 'Thursday': 'Th', 11 'Friday': 'Fr', 12 'Saturday': 'Sa', 13 'Sunday': 'Su' 14 } 15 16 17 class WendysSpider(scrapy.Spider): 18 19 name = "wendys" 20 allowed_domains = ["locations.wendys.com"] 21 download_delay = 0 22 download_timeout = 30 23 start_urls = ( 24 'https://locations.wendys.com', 25 ) 26 27 def handle_error(self, failure): 28 self.log("Request failed: %s" % failure.request) 29 def parse_day(self, day): 30 return DAY_MAPPING[day.strip()] 31 def parse_times(self, times): 32 hours_to = [x.strip() for x in times.split('-')] 33 cleaned_times = [] 34 35 for hour in hours_to: 36 if re.search('pm$', hour): 37 hour = re.sub('pm', '', hour).strip() 38 hour_min = hour.split(":") 39 if int(hour_min[0]) < 12: 40 hour_min[0] = str(12 + int(hour_min[0])) 41 cleaned_times.append(":".join(hour_min)) 42 43 if re.search('am$', hour): 44 hour = re.sub('am', '', hour).strip() 45 hour_min = hour.split(":") 46 if len(hour_min[0]) <2: 47 hour_min[0] = hour_min[0].zfill(2) 48 else: 49 hour_min[0] = str(int(hour_min[0])) 50 51 cleaned_times.append(":".join(hour_min)) 52 return "-".join(cleaned_times) 53 54 def parse_hours(self, lis): 55 hours = [] 56 for li in lis: 57 day = li.xpath('./span[@class="day"]/text()').extract()[1] 58 times = li.xpath('./span[2]/text()').extract_first() 59 if times and day: 60 parsed_time = self.parse_times(times) 61 parsed_day = self.parse_day(day) 62 hours.append(parsed_day + ' ' + parsed_time) 63 64 return "; ".join(hours) 65 def parse_stores(self, response): 66 page_content = response.body_as_unicode() 67 json_content = re.findall('li.data.results =[^;]+' , page_content) 68 if len(json_content)>0: 69 json_content = json_content[0].replace('li.data.results =' ,'') 70 json_data = json.loads(json_content) 71 properties = { 72 'addr_full': json_data[0]['address'], 73 'phone':json_data[0]['phone'], 74 'city': json_data[0]['city'], 75 'state':json_data[0]['state'], 76 'postcode': json_data[0]['postal'], 77 'ref': json_data[0]['id'], 78 'website': response.url, 79 'lat': json_data[0]['lat'], 80 'lon': json_data[0]['lon'], 81 } 82 hours = self.parse_hours(response.xpath('//div[@class="hours"]/ol/li')) 83 if hours: 84 properties['opening_hours'] = hours 85 86 yield GeojsonPointItem(**properties) 87 88 def parse_city_stores(self, response): 89 stores = response.xpath('//div[@class="col-xs-12 col-lg-10 col-lg-offset-1"]/article/ul/li/a/@href').extract() 90 for store in stores: 91 if store: 92 yield scrapy.Request(response.urljoin(store), callback=self.parse_stores ,errback=self.handle_error) 93 94 def parse_state(self, response): 95 city_urls = response.xpath('//div[@class="col-xs-12 col-lg-10 col-lg-offset-1"]/article/div[@class="col"]/ul/li/a/@href').extract() 96 for path in city_urls: 97 yield scrapy.Request(response.urljoin(path), callback=self.parse_city_stores ,errback=self.handle_error) 98 99 def parse(self, response): 100 urls = response.xpath('//div[@class="col-xs-12 col-lg-10 col-lg-offset-1"]/article/div[@class="col"]/ul/li/a/@href').extract() 101 for path in urls: 102 yield scrapy.Request(response.urljoin(path), callback=self.parse_state ,errback=self.handle_error) 103 [end of locations/spiders/wendys.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/wendys.py b/locations/spiders/wendys.py --- a/locations/spiders/wendys.py +++ b/locations/spiders/wendys.py @@ -18,7 +18,7 @@ name = "wendys" allowed_domains = ["locations.wendys.com"] - download_delay = 0 + download_delay = 0.5 download_timeout = 30 start_urls = ( 'https://locations.wendys.com',
{"golden_diff": "diff --git a/locations/spiders/wendys.py b/locations/spiders/wendys.py\n--- a/locations/spiders/wendys.py\n+++ b/locations/spiders/wendys.py\n@@ -18,7 +18,7 @@\n \n name = \"wendys\"\n allowed_domains = [\"locations.wendys.com\"]\n- download_delay = 0\n+ download_delay = 0.5\n download_timeout = 30\n start_urls = (\n 'https://locations.wendys.com',\n", "issue": "Wendy's\ne.g. https://locations.wendys.com/jamestown-ny-3438\n", "before_files": [{"content": "import scrapy\nimport re\nimport json\nfrom locations.items import GeojsonPointItem\n\nDAY_MAPPING = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\n\nclass WendysSpider(scrapy.Spider):\n\n name = \"wendys\"\n allowed_domains = [\"locations.wendys.com\"]\n download_delay = 0\n download_timeout = 30\n start_urls = (\n 'https://locations.wendys.com',\n )\n\n def handle_error(self, failure):\n self.log(\"Request failed: %s\" % failure.request)\n def parse_day(self, day):\n return DAY_MAPPING[day.strip()]\n def parse_times(self, times):\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n\n for hour in hours_to:\n if re.search('pm$', hour):\n hour = re.sub('pm', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('am$', hour):\n hour = re.sub('am', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = hour_min[0].zfill(2)\n else:\n hour_min[0] = str(int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day = li.xpath('./span[@class=\"day\"]/text()').extract()[1]\n times = li.xpath('./span[2]/text()').extract_first()\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n hours.append(parsed_day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n def parse_stores(self, response):\n page_content = response.body_as_unicode()\n json_content = re.findall('li.data.results =[^;]+' , page_content)\n if len(json_content)>0:\n json_content = json_content[0].replace('li.data.results =' ,'')\n json_data = json.loads(json_content)\n properties = {\n 'addr_full': json_data[0]['address'],\n 'phone':json_data[0]['phone'],\n 'city': json_data[0]['city'],\n 'state':json_data[0]['state'],\n 'postcode': json_data[0]['postal'],\n 'ref': json_data[0]['id'],\n 'website': response.url,\n 'lat': json_data[0]['lat'],\n 'lon': json_data[0]['lon'],\n }\n hours = self.parse_hours(response.xpath('//div[@class=\"hours\"]/ol/li'))\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse_city_stores(self, response):\n stores = response.xpath('//div[@class=\"col-xs-12 col-lg-10 col-lg-offset-1\"]/article/ul/li/a/@href').extract()\n for store in stores:\n if store:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_stores ,errback=self.handle_error)\n\n def parse_state(self, response):\n city_urls = response.xpath('//div[@class=\"col-xs-12 col-lg-10 col-lg-offset-1\"]/article/div[@class=\"col\"]/ul/li/a/@href').extract()\n for path in city_urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_city_stores ,errback=self.handle_error)\n\n def parse(self, response):\n urls = response.xpath('//div[@class=\"col-xs-12 col-lg-10 
col-lg-offset-1\"]/article/div[@class=\"col\"]/ul/li/a/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_state ,errback=self.handle_error)\n", "path": "locations/spiders/wendys.py"}]}
1690
117
gh_patches_debug_6762
rasdani/github-patches
git_diff
microsoft__DeepSpeed-4770
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> nv-sd CI test failure The Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7073374056 failed. </issue> <code> [start of deepspeed/model_implementations/diffusers/unet.py] 1 # Copyright (c) Microsoft Corporation. 2 # SPDX-License-Identifier: Apache-2.0 3 4 # DeepSpeed Team 5 6 import torch 7 from ..features.cuda_graph import CUDAGraph 8 9 10 class DSUNet(CUDAGraph, torch.nn.Module): 11 12 def __init__(self, unet, enable_cuda_graph=True): 13 super().__init__(enable_cuda_graph=enable_cuda_graph) 14 self.unet = unet 15 # SD pipeline accesses this attribute 16 self.in_channels = unet.in_channels 17 self.device = self.unet.device 18 self.dtype = self.unet.dtype 19 self.config = self.unet.config 20 self.fwd_count = 0 21 self.unet.requires_grad_(requires_grad=False) 22 self.unet.to(memory_format=torch.channels_last) 23 self.cuda_graph_created = False 24 25 def _graph_replay(self, *inputs, **kwargs): 26 for i in range(len(inputs)): 27 if torch.is_tensor(inputs[i]): 28 self.static_inputs[i].copy_(inputs[i]) 29 for k in kwargs: 30 if torch.is_tensor(kwargs[k]): 31 self.static_kwargs[k].copy_(kwargs[k]) 32 self._cuda_graphs.replay() 33 return self.static_output 34 35 def forward(self, *inputs, **kwargs): 36 if self.enable_cuda_graph: 37 if self.cuda_graph_created: 38 outputs = self._graph_replay(*inputs, **kwargs) 39 else: 40 self._create_cuda_graph(*inputs, **kwargs) 41 outputs = self._graph_replay(*inputs, **kwargs) 42 return outputs 43 else: 44 return self._forward(*inputs, **kwargs) 45 46 def _create_cuda_graph(self, *inputs, **kwargs): 47 # warmup to create the workspace and cublas handle 48 cuda_stream = torch.cuda.Stream() 49 cuda_stream.wait_stream(torch.cuda.current_stream()) 50 with torch.cuda.stream(cuda_stream): 51 for i in range(3): 52 ret = self._forward(*inputs, **kwargs) 53 torch.cuda.current_stream().wait_stream(cuda_stream) 54 55 # create cuda_graph and assign static_inputs and static_outputs 56 self._cuda_graphs = torch.cuda.CUDAGraph() 57 self.static_inputs = inputs 58 self.static_kwargs = kwargs 59 60 with torch.cuda.graph(self._cuda_graphs): 61 self.static_output = self._forward(*self.static_inputs, **self.static_kwargs) 62 63 self.cuda_graph_created = True 64 65 def _forward(self, 66 sample, 67 timestamp, 68 encoder_hidden_states, 69 return_dict=True, 70 cross_attention_kwargs=None, 71 timestep_cond=None): 72 if cross_attention_kwargs: 73 return self.unet(sample, 74 timestamp, 75 encoder_hidden_states, 76 return_dict, 77 cross_attention_kwargs=cross_attention_kwargs) 78 else: 79 return self.unet(sample, timestamp, encoder_hidden_states, return_dict) 80 [end of deepspeed/model_implementations/diffusers/unet.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/deepspeed/model_implementations/diffusers/unet.py b/deepspeed/model_implementations/diffusers/unet.py --- a/deepspeed/model_implementations/diffusers/unet.py +++ b/deepspeed/model_implementations/diffusers/unet.py @@ -68,7 +68,8 @@ encoder_hidden_states, return_dict=True, cross_attention_kwargs=None, - timestep_cond=None): + timestep_cond=None, + added_cond_kwargs=None): if cross_attention_kwargs: return self.unet(sample, timestamp,
{"golden_diff": "diff --git a/deepspeed/model_implementations/diffusers/unet.py b/deepspeed/model_implementations/diffusers/unet.py\n--- a/deepspeed/model_implementations/diffusers/unet.py\n+++ b/deepspeed/model_implementations/diffusers/unet.py\n@@ -68,7 +68,8 @@\n encoder_hidden_states,\n return_dict=True,\n cross_attention_kwargs=None,\n- timestep_cond=None):\n+ timestep_cond=None,\n+ added_cond_kwargs=None):\n if cross_attention_kwargs:\n return self.unet(sample,\n timestamp,\n", "issue": "nv-sd CI test failure\nThe Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7073374056 failed.\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport torch\nfrom ..features.cuda_graph import CUDAGraph\n\n\nclass DSUNet(CUDAGraph, torch.nn.Module):\n\n def __init__(self, unet, enable_cuda_graph=True):\n super().__init__(enable_cuda_graph=enable_cuda_graph)\n self.unet = unet\n # SD pipeline accesses this attribute\n self.in_channels = unet.in_channels\n self.device = self.unet.device\n self.dtype = self.unet.dtype\n self.config = self.unet.config\n self.fwd_count = 0\n self.unet.requires_grad_(requires_grad=False)\n self.unet.to(memory_format=torch.channels_last)\n self.cuda_graph_created = False\n\n def _graph_replay(self, *inputs, **kwargs):\n for i in range(len(inputs)):\n if torch.is_tensor(inputs[i]):\n self.static_inputs[i].copy_(inputs[i])\n for k in kwargs:\n if torch.is_tensor(kwargs[k]):\n self.static_kwargs[k].copy_(kwargs[k])\n self._cuda_graphs.replay()\n return self.static_output\n\n def forward(self, *inputs, **kwargs):\n if self.enable_cuda_graph:\n if self.cuda_graph_created:\n outputs = self._graph_replay(*inputs, **kwargs)\n else:\n self._create_cuda_graph(*inputs, **kwargs)\n outputs = self._graph_replay(*inputs, **kwargs)\n return outputs\n else:\n return self._forward(*inputs, **kwargs)\n\n def _create_cuda_graph(self, *inputs, **kwargs):\n # warmup to create the workspace and cublas handle\n cuda_stream = torch.cuda.Stream()\n cuda_stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(cuda_stream):\n for i in range(3):\n ret = self._forward(*inputs, **kwargs)\n torch.cuda.current_stream().wait_stream(cuda_stream)\n\n # create cuda_graph and assign static_inputs and static_outputs\n self._cuda_graphs = torch.cuda.CUDAGraph()\n self.static_inputs = inputs\n self.static_kwargs = kwargs\n\n with torch.cuda.graph(self._cuda_graphs):\n self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)\n\n self.cuda_graph_created = True\n\n def _forward(self,\n sample,\n timestamp,\n encoder_hidden_states,\n return_dict=True,\n cross_attention_kwargs=None,\n timestep_cond=None):\n if cross_attention_kwargs:\n return self.unet(sample,\n timestamp,\n encoder_hidden_states,\n return_dict,\n cross_attention_kwargs=cross_attention_kwargs)\n else:\n return self.unet(sample, timestamp, encoder_hidden_states, return_dict)\n", "path": "deepspeed/model_implementations/diffusers/unet.py"}]}
1336
128
gh_patches_debug_460
rasdani/github-patches
git_diff
gratipay__gratipay.com-3013
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Twitter asks for authorization even though I've already authorized Gittip As of #1369 Twitter is now asking me to authorize Giitip even though I've already done so. ![screen shot 2014-02-26 at 4 45 45 pm](https://f.cloud.github.com/assets/688886/2276800/fac5f68c-9f37-11e3-9dcc-5c77d35c6aa5.png) <bountysource-plugin> --- Want to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1428788-twitter-asks-for-authorization-even-though-i-ve-already-authorized-gittip?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github). </bountysource-plugin> Twitter asks for authorization even though I've already authorized Gittip As of #1369 Twitter is now asking me to authorize Giitip even though I've already done so. ![screen shot 2014-02-26 at 4 45 45 pm](https://f.cloud.github.com/assets/688886/2276800/fac5f68c-9f37-11e3-9dcc-5c77d35c6aa5.png) <bountysource-plugin> --- Want to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1428788-twitter-asks-for-authorization-even-though-i-ve-already-authorized-gittip?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github). </bountysource-plugin> </issue> <code> [start of gratipay/elsewhere/twitter.py] 1 from __future__ import absolute_import, division, print_function, unicode_literals 2 3 from gratipay.elsewhere import PlatformOAuth1 4 from gratipay.elsewhere._extractors import key, not_available 5 6 7 class Twitter(PlatformOAuth1): 8 9 # Platform attributes 10 name = 'twitter' 11 display_name = 'Twitter' 12 account_url = 'https://twitter.com/{user_name}' 13 14 # Auth attributes 15 auth_url = 'https://api.twitter.com' 16 17 # API attributes 18 api_format = 'json' 19 api_url = 'https://api.twitter.com/1.1' 20 api_user_info_path = '/users/show.json?screen_name={user_name}' 21 api_user_self_info_path = '/account/verify_credentials.json' 22 ratelimit_headers_prefix = 'x-rate-limit-' 23 24 # User info extractors 25 x_user_id = key('id') 26 x_user_name = key('screen_name') 27 x_display_name = key('name') 28 x_email = not_available 29 x_avatar_url = key('profile_image_url_https', 30 clean=lambda v: v.replace('_normal.', '.')) 31 [end of gratipay/elsewhere/twitter.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gratipay/elsewhere/twitter.py b/gratipay/elsewhere/twitter.py --- a/gratipay/elsewhere/twitter.py +++ b/gratipay/elsewhere/twitter.py @@ -13,6 +13,7 @@ # Auth attributes auth_url = 'https://api.twitter.com' + authorize_path = '/oauth/authenticate' # API attributes api_format = 'json'
{"golden_diff": "diff --git a/gratipay/elsewhere/twitter.py b/gratipay/elsewhere/twitter.py\n--- a/gratipay/elsewhere/twitter.py\n+++ b/gratipay/elsewhere/twitter.py\n@@ -13,6 +13,7 @@\n \n # Auth attributes\n auth_url = 'https://api.twitter.com'\n+ authorize_path = '/oauth/authenticate'\n \n # API attributes\n api_format = 'json'\n", "issue": "Twitter asks for authorization even though I've already authorized Gittip\nAs of #1369 Twitter is now asking me to authorize Giitip even though I've already done so.\n\n![screen shot 2014-02-26 at 4 45 45 pm](https://f.cloud.github.com/assets/688886/2276800/fac5f68c-9f37-11e3-9dcc-5c77d35c6aa5.png)\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1428788-twitter-asks-for-authorization-even-though-i-ve-already-authorized-gittip?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\nTwitter asks for authorization even though I've already authorized Gittip\nAs of #1369 Twitter is now asking me to authorize Giitip even though I've already done so.\n\n![screen shot 2014-02-26 at 4 45 45 pm](https://f.cloud.github.com/assets/688886/2276800/fac5f68c-9f37-11e3-9dcc-5c77d35c6aa5.png)\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1428788-twitter-asks-for-authorization-even-though-i-ve-already-authorized-gittip?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth1\nfrom gratipay.elsewhere._extractors import key, not_available\n\n\nclass Twitter(PlatformOAuth1):\n\n # Platform attributes\n name = 'twitter'\n display_name = 'Twitter'\n account_url = 'https://twitter.com/{user_name}'\n\n # Auth attributes\n auth_url = 'https://api.twitter.com'\n\n # API attributes\n api_format = 'json'\n api_url = 'https://api.twitter.com/1.1'\n api_user_info_path = '/users/show.json?screen_name={user_name}'\n api_user_self_info_path = '/account/verify_credentials.json'\n ratelimit_headers_prefix = 'x-rate-limit-'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('screen_name')\n x_display_name = key('name')\n x_email = not_available\n x_avatar_url = key('profile_image_url_https',\n clean=lambda v: v.replace('_normal.', '.'))\n", "path": "gratipay/elsewhere/twitter.py"}]}
1347
97
gh_patches_debug_24607
rasdani/github-patches
git_diff
streamlink__streamlink-3185
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tv360.com.tr no playable stream ## Bug Report - [x] This is a bug report and I have read the contribution guidelines. ### Description can't find playable stream. ### Expected / Actual behavior stream supposed to be found ### Reproduction steps / Explicit stream URLs to test ``` 1. streamlink https://www.tv360.com.tr/canli-yayin ``` ### Log output ``` [cli][debug] OS: Windows 10 [cli][debug] Python: 3.8.2 [cli][debug] Streamlink: 1.5.0 [cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0) [cli][info] Found matching plugin tv360 for URL tv360.com.tr/canli-yayin error: No playable streams found on this URL: tv360.com.tr/canli-yayin ``` ### Additional comments, screenshots, etc. [Love Streamlink? Please consider supporting our collective. Thanks!](https://opencollective.com/streamlink/donate) </issue> <code> [start of src/streamlink/plugins/tv360.py] 1 from __future__ import print_function 2 3 import re 4 5 from streamlink.plugin import Plugin 6 from streamlink.plugin.api import validate 7 from streamlink.stream import HLSStream 8 9 10 class TV360(Plugin): 11 url_re = re.compile(r"https?://(?:www.)?tv360.com.tr/canli-yayin") 12 hls_re = re.compile(r'''hls.loadSource\(["'](http.*m3u8)["']\)''', re.DOTALL) 13 14 hls_schema = validate.Schema( 15 validate.transform(hls_re.search), 16 validate.any(None, validate.all(validate.get(1))) 17 ) 18 19 @classmethod 20 def can_handle_url(cls, url): 21 return cls.url_re.match(url) is not None 22 23 def _get_streams(self): 24 res = self.session.http.get(self.url) 25 hls_url = self.hls_re.search(res.text) 26 27 if hls_url: 28 return HLSStream.parse_variant_playlist(self.session, hls_url.group(1)) 29 30 31 __plugin__ = TV360 32 [end of src/streamlink/plugins/tv360.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/tv360.py b/src/streamlink/plugins/tv360.py --- a/src/streamlink/plugins/tv360.py +++ b/src/streamlink/plugins/tv360.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import re from streamlink.plugin import Plugin @@ -9,11 +7,11 @@ class TV360(Plugin): url_re = re.compile(r"https?://(?:www.)?tv360.com.tr/canli-yayin") - hls_re = re.compile(r'''hls.loadSource\(["'](http.*m3u8)["']\)''', re.DOTALL) + hls_re = re.compile(r'''src="(http.*m3u8)"''') hls_schema = validate.Schema( validate.transform(hls_re.search), - validate.any(None, validate.all(validate.get(1))) + validate.any(None, validate.all(validate.get(1), validate.url())) ) @classmethod @@ -21,11 +19,10 @@ return cls.url_re.match(url) is not None def _get_streams(self): - res = self.session.http.get(self.url) - hls_url = self.hls_re.search(res.text) + hls_url = self.session.http.get(self.url, schema=self.hls_schema) if hls_url: - return HLSStream.parse_variant_playlist(self.session, hls_url.group(1)) + return HLSStream.parse_variant_playlist(self.session, hls_url) __plugin__ = TV360
{"golden_diff": "diff --git a/src/streamlink/plugins/tv360.py b/src/streamlink/plugins/tv360.py\n--- a/src/streamlink/plugins/tv360.py\n+++ b/src/streamlink/plugins/tv360.py\n@@ -1,5 +1,3 @@\n-from __future__ import print_function\n-\n import re\n \n from streamlink.plugin import Plugin\n@@ -9,11 +7,11 @@\n \n class TV360(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?tv360.com.tr/canli-yayin\")\n- hls_re = re.compile(r'''hls.loadSource\\([\"'](http.*m3u8)[\"']\\)''', re.DOTALL)\n+ hls_re = re.compile(r'''src=\"(http.*m3u8)\"''')\n \n hls_schema = validate.Schema(\n validate.transform(hls_re.search),\n- validate.any(None, validate.all(validate.get(1)))\n+ validate.any(None, validate.all(validate.get(1), validate.url()))\n )\n \n @classmethod\n@@ -21,11 +19,10 @@\n return cls.url_re.match(url) is not None\n \n def _get_streams(self):\n- res = self.session.http.get(self.url)\n- hls_url = self.hls_re.search(res.text)\n+ hls_url = self.session.http.get(self.url, schema=self.hls_schema)\n \n if hls_url:\n- return HLSStream.parse_variant_playlist(self.session, hls_url.group(1))\n+ return HLSStream.parse_variant_playlist(self.session, hls_url)\n \n \n __plugin__ = TV360\n", "issue": "tv360.com.tr no playable stream\n## Bug Report\r\n- [x] This is a bug report and I have read the contribution guidelines.\r\n\r\n### Description\r\n\r\ncan't find playable stream.\r\n\r\n### Expected / Actual behavior\r\n\r\nstream supposed to be found\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n``` 1. streamlink https://www.tv360.com.tr/canli-yayin ```\r\n\r\n### Log output\r\n\r\n```\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.8.2\r\n[cli][debug] Streamlink: 1.5.0\r\n[cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)\r\n[cli][info] Found matching plugin tv360 for URL tv360.com.tr/canli-yayin\r\nerror: No playable streams found on this URL: tv360.com.tr/canli-yayin\r\n```\r\n\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\n\r\n\r\n[Love Streamlink? Please consider supporting our collective. Thanks!](https://opencollective.com/streamlink/donate)\r\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass TV360(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?tv360.com.tr/canli-yayin\")\n hls_re = re.compile(r'''hls.loadSource\\([\"'](http.*m3u8)[\"']\\)''', re.DOTALL)\n\n hls_schema = validate.Schema(\n validate.transform(hls_re.search),\n validate.any(None, validate.all(validate.get(1)))\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = self.session.http.get(self.url)\n hls_url = self.hls_re.search(res.text)\n\n if hls_url:\n return HLSStream.parse_variant_playlist(self.session, hls_url.group(1))\n\n\n__plugin__ = TV360\n", "path": "src/streamlink/plugins/tv360.py"}]}
1087
364
gh_patches_debug_8778
rasdani/github-patches
git_diff
pytorch__ignite-1330
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Docstring of Canberra metric warning Following this comment > @sdesrozis could you please investigate why there is a warning here : https://travis-ci.org/github/pytorch/ignite/jobs/730492404#L2924 thanks ! > Doc link is apparently badly rendered : https://pytorch.org/ignite/master/contrib/metrics.html#ignite.contrib.metrics.regression.CanberraMetric > _Originally posted by @vfdev-5 in https://github.com/pytorch/ignite/pull/1314#issuecomment-699506241_ Namespace are shared so reference should be unique </issue> <code> [start of ignite/contrib/metrics/regression/canberra_metric.py] 1 from typing import Callable, Union 2 3 import torch 4 5 from ignite.contrib.metrics.regression._base import _BaseRegression 6 from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce 7 8 9 class CanberraMetric(_BaseRegression): 10 r""" 11 Calculates the Canberra Metric. 12 13 :math:`\text{CM} = \sum_{j=1}^n\frac{|A_j - P_j|}{|A_j| + |P_j|}` 14 15 where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value. 16 17 More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_ 18 19 - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. 20 - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`. 21 22 .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006 23 .. _scikit-learn distance metrics: 24 https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html 25 26 """ 27 28 def __init__( 29 self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu") 30 ): 31 self._sum_of_errors = None 32 super(CanberraMetric, self).__init__(output_transform, device) 33 34 @reinit__is_reduced 35 def reset(self): 36 self._sum_of_errors = torch.tensor(0.0, device=self._device) 37 38 def _update(self, output): 39 y_pred, y = output 40 errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y)) 41 self._sum_of_errors += torch.sum(errors).to(self._device) 42 43 @sync_all_reduce("_sum_of_errors") 44 def compute(self): 45 return self._sum_of_errors.item() 46 [end of ignite/contrib/metrics/regression/canberra_metric.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py --- a/ignite/contrib/metrics/regression/canberra_metric.py +++ b/ignite/contrib/metrics/regression/canberra_metric.py @@ -19,7 +19,6 @@ - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`. - .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006 .. _scikit-learn distance metrics: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html
{"golden_diff": "diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py\n--- a/ignite/contrib/metrics/regression/canberra_metric.py\n+++ b/ignite/contrib/metrics/regression/canberra_metric.py\n@@ -19,7 +19,6 @@\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n \n- .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006\n .. _scikit-learn distance metrics:\n https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html\n", "issue": "Docstring of Canberra metric warning\nFollowing this comment \r\n\r\n\r\n> @sdesrozis could you please investigate why there is a warning here : https://travis-ci.org/github/pytorch/ignite/jobs/730492404#L2924 \r\nthanks !\r\n> Doc link is apparently badly rendered : https://pytorch.org/ignite/master/contrib/metrics.html#ignite.contrib.metrics.regression.CanberraMetric\r\n> _Originally posted by @vfdev-5 in https://github.com/pytorch/ignite/pull/1314#issuecomment-699506241_\r\n\r\nNamespace are shared so reference should be unique\r\n\n", "before_files": [{"content": "from typing import Callable, Union\n\nimport torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\nfrom ignite.metrics.metric import reinit__is_reduced, sync_all_reduce\n\n\nclass CanberraMetric(_BaseRegression):\n r\"\"\"\n Calculates the Canberra Metric.\n\n :math:`\\text{CM} = \\sum_{j=1}^n\\frac{|A_j - P_j|}{|A_j| + |P_j|}`\n\n where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n\n More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n\n .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006\n .. _scikit-learn distance metrics:\n https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html\n\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n self._sum_of_errors = None\n super(CanberraMetric, self).__init__(output_transform, device)\n\n @reinit__is_reduced\n def reset(self):\n self._sum_of_errors = torch.tensor(0.0, device=self._device)\n\n def _update(self, output):\n y_pred, y = output\n errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y))\n self._sum_of_errors += torch.sum(errors).to(self._device)\n\n @sync_all_reduce(\"_sum_of_errors\")\n def compute(self):\n return self._sum_of_errors.item()\n", "path": "ignite/contrib/metrics/regression/canberra_metric.py"}]}
num_tokens_prompt: 1,233
num_tokens_diff: 202

problem_id: gh_patches_debug_35818
source: rasdani/github-patches
task_type: git_diff
in_source_id: beetbox__beets-1779
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> mbsubmit: cleanup and completion Glad to see a new release has been made! I'm getting back to work on beets after a few days away from the computer, hopefully bringing issue #1689 to a close eventually. As hinted on the previous discussion, this pull request is intended to take care of the `mbsubmit` plugin cleanup, now that the underlying pieces are in place. I have modified a bit the behaviour, making the decision of appending the `"Print tracks"` choice depend solely on `task.rec`. The default behaviour is to only append the choice to matches where the recommendation is equal or lower than `Recommendation.medium`, which hopefully covers the most obvious choices (albums with no matches, albums with weak-ish matches) and the original request by @awesomer, and also avoids polluting the prompt in the cases where the match is strong. A config option has been added that allows the user to modify this settings (extra-picky users might find it useful to always be able to print tracks for fixing spelling mistakes, other users might only want it on albums with no matches, etc). Other than that, a configuration option for setting the format string has been added as well - I can't think of a case where this might come in handy currently, but maybe more creative users might find it useful. A couple of notes: - currently, the plugin makes no effort of nicely formatting items that might be lacking some of the required fields. Would it be useful to add some extra checks and fall back to printing the filename (or something more advanced with the help of `fromfilename`, etc) in those cases? - there might be some problems on some combination on options: for example, if the user sets the threshold to `strong`, but launches the importer in non-timid mode, the prompt will not actually be displayed. Would a note on the (upcoming) documentation suffice, as handling this case probably requires some changes that seem to be a bit out of the scope of the plugin? As usual, any comments and input are more than welcome! </issue> <code> [start of beetsplug/mbsubmit.py] 1 # -*- coding: utf-8 -*- 2 # This file is part of beets. 3 # Copyright 2016, Adrian Sampson and Diego Moreda. 4 # 5 # Permission is hereby granted, free of charge, to any person obtaining 6 # a copy of this software and associated documentation files (the 7 # "Software"), to deal in the Software without restriction, including 8 # without limitation the rights to use, copy, modify, merge, publish, 9 # distribute, sublicense, and/or sell copies of the Software, and to 10 # permit persons to whom the Software is furnished to do so, subject to 11 # the following conditions: 12 # 13 # The above copyright notice and this permission notice shall be 14 # included in all copies or substantial portions of the Software. 15 16 """Aid in submitting information to MusicBrainz. 17 18 This plugin allows the user to print track information in a format that is 19 parseable by the MusicBrainz track parser. Programmatic submitting is not 20 implemented by MusicBrainz yet. 
21 """ 22 23 from __future__ import (division, absolute_import, print_function, 24 unicode_literals) 25 26 27 from beets.autotag import Recommendation 28 from beets.importer import action 29 from beets.plugins import BeetsPlugin 30 from beets.ui.commands import PromptChoice 31 from beetsplug.info import print_data 32 33 34 class MBSubmitPlugin(BeetsPlugin): 35 def __init__(self): 36 super(MBSubmitPlugin, self).__init__() 37 38 self.register_listener('before_choose_candidate', 39 self.before_choose_candidate_event) 40 41 def before_choose_candidate_event(self, session, task): 42 if not task.candidates or task.rec == Recommendation.none: 43 return [PromptChoice('p', 'Print tracks', self.print_tracks), 44 PromptChoice('k', 'print tracks and sKip', 45 self.print_tracks_and_skip)] 46 47 # Callbacks for choices. 48 def print_tracks(self, session, task): 49 for i in task.items: 50 print_data(None, i, '$track. $artist - $title ($length)') 51 52 def print_tracks_and_skip(self, session, task): 53 for i in task.items: 54 print_data(None, i, '$track. $artist - $title ($length)') 55 return action.SKIP 56 [end of beetsplug/mbsubmit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py --- a/beetsplug/mbsubmit.py +++ b/beetsplug/mbsubmit.py @@ -16,8 +16,10 @@ """Aid in submitting information to MusicBrainz. This plugin allows the user to print track information in a format that is -parseable by the MusicBrainz track parser. Programmatic submitting is not +parseable by the MusicBrainz track parser [1]. Programmatic submitting is not implemented by MusicBrainz yet. + +[1] http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings """ from __future__ import (division, absolute_import, print_function, @@ -25,7 +27,6 @@ from beets.autotag import Recommendation -from beets.importer import action from beets.plugins import BeetsPlugin from beets.ui.commands import PromptChoice from beetsplug.info import print_data @@ -35,21 +36,26 @@ def __init__(self): super(MBSubmitPlugin, self).__init__() + self.config.add({ + 'format': '$track. $title - $artist ($length)', + 'threshold': 'medium', + }) + + # Validate and store threshold. + self.threshold = self.config['threshold'].as_choice({ + 'none': Recommendation.none, + 'low': Recommendation.low, + 'medium': Recommendation.medium, + 'strong': Recommendation.strong + }) + self.register_listener('before_choose_candidate', self.before_choose_candidate_event) def before_choose_candidate_event(self, session, task): - if not task.candidates or task.rec == Recommendation.none: - return [PromptChoice('p', 'Print tracks', self.print_tracks), - PromptChoice('k', 'print tracks and sKip', - self.print_tracks_and_skip)] + if task.rec <= self.threshold: + return [PromptChoice('p', 'Print tracks', self.print_tracks)] - # Callbacks for choices. def print_tracks(self, session, task): for i in task.items: - print_data(None, i, '$track. $artist - $title ($length)') - - def print_tracks_and_skip(self, session, task): - for i in task.items: - print_data(None, i, '$track. $artist - $title ($length)') - return action.SKIP + print_data(None, i, self.config['format'].get())
{"golden_diff": "diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py\n--- a/beetsplug/mbsubmit.py\n+++ b/beetsplug/mbsubmit.py\n@@ -16,8 +16,10 @@\n \"\"\"Aid in submitting information to MusicBrainz.\n \n This plugin allows the user to print track information in a format that is\n-parseable by the MusicBrainz track parser. Programmatic submitting is not\n+parseable by the MusicBrainz track parser [1]. Programmatic submitting is not\n implemented by MusicBrainz yet.\n+\n+[1] http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings\n \"\"\"\n \n from __future__ import (division, absolute_import, print_function,\n@@ -25,7 +27,6 @@\n \n \n from beets.autotag import Recommendation\n-from beets.importer import action\n from beets.plugins import BeetsPlugin\n from beets.ui.commands import PromptChoice\n from beetsplug.info import print_data\n@@ -35,21 +36,26 @@\n def __init__(self):\n super(MBSubmitPlugin, self).__init__()\n \n+ self.config.add({\n+ 'format': '$track. $title - $artist ($length)',\n+ 'threshold': 'medium',\n+ })\n+\n+ # Validate and store threshold.\n+ self.threshold = self.config['threshold'].as_choice({\n+ 'none': Recommendation.none,\n+ 'low': Recommendation.low,\n+ 'medium': Recommendation.medium,\n+ 'strong': Recommendation.strong\n+ })\n+\n self.register_listener('before_choose_candidate',\n self.before_choose_candidate_event)\n \n def before_choose_candidate_event(self, session, task):\n- if not task.candidates or task.rec == Recommendation.none:\n- return [PromptChoice('p', 'Print tracks', self.print_tracks),\n- PromptChoice('k', 'print tracks and sKip',\n- self.print_tracks_and_skip)]\n+ if task.rec <= self.threshold:\n+ return [PromptChoice('p', 'Print tracks', self.print_tracks)]\n \n- # Callbacks for choices.\n def print_tracks(self, session, task):\n for i in task.items:\n- print_data(None, i, '$track. $artist - $title ($length)')\n-\n- def print_tracks_and_skip(self, session, task):\n- for i in task.items:\n- print_data(None, i, '$track. $artist - $title ($length)')\n- return action.SKIP\n+ print_data(None, i, self.config['format'].get())\n", "issue": "mbsubmit: cleanup and completion\nGlad to see a new release has been made!\n\nI'm getting back to work on beets after a few days away from the computer, hopefully bringing issue #1689 to a close eventually. As hinted on the previous discussion, this pull request is intended to take care of the `mbsubmit` plugin cleanup, now that the underlying pieces are in place.\n\nI have modified a bit the behaviour, making the decision of appending the `\"Print tracks\"` choice depend solely on `task.rec`. The default behaviour is to only append the choice to matches where the recommendation is equal or lower than `Recommendation.medium`, which hopefully covers the most obvious choices (albums with no matches, albums with weak-ish matches) and the original request by @awesomer, and also avoids polluting the prompt in the cases where the match is strong. 
A config option has been added that allows the user to modify this settings (extra-picky users might find it useful to always be able to print tracks for fixing spelling mistakes, other users might only want it on albums with no matches, etc).\n\nOther than that, a configuration option for setting the format string has been added as well - I can't think of a case where this might come in handy currently, but maybe more creative users might find it useful.\n\nA couple of notes:\n- currently, the plugin makes no effort of nicely formatting items that might be lacking some of the required fields. Would it be useful to add some extra checks and fall back to printing the filename (or something more advanced with the help of `fromfilename`, etc) in those cases?\n- there might be some problems on some combination on options: for example, if the user sets the threshold to `strong`, but launches the importer in non-timid mode, the prompt will not actually be displayed. Would a note on the (upcoming) documentation suffice, as handling this case probably requires some changes that seem to be a bit out of the scope of the plugin?\n\nAs usual, any comments and input are more than welcome!\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson and Diego Moreda.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Aid in submitting information to MusicBrainz.\n\nThis plugin allows the user to print track information in a format that is\nparseable by the MusicBrainz track parser. Programmatic submitting is not\nimplemented by MusicBrainz yet.\n\"\"\"\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\n\nfrom beets.autotag import Recommendation\nfrom beets.importer import action\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui.commands import PromptChoice\nfrom beetsplug.info import print_data\n\n\nclass MBSubmitPlugin(BeetsPlugin):\n def __init__(self):\n super(MBSubmitPlugin, self).__init__()\n\n self.register_listener('before_choose_candidate',\n self.before_choose_candidate_event)\n\n def before_choose_candidate_event(self, session, task):\n if not task.candidates or task.rec == Recommendation.none:\n return [PromptChoice('p', 'Print tracks', self.print_tracks),\n PromptChoice('k', 'print tracks and sKip',\n self.print_tracks_and_skip)]\n\n # Callbacks for choices.\n def print_tracks(self, session, task):\n for i in task.items:\n print_data(None, i, '$track. $artist - $title ($length)')\n\n def print_tracks_and_skip(self, session, task):\n for i in task.items:\n print_data(None, i, '$track. $artist - $title ($length)')\n return action.SKIP\n", "path": "beetsplug/mbsubmit.py"}]}
num_tokens_prompt: 1,532
num_tokens_diff: 560

problem_id: gh_patches_debug_27451
source: rasdani/github-patches
task_type: git_diff
in_source_id: RedHatInsights__insights-core-3041
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Systemd_analyze parser is raising lots of exceptions in production The SystemdAnalyzeBlame parser is throwing a large number of the exception ValueError('too many values to unpack (expected 2)',) in production. </issue> <code> [start of insights/parsers/systemd_analyze.py] 1 """ 2 SystemdAnalyzeBlame - command ``systemd-analyze blame`` 3 ======================================================= 4 5 This module parses the output of command ``systemd-analyze blame``. 6 """ 7 from insights.specs import Specs 8 from insights import CommandParser, parser 9 from insights.parsers import SkipException 10 11 12 @parser(Specs.systemd_analyze_blame) 13 class SystemdAnalyzeBlame(CommandParser, dict): 14 """Parse the output of ``systemd-analyze blame`` as ``dict``. The time to 15 initialize is converted into seconds. 16 17 Typical output:: 18 19 33.080s cloud-init-local.service 20 32.423s unbound-anchor.service 21 2.773s kdump.service 22 1.699s dnf-makecache.service 23 1.304s cloud-init.service 24 1.073s initrd-switch-root.service 25 939ms cloud-config.service 26 872ms tuned.service 27 770ms cloud-final.service 28 29 Examples: 30 31 >>> 'cloud-init-local.service' in output 32 True 33 >>> output.get('cloud-init.service', 0) 34 1.304 35 36 Returns: 37 (dict): With unit-name & time as key-value pair. 38 Ex:: 39 40 {'cloud-config.service': 0.939, 41 'cloud-final.service': 0.77, 42 'cloud-init-local.service': 33.08, 43 'cloud-init.service': 1.304, 44 'dnf-makecache.service': 1.699, 45 'initrd-switch-root.service': 1.073, 46 'kdump.service': 2.773, 47 'tuned.service': 0.872, 48 'unbound-anchor.service': 32.423} 49 50 Raises: 51 SkipException: If content is not provided. 52 """ 53 def parse_content(self, content): 54 if not content: 55 raise SkipException 56 57 for c in content: 58 time, service = c.split() 59 if time.endswith('ms'): 60 _time = round(float(time.strip('ms')) / 1000, 5) 61 else: 62 _time = round(float(time.strip('ms')), 5) 63 64 self[service] = _time 65 [end of insights/parsers/systemd_analyze.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/insights/parsers/systemd_analyze.py b/insights/parsers/systemd_analyze.py --- a/insights/parsers/systemd_analyze.py +++ b/insights/parsers/systemd_analyze.py @@ -55,10 +55,34 @@ raise SkipException for c in content: - time, service = c.split() - if time.endswith('ms'): - _time = round(float(time.strip('ms')) / 1000, 5) - else: - _time = round(float(time.strip('ms')), 5) + cols = c.split() + # Check to make sure that the first character of the first + # entry is a number. This will hopefully exclude any errors + # that are outputted in the file. + if cols[0][0].isdigit(): + # The service should be the last column, so just + # remove the last column from the list before looping. + service = cols.pop() + time = 0 + for x in cols: + # Convert each column to seconds, and add them up. + if x.endswith('y'): + # Pulled the 31557600 from systemd src. + time += int(x.strip('y')) * 31557600 + elif x.endswith('month'): + # Pulled the 2629800 from systemd src. + time += int(x.strip('month')) * 2629800 + elif x.endswith('w'): + time += int(x.strip('w')) * 7 * 24 * 60 ** 2 + elif x.endswith('d'): + time += int(x.strip('d')) * 24 * 60 ** 2 + elif x.endswith('h'): + time += int(x.strip('h')) * 60 ** 2 + elif x.endswith('min'): + time += int(x.strip('min')) * 60 + elif x.endswith('ms'): + time += float(x.strip('ms')) / 1000 + elif x.endswith('s'): + time += float(x.strip('s')) - self[service] = _time + self[service] = time
{"golden_diff": "diff --git a/insights/parsers/systemd_analyze.py b/insights/parsers/systemd_analyze.py\n--- a/insights/parsers/systemd_analyze.py\n+++ b/insights/parsers/systemd_analyze.py\n@@ -55,10 +55,34 @@\n raise SkipException\n \n for c in content:\n- time, service = c.split()\n- if time.endswith('ms'):\n- _time = round(float(time.strip('ms')) / 1000, 5)\n- else:\n- _time = round(float(time.strip('ms')), 5)\n+ cols = c.split()\n+ # Check to make sure that the first character of the first\n+ # entry is a number. This will hopefully exclude any errors\n+ # that are outputted in the file.\n+ if cols[0][0].isdigit():\n+ # The service should be the last column, so just\n+ # remove the last column from the list before looping.\n+ service = cols.pop()\n+ time = 0\n+ for x in cols:\n+ # Convert each column to seconds, and add them up.\n+ if x.endswith('y'):\n+ # Pulled the 31557600 from systemd src.\n+ time += int(x.strip('y')) * 31557600\n+ elif x.endswith('month'):\n+ # Pulled the 2629800 from systemd src.\n+ time += int(x.strip('month')) * 2629800\n+ elif x.endswith('w'):\n+ time += int(x.strip('w')) * 7 * 24 * 60 ** 2\n+ elif x.endswith('d'):\n+ time += int(x.strip('d')) * 24 * 60 ** 2\n+ elif x.endswith('h'):\n+ time += int(x.strip('h')) * 60 ** 2\n+ elif x.endswith('min'):\n+ time += int(x.strip('min')) * 60\n+ elif x.endswith('ms'):\n+ time += float(x.strip('ms')) / 1000\n+ elif x.endswith('s'):\n+ time += float(x.strip('s'))\n \n- self[service] = _time\n+ self[service] = time\n", "issue": "Systemd_analyze parser is raising lots of exceptions in production\nThe SystemdAnalyzeBlame parser is throwing a large number of the exception ValueError('too many values to unpack (expected 2)',) in production.\n", "before_files": [{"content": "\"\"\"\nSystemdAnalyzeBlame - command ``systemd-analyze blame``\n=======================================================\n\nThis module parses the output of command ``systemd-analyze blame``.\n\"\"\"\nfrom insights.specs import Specs\nfrom insights import CommandParser, parser\nfrom insights.parsers import SkipException\n\n\n@parser(Specs.systemd_analyze_blame)\nclass SystemdAnalyzeBlame(CommandParser, dict):\n \"\"\"Parse the output of ``systemd-analyze blame`` as ``dict``. The time to\n initialize is converted into seconds.\n\n Typical output::\n\n 33.080s cloud-init-local.service\n 32.423s unbound-anchor.service\n 2.773s kdump.service\n 1.699s dnf-makecache.service\n 1.304s cloud-init.service\n 1.073s initrd-switch-root.service\n 939ms cloud-config.service\n 872ms tuned.service\n 770ms cloud-final.service\n\n Examples:\n\n >>> 'cloud-init-local.service' in output\n True\n >>> output.get('cloud-init.service', 0)\n 1.304\n\n Returns:\n (dict): With unit-name & time as key-value pair.\n Ex::\n\n {'cloud-config.service': 0.939,\n 'cloud-final.service': 0.77,\n 'cloud-init-local.service': 33.08,\n 'cloud-init.service': 1.304,\n 'dnf-makecache.service': 1.699,\n 'initrd-switch-root.service': 1.073,\n 'kdump.service': 2.773,\n 'tuned.service': 0.872,\n 'unbound-anchor.service': 32.423}\n\n Raises:\n SkipException: If content is not provided.\n \"\"\"\n def parse_content(self, content):\n if not content:\n raise SkipException\n\n for c in content:\n time, service = c.split()\n if time.endswith('ms'):\n _time = round(float(time.strip('ms')) / 1000, 5)\n else:\n _time = round(float(time.strip('ms')), 5)\n\n self[service] = _time\n", "path": "insights/parsers/systemd_analyze.py"}]}
1,223
526
gh_patches_debug_30261
rasdani/github-patches
git_diff
mozilla__pontoon-2490
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Rename "Deadline" As part of https://github.com/mozilla/pontoon/pull/1565, we wrote that "Deadline sounds permanent, threatening, and ugly." Let's replace the word with something else. Maybe "Due date"? Rename "Deadline" As part of https://github.com/mozilla/pontoon/pull/1565, we wrote that "Deadline sounds permanent, threatening, and ugly." Let's replace the word with something else. Maybe "Due date"? </issue> <code> [start of pontoon/projects/management/commands/send_deadline_notifications.py] 1 import datetime 2 3 from django.contrib.auth.models import User 4 from django.core.management.base import BaseCommand 5 from notifications.signals import notify 6 7 from pontoon.base.models import Project 8 9 10 class Command(BaseCommand): 11 help = "Notify contributors about the approaching project deadline" 12 13 def handle(self, *args, **options): 14 """ 15 This command sends deadline reminders to contributors of projects that 16 are due in 7 days. If 2 days before the deadline project still isn't 17 complete for the contributor's locale, notifications are sent again. 18 19 The command is designed to run daily. 20 """ 21 for project in Project.objects.available(): 22 if project.deadline: 23 days_left = (project.deadline - datetime.date.today()).days 24 if days_left not in (2, 7): 25 continue 26 else: 27 continue 28 29 self.stdout.write(f"Sending deadline notifications for project {project}.") 30 31 is_project_public = project.visibility == Project.Visibility.PUBLIC 32 verb = f"due in {days_left} days" 33 locales = [] 34 35 for project_locale in project.project_locale.all(): 36 if project_locale.approved_strings < project_locale.total_strings: 37 locales.append(project_locale.locale) 38 39 contributors = ( 40 User.objects.filter( 41 translation__entity__resource__project=project, 42 translation__locale__in=locales, 43 profile__project_deadline_notifications=True, 44 ).distinct(), 45 ) 46 47 for contributor in contributors: 48 if is_project_public or contributor.is_superuser: 49 notify.send(project, recipient=contributor, verb=verb) 50 51 self.stdout.write(f"Deadline notifications for project {project} sent.") 52 [end of pontoon/projects/management/commands/send_deadline_notifications.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pontoon/projects/management/commands/send_deadline_notifications.py b/pontoon/projects/management/commands/send_deadline_notifications.py --- a/pontoon/projects/management/commands/send_deadline_notifications.py +++ b/pontoon/projects/management/commands/send_deadline_notifications.py @@ -8,12 +8,12 @@ class Command(BaseCommand): - help = "Notify contributors about the approaching project deadline" + help = "Notify contributors about the approaching project target date" def handle(self, *args, **options): """ - This command sends deadline reminders to contributors of projects that - are due in 7 days. If 2 days before the deadline project still isn't + This command sends target date reminders to contributors of projects that + are due in 7 days. If 2 days before the target date project still isn't complete for the contributor's locale, notifications are sent again. The command is designed to run daily. @@ -26,7 +26,9 @@ else: continue - self.stdout.write(f"Sending deadline notifications for project {project}.") + self.stdout.write( + f"Sending target date notifications for project {project}." + ) is_project_public = project.visibility == Project.Visibility.PUBLIC verb = f"due in {days_left} days" @@ -48,4 +50,4 @@ if is_project_public or contributor.is_superuser: notify.send(project, recipient=contributor, verb=verb) - self.stdout.write(f"Deadline notifications for project {project} sent.") + self.stdout.write(f"Target date notifications for project {project} sent.")
{"golden_diff": "diff --git a/pontoon/projects/management/commands/send_deadline_notifications.py b/pontoon/projects/management/commands/send_deadline_notifications.py\n--- a/pontoon/projects/management/commands/send_deadline_notifications.py\n+++ b/pontoon/projects/management/commands/send_deadline_notifications.py\n@@ -8,12 +8,12 @@\n \n \n class Command(BaseCommand):\n- help = \"Notify contributors about the approaching project deadline\"\n+ help = \"Notify contributors about the approaching project target date\"\n \n def handle(self, *args, **options):\n \"\"\"\n- This command sends deadline reminders to contributors of projects that\n- are due in 7 days. If 2 days before the deadline project still isn't\n+ This command sends target date reminders to contributors of projects that\n+ are due in 7 days. If 2 days before the target date project still isn't\n complete for the contributor's locale, notifications are sent again.\n \n The command is designed to run daily.\n@@ -26,7 +26,9 @@\n else:\n continue\n \n- self.stdout.write(f\"Sending deadline notifications for project {project}.\")\n+ self.stdout.write(\n+ f\"Sending target date notifications for project {project}.\"\n+ )\n \n is_project_public = project.visibility == Project.Visibility.PUBLIC\n verb = f\"due in {days_left} days\"\n@@ -48,4 +50,4 @@\n if is_project_public or contributor.is_superuser:\n notify.send(project, recipient=contributor, verb=verb)\n \n- self.stdout.write(f\"Deadline notifications for project {project} sent.\")\n+ self.stdout.write(f\"Target date notifications for project {project} sent.\")\n", "issue": "Rename \"Deadline\"\nAs part of https://github.com/mozilla/pontoon/pull/1565, we wrote that \"Deadline sounds permanent, threatening, and ugly.\"\r\n\r\nLet's replace the word with something else.\r\n\r\nMaybe \"Due date\"?\nRename \"Deadline\"\nAs part of https://github.com/mozilla/pontoon/pull/1565, we wrote that \"Deadline sounds permanent, threatening, and ugly.\"\r\n\r\nLet's replace the word with something else.\r\n\r\nMaybe \"Due date\"?\n", "before_files": [{"content": "import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom notifications.signals import notify\n\nfrom pontoon.base.models import Project\n\n\nclass Command(BaseCommand):\n help = \"Notify contributors about the approaching project deadline\"\n\n def handle(self, *args, **options):\n \"\"\"\n This command sends deadline reminders to contributors of projects that\n are due in 7 days. 
If 2 days before the deadline project still isn't\n complete for the contributor's locale, notifications are sent again.\n\n The command is designed to run daily.\n \"\"\"\n for project in Project.objects.available():\n if project.deadline:\n days_left = (project.deadline - datetime.date.today()).days\n if days_left not in (2, 7):\n continue\n else:\n continue\n\n self.stdout.write(f\"Sending deadline notifications for project {project}.\")\n\n is_project_public = project.visibility == Project.Visibility.PUBLIC\n verb = f\"due in {days_left} days\"\n locales = []\n\n for project_locale in project.project_locale.all():\n if project_locale.approved_strings < project_locale.total_strings:\n locales.append(project_locale.locale)\n\n contributors = (\n User.objects.filter(\n translation__entity__resource__project=project,\n translation__locale__in=locales,\n profile__project_deadline_notifications=True,\n ).distinct(),\n )\n\n for contributor in contributors:\n if is_project_public or contributor.is_superuser:\n notify.send(project, recipient=contributor, verb=verb)\n\n self.stdout.write(f\"Deadline notifications for project {project} sent.\")\n", "path": "pontoon/projects/management/commands/send_deadline_notifications.py"}]}
1,097
371
gh_patches_debug_563
rasdani/github-patches
git_diff
pex-tool__pex-910
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.5 On the docket: + [x] Kill `Pip.spawn_install_wheel` `overwrite` arg. #907 + [x] Silence pip warnings about Python 2.7. #908 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '2.1.4' 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '2.1.4' +__version__ = '2.1.5'
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.4'\n+__version__ = '2.1.5'\n", "issue": "Release 2.1.5\nOn the docket:\r\n+ [x] Kill `Pip.spawn_install_wheel` `overwrite` arg. #907\r\n+ [x] Silence pip warnings about Python 2.7. #908\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.4'\n", "path": "pex/version.py"}]}
636
95
gh_patches_debug_31701
rasdani/github-patches
git_diff
searx__searx-1594
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Duden search engine not working anymore They changed the site layout. </issue> <code> [start of searx/engines/duden.py] 1 """ 2 Duden 3 @website https://www.duden.de 4 @provide-api no 5 @using-api no 6 @results HTML (using search portal) 7 @stable no (HTML can change) 8 @parse url, title, content 9 """ 10 11 from lxml import html, etree 12 import re 13 from searx.engines.xpath import extract_text 14 from searx.url_utils import quote 15 from searx import logger 16 17 categories = ['general'] 18 paging = True 19 language_support = False 20 21 # search-url 22 base_url = 'https://www.duden.de/' 23 search_url = base_url + 'suchen/dudenonline/{query}?page={offset}' 24 25 26 def request(query, params): 27 '''pre-request callback 28 params<dict>: 29 method : POST/GET 30 headers : {} 31 data : {} # if method == POST 32 url : '' 33 category: 'search category' 34 pageno : 1 # number of the requested page 35 ''' 36 37 offset = (params['pageno'] - 1) 38 params['url'] = search_url.format(offset=offset, query=quote(query)) 39 return params 40 41 42 def response(resp): 43 '''post-response callback 44 resp: requests response object 45 ''' 46 results = [] 47 48 dom = html.fromstring(resp.text) 49 50 try: 51 number_of_results_string = re.sub('[^0-9]', '', dom.xpath( 52 '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0] 53 ) 54 55 results.append({'number_of_results': int(number_of_results_string)}) 56 57 except: 58 logger.debug("Couldn't read number of results.") 59 pass 60 61 for result in dom.xpath('//section[@class="wide" and not(contains(@style,"overflow:hidden"))]'): 62 try: 63 logger.debug("running for %s" % str(result)) 64 link = result.xpath('.//h2/a')[0] 65 url = link.attrib.get('href') 66 title = result.xpath('string(.//h2/a)') 67 content = extract_text(result.xpath('.//p')) 68 # append result 69 results.append({'url': url, 70 'title': title, 71 'content': content}) 72 except: 73 logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True)) 74 continue 75 76 return results 77 [end of searx/engines/duden.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/engines/duden.py b/searx/engines/duden.py --- a/searx/engines/duden.py +++ b/searx/engines/duden.py @@ -11,7 +11,7 @@ from lxml import html, etree import re from searx.engines.xpath import extract_text -from searx.url_utils import quote +from searx.url_utils import quote, urljoin from searx import logger categories = ['general'] @@ -20,7 +20,7 @@ # search-url base_url = 'https://www.duden.de/' -search_url = base_url + 'suchen/dudenonline/{query}?page={offset}' +search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}' def request(query, params): @@ -35,7 +35,11 @@ ''' offset = (params['pageno'] - 1) - params['url'] = search_url.format(offset=offset, query=quote(query)) + if offset == 0: + search_url_fmt = base_url + 'suchen/dudenonline/{query}' + params['url'] = search_url_fmt.format(query=quote(query)) + else: + params['url'] = search_url.format(offset=offset, query=quote(query)) return params @@ -58,12 +62,11 @@ logger.debug("Couldn't read number of results.") pass - for result in dom.xpath('//section[@class="wide" and not(contains(@style,"overflow:hidden"))]'): + for result in dom.xpath('//section[not(contains(@class, "essay"))]'): try: - logger.debug("running for %s" % str(result)) - link = result.xpath('.//h2/a')[0] - url = link.attrib.get('href') - title = result.xpath('string(.//h2/a)') + url = result.xpath('.//h2/a')[0].get('href') + url = urljoin(base_url, url) + title = result.xpath('string(.//h2/a)').strip() content = extract_text(result.xpath('.//p')) # append result results.append({'url': url,
{"golden_diff": "diff --git a/searx/engines/duden.py b/searx/engines/duden.py\n--- a/searx/engines/duden.py\n+++ b/searx/engines/duden.py\n@@ -11,7 +11,7 @@\n from lxml import html, etree\n import re\n from searx.engines.xpath import extract_text\n-from searx.url_utils import quote\n+from searx.url_utils import quote, urljoin\n from searx import logger\n \n categories = ['general']\n@@ -20,7 +20,7 @@\n \n # search-url\n base_url = 'https://www.duden.de/'\n-search_url = base_url + 'suchen/dudenonline/{query}?page={offset}'\n+search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'\n \n \n def request(query, params):\n@@ -35,7 +35,11 @@\n '''\n \n offset = (params['pageno'] - 1)\n- params['url'] = search_url.format(offset=offset, query=quote(query))\n+ if offset == 0:\n+ search_url_fmt = base_url + 'suchen/dudenonline/{query}'\n+ params['url'] = search_url_fmt.format(query=quote(query))\n+ else:\n+ params['url'] = search_url.format(offset=offset, query=quote(query))\n return params\n \n \n@@ -58,12 +62,11 @@\n logger.debug(\"Couldn't read number of results.\")\n pass\n \n- for result in dom.xpath('//section[@class=\"wide\" and not(contains(@style,\"overflow:hidden\"))]'):\n+ for result in dom.xpath('//section[not(contains(@class, \"essay\"))]'):\n try:\n- logger.debug(\"running for %s\" % str(result))\n- link = result.xpath('.//h2/a')[0]\n- url = link.attrib.get('href')\n- title = result.xpath('string(.//h2/a)')\n+ url = result.xpath('.//h2/a')[0].get('href')\n+ url = urljoin(base_url, url)\n+ title = result.xpath('string(.//h2/a)').strip()\n content = extract_text(result.xpath('.//p'))\n # append result\n results.append({'url': url,\n", "issue": "Duden search engine not working anymore\nThey changed the site layout.\n", "before_files": [{"content": "\"\"\"\n Duden\n @website https://www.duden.de\n @provide-api no\n @using-api no\n @results HTML (using search portal)\n @stable no (HTML can change)\n @parse url, title, content\n\"\"\"\n\nfrom lxml import html, etree\nimport re\nfrom searx.engines.xpath import extract_text\nfrom searx.url_utils import quote\nfrom searx import logger\n\ncategories = ['general']\npaging = True\nlanguage_support = False\n\n# search-url\nbase_url = 'https://www.duden.de/'\nsearch_url = base_url + 'suchen/dudenonline/{query}?page={offset}'\n\n\ndef request(query, params):\n '''pre-request callback\n params<dict>:\n method : POST/GET\n headers : {}\n data : {} # if method == POST\n url : ''\n category: 'search category'\n pageno : 1 # number of the requested page\n '''\n\n offset = (params['pageno'] - 1)\n params['url'] = search_url.format(offset=offset, query=quote(query))\n return params\n\n\ndef response(resp):\n '''post-response callback\n resp: requests response object\n '''\n results = []\n\n dom = html.fromstring(resp.text)\n\n try:\n number_of_results_string = re.sub('[^0-9]', '', dom.xpath(\n '//a[@class=\"active\" and contains(@href,\"/suchen/dudenonline\")]/span/text()')[0]\n )\n\n results.append({'number_of_results': int(number_of_results_string)})\n\n except:\n logger.debug(\"Couldn't read number of results.\")\n pass\n\n for result in dom.xpath('//section[@class=\"wide\" and not(contains(@style,\"overflow:hidden\"))]'):\n try:\n logger.debug(\"running for %s\" % str(result))\n link = result.xpath('.//h2/a')[0]\n url = link.attrib.get('href')\n title = result.xpath('string(.//h2/a)')\n content = extract_text(result.xpath('.//p'))\n # append result\n results.append({'url': url,\n 'title': title,\n 'content': content})\n 
except:\n logger.debug('result parse error in:\\n%s', etree.tostring(result, pretty_print=True))\n continue\n\n return results\n", "path": "searx/engines/duden.py"}]}
num_tokens_prompt: 1,223
num_tokens_diff: 512

problem_id: gh_patches_debug_19671
source: rasdani/github-patches
task_type: git_diff
in_source_id: kartoza__prj.app-508
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Course name Currently the course name is quite long, i.e. XProject_QGIS Introduction_2017-07-05-2017-07-19 Would it be better if we can have a shorter course name, i.e. QGIS Introduction 101? What do you think @timlinux ? </issue> <code> [start of django_project/certification/models/certifying_organisation.py] 1 # coding=utf-8 2 """Certifying organisation model definitions for certification apps. 3 4 """ 5 6 import os 7 from django.conf.global_settings import MEDIA_ROOT 8 from django.core.urlresolvers import reverse 9 from django.core.exceptions import ValidationError 10 from django.core.validators import validate_email 11 from django.db import models 12 from django.utils.text import slugify 13 from django.utils.translation import ugettext_lazy as _ 14 from core.settings.contrib import STOP_WORDS 15 from unidecode import unidecode 16 from django.contrib.auth.models import User 17 from django_countries.fields import CountryField 18 import logging 19 20 logger = logging.getLogger(__name__) 21 22 23 class SlugifyingMixin(object): 24 25 class Meta: 26 abstract = True 27 28 def save(self, *args, **kwargs): 29 if not self.pk: 30 words = self.name.split() 31 filtered_words = [word for word in words if 32 word.lower() not in STOP_WORDS] 33 # unidecode() represents special characters (unicode data) in ASCII 34 new_list = unidecode(' '.join(filtered_words)) 35 self.slug = slugify(new_list)[:50] 36 super(SlugifyingMixin, self).save(*args, **kwargs) 37 38 39 class ApprovedCertifyingOrganisationManager(models.Manager): 40 """Custom training centre manager. 41 42 Shows only approved certifying organisation. 43 """ 44 45 def get_queryset(self): 46 """Query set generator. """ 47 48 return super( 49 ApprovedCertifyingOrganisationManager, self).get_queryset().filter( 50 approved=True) 51 52 53 class UnapprovedCertifyingOrganisationManager(models.Manager): 54 """Custom training centre manager. 55 56 Shows only unapproved certifying organisation. 57 """ 58 59 def get_queryset(self): 60 """Query set generator. """ 61 62 return super( 63 UnapprovedCertifyingOrganisationManager, self).get_queryset( 64 ).filter(approved=False) 65 66 67 def validate_email_address(value): 68 try: 69 validate_email(value) 70 return True 71 except ValidationError( 72 _('%(value)s is not a valid email address'), 73 params={'value': value},): 74 return False 75 76 77 class CertifyingOrganisation(SlugifyingMixin, models.Model): 78 """Certifying organisation model.""" 79 80 name = models.CharField( 81 help_text=_('name of organisation or institution'), 82 max_length=200, 83 null=False, 84 blank=False 85 ) 86 87 organisation_email = models.CharField( 88 help_text=_('Email address organisation or institution.'), 89 max_length=200, 90 null=False, 91 blank=False, 92 validators=[validate_email_address], 93 ) 94 95 address = models.TextField( 96 help_text=_('Address of Organisation or Institution.'), 97 max_length=1000, 98 null=False, 99 blank=False 100 ) 101 102 logo = models.ImageField( 103 help_text=_('Logo for this organisation. 
' 104 'Most browsers support dragging the image directly on to ' 105 'the "Choose File" button above.'), 106 upload_to=os.path.join(MEDIA_ROOT, 'images/organisations'), 107 blank=True 108 ) 109 110 country = CountryField( 111 help_text=_('Select the country for this Institution'), 112 null=True, 113 blank=True) 114 115 organisation_phone = models.CharField( 116 help_text=_('Phone number: (country code)(number) e.g. +6221551553'), 117 max_length=200, 118 null=False, 119 blank=False 120 ) 121 122 approved = models.BooleanField( 123 help_text=_('Approval from project admin'), 124 default=False 125 ) 126 127 enabled = models.BooleanField( 128 help_text=_('Project enabled'), 129 default=True 130 ) 131 132 slug = models.SlugField() 133 organisation_owners = models.ManyToManyField(User) 134 project = models.ForeignKey('base.Project') 135 objects = models.Manager() 136 approved_objects = ApprovedCertifyingOrganisationManager() 137 unapproved_objects = UnapprovedCertifyingOrganisationManager() 138 139 # noinspection PyClassicStyleClass. 140 class Meta: 141 """Meta class for Course attendee.""" 142 143 app_label = 'certification' 144 ordering = ['name'] 145 unique_together = ['name', 'project'] 146 147 def save(self, *args, **kwargs): 148 super(CertifyingOrganisation, self).save(*args, **kwargs) 149 150 def __unicode__(self): 151 return '%s - %s' % (self.project.name, self.name) 152 153 def get_absolute_url(self): 154 """Return URL to certifying organisation detail page. 155 156 :return: URL 157 :rtype: str 158 """ 159 return reverse('certifying-organisation-detail', kwargs={ 160 'slug': self.slug, 161 'project_slug': self.project.slug 162 }) 163 [end of django_project/certification/models/certifying_organisation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django_project/certification/models/certifying_organisation.py b/django_project/certification/models/certifying_organisation.py --- a/django_project/certification/models/certifying_organisation.py +++ b/django_project/certification/models/certifying_organisation.py @@ -74,7 +74,7 @@ return False -class CertifyingOrganisation(SlugifyingMixin, models.Model): +class CertifyingOrganisation(models.Model): """Certifying organisation model.""" name = models.CharField( @@ -145,6 +145,15 @@ unique_together = ['name', 'project'] def save(self, *args, **kwargs): + if not self.pk: + words = self.name.split() + filtered_words = [word for word in words if + word.lower() not in STOP_WORDS] + # unidecode() represents special characters (unicode data) in ASCII + new_list = \ + self.project.slug + ' ' + \ + unidecode(' '.join(filtered_words)) + self.slug = slugify(new_list)[:50] super(CertifyingOrganisation, self).save(*args, **kwargs) def __unicode__(self):
{"golden_diff": "diff --git a/django_project/certification/models/certifying_organisation.py b/django_project/certification/models/certifying_organisation.py\n--- a/django_project/certification/models/certifying_organisation.py\n+++ b/django_project/certification/models/certifying_organisation.py\n@@ -74,7 +74,7 @@\n return False\n \n \n-class CertifyingOrganisation(SlugifyingMixin, models.Model):\n+class CertifyingOrganisation(models.Model):\n \"\"\"Certifying organisation model.\"\"\"\n \n name = models.CharField(\n@@ -145,6 +145,15 @@\n unique_together = ['name', 'project']\n \n def save(self, *args, **kwargs):\n+ if not self.pk:\n+ words = self.name.split()\n+ filtered_words = [word for word in words if\n+ word.lower() not in STOP_WORDS]\n+ # unidecode() represents special characters (unicode data) in ASCII\n+ new_list = \\\n+ self.project.slug + ' ' + \\\n+ unidecode(' '.join(filtered_words))\n+ self.slug = slugify(new_list)[:50]\n super(CertifyingOrganisation, self).save(*args, **kwargs)\n \n def __unicode__(self):\n", "issue": "Course name\nCurrently the course name is quite long, i.e. XProject_QGIS Introduction_2017-07-05-2017-07-19 \n\nWould it be better if we can have a shorter course name, i.e. QGIS Introduction 101?\nWhat do you think @timlinux ?\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Certifying organisation model definitions for certification apps.\n\n\"\"\"\n\nimport os\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_email\nfrom django.db import models\nfrom django.utils.text import slugify\nfrom django.utils.translation import ugettext_lazy as _\nfrom core.settings.contrib import STOP_WORDS\nfrom unidecode import unidecode\nfrom django.contrib.auth.models import User\nfrom django_countries.fields import CountryField\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass SlugifyingMixin(object):\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [word for word in words if\n word.lower() not in STOP_WORDS]\n # unidecode() represents special characters (unicode data) in ASCII\n new_list = unidecode(' '.join(filtered_words))\n self.slug = slugify(new_list)[:50]\n super(SlugifyingMixin, self).save(*args, **kwargs)\n\n\nclass ApprovedCertifyingOrganisationManager(models.Manager):\n \"\"\"Custom training centre manager.\n\n Shows only approved certifying organisation.\n \"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator. \"\"\"\n\n return super(\n ApprovedCertifyingOrganisationManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedCertifyingOrganisationManager(models.Manager):\n \"\"\"Custom training centre manager.\n\n Shows only unapproved certifying organisation.\n \"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator. 
\"\"\"\n\n return super(\n UnapprovedCertifyingOrganisationManager, self).get_queryset(\n ).filter(approved=False)\n\n\ndef validate_email_address(value):\n try:\n validate_email(value)\n return True\n except ValidationError(\n _('%(value)s is not a valid email address'),\n params={'value': value},):\n return False\n\n\nclass CertifyingOrganisation(SlugifyingMixin, models.Model):\n \"\"\"Certifying organisation model.\"\"\"\n\n name = models.CharField(\n help_text=_('name of organisation or institution'),\n max_length=200,\n null=False,\n blank=False\n )\n\n organisation_email = models.CharField(\n help_text=_('Email address organisation or institution.'),\n max_length=200,\n null=False,\n blank=False,\n validators=[validate_email_address],\n )\n\n address = models.TextField(\n help_text=_('Address of Organisation or Institution.'),\n max_length=1000,\n null=False,\n blank=False\n )\n\n logo = models.ImageField(\n help_text=_('Logo for this organisation. '\n 'Most browsers support dragging the image directly on to '\n 'the \"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/organisations'),\n blank=True\n )\n\n country = CountryField(\n help_text=_('Select the country for this Institution'),\n null=True,\n blank=True)\n\n organisation_phone = models.CharField(\n help_text=_('Phone number: (country code)(number) e.g. +6221551553'),\n max_length=200,\n null=False,\n blank=False\n )\n\n approved = models.BooleanField(\n help_text=_('Approval from project admin'),\n default=False\n )\n\n enabled = models.BooleanField(\n help_text=_('Project enabled'),\n default=True\n )\n\n slug = models.SlugField()\n organisation_owners = models.ManyToManyField(User)\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedCertifyingOrganisationManager()\n unapproved_objects = UnapprovedCertifyingOrganisationManager()\n\n # noinspection PyClassicStyleClass.\n class Meta:\n \"\"\"Meta class for Course attendee.\"\"\"\n\n app_label = 'certification'\n ordering = ['name']\n unique_together = ['name', 'project']\n\n def save(self, *args, **kwargs):\n super(CertifyingOrganisation, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return '%s - %s' % (self.project.name, self.name)\n\n def get_absolute_url(self):\n \"\"\"Return URL to certifying organisation detail page.\n\n :return: URL\n :rtype: str\n \"\"\"\n return reverse('certifying-organisation-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n", "path": "django_project/certification/models/certifying_organisation.py"}]}
num_tokens_prompt: 1,981
num_tokens_diff: 276

problem_id: gh_patches_debug_14062
source: rasdani/github-patches
task_type: git_diff
in_source_id: OCA__manufacture-130
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> is:issue is:open [8.0][mrp_production_real_cost] Error when produce product Hi, there's new error from mrp_production_real_cost, after I do git pull from the last commit ``` ERROR demo1 openerp.sql_db: Programming error: can't adapt type 'mrp.production', in query SELECT "mrp_production"."id" FROM "mrp_production" WHERE "mrp_production".id IN %s ORDER BY "mrp_production"."priority" DESC,"mrp_production"."date_planned" ASC File "/opt/odoo/server/addons/mrp_production_real_cost/models/mrp_production.py", line 34, in action_production_end self.mapped('move_created_ids2').filtered( File "/usr/lib/python2.7/dist-packages/psycopg2/extensions.py", line 129, in getquoted pobjs = [adapt(o) for o in self._seq] ValueError: "can't adapt type 'mrp.production'" while evaluating u'action_production_end()' ``` regards </issue> <code> [start of mrp_production_real_cost/models/mrp_production.py] 1 # -*- coding: utf-8 -*- 2 # © 2014-2015 Avanzosc 3 # © 2014-2015 Pedro M. Baeza 4 # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html 5 6 from openerp import api, fields, models 7 8 9 class MrpProduction(models.Model): 10 _inherit = 'mrp.production' 11 12 @api.multi 13 @api.depends('analytic_line_ids', 'analytic_line_ids.amount', 14 'product_qty') 15 def _compute_real_cost(self): 16 for production in self: 17 cost_lines = production.analytic_line_ids.filtered( 18 lambda l: l.amount < 0) 19 production.real_cost = -sum(cost_lines.mapped('amount')) 20 production.unit_real_cost = ( 21 production.real_cost / production.product_qty) 22 23 analytic_line_ids = fields.One2many( 24 comodel_name="account.analytic.line", inverse_name="mrp_production_id", 25 string="Cost Lines") 26 real_cost = fields.Float( 27 "Total Real Cost", compute="_compute_real_cost", store=True) 28 unit_real_cost = fields.Float( 29 "Unit Real Cost", compute="_compute_real_cost", store=True) 30 31 @api.multi 32 def action_production_end(self): 33 res = super(MrpProduction, self).action_production_end() 34 self.mapped('move_created_ids2').filtered( 35 lambda l: l.state == 'done').product_price_update_production_done() 36 return res 37 38 @api.model 39 def _prepare_real_cost_analytic_line( 40 self, journal, name, production, product, general_account=None, 41 workorder=None, qty=1, amount=0): 42 """ 43 Prepare the vals for creating an analytic entry for real cost 44 :param journal: Journal of the entry 45 :param name: Name of the entry 46 :param production: Origin product 47 :param product: Product for the entry 48 :param general_account: General account for the entry 49 :param workorder: Origin workorder 50 :param qty: Quantity for the entry. This quantity will multiply both 51 standard and average costs for the entry costs. 52 :param amount: Cost for calculating real cost. 53 :return: Dictionary with the analytic entry vals. 
54 """ 55 analytic_line_obj = self.env['account.analytic.line'] 56 property_obj = self.env['ir.property'] 57 general_account = ( 58 general_account or product.property_account_expense or 59 product.categ_id.property_account_expense_categ or 60 property_obj.get('property_account_expense_categ', 61 'product.category')) 62 return { 63 'name': name, 64 'mrp_production_id': production.id, 65 'workorder': workorder and workorder.id or False, 66 'account_id': self.analytic_account_id.id, 67 'journal_id': journal.id, 68 'user_id': self.env.uid, 69 'date': analytic_line_obj._get_default_date(), 70 'product_id': product and product.id or False, 71 'unit_amount': qty, 72 'amount': amount, 73 'product_uom_id': product.uom_id.id, 74 'general_account_id': general_account.id, 75 } 76 77 @api.multi 78 def _costs_generate(self): 79 """ 80 As we are generating the account_analytic_lines for MO in the 81 current module, we override this method in order to avoid 82 duplicates created in the parent class. Any other module 83 inheriting this method should take this into account! 84 """ 85 return 86 [end of mrp_production_real_cost/models/mrp_production.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mrp_production_real_cost/models/mrp_production.py b/mrp_production_real_cost/models/mrp_production.py
--- a/mrp_production_real_cost/models/mrp_production.py
+++ b/mrp_production_real_cost/models/mrp_production.py
@@ -31,8 +31,15 @@
     @api.multi
     def action_production_end(self):
         res = super(MrpProduction, self).action_production_end()
-        self.mapped('move_created_ids2').filtered(
-            lambda l: l.state == 'done').product_price_update_production_done()
+        for production in self:
+            # This is needed because commit
+            # https://github.com/odoo/odoo/commit/
+            # 6f29bfc181d23d70d29776d96b4318e9ee2c93a9
+            # introduces a weird behavior on the next call, provoking an error.
+            production.sudo().refresh()
+            production.mapped('move_created_ids2').filtered(
+                lambda l: l.state == 'done'
+            ).product_price_update_production_done()
         return res

     @api.model
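The traceback in this record comes from psycopg2's parameter adaptation: a `mrp.production` recordset leaks into the `IN %s` parameter list, and psycopg2 has no adapter registered for that type. A minimal sketch that reproduces just the adaptation failure outside Odoo (the class name is a hypothetical stand-in chosen to mirror the error message; no database connection is needed):

```python
# psycopg2 can only bind values it knows how to adapt; handing it an
# arbitrary object where an integer id belongs raises exactly the
# "can't adapt type" error quoted in the issue.
from psycopg2.extensions import adapt

class MrpProduction:          # stand-in for the leaked Odoo recordset
    pass

print(adapt(1).getquoted())   # b'1' -- plain integers adapt fine
adapt(MrpProduction())        # ProgrammingError: can't adapt type 'MrpProduction'
```

The patch above sidesteps the problem by iterating one production at a time and refreshing the record cache with `production.sudo().refresh()` before touching the related moves.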
{"golden_diff": "diff --git a/mrp_production_real_cost/models/mrp_production.py b/mrp_production_real_cost/models/mrp_production.py\n--- a/mrp_production_real_cost/models/mrp_production.py\n+++ b/mrp_production_real_cost/models/mrp_production.py\n@@ -31,8 +31,15 @@\n @api.multi\n def action_production_end(self):\n res = super(MrpProduction, self).action_production_end()\n- self.mapped('move_created_ids2').filtered(\n- lambda l: l.state == 'done').product_price_update_production_done()\n+ for production in self:\n+ # This is needed because commit\n+ # https://github.com/odoo/odoo/commit/\n+ # 6f29bfc181d23d70d29776d96b4318e9ee2c93a9\n+ # introduces a weird behavior on the next call, provoking an error.\n+ production.sudo().refresh()\n+ production.mapped('move_created_ids2').filtered(\n+ lambda l: l.state == 'done'\n+ ).product_price_update_production_done()\n return res\n \n @api.model\n", "issue": "is:issue is:open [8.0][mrp_production_real_cost] Error when produce product\nHi,\n\nthere's new error from mrp_production_real_cost, after I do git pull from the last commit \n\n```\n\nERROR demo1 openerp.sql_db: Programming error: can't adapt type 'mrp.production', in query SELECT \"mrp_production\".\"id\" FROM \"mrp_production\"\n WHERE \"mrp_production\".id IN %s ORDER BY \"mrp_production\".\"priority\" DESC,\"mrp_production\".\"date_planned\" ASC \n\n File \"/opt/odoo/server/addons/mrp_production_real_cost/models/mrp_production.py\", line 34, in action_production_end\n self.mapped('move_created_ids2').filtered(\n\n\n File \"/usr/lib/python2.7/dist-packages/psycopg2/extensions.py\", line 129, in getquoted\n pobjs = [adapt(o) for o in self._seq]\nValueError: \"can't adapt type 'mrp.production'\" while evaluating\nu'action_production_end()'\n```\n\nregards\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2014-2015 Avanzosc\n# \u00a9 2014-2015 Pedro M. Baeza\n# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html\n\nfrom openerp import api, fields, models\n\n\nclass MrpProduction(models.Model):\n _inherit = 'mrp.production'\n\n @api.multi\n @api.depends('analytic_line_ids', 'analytic_line_ids.amount',\n 'product_qty')\n def _compute_real_cost(self):\n for production in self:\n cost_lines = production.analytic_line_ids.filtered(\n lambda l: l.amount < 0)\n production.real_cost = -sum(cost_lines.mapped('amount'))\n production.unit_real_cost = (\n production.real_cost / production.product_qty)\n\n analytic_line_ids = fields.One2many(\n comodel_name=\"account.analytic.line\", inverse_name=\"mrp_production_id\",\n string=\"Cost Lines\")\n real_cost = fields.Float(\n \"Total Real Cost\", compute=\"_compute_real_cost\", store=True)\n unit_real_cost = fields.Float(\n \"Unit Real Cost\", compute=\"_compute_real_cost\", store=True)\n\n @api.multi\n def action_production_end(self):\n res = super(MrpProduction, self).action_production_end()\n self.mapped('move_created_ids2').filtered(\n lambda l: l.state == 'done').product_price_update_production_done()\n return res\n\n @api.model\n def _prepare_real_cost_analytic_line(\n self, journal, name, production, product, general_account=None,\n workorder=None, qty=1, amount=0):\n \"\"\"\n Prepare the vals for creating an analytic entry for real cost\n :param journal: Journal of the entry\n :param name: Name of the entry\n :param production: Origin product\n :param product: Product for the entry\n :param general_account: General account for the entry\n :param workorder: Origin workorder\n :param qty: Quantity for the entry. 
This quantity will multiply both\n standard and average costs for the entry costs.\n :param amount: Cost for calculating real cost.\n :return: Dictionary with the analytic entry vals.\n \"\"\"\n analytic_line_obj = self.env['account.analytic.line']\n property_obj = self.env['ir.property']\n general_account = (\n general_account or product.property_account_expense or\n product.categ_id.property_account_expense_categ or\n property_obj.get('property_account_expense_categ',\n 'product.category'))\n return {\n 'name': name,\n 'mrp_production_id': production.id,\n 'workorder': workorder and workorder.id or False,\n 'account_id': self.analytic_account_id.id,\n 'journal_id': journal.id,\n 'user_id': self.env.uid,\n 'date': analytic_line_obj._get_default_date(),\n 'product_id': product and product.id or False,\n 'unit_amount': qty,\n 'amount': amount,\n 'product_uom_id': product.uom_id.id,\n 'general_account_id': general_account.id,\n }\n\n @api.multi\n def _costs_generate(self):\n \"\"\"\n As we are generating the account_analytic_lines for MO in the\n current module, we override this method in order to avoid\n duplicates created in the parent class. Any other module\n inheriting this method should take this into account!\n \"\"\"\n return\n", "path": "mrp_production_real_cost/models/mrp_production.py"}]}
num_tokens_prompt: 1,708
num_tokens_diff: 264

problem_id: gh_patches_debug_4786
source: rasdani/github-patches
task_type: git_diff
in_source_id: jazzband__pip-tools-314
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pip-compile looses `via` with pip 8 ``` $ echo jinja2 > req $ pip-compile --version pip-compile, version 1.5 $ pip --version pip 7.1.2 from <snip>lib/python2.7/site-packages (python 2.7) pip-compile req # # This file is autogenerated by pip-compile # Make changes in req, then run this to update: # # pip-compile req # jinja2==2.8 markupsafe==0.23 # via jinja2 $ pip install -U pip <snip> $ pip --version pip 8.0.2 from <snip>lib/python2.7/site-packages (python 2.7) pip-compile req # # This file is autogenerated by pip-compile # Make changes in req, then run this to update: # # pip-compile req # jinja2==2.8 MarkupSafe==0.23 ``` note the missing `via jinja2` for pip 8 </issue> <code> [start of piptools/writer.py] 1 import os 2 from os.path import basename 3 4 from ._compat import ExitStack 5 from .click import unstyle 6 from .io import AtomicSaver 7 from .logging import log 8 from .utils import comment, format_requirement 9 10 11 class OutputWriter(object): 12 def __init__(self, src_file, dst_file, dry_run, emit_header, emit_index, annotate, 13 default_index_url, index_urls): 14 self.src_file = src_file 15 self.dst_file = dst_file 16 self.dry_run = dry_run 17 self.emit_header = emit_header 18 self.emit_index = emit_index 19 self.annotate = annotate 20 self.default_index_url = default_index_url 21 self.index_urls = index_urls 22 23 def _sort_key(self, ireq): 24 return (not ireq.editable, str(ireq.req).lower()) 25 26 def write_header(self): 27 if self.emit_header: 28 yield comment('#') 29 yield comment('# This file is autogenerated by pip-compile') 30 yield comment('# Make changes in {}, then run this to update:'.format(basename(self.src_file))) 31 yield comment('#') 32 args = '' 33 if not self.emit_index: 34 args += '--no-index ' 35 if not self.annotate: 36 args += '--no-annotate ' 37 yield comment('# pip-compile {args}{filename}'.format( 38 args=args, 39 filename=basename(self.src_file))) 40 yield comment('#') 41 42 def write_index_options(self): 43 if self.emit_index: 44 emitted = False 45 for index, index_url in enumerate(self.index_urls): 46 if index_url.rstrip('/') == self.default_index_url: 47 continue 48 flag = '--index-url' if index == 0 else '--extra-index-url' 49 yield '{} {}'.format(flag, index_url) 50 emitted = True 51 if emitted: 52 yield '' # extra line of whitespace 53 54 def _iter_lines(self, results, reverse_dependencies, primary_packages): 55 for line in self.write_header(): 56 yield line 57 for line in self.write_index_options(): 58 yield line 59 60 UNSAFE_PACKAGES = {'setuptools', 'distribute', 'pip'} 61 unsafe_packages = {r for r in results if r.name in UNSAFE_PACKAGES} 62 packages = {r for r in results if r.name not in UNSAFE_PACKAGES} 63 64 packages = sorted(packages, key=self._sort_key) 65 unsafe_packages = sorted(unsafe_packages, key=self._sort_key) 66 67 for ireq in packages: 68 line = self._format_requirement(ireq, reverse_dependencies, primary_packages) 69 yield line 70 71 if unsafe_packages: 72 yield '' 73 yield comment('# The following packages are commented out because they are') 74 yield comment('# considered to be unsafe in a requirements file:') 75 76 for ireq in unsafe_packages: 77 line = self._format_requirement(ireq, reverse_dependencies, primary_packages, include_specifier=False) 78 yield comment('# ' + line) 79 80 def write(self, results, reverse_dependencies, primary_packages): 81 with ExitStack() as stack: 82 f = None 83 if not self.dry_run: 84 f 
= stack.enter_context(AtomicSaver(self.dst_file)) 85 86 for line in self._iter_lines(results, reverse_dependencies, primary_packages): 87 log.info(line) 88 if f: 89 f.write(unstyle(line).encode('utf-8')) 90 f.write(os.linesep.encode('utf-8')) 91 92 def _format_requirement(self, ireq, reverse_dependencies, primary_packages, include_specifier=True): 93 line = format_requirement(ireq, include_specifier=include_specifier) 94 if not self.annotate or ireq.name in primary_packages: 95 return line 96 97 # Annotate what packages this package is required by 98 required_by = reverse_dependencies.get(ireq.name, []) 99 if required_by: 100 line = line.ljust(24) 101 annotation = ', '.join(sorted(required_by)) 102 line += comment(' # via ' + annotation) 103 return line 104 [end of piptools/writer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/piptools/writer.py b/piptools/writer.py
--- a/piptools/writer.py
+++ b/piptools/writer.py
@@ -95,7 +95,7 @@
         return line

         # Annotate what packages this package is required by
-        required_by = reverse_dependencies.get(ireq.name, [])
+        required_by = reverse_dependencies.get(ireq.name.lower(), [])
         if required_by:
             line = line.ljust(24)
             annotation = ', '.join(sorted(required_by))
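The one-line fix is easiest to see with plain dictionaries: pip-tools builds `reverse_dependencies` with lower-cased keys, but pip 8 started reporting canonical project names such as `MarkupSafe`, so the un-normalized lookup missed and the `# via` annotation was silently dropped. A self-contained sketch (the map contents are invented for illustration):

```python
# reverse-dependency map as pip-tools builds it: keys are lower case
reverse_dependencies = {"markupsafe": {"jinja2"}}

name_from_pip8 = "MarkupSafe"  # canonical casing reported by pip 8

print(reverse_dependencies.get(name_from_pip8, []))          # [] -> "via" comment lost
print(reverse_dependencies.get(name_from_pip8.lower(), []))  # {'jinja2'} -> patched lookup
```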
{"golden_diff": "diff --git a/piptools/writer.py b/piptools/writer.py\n--- a/piptools/writer.py\n+++ b/piptools/writer.py\n@@ -95,7 +95,7 @@\n return line\n \n # Annotate what packages this package is required by\n- required_by = reverse_dependencies.get(ireq.name, [])\n+ required_by = reverse_dependencies.get(ireq.name.lower(), [])\n if required_by:\n line = line.ljust(24)\n annotation = ', '.join(sorted(required_by))\n", "issue": "pip-compile looses `via` with pip 8\n```\n$ echo jinja2 > req\n$ pip-compile --version\npip-compile, version 1.5\n$ pip --version\npip 7.1.2 from <snip>lib/python2.7/site-packages (python 2.7)\n\npip-compile req\n#\n# This file is autogenerated by pip-compile\n# Make changes in req, then run this to update:\n#\n# pip-compile req\n#\n\njinja2==2.8\nmarkupsafe==0.23 # via jinja2\n\n$ pip install -U pip\n<snip>\n\n$ pip --version\npip 8.0.2 from <snip>lib/python2.7/site-packages (python 2.7)\n\npip-compile req\n#\n# This file is autogenerated by pip-compile\n# Make changes in req, then run this to update:\n#\n# pip-compile req\n#\n\njinja2==2.8\nMarkupSafe==0.23\n```\n\nnote the missing `via jinja2` for pip 8\n\n", "before_files": [{"content": "import os\nfrom os.path import basename\n\nfrom ._compat import ExitStack\nfrom .click import unstyle\nfrom .io import AtomicSaver\nfrom .logging import log\nfrom .utils import comment, format_requirement\n\n\nclass OutputWriter(object):\n def __init__(self, src_file, dst_file, dry_run, emit_header, emit_index, annotate,\n default_index_url, index_urls):\n self.src_file = src_file\n self.dst_file = dst_file\n self.dry_run = dry_run\n self.emit_header = emit_header\n self.emit_index = emit_index\n self.annotate = annotate\n self.default_index_url = default_index_url\n self.index_urls = index_urls\n\n def _sort_key(self, ireq):\n return (not ireq.editable, str(ireq.req).lower())\n\n def write_header(self):\n if self.emit_header:\n yield comment('#')\n yield comment('# This file is autogenerated by pip-compile')\n yield comment('# Make changes in {}, then run this to update:'.format(basename(self.src_file)))\n yield comment('#')\n args = ''\n if not self.emit_index:\n args += '--no-index '\n if not self.annotate:\n args += '--no-annotate '\n yield comment('# pip-compile {args}{filename}'.format(\n args=args,\n filename=basename(self.src_file)))\n yield comment('#')\n\n def write_index_options(self):\n if self.emit_index:\n emitted = False\n for index, index_url in enumerate(self.index_urls):\n if index_url.rstrip('/') == self.default_index_url:\n continue\n flag = '--index-url' if index == 0 else '--extra-index-url'\n yield '{} {}'.format(flag, index_url)\n emitted = True\n if emitted:\n yield '' # extra line of whitespace\n\n def _iter_lines(self, results, reverse_dependencies, primary_packages):\n for line in self.write_header():\n yield line\n for line in self.write_index_options():\n yield line\n\n UNSAFE_PACKAGES = {'setuptools', 'distribute', 'pip'}\n unsafe_packages = {r for r in results if r.name in UNSAFE_PACKAGES}\n packages = {r for r in results if r.name not in UNSAFE_PACKAGES}\n\n packages = sorted(packages, key=self._sort_key)\n unsafe_packages = sorted(unsafe_packages, key=self._sort_key)\n\n for ireq in packages:\n line = self._format_requirement(ireq, reverse_dependencies, primary_packages)\n yield line\n\n if unsafe_packages:\n yield ''\n yield comment('# The following packages are commented out because they are')\n yield comment('# considered to be unsafe in a requirements file:')\n\n for ireq in 
unsafe_packages:\n line = self._format_requirement(ireq, reverse_dependencies, primary_packages, include_specifier=False)\n yield comment('# ' + line)\n\n def write(self, results, reverse_dependencies, primary_packages):\n with ExitStack() as stack:\n f = None\n if not self.dry_run:\n f = stack.enter_context(AtomicSaver(self.dst_file))\n\n for line in self._iter_lines(results, reverse_dependencies, primary_packages):\n log.info(line)\n if f:\n f.write(unstyle(line).encode('utf-8'))\n f.write(os.linesep.encode('utf-8'))\n\n def _format_requirement(self, ireq, reverse_dependencies, primary_packages, include_specifier=True):\n line = format_requirement(ireq, include_specifier=include_specifier)\n if not self.annotate or ireq.name in primary_packages:\n return line\n\n # Annotate what packages this package is required by\n required_by = reverse_dependencies.get(ireq.name, [])\n if required_by:\n line = line.ljust(24)\n annotation = ', '.join(sorted(required_by))\n line += comment(' # via ' + annotation)\n return line\n", "path": "piptools/writer.py"}]}
num_tokens_prompt: 1,837
num_tokens_diff: 120

problem_id: gh_patches_debug_18879
source: rasdani/github-patches
task_type: git_diff
in_source_id: netbox-community__netbox-8292
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Circuits list view to display formatted commit rate ### NetBox version 3.1.2 ### Feature type Change to existing functionality ### Proposed functionality The current circuit list view (/circuits/circuits/) has a column called "Commit Rate (kbps) and shows the rate in kbps i.e. 1000000 However when looking at the circuit details, the commit rate is translated into something more human readable i.e 1 Gbps Proposing either changing the existing Commit Rate (kbps) column to also translate the commit rate or the addition of an extra column simply called Commit Rate that has the human readable version. ### Use case Easier for non-technical users to quickly see the commit rate of a circuit in the table view. Brings more parity to the circuit details view. ### Database changes _No response_ ### External dependencies _No response_ </issue> <code> [start of netbox/circuits/tables.py] 1 import django_tables2 as tables 2 from django_tables2.utils import Accessor 3 4 from tenancy.tables import TenantColumn 5 from utilities.tables import BaseTable, ButtonsColumn, ChoiceFieldColumn, MarkdownColumn, TagColumn, ToggleColumn 6 from .models import * 7 8 9 __all__ = ( 10 'CircuitTable', 11 'CircuitTypeTable', 12 'ProviderTable', 13 'ProviderNetworkTable', 14 ) 15 16 17 CIRCUITTERMINATION_LINK = """ 18 {% if value.site %} 19 <a href="{{ value.site.get_absolute_url }}">{{ value.site }}</a> 20 {% elif value.provider_network %} 21 <a href="{{ value.provider_network.get_absolute_url }}">{{ value.provider_network }}</a> 22 {% endif %} 23 """ 24 25 26 # 27 # Providers 28 # 29 30 class ProviderTable(BaseTable): 31 pk = ToggleColumn() 32 name = tables.Column( 33 linkify=True 34 ) 35 circuit_count = tables.Column( 36 accessor=Accessor('count_circuits'), 37 verbose_name='Circuits' 38 ) 39 comments = MarkdownColumn() 40 tags = TagColumn( 41 url_name='circuits:provider_list' 42 ) 43 44 class Meta(BaseTable.Meta): 45 model = Provider 46 fields = ( 47 'pk', 'id', 'name', 'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'circuit_count', 48 'comments', 'tags', 49 ) 50 default_columns = ('pk', 'name', 'asn', 'account', 'circuit_count') 51 52 53 # 54 # Provider networks 55 # 56 57 class ProviderNetworkTable(BaseTable): 58 pk = ToggleColumn() 59 name = tables.Column( 60 linkify=True 61 ) 62 provider = tables.Column( 63 linkify=True 64 ) 65 comments = MarkdownColumn() 66 tags = TagColumn( 67 url_name='circuits:providernetwork_list' 68 ) 69 70 class Meta(BaseTable.Meta): 71 model = ProviderNetwork 72 fields = ('pk', 'id', 'name', 'provider', 'description', 'comments', 'tags') 73 default_columns = ('pk', 'name', 'provider', 'description') 74 75 76 # 77 # Circuit types 78 # 79 80 class CircuitTypeTable(BaseTable): 81 pk = ToggleColumn() 82 name = tables.Column( 83 linkify=True 84 ) 85 tags = TagColumn( 86 url_name='circuits:circuittype_list' 87 ) 88 circuit_count = tables.Column( 89 verbose_name='Circuits' 90 ) 91 actions = ButtonsColumn(CircuitType) 92 93 class Meta(BaseTable.Meta): 94 model = CircuitType 95 fields = ('pk', 'id', 'name', 'circuit_count', 'description', 'slug', 'tags', 'actions') 96 default_columns = ('pk', 'name', 'circuit_count', 'description', 'slug', 'actions') 97 98 99 # 100 # Circuits 101 # 102 103 class CircuitTable(BaseTable): 104 pk = ToggleColumn() 105 cid = tables.Column( 106 linkify=True, 107 verbose_name='Circuit ID' 108 ) 109 provider = tables.Column( 110 linkify=True 111 ) 112 
status = ChoiceFieldColumn() 113 tenant = TenantColumn() 114 termination_a = tables.TemplateColumn( 115 template_code=CIRCUITTERMINATION_LINK, 116 verbose_name='Side A' 117 ) 118 termination_z = tables.TemplateColumn( 119 template_code=CIRCUITTERMINATION_LINK, 120 verbose_name='Side Z' 121 ) 122 comments = MarkdownColumn() 123 tags = TagColumn( 124 url_name='circuits:circuit_list' 125 ) 126 127 class Meta(BaseTable.Meta): 128 model = Circuit 129 fields = ( 130 'pk', 'id', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'install_date', 131 'commit_rate', 'description', 'comments', 'tags', 132 ) 133 default_columns = ( 134 'pk', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'description', 135 ) 136 [end of netbox/circuits/tables.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/circuits/tables.py b/netbox/circuits/tables.py
--- a/netbox/circuits/tables.py
+++ b/netbox/circuits/tables.py
@@ -22,11 +22,32 @@
 {% endif %}
 """

+#
+# Table columns
+#
+
+
+class CommitRateColumn(tables.TemplateColumn):
+    """
+    Humanize the commit rate in the column view
+    """
+
+    template_code = """
+        {% load helpers %}
+        {{ record.commit_rate|humanize_speed }}
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(template_code=self.template_code, *args, **kwargs)
+
+    def value(self, value):
+        return str(value) if value else None

 #
 # Providers
 #

+
 class ProviderTable(BaseTable):
     pk = ToggleColumn()
     name = tables.Column(
@@ -119,6 +140,7 @@
         template_code=CIRCUITTERMINATION_LINK,
         verbose_name='Side Z'
     )
+    commit_rate = CommitRateColumn()
     comments = MarkdownColumn()
     tags = TagColumn(
         url_name='circuits:circuit_list'
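`humanize_speed` is an existing NetBox template filter; the new column simply routes the stored kbps integer through it instead of printing the raw value, and the `value()` override keeps exports returning the raw number while the rendered table shows the readable string. A rough standalone approximation of what such a filter does (the thresholds here are an assumption for illustration, not NetBox's exact implementation):

```python
def humanize_speed(kbps):
    # Render a stored kbps integer the way the circuit detail view does.
    if not kbps:
        return ""
    if kbps >= 1_000_000 and kbps % 1_000_000 == 0:
        return f"{kbps // 1_000_000} Gbps"
    if kbps >= 1_000 and kbps % 1_000 == 0:
        return f"{kbps // 1_000} Mbps"
    return f"{kbps} Kbps"

print(humanize_speed(1_000_000))  # 1 Gbps
```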
{"golden_diff": "diff --git a/netbox/circuits/tables.py b/netbox/circuits/tables.py\n--- a/netbox/circuits/tables.py\n+++ b/netbox/circuits/tables.py\n@@ -22,11 +22,32 @@\n {% endif %}\n \"\"\"\n \n+#\n+# Table columns\n+#\n+\n+\n+class CommitRateColumn(tables.TemplateColumn):\n+ \"\"\"\n+ Humanize the commit rate in the column view\n+ \"\"\"\n+\n+ template_code = \"\"\"\n+ {% load helpers %}\n+ {{ record.commit_rate|humanize_speed }}\n+ \"\"\"\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(template_code=self.template_code, *args, **kwargs)\n+\n+ def value(self, value):\n+ return str(value) if value else None\n \n #\n # Providers\n #\n \n+\n class ProviderTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n@@ -119,6 +140,7 @@\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side Z'\n )\n+ commit_rate = CommitRateColumn()\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:circuit_list'\n", "issue": "Circuits list view to display formatted commit rate\n### NetBox version\n\n3.1.2\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nThe current circuit list view (/circuits/circuits/) has a column called \"Commit Rate (kbps) and shows the rate in kbps i.e. 1000000\r\n\r\nHowever when looking at the circuit details, the commit rate is translated into something more human readable i.e 1 Gbps\r\n\r\nProposing either changing the existing Commit Rate (kbps) column to also translate the commit rate or the addition of an extra column simply called Commit Rate that has the human readable version.\n\n### Use case\n\nEasier for non-technical users to quickly see the commit rate of a circuit in the table view. Brings more parity to the circuit details view.\n\n### Database changes\n\n_No response_\n\n### External dependencies\n\n_No response_\n", "before_files": [{"content": "import django_tables2 as tables\nfrom django_tables2.utils import Accessor\n\nfrom tenancy.tables import TenantColumn\nfrom utilities.tables import BaseTable, ButtonsColumn, ChoiceFieldColumn, MarkdownColumn, TagColumn, ToggleColumn\nfrom .models import *\n\n\n__all__ = (\n 'CircuitTable',\n 'CircuitTypeTable',\n 'ProviderTable',\n 'ProviderNetworkTable',\n)\n\n\nCIRCUITTERMINATION_LINK = \"\"\"\n{% if value.site %}\n <a href=\"{{ value.site.get_absolute_url }}\">{{ value.site }}</a>\n{% elif value.provider_network %}\n <a href=\"{{ value.provider_network.get_absolute_url }}\">{{ value.provider_network }}</a>\n{% endif %}\n\"\"\"\n\n\n#\n# Providers\n#\n\nclass ProviderTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n circuit_count = tables.Column(\n accessor=Accessor('count_circuits'),\n verbose_name='Circuits'\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:provider_list'\n )\n\n class Meta(BaseTable.Meta):\n model = Provider\n fields = (\n 'pk', 'id', 'name', 'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'circuit_count',\n 'comments', 'tags',\n )\n default_columns = ('pk', 'name', 'asn', 'account', 'circuit_count')\n\n\n#\n# Provider networks\n#\n\nclass ProviderNetworkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n provider = tables.Column(\n linkify=True\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:providernetwork_list'\n )\n\n class Meta(BaseTable.Meta):\n model = ProviderNetwork\n fields = ('pk', 'id', 'name', 'provider', 'description', 'comments', 'tags')\n default_columns = ('pk', 
'name', 'provider', 'description')\n\n\n#\n# Circuit types\n#\n\nclass CircuitTypeTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n tags = TagColumn(\n url_name='circuits:circuittype_list'\n )\n circuit_count = tables.Column(\n verbose_name='Circuits'\n )\n actions = ButtonsColumn(CircuitType)\n\n class Meta(BaseTable.Meta):\n model = CircuitType\n fields = ('pk', 'id', 'name', 'circuit_count', 'description', 'slug', 'tags', 'actions')\n default_columns = ('pk', 'name', 'circuit_count', 'description', 'slug', 'actions')\n\n\n#\n# Circuits\n#\n\nclass CircuitTable(BaseTable):\n pk = ToggleColumn()\n cid = tables.Column(\n linkify=True,\n verbose_name='Circuit ID'\n )\n provider = tables.Column(\n linkify=True\n )\n status = ChoiceFieldColumn()\n tenant = TenantColumn()\n termination_a = tables.TemplateColumn(\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side A'\n )\n termination_z = tables.TemplateColumn(\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side Z'\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:circuit_list'\n )\n\n class Meta(BaseTable.Meta):\n model = Circuit\n fields = (\n 'pk', 'id', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'install_date',\n 'commit_rate', 'description', 'comments', 'tags',\n )\n default_columns = (\n 'pk', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'description',\n )\n", "path": "netbox/circuits/tables.py"}]}
num_tokens_prompt: 1,867
num_tokens_diff: 272

problem_id: gh_patches_debug_22839
source: rasdani/github-patches
task_type: git_diff
in_source_id: beetbox__beets-4086
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> unimported: Add an option to ignore some folders I use a hard drive as my Beets library 'folder'. Because of its size I also store some other non-imported music folders on that drive. I ran into the situation that running 'beets unimported' showed me all the files in those unimported folders. It's logical that the plugin scans those too but a more specific scan would be great. I could circumvent this by placing all Beets folders in another folder instead of the root of the drive but that would make for a deeper hierarchy which I wouldn't like. ### Proposed solution Add extra options for the command line `beets unimported /specific_folder` or in config.yaml ``` unimported: ignore_folders: folder-with-non-imported-files ``` </issue> <code> [start of beetsplug/unimported.py] 1 # This file is part of beets. 2 # Copyright 2019, Joris Jensen 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining 5 # a copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, 8 # distribute, sublicense, and/or sell copies of the Software, and to 9 # permit persons to whom the Software is furnished to do so, subject to 10 # the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be 13 # included in all copies or substantial portions of the Software. 14 15 """ 16 List all files in the library folder which are not listed in the 17 beets library database, including art files 18 """ 19 20 import os 21 22 from beets import util 23 from beets.plugins import BeetsPlugin 24 from beets.ui import Subcommand, print_ 25 26 __author__ = 'https://github.com/MrNuggelz' 27 28 29 class Unimported(BeetsPlugin): 30 31 def __init__(self): 32 super().__init__() 33 self.config.add( 34 { 35 'ignore_extensions': [] 36 } 37 ) 38 39 def commands(self): 40 def print_unimported(lib, opts, args): 41 ignore_exts = [('.' + x).encode() for x 42 in self.config['ignore_extensions'].as_str_seq()] 43 in_folder = { 44 os.path.join(r, file) for r, d, f in os.walk(lib.directory) 45 for file in f if not any( 46 [file.endswith(extension) for extension in 47 ignore_exts])} 48 in_library = {x.path for x in lib.items()} 49 art_files = {x.artpath for x in lib.albums()} 50 for f in in_folder - in_library - art_files: 51 print_(util.displayable_path(f)) 52 53 unimported = Subcommand( 54 'unimported', 55 help='list all files in the library folder which are not listed' 56 ' in the beets library database') 57 unimported.func = print_unimported 58 return [unimported] 59 [end of beetsplug/unimported.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/beetsplug/unimported.py b/beetsplug/unimported.py
--- a/beetsplug/unimported.py
+++ b/beetsplug/unimported.py
@@ -38,13 +38,23 @@

     def commands(self):
         def print_unimported(lib, opts, args):
-            ignore_exts = [('.' + x).encode() for x
-                           in self.config['ignore_extensions'].as_str_seq()]
+            ignore_exts = [
+                ('.' + x).encode()
+                for x in self.config["ignore_extensions"].as_str_seq()
+            ]
+            ignore_dirs = [
+                os.path.join(lib.directory, x.encode())
+                for x in self.config["ignore_subdirectories"].as_str_seq()
+            ]
             in_folder = {
-                os.path.join(r, file) for r, d, f in os.walk(lib.directory)
-                for file in f if not any(
-                    [file.endswith(extension) for extension in
-                     ignore_exts])}
+                os.path.join(r, file)
+                for r, d, f in os.walk(lib.directory)
+                for file in f
+                if not any(
+                    [file.endswith(ext) for ext in ignore_exts]
+                    + [r in ignore_dirs]
+                )
+            }
             in_library = {x.path for x in lib.items()}
             art_files = {x.artpath for x in lib.albums()}
             for f in in_folder - in_library - art_files:
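The new `ignore_subdirectories` option works by resolving each configured name against the library root and skipping any `os.walk` root that matches. A stripped-down, runnable sketch of that filter (the paths and config values are invented; beets stores paths as bytes, hence the `encode()` calls):

```python
import os

library = b"/mnt/music"                   # hypothetical library root
ignore_subdirectories = ["not-imported"]  # as it would appear in config.yaml

ignore_dirs = [os.path.join(library, d.encode()) for d in ignore_subdirectories]

unimported = {
    os.path.join(root, name)
    for root, dirs, files in os.walk(library)
    for name in files
    if root not in ignore_dirs
}
```

Note that the membership test matches only the listed directory itself, so files nested deeper inside an ignored folder would still be walked.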
{"golden_diff": "diff --git a/beetsplug/unimported.py b/beetsplug/unimported.py\n--- a/beetsplug/unimported.py\n+++ b/beetsplug/unimported.py\n@@ -38,13 +38,23 @@\n \n def commands(self):\n def print_unimported(lib, opts, args):\n- ignore_exts = [('.' + x).encode() for x\n- in self.config['ignore_extensions'].as_str_seq()]\n+ ignore_exts = [\n+ ('.' + x).encode()\n+ for x in self.config[\"ignore_extensions\"].as_str_seq()\n+ ]\n+ ignore_dirs = [\n+ os.path.join(lib.directory, x.encode())\n+ for x in self.config[\"ignore_subdirectories\"].as_str_seq()\n+ ]\n in_folder = {\n- os.path.join(r, file) for r, d, f in os.walk(lib.directory)\n- for file in f if not any(\n- [file.endswith(extension) for extension in\n- ignore_exts])}\n+ os.path.join(r, file)\n+ for r, d, f in os.walk(lib.directory)\n+ for file in f\n+ if not any(\n+ [file.endswith(ext) for ext in ignore_exts]\n+ + [r in ignore_dirs]\n+ )\n+ }\n in_library = {x.path for x in lib.items()}\n art_files = {x.artpath for x in lib.albums()}\n for f in in_folder - in_library - art_files:\n", "issue": "unimported: Add an option to ignore some folders\nI use a hard drive as my Beets library 'folder'. \r\nBecause of its size I also store some other non-imported music folders on that drive.\r\nI ran into the situation that running 'beets unimported' showed me all the files in those unimported folders. \r\nIt's logical that the plugin scans those too but a more specific scan would be great.\r\nI could circumvent this by placing all Beets folders in another folder instead of the root of the drive but that would make for a deeper hierarchy which I wouldn't like.\r\n\r\n### Proposed solution\r\n\r\nAdd extra options for the command line\r\n`beets unimported /specific_folder`\r\nor in config.yaml\r\n```\r\nunimported:\r\n ignore_folders: folder-with-non-imported-files\r\n```\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2019, Joris Jensen\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"\nList all files in the library folder which are not listed in the\n beets library database, including art files\n\"\"\"\n\nimport os\n\nfrom beets import util\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui import Subcommand, print_\n\n__author__ = 'https://github.com/MrNuggelz'\n\n\nclass Unimported(BeetsPlugin):\n\n def __init__(self):\n super().__init__()\n self.config.add(\n {\n 'ignore_extensions': []\n }\n )\n\n def commands(self):\n def print_unimported(lib, opts, args):\n ignore_exts = [('.' 
+ x).encode() for x\n in self.config['ignore_extensions'].as_str_seq()]\n in_folder = {\n os.path.join(r, file) for r, d, f in os.walk(lib.directory)\n for file in f if not any(\n [file.endswith(extension) for extension in\n ignore_exts])}\n in_library = {x.path for x in lib.items()}\n art_files = {x.artpath for x in lib.albums()}\n for f in in_folder - in_library - art_files:\n print_(util.displayable_path(f))\n\n unimported = Subcommand(\n 'unimported',\n help='list all files in the library folder which are not listed'\n ' in the beets library database')\n unimported.func = print_unimported\n return [unimported]\n", "path": "beetsplug/unimported.py"}]}
num_tokens_prompt: 1,277
num_tokens_diff: 332

problem_id: gh_patches_debug_2859
source: rasdani/github-patches
task_type: git_diff
in_source_id: spack__spack-26095
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CentOS 6 image doesn't build with clingo on Dockerhub ### Steps to reproduce Has to do with failure on centos:6 ``` Step 17/19 : RUN spack spec hdf5+mpi ---> Running in 8335d48ff53f ==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification. ==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification. ==> Warning: the original concretizer is currently being used. Upgrade to "clingo" at your earliest convenience. The original concretizer will be removed from Spack starting at v0.18.0 ==> Error: cannot bootstrap the "clingo" Python module from spec "clingo-bootstrap@spack+python %gcc target=x86_64" Input spec -------------------------------- hdf5+mpi Concretized -------------------------------- ==> Bootstrapping clingo from pre-built binaries The command 'docker-shell spack spec hdf5+mpi' returned a non-zero code: 3 ``` --- So it bootstraps *during* concretization? </issue> <code> [start of lib/spack/spack/schema/container.py] 1 # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 """Schema for the 'container' subsection of Spack environments.""" 6 7 _stages_from_dockerhub = { 8 'type': 'object', 9 'additionalProperties': False, 10 'properties': { 11 'os': { 12 'type': 'string', 13 'enum': ['ubuntu:18.04', 14 'ubuntu:16.04', 15 'centos:7', 16 'centos:6'] 17 }, 18 'spack': { 19 'type': 'string', 20 }, 21 }, 22 'required': ['os', 'spack'] 23 } 24 25 _custom_stages = { 26 'type': 'object', 27 'additionalProperties': False, 28 'properties': { 29 'build': {'type': 'string'}, 30 'final': {'type': 'string'} 31 }, 32 'required': ['build', 'final'] 33 } 34 35 #: List of packages for the schema below 36 _list_of_packages = { 37 'type': 'array', 38 'items': { 39 'type': 'string' 40 } 41 } 42 43 #: Schema for the container attribute included in Spack environments 44 container_schema = { 45 'type': 'object', 46 'additionalProperties': False, 47 'properties': { 48 # The recipe formats that are currently supported by the command 49 'format': { 50 'type': 'string', 51 'enum': ['docker', 'singularity'] 52 }, 53 # Describes the base image to start from and the version 54 # of Spack to be used 55 'images': {'anyOf': [_stages_from_dockerhub, _custom_stages]}, 56 # Whether or not to strip installed binaries 57 'strip': { 58 'type': 'boolean', 59 'default': True 60 }, 61 # Additional system packages that are needed at runtime 62 'os_packages': { 63 'type': 'object', 64 'properties': { 65 'command': {'type': 'string', 'enum': ['apt', 'yum']}, 66 'update': {'type': 'boolean'}, 67 'build': _list_of_packages, 68 'final': _list_of_packages 69 }, 70 'additionalProperties': False 71 }, 72 # Add labels to the image 73 'labels': { 74 'type': 'object', 75 }, 76 # Add a custom extra section at the bottom of a stage 77 'extra_instructions': { 78 'type': 'object', 79 'additionalProperties': False, 80 'properties': { 81 'build': {'type': 'string'}, 82 'final': {'type': 'string'} 83 } 84 }, 85 # Reserved for properties that are specific to each format 86 'singularity': { 87 'type': 'object', 88 'additionalProperties': False, 89 'default': {}, 90 'properties': { 91 'runscript': {'type': 'string'}, 92 'startscript': {'type': 'string'}, 93 'test': {'type': 
'string'}, 94 'help': {'type': 'string'} 95 } 96 }, 97 'docker': { 98 'type': 'object', 99 'additionalProperties': False, 100 'default': {}, 101 } 102 } 103 } 104 105 properties = {'container': container_schema} 106 [end of lib/spack/spack/schema/container.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/spack/spack/schema/container.py b/lib/spack/spack/schema/container.py
--- a/lib/spack/spack/schema/container.py
+++ b/lib/spack/spack/schema/container.py
@@ -12,8 +12,7 @@
             'type': 'string',
             'enum': ['ubuntu:18.04',
                      'ubuntu:16.04',
-                     'centos:7',
-                     'centos:6']
+                     'centos:7']
         },
         'spack': {
             'type': 'string',
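Because the `os` field is validated through a JSON-schema enum, dropping `centos:6` from the list is enough to make such environments fail fast at validation time. A hypothetical check using the `jsonschema` package directly (Spack wires this up through its own schema machinery, so the call below is for illustration only):

```python
import jsonschema

stages = {
    "type": "object",
    "properties": {
        "os": {"type": "string",
               "enum": ["ubuntu:18.04", "ubuntu:16.04", "centos:7"]},
        "spack": {"type": "string"},
    },
    "required": ["os", "spack"],
}

jsonschema.validate({"os": "centos:7", "spack": "develop"}, stages)  # passes
jsonschema.validate({"os": "centos:6", "spack": "develop"}, stages)  # ValidationError
```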
{"golden_diff": "diff --git a/lib/spack/spack/schema/container.py b/lib/spack/spack/schema/container.py\n--- a/lib/spack/spack/schema/container.py\n+++ b/lib/spack/spack/schema/container.py\n@@ -12,8 +12,7 @@\n 'type': 'string',\n 'enum': ['ubuntu:18.04',\n 'ubuntu:16.04',\n- 'centos:7',\n- 'centos:6']\n+ 'centos:7']\n },\n 'spack': {\n 'type': 'string',\n", "issue": "CentOS 6 image doesn't build with clingo on Dockerhub\n### Steps to reproduce\r\n\r\nHas to do with failure on centos:6\r\n\r\n```\r\nStep 17/19 : RUN spack spec hdf5+mpi\r\n ---> Running in 8335d48ff53f\r\n==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification.\r\n==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification.\r\n==> Warning: the original concretizer is currently being used.\r\n Upgrade to \"clingo\" at your earliest convenience. The original concretizer will be removed from Spack starting at v0.18.0\r\n==> Error: cannot bootstrap the \"clingo\" Python module from spec \"clingo-bootstrap@spack+python %gcc target=x86_64\"\r\nInput spec\r\n--------------------------------\r\nhdf5+mpi\r\n\r\nConcretized\r\n--------------------------------\r\n==> Bootstrapping clingo from pre-built binaries\r\nThe command 'docker-shell spack spec hdf5+mpi' returned a non-zero code: 3\r\n```\r\n\r\n---\r\n\r\nSo it bootstraps *during* concretization?\n", "before_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\"\"\"Schema for the 'container' subsection of Spack environments.\"\"\"\n\n_stages_from_dockerhub = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'os': {\n 'type': 'string',\n 'enum': ['ubuntu:18.04',\n 'ubuntu:16.04',\n 'centos:7',\n 'centos:6']\n },\n 'spack': {\n 'type': 'string',\n },\n },\n 'required': ['os', 'spack']\n}\n\n_custom_stages = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n },\n 'required': ['build', 'final']\n}\n\n#: List of packages for the schema below\n_list_of_packages = {\n 'type': 'array',\n 'items': {\n 'type': 'string'\n }\n}\n\n#: Schema for the container attribute included in Spack environments\ncontainer_schema = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n # The recipe formats that are currently supported by the command\n 'format': {\n 'type': 'string',\n 'enum': ['docker', 'singularity']\n },\n # Describes the base image to start from and the version\n # of Spack to be used\n 'images': {'anyOf': [_stages_from_dockerhub, _custom_stages]},\n # Whether or not to strip installed binaries\n 'strip': {\n 'type': 'boolean',\n 'default': True\n },\n # Additional system packages that are needed at runtime\n 'os_packages': {\n 'type': 'object',\n 'properties': {\n 'command': {'type': 'string', 'enum': ['apt', 'yum']},\n 'update': {'type': 'boolean'},\n 'build': _list_of_packages,\n 'final': _list_of_packages\n },\n 'additionalProperties': False\n },\n # Add labels to the image\n 'labels': {\n 'type': 'object',\n },\n # Add a custom extra section at the bottom of a stage\n 'extra_instructions': {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n }\n },\n # Reserved for properties that are 
specific to each format\n 'singularity': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n 'properties': {\n 'runscript': {'type': 'string'},\n 'startscript': {'type': 'string'},\n 'test': {'type': 'string'},\n 'help': {'type': 'string'}\n }\n },\n 'docker': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n }\n }\n}\n\nproperties = {'container': container_schema}\n", "path": "lib/spack/spack/schema/container.py"}]}
num_tokens_prompt: 1,724
num_tokens_diff: 126

problem_id: gh_patches_debug_10429
source: rasdani/github-patches
task_type: git_diff
in_source_id: safe-global__safe-config-service-1107
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bad logo URL when creating a new Safe App **Describe the bug** When inserting a new Safe App, `None` is added to the logo image URL instead of the `app_id`. Re-uploading the image for the Safe App solves the problem. **To Reproduce** Steps to reproduce the behavior: - Create a new Safe App. - Check the path for the logo image is not correct (it includes `None` as ID). **Expected behavior** A correct Safe App `app_id` is added to the logo path instead of `None`. **Environment (please complete the following information):** - Staging and production. </issue> <code> [start of src/safe_apps/models.py] 1 import os 2 from enum import Enum 3 from typing import IO, Union 4 5 from django.contrib.postgres.fields import ArrayField 6 from django.core.exceptions import ValidationError 7 from django.core.files.images import get_image_dimensions 8 from django.core.validators import RegexValidator 9 from django.db import models 10 11 _HOSTNAME_VALIDATOR = RegexValidator( 12 r"^(https?:\/\/)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\/?$", 13 message="Enter a valid hostname (Without a resource path)", 14 code="invalid_hostname", 15 ) 16 17 18 def safe_app_icon_path(instance: "SafeApp", filename: str) -> str: 19 _, file_extension = os.path.splitext(filename) 20 return f"safe_apps/{instance.app_id}/icon{file_extension}" 21 22 23 def validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None: 24 width, height = get_image_dimensions(image) 25 if not width or not height: 26 raise ValidationError( 27 f"Could not get image dimensions. Width={width}, Height={height}" 28 ) 29 if width > 512 or height > 512: 30 raise ValidationError("Image width and height need to be at most 512 pixels") 31 32 33 class Provider(models.Model): 34 url = models.URLField(primary_key=True) 35 name = models.CharField(max_length=200) 36 37 def __str__(self) -> str: 38 return f"{self.name} | {self.url}" 39 40 41 class Client(models.Model): 42 url = models.CharField( 43 unique=True, 44 help_text="The domain URL client is hosted at", 45 # The maximum length of a full host name is 253 characters per RFC 1034 46 max_length=255, 47 validators=[_HOSTNAME_VALIDATOR], 48 ) 49 50 def __str__(self) -> str: 51 return f"Client: {self.url}" 52 53 54 class SafeApp(models.Model): 55 class AccessControlPolicy(str, Enum): 56 NO_RESTRICTIONS = "NO_RESTRICTIONS" 57 DOMAIN_ALLOWLIST = "DOMAIN_ALLOWLIST" 58 59 app_id = models.BigAutoField(primary_key=True) 60 visible = models.BooleanField( 61 default=True 62 ) # True if this safe-app should be visible from the view. 
False otherwise 63 url = models.URLField() 64 name = models.CharField(max_length=200) 65 icon_url = models.ImageField( 66 validators=[validate_safe_app_icon_size], 67 upload_to=safe_app_icon_path, 68 max_length=255, 69 null=True, 70 blank=True, 71 ) 72 description = models.CharField(max_length=200) 73 chain_ids = ArrayField(models.PositiveBigIntegerField()) 74 provider = models.ForeignKey( 75 Provider, null=True, blank=True, on_delete=models.SET_NULL 76 ) 77 exclusive_clients = models.ManyToManyField( 78 Client, 79 blank=True, 80 help_text="Clients that are only allowed to use this SafeApp", 81 ) 82 developer_website = models.URLField(null=True, blank=True) 83 84 def get_access_control_type(self) -> AccessControlPolicy: 85 if self.exclusive_clients.exists(): 86 return SafeApp.AccessControlPolicy.DOMAIN_ALLOWLIST 87 return SafeApp.AccessControlPolicy.NO_RESTRICTIONS 88 89 def __str__(self) -> str: 90 return f"{self.name} | {self.url} | chain_ids={self.chain_ids}" 91 92 93 class Tag(models.Model): 94 name = models.CharField(max_length=255) 95 safe_apps = models.ManyToManyField(SafeApp, blank=True) 96 97 def __str__(self) -> str: 98 return f"Tag: {self.name}" 99 100 101 class Feature(models.Model): 102 # A feature can be enabled for multiple Safe Apps and a Safe App can have multiple features enabled 103 safe_apps = models.ManyToManyField( 104 SafeApp, blank=True, help_text="Safe Apps where this feature is enabled." 105 ) 106 key = models.CharField( 107 unique=True, 108 max_length=255, 109 help_text="The unique name/key that identifies this feature", 110 ) 111 112 def __str__(self) -> str: 113 return f"Safe App Feature: {self.key}" 114 115 116 class SocialProfile(models.Model): 117 class Platform(models.TextChoices): 118 DISCORD = "DISCORD" 119 GITHUB = "GITHUB" 120 TWITTER = "TWITTER" 121 122 safe_app = models.ForeignKey(SafeApp, on_delete=models.CASCADE) 123 platform = models.CharField(choices=Platform.choices, max_length=255) 124 url = models.URLField() 125 126 def __str__(self) -> str: 127 return f"Social Profile: {self.platform} | {self.url}" 128 [end of src/safe_apps/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py
--- a/src/safe_apps/models.py
+++ b/src/safe_apps/models.py
@@ -1,4 +1,5 @@
 import os
+import uuid
 from enum import Enum
 from typing import IO, Union

@@ -17,7 +18,7 @@

 def safe_app_icon_path(instance: "SafeApp", filename: str) -> str:
     _, file_extension = os.path.splitext(filename)
-    return f"safe_apps/{instance.app_id}/icon{file_extension}"
+    return f"safe_apps/{uuid.uuid4()}/icon{file_extension}"


 def validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:
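The root cause is an ordering issue: Django evaluates `upload_to` while building the INSERT, so on the first save the `BigAutoField` primary key is still `None` and gets baked into the path. Generating a `uuid4` component removes the dependency on the unsaved pk entirely. A self-contained sketch of both behaviours (the plain class stands in for an unsaved model instance):

```python
import os
import uuid

class SafeApp:
    app_id = None  # what a BigAutoField looks like before the first INSERT

def old_icon_path(instance, filename):
    _, ext = os.path.splitext(filename)
    return f"safe_apps/{instance.app_id}/icon{ext}"

def new_icon_path(instance, filename):
    _, ext = os.path.splitext(filename)
    return f"safe_apps/{uuid.uuid4()}/icon{ext}"

print(old_icon_path(SafeApp(), "logo.png"))  # safe_apps/None/icon.png
print(new_icon_path(SafeApp(), "logo.png"))  # safe_apps/<random uuid>/icon.png
```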
{"golden_diff": "diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py\n--- a/src/safe_apps/models.py\n+++ b/src/safe_apps/models.py\n@@ -1,4 +1,5 @@\n import os\n+import uuid\n from enum import Enum\n from typing import IO, Union\n \n@@ -17,7 +18,7 @@\n \n def safe_app_icon_path(instance: \"SafeApp\", filename: str) -> str:\n _, file_extension = os.path.splitext(filename)\n- return f\"safe_apps/{instance.app_id}/icon{file_extension}\"\n+ return f\"safe_apps/{uuid.uuid4()}/icon{file_extension}\"\n \n \n def validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:\n", "issue": "Bad logo URL when creating a new Safe App\n**Describe the bug**\r\nWhen inserting a new Safe App, `None` is added to the logo image URL instead of the `app_id`. \r\n\r\nRe-uploading the image for the Safe App solves the problem.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n- Create a new Safe App.\r\n- Check the path for the logo image is not correct (it includes `None` as ID).\r\n\r\n**Expected behavior**\r\nA correct Safe App `app_id` is added to the logo path instead of `None`.\r\n\r\n**Environment (please complete the following information):**\r\n - Staging and production.\r\n\n", "before_files": [{"content": "import os\nfrom enum import Enum\nfrom typing import IO, Union\n\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.core.exceptions import ValidationError\nfrom django.core.files.images import get_image_dimensions\nfrom django.core.validators import RegexValidator\nfrom django.db import models\n\n_HOSTNAME_VALIDATOR = RegexValidator(\n r\"^(https?:\\/\\/)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\/?$\",\n message=\"Enter a valid hostname (Without a resource path)\",\n code=\"invalid_hostname\",\n)\n\n\ndef safe_app_icon_path(instance: \"SafeApp\", filename: str) -> str:\n _, file_extension = os.path.splitext(filename)\n return f\"safe_apps/{instance.app_id}/icon{file_extension}\"\n\n\ndef validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:\n width, height = get_image_dimensions(image)\n if not width or not height:\n raise ValidationError(\n f\"Could not get image dimensions. Width={width}, Height={height}\"\n )\n if width > 512 or height > 512:\n raise ValidationError(\"Image width and height need to be at most 512 pixels\")\n\n\nclass Provider(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n\n def __str__(self) -> str:\n return f\"{self.name} | {self.url}\"\n\n\nclass Client(models.Model):\n url = models.CharField(\n unique=True,\n help_text=\"The domain URL client is hosted at\",\n # The maximum length of a full host name is 253 characters per RFC 1034\n max_length=255,\n validators=[_HOSTNAME_VALIDATOR],\n )\n\n def __str__(self) -> str:\n return f\"Client: {self.url}\"\n\n\nclass SafeApp(models.Model):\n class AccessControlPolicy(str, Enum):\n NO_RESTRICTIONS = \"NO_RESTRICTIONS\"\n DOMAIN_ALLOWLIST = \"DOMAIN_ALLOWLIST\"\n\n app_id = models.BigAutoField(primary_key=True)\n visible = models.BooleanField(\n default=True\n ) # True if this safe-app should be visible from the view. 
False otherwise\n url = models.URLField()\n name = models.CharField(max_length=200)\n icon_url = models.ImageField(\n validators=[validate_safe_app_icon_size],\n upload_to=safe_app_icon_path,\n max_length=255,\n null=True,\n blank=True,\n )\n description = models.CharField(max_length=200)\n chain_ids = ArrayField(models.PositiveBigIntegerField())\n provider = models.ForeignKey(\n Provider, null=True, blank=True, on_delete=models.SET_NULL\n )\n exclusive_clients = models.ManyToManyField(\n Client,\n blank=True,\n help_text=\"Clients that are only allowed to use this SafeApp\",\n )\n developer_website = models.URLField(null=True, blank=True)\n\n def get_access_control_type(self) -> AccessControlPolicy:\n if self.exclusive_clients.exists():\n return SafeApp.AccessControlPolicy.DOMAIN_ALLOWLIST\n return SafeApp.AccessControlPolicy.NO_RESTRICTIONS\n\n def __str__(self) -> str:\n return f\"{self.name} | {self.url} | chain_ids={self.chain_ids}\"\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=255)\n safe_apps = models.ManyToManyField(SafeApp, blank=True)\n\n def __str__(self) -> str:\n return f\"Tag: {self.name}\"\n\n\nclass Feature(models.Model):\n # A feature can be enabled for multiple Safe Apps and a Safe App can have multiple features enabled\n safe_apps = models.ManyToManyField(\n SafeApp, blank=True, help_text=\"Safe Apps where this feature is enabled.\"\n )\n key = models.CharField(\n unique=True,\n max_length=255,\n help_text=\"The unique name/key that identifies this feature\",\n )\n\n def __str__(self) -> str:\n return f\"Safe App Feature: {self.key}\"\n\n\nclass SocialProfile(models.Model):\n class Platform(models.TextChoices):\n DISCORD = \"DISCORD\"\n GITHUB = \"GITHUB\"\n TWITTER = \"TWITTER\"\n\n safe_app = models.ForeignKey(SafeApp, on_delete=models.CASCADE)\n platform = models.CharField(choices=Platform.choices, max_length=255)\n url = models.URLField()\n\n def __str__(self) -> str:\n return f\"Social Profile: {self.platform} | {self.url}\"\n", "path": "src/safe_apps/models.py"}]}
1,946
161
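The diff in this record replaces `instance.app_id` with `uuid.uuid4()` because Django resolves the `upload_to` callable while the file field is being saved, presumably before the first `INSERT` assigns the `BigAutoField` primary key, so `instance.app_id` is still `None` and the icon lands under `safe_apps/None/`. A minimal sketch of the patched path builder (standard library only; the `instance` argument is unused after the fix):

```python
import os
import uuid


def safe_app_icon_path(instance, filename: str) -> str:
    # instance.app_id is None until the row is inserted, which is what
    # produced "safe_apps/None/icon.png"; a fresh UUID is always available.
    _, file_extension = os.path.splitext(filename)
    return f"safe_apps/{uuid.uuid4()}/icon{file_extension}"


print(safe_app_icon_path(None, "logo.png"))  # e.g. safe_apps/2f1d.../icon.png
```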
gh_patches_debug_12476
rasdani/github-patches
git_diff
bokeh__bokeh-9068
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Development guide missing `test` argument for conda install and pytest install failure on windows ### Missing `test` argument The current [developement guide](https://bokeh.pydata.org/en/latest/docs/dev_guide/setup.html#conda-packages) is missing the `test` argument for windows setups. As for OSX / Linux (bash / sh), it is: - ```conda install `python scripts/deps.py build run test```. As for windows, the `test` argument is missing for the `deps.py`: - ```conda install $(python scripts/deps.py build run).split() | where {$_}``` - ```for /F "delims=" %i in ('python scripts\deps.py build run') do (conda install %i)``` Instead, it should be: - ```conda install $(python scripts/deps.py build run test).split() | where {$_}``` - ```for /F "delims=" %i in ('python scripts\deps.py build run test') do (conda install %i)``` ### `pytest<5.0.0` fails In addition, running ```for /F "delims=" %i in ('python scripts\deps.py build run test') do (conda install %i)``` fails with error `System can't find given file.` which is due to `pytest<5.0.0`. Providing double quotes actually fixes the issue -> `conda install "pytest<5.0.0"`. </issue> <code> [start of scripts/deps.py] 1 import sys 2 import jinja2 3 import yaml 4 5 6 def load_setup_py_data(): 7 import os 8 import setuptools 9 os.environ['CONDA_BUILD_STATE'] = 'RENDER' 10 data = {} 11 12 def _setup(**kw): data.update(kw) 13 setuptools.setup = _setup 14 return data 15 16 meta_src = jinja2.Template(open("conda.recipe/meta.yaml").read()) 17 meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data), 18 Loader=yaml.FullLoader) 19 20 section = { 21 "build" : meta_src["requirements"]["build"], 22 "deploy" : meta_src["extra"]["deploy"], 23 "run" : meta_src["requirements"]["run"], 24 "test" : meta_src["test"]["requires"], 25 } 26 27 spec = [] 28 for name in sys.argv[1:]: 29 spec += section[name] 30 31 # bare python unpins python version causing upgrade to latest 32 if 'python' in spec: spec.remove('python') 33 34 deps = "" 35 deps += " ".join(s for s in spec) 36 deps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec 37 deps = deps.replace(' <', '<') 38 deps = deps.replace(' [unix]', ' ') 39 40 print(deps) 41 [end of scripts/deps.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/deps.py b/scripts/deps.py
--- a/scripts/deps.py
+++ b/scripts/deps.py
@@ -1,4 +1,5 @@
 import sys
+import platform
 import jinja2
 import yaml
 
@@ -31,6 +32,10 @@
 # bare python unpins python version causing upgrade to latest
 if 'python' in spec: spec.remove('python')
 
+# add double quotes to specs for windows, fixes #9065
+if "windows" in platform.platform().lower():
+    spec = ['"{}"'.format(s) for s in spec]
+
 deps = ""
 deps += " ".join(s for s in spec)
 deps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec
{"golden_diff": "diff --git a/scripts/deps.py b/scripts/deps.py\n--- a/scripts/deps.py\n+++ b/scripts/deps.py\n@@ -1,4 +1,5 @@\n import sys\n+import platform\n import jinja2\n import yaml\n \n@@ -31,6 +32,10 @@\n # bare python unpins python version causing upgrade to latest\n if 'python' in spec: spec.remove('python')\n \n+# add double quotes to specs for windows, fixes #9065\n+if \"windows\" in platform.platform().lower():\n+ spec = ['\"{}\"'.format(s) for s in spec]\n+\n deps = \"\"\n deps += \" \".join(s for s in spec)\n deps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec\n", "issue": "[BUG] Development guide missing `test` argument for conda install and pytest install failure on windows\n### Missing `test` argument\r\n\r\nThe current [developement guide](https://bokeh.pydata.org/en/latest/docs/dev_guide/setup.html#conda-packages) is missing the `test` argument for windows setups. \r\n\r\nAs for OSX / Linux (bash / sh), it is: \r\n- ```conda install `python scripts/deps.py build run test```.\r\n\r\nAs for windows, the `test` argument is missing for the `deps.py`:\r\n- ```conda install $(python scripts/deps.py build run).split() | where {$_}```\r\n- ```for /F \"delims=\" %i in ('python scripts\\deps.py build run') do (conda install %i)```\r\n\r\nInstead, it should be:\r\n- ```conda install $(python scripts/deps.py build run test).split() | where {$_}```\r\n- ```for /F \"delims=\" %i in ('python scripts\\deps.py build run test') do (conda install %i)```\r\n\r\n### `pytest<5.0.0` fails\r\nIn addition, running ```for /F \"delims=\" %i in ('python scripts\\deps.py build run test') do (conda install %i)``` fails with error `System can't find given file.` which is due to `pytest<5.0.0`. Providing double quotes actually fixes the issue -> `conda install \"pytest<5.0.0\"`.\n", "before_files": [{"content": "import sys\nimport jinja2\nimport yaml\n\n\ndef load_setup_py_data():\n import os\n import setuptools\n os.environ['CONDA_BUILD_STATE'] = 'RENDER'\n data = {}\n\n def _setup(**kw): data.update(kw)\n setuptools.setup = _setup\n return data\n\nmeta_src = jinja2.Template(open(\"conda.recipe/meta.yaml\").read())\nmeta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),\n Loader=yaml.FullLoader)\n\nsection = {\n \"build\" : meta_src[\"requirements\"][\"build\"],\n \"deploy\" : meta_src[\"extra\"][\"deploy\"],\n \"run\" : meta_src[\"requirements\"][\"run\"],\n \"test\" : meta_src[\"test\"][\"requires\"],\n}\n\nspec = []\nfor name in sys.argv[1:]:\n spec += section[name]\n\n# bare python unpins python version causing upgrade to latest\nif 'python' in spec: spec.remove('python')\n\ndeps = \"\"\ndeps += \" \".join(s for s in spec)\ndeps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec\ndeps = deps.replace(' <', '<')\ndeps = deps.replace(' [unix]', ' ')\n\nprint(deps)\n", "path": "scripts/deps.py"}]}
1,197
178
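The quoting step added by this diff matters because `cmd.exe` and PowerShell parse the `<` in a spec such as `pytest<5.0.0` as input redirection, which is what produces the `System can't find given file.` error from the issue. A self-contained sketch of the logic (the sample specs are illustrative, not taken from bokeh's recipe):

```python
import platform

spec = ["pytest <5.0.0", "jinja2 >=2.7"]  # illustrative specs

# conda syntax doesn't allow spaces between package name and version
deps = [s.replace(" >=", ">=").replace(" <", "<") for s in spec]

# On Windows, wrap each spec in double quotes so that `<` is not parsed
# as shell redirection when the output is pasted into `conda install`.
if "windows" in platform.platform().lower():
    deps = ['"{}"'.format(s) for s in deps]

print(" ".join(deps))
```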
gh_patches_debug_12473
rasdani/github-patches
git_diff
urllib3__urllib3-2216
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Is HTTPHeaderDict a public API to make requests? `HTTPHeaderDict` was initially designed to handle response headers, and is not documented for request headers. * Should it be documented? * How should it be imported? The current options are `from urllib3.response import HTTPHeaderDict` and `from urllib3._collections import HTTPHeaderDict`, and they don't feel right. </issue> <code> [start of src/urllib3/__init__.py] 1 """ 2 Python HTTP library with thread-safe connection pooling, file post support, user friendly, and more 3 """ 4 5 # Set default logging handler to avoid "No handler found" warnings. 6 import logging 7 import warnings 8 from logging import NullHandler 9 10 from . import exceptions 11 from ._version import __version__ 12 from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url 13 from .filepost import encode_multipart_formdata 14 from .poolmanager import PoolManager, ProxyManager, proxy_from_url 15 from .response import HTTPResponse 16 from .util.request import make_headers 17 from .util.retry import Retry 18 from .util.timeout import Timeout 19 20 __author__ = "Andrey Petrov ([email protected])" 21 __license__ = "MIT" 22 __version__ = __version__ 23 24 __all__ = ( 25 "HTTPConnectionPool", 26 "HTTPSConnectionPool", 27 "PoolManager", 28 "ProxyManager", 29 "HTTPResponse", 30 "Retry", 31 "Timeout", 32 "add_stderr_logger", 33 "connection_from_url", 34 "disable_warnings", 35 "encode_multipart_formdata", 36 "make_headers", 37 "proxy_from_url", 38 "request", 39 ) 40 41 logging.getLogger(__name__).addHandler(NullHandler()) 42 43 44 def add_stderr_logger(level=logging.DEBUG): 45 """ 46 Helper for quickly adding a StreamHandler to the logger. Useful for 47 debugging. 48 49 Returns the handler after adding it. 50 """ 51 # This method needs to be in this __init__.py to get the __name__ correct 52 # even if urllib3 is vendored within another package. 53 logger = logging.getLogger(__name__) 54 handler = logging.StreamHandler() 55 handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s")) 56 logger.addHandler(handler) 57 logger.setLevel(level) 58 logger.debug("Added a stderr logging handler to logger: %s", __name__) 59 return handler 60 61 62 # ... Clean up. 63 del NullHandler 64 65 66 # All warning filters *must* be appended unless you're really certain that they 67 # shouldn't be: otherwise, it's very hard for users to use most Python 68 # mechanisms to silence them. 69 # SecurityWarning's always go off by default. 70 warnings.simplefilter("always", exceptions.SecurityWarning, append=True) 71 # InsecurePlatformWarning's don't vary between requests, so we keep it default. 72 warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True) 73 # SNIMissingWarnings should go off only once. 74 warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True) 75 76 77 def disable_warnings(category=exceptions.HTTPWarning): 78 """ 79 Helper for quickly disabling all urllib3 warnings. 80 """ 81 warnings.simplefilter("ignore", category) 82 83 84 _DEFAULT_POOL = PoolManager() 85 86 87 def request(method, url, fields=None, headers=None): 88 """ 89 A convenience, top-level request method. It uses a module-global ``PoolManager`` instance. 90 Therefore, its side effects could be shared across dependencies relying on it. 91 To avoid side effects create a new ``PoolManager`` instance and use it instead. 
92 The method does not accept low-level ``**urlopen_kw`` keyword arguments. 93 """ 94 95 return _DEFAULT_POOL.request(method, url, fields=fields, headers=headers) 96 [end of src/urllib3/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/urllib3/__init__.py b/src/urllib3/__init__.py
--- a/src/urllib3/__init__.py
+++ b/src/urllib3/__init__.py
@@ -8,6 +8,7 @@
 from logging import NullHandler
 
 from . import exceptions
+from ._collections import HTTPHeaderDict
 from ._version import __version__
 from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
 from .filepost import encode_multipart_formdata
@@ -23,6 +24,7 @@
 
 __all__ = (
     "HTTPConnectionPool",
+    "HTTPHeaderDict",
     "HTTPSConnectionPool",
    "PoolManager",
    "ProxyManager",
{"golden_diff": "diff --git a/src/urllib3/__init__.py b/src/urllib3/__init__.py\n--- a/src/urllib3/__init__.py\n+++ b/src/urllib3/__init__.py\n@@ -8,6 +8,7 @@\n from logging import NullHandler\n \n from . import exceptions\n+from ._collections import HTTPHeaderDict\n from ._version import __version__\n from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url\n from .filepost import encode_multipart_formdata\n@@ -23,6 +24,7 @@\n \n __all__ = (\n \"HTTPConnectionPool\",\n+ \"HTTPHeaderDict\",\n \"HTTPSConnectionPool\",\n \"PoolManager\",\n \"ProxyManager\",\n", "issue": "Is HTTPHeaderDict a public API to make requests?\n`HTTPHeaderDict` was initially designed to handle response headers, and is not documented for request headers.\r\n\r\n * Should it be documented?\r\n * How should it be imported? The current options are `from urllib3.response import HTTPHeaderDict` and `from urllib3._collections import HTTPHeaderDict`, and they don't feel right.\n", "before_files": [{"content": "\"\"\"\nPython HTTP library with thread-safe connection pooling, file post support, user friendly, and more\n\"\"\"\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\nimport warnings\nfrom logging import NullHandler\n\nfrom . import exceptions\nfrom ._version import __version__\nfrom .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url\nfrom .filepost import encode_multipart_formdata\nfrom .poolmanager import PoolManager, ProxyManager, proxy_from_url\nfrom .response import HTTPResponse\nfrom .util.request import make_headers\nfrom .util.retry import Retry\nfrom .util.timeout import Timeout\n\n__author__ = \"Andrey Petrov ([email protected])\"\n__license__ = \"MIT\"\n__version__ = __version__\n\n__all__ = (\n \"HTTPConnectionPool\",\n \"HTTPSConnectionPool\",\n \"PoolManager\",\n \"ProxyManager\",\n \"HTTPResponse\",\n \"Retry\",\n \"Timeout\",\n \"add_stderr_logger\",\n \"connection_from_url\",\n \"disable_warnings\",\n \"encode_multipart_formdata\",\n \"make_headers\",\n \"proxy_from_url\",\n \"request\",\n)\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\n\ndef add_stderr_logger(level=logging.DEBUG):\n \"\"\"\n Helper for quickly adding a StreamHandler to the logger. Useful for\n debugging.\n\n Returns the handler after adding it.\n \"\"\"\n # This method needs to be in this __init__.py to get the __name__ correct\n # even if urllib3 is vendored within another package.\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\"))\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.debug(\"Added a stderr logging handler to logger: %s\", __name__)\n return handler\n\n\n# ... 
Clean up.\ndel NullHandler\n\n\n# All warning filters *must* be appended unless you're really certain that they\n# shouldn't be: otherwise, it's very hard for users to use most Python\n# mechanisms to silence them.\n# SecurityWarning's always go off by default.\nwarnings.simplefilter(\"always\", exceptions.SecurityWarning, append=True)\n# InsecurePlatformWarning's don't vary between requests, so we keep it default.\nwarnings.simplefilter(\"default\", exceptions.InsecurePlatformWarning, append=True)\n# SNIMissingWarnings should go off only once.\nwarnings.simplefilter(\"default\", exceptions.SNIMissingWarning, append=True)\n\n\ndef disable_warnings(category=exceptions.HTTPWarning):\n \"\"\"\n Helper for quickly disabling all urllib3 warnings.\n \"\"\"\n warnings.simplefilter(\"ignore\", category)\n\n\n_DEFAULT_POOL = PoolManager()\n\n\ndef request(method, url, fields=None, headers=None):\n \"\"\"\n A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.\n Therefore, its side effects could be shared across dependencies relying on it.\n To avoid side effects create a new ``PoolManager`` instance and use it instead.\n The method does not accept low-level ``**urlopen_kw`` keyword arguments.\n \"\"\"\n\n return _DEFAULT_POOL.request(method, url, fields=fields, headers=headers)\n", "path": "src/urllib3/__init__.py"}]}
1,492
160
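With the re-export in place, callers can import the class from the package root instead of reaching into the private `_collections` module. A short usage sketch (`add` keeping multiple values per key and `__getitem__` joining them with a comma is the documented `HTTPHeaderDict` behaviour):

```python
from urllib3 import HTTPHeaderDict  # available once this patch lands

headers = HTTPHeaderDict()
headers["Content-Type"] = "application/json"
headers.add("Accept", "application/json")
headers.add("Accept", "text/plain")  # both values are retained
print(headers["Accept"])             # application/json, text/plain
```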
gh_patches_debug_3103
rasdani/github-patches
git_diff
conan-io__conan-center-index-1534
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [conan.io/center] parallel-hashmap/1.31 merged but not found in conan center Even though https://github.com/conan-io/conan-center-index/pull/1253 has been merged, `parallel-hashmap/1.31` can't be found in Web UI or with `conan search` </issue> <code> [start of recipes/parallel-hashmap/all/conanfile.py] 1 import os 2 3 from conans import ConanFile, tools 4 5 class ParallelHashmapConan(ConanFile): 6 name = "parallel-hashmap" 7 description = "A family of header-only, very fast and memory-friendly hashmap and btree containers." 8 license = "Apache-2.0" 9 topics = ("conan", "parallel-hashmap", "parallel", "hashmap", "btree") 10 homepage = "https://github.com/greg7mdp/parallel-hashmap" 11 url = "https://github.com/conan-io/conan-center-index" 12 no_copy_source = True 13 14 @property 15 def _source_subfolder(self): 16 return "source_subfolder" 17 18 def source(self): 19 tools.get(**self.conan_data["sources"][self.version]) 20 os.rename(self.name + "-" + self.version, self._source_subfolder) 21 22 def package(self): 23 self.copy("LICENSE", dst="licenses", src=self._source_subfolder) 24 self.copy("*.h", 25 dst=os.path.join("include", "parallel_hashmap"), 26 src=os.path.join(self._source_subfolder, "parallel_hashmap")) 27 self.copy("phmap.natvis", dst="res", src=self._source_subfolder) 28 29 def package_id(self): 30 self.info.header_only() 31 [end of recipes/parallel-hashmap/all/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/parallel-hashmap/all/conanfile.py b/recipes/parallel-hashmap/all/conanfile.py
--- a/recipes/parallel-hashmap/all/conanfile.py
+++ b/recipes/parallel-hashmap/all/conanfile.py
@@ -1,7 +1,7 @@
 import os
-
 from conans import ConanFile, tools
 
+
 class ParallelHashmapConan(ConanFile):
     name = "parallel-hashmap"
     description = "A family of header-only, very fast and memory-friendly hashmap and btree containers."
{"golden_diff": "diff --git a/recipes/parallel-hashmap/all/conanfile.py b/recipes/parallel-hashmap/all/conanfile.py\n--- a/recipes/parallel-hashmap/all/conanfile.py\n+++ b/recipes/parallel-hashmap/all/conanfile.py\n@@ -1,7 +1,7 @@\n import os\n-\n from conans import ConanFile, tools\n \n+\n class ParallelHashmapConan(ConanFile):\n name = \"parallel-hashmap\"\n description = \"A family of header-only, very fast and memory-friendly hashmap and btree containers.\"\n", "issue": "[conan.io/center] parallel-hashmap/1.31 merged but not found in conan center\nEven though https://github.com/conan-io/conan-center-index/pull/1253 has been merged, `parallel-hashmap/1.31` can't be found in Web UI or with `conan search`\r\n\n", "before_files": [{"content": "import os\n\nfrom conans import ConanFile, tools\n\nclass ParallelHashmapConan(ConanFile):\n name = \"parallel-hashmap\"\n description = \"A family of header-only, very fast and memory-friendly hashmap and btree containers.\"\n license = \"Apache-2.0\"\n topics = (\"conan\", \"parallel-hashmap\", \"parallel\", \"hashmap\", \"btree\")\n homepage = \"https://github.com/greg7mdp/parallel-hashmap\"\n url = \"https://github.com/conan-io/conan-center-index\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(self.name + \"-\" + self.version, self._source_subfolder)\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*.h\",\n dst=os.path.join(\"include\", \"parallel_hashmap\"),\n src=os.path.join(self._source_subfolder, \"parallel_hashmap\"))\n self.copy(\"phmap.natvis\", dst=\"res\", src=self._source_subfolder)\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/parallel-hashmap/all/conanfile.py"}]}
950
121
gh_patches_debug_25403
rasdani/github-patches
git_diff
encode__uvicorn-701
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> UnicodeDecodeError when decoding bad headers Someone (or some bot) was spamming my sever with requests to potential vulnerabilities. One of the attacks is for a potential vulnerability in php, which sets the the `x-forwarded-for` header to the following value: ``` }__test|O:21:"JDatabaseDriverMysqli":3:{s:2:"fc";O:17:"JSimplepieFactory":0:{}s:21:"\\0\\0\\0disconnectHandlers";a:1:{i:0;a:2:{i:0;O:9:"SimplePie":5:{s:8:"sanitize";O:20:"JDatabaseDriverMysql":0:{}s:8:"feed_url";s:56:"die(md5(DIRECTORY_SEPARATOR));JFactory::getConfig();exit";s:19:"cache_name_function";s:6:"assert";s:5:"cache";b:1;s:11:"cache_class";O:20:"JDatabaseDriverMysql":0:{}}i:1;s:4:"init";}}s:13:"\\0\\0\\0connection";b:1;}\xf0\xfd\xfd\xfd, ... ``` This leads to this exception: ``` Exception in ASGI application Traceback (most recent call last): File "/usr/local/lib/python3.8/site-packages/uvicorn/protocols/http/httptools_impl.py", line 385, in run_asgi result = await app(self.scope, self.receive, self.send) File "/usr/local/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py", line 40, in __call__ x_forwarded_for = headers[b"x-forwarded-for"].decode("ascii") UnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 427: ordinal not in range(128) ``` As it's due to malformed header from the client, I would expect this should be a 400 error instead? </issue> <code> [start of uvicorn/middleware/proxy_headers.py] 1 """ 2 This middleware can be used when a known proxy is fronting the application, 3 and is trusted to be properly setting the `X-Forwarded-Proto` and 4 `X-Forwarded-For` headers with the connecting client information. 5 6 Modifies the `client` and `scheme` information so that they reference 7 the connecting client, rather that the connecting proxy. 8 9 https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies 10 """ 11 12 13 class ProxyHeadersMiddleware: 14 def __init__(self, app, trusted_hosts="127.0.0.1"): 15 self.app = app 16 if isinstance(trusted_hosts, str): 17 self.trusted_hosts = [item.strip() for item in trusted_hosts.split(",")] 18 else: 19 self.trusted_hosts = trusted_hosts 20 self.always_trust = "*" in self.trusted_hosts 21 22 async def __call__(self, scope, receive, send): 23 if scope["type"] in ("http", "websocket"): 24 client_addr = scope.get("client") 25 client_host = client_addr[0] if client_addr else None 26 27 if self.always_trust or client_host in self.trusted_hosts: 28 headers = dict(scope["headers"]) 29 30 if b"x-forwarded-proto" in headers: 31 # Determine if the incoming request was http or https based on 32 # the X-Forwarded-Proto header. 33 x_forwarded_proto = headers[b"x-forwarded-proto"].decode("ascii") 34 scope["scheme"] = x_forwarded_proto.strip() 35 36 if b"x-forwarded-for" in headers: 37 # Determine the client address from the last trusted IP in the 38 # X-Forwarded-For header. We've lost the connecting client's port 39 # information by now, so only include the host. 40 x_forwarded_for = headers[b"x-forwarded-for"].decode("ascii") 41 host = x_forwarded_for.split(",")[-1].strip() 42 port = 0 43 scope["client"] = (host, port) 44 45 return await self.app(scope, receive, send) 46 [end of uvicorn/middleware/proxy_headers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/uvicorn/middleware/proxy_headers.py b/uvicorn/middleware/proxy_headers.py
--- a/uvicorn/middleware/proxy_headers.py
+++ b/uvicorn/middleware/proxy_headers.py
@@ -30,14 +30,14 @@
                 if b"x-forwarded-proto" in headers:
                     # Determine if the incoming request was http or https based on
                     # the X-Forwarded-Proto header.
-                    x_forwarded_proto = headers[b"x-forwarded-proto"].decode("ascii")
+                    x_forwarded_proto = headers[b"x-forwarded-proto"].decode("latin1")
                     scope["scheme"] = x_forwarded_proto.strip()
 
                 if b"x-forwarded-for" in headers:
                     # Determine the client address from the last trusted IP in the
                     # X-Forwarded-For header. We've lost the connecting client's port
                     # information by now, so only include the host.
-                    x_forwarded_for = headers[b"x-forwarded-for"].decode("ascii")
+                    x_forwarded_for = headers[b"x-forwarded-for"].decode("latin1")
                     host = x_forwarded_for.split(",")[-1].strip()
                     port = 0
                     scope["client"] = (host, port)
{"golden_diff": "diff --git a/uvicorn/middleware/proxy_headers.py b/uvicorn/middleware/proxy_headers.py\n--- a/uvicorn/middleware/proxy_headers.py\n+++ b/uvicorn/middleware/proxy_headers.py\n@@ -30,14 +30,14 @@\n if b\"x-forwarded-proto\" in headers:\n # Determine if the incoming request was http or https based on\n # the X-Forwarded-Proto header.\n- x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"ascii\")\n+ x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"latin1\")\n scope[\"scheme\"] = x_forwarded_proto.strip()\n \n if b\"x-forwarded-for\" in headers:\n # Determine the client address from the last trusted IP in the\n # X-Forwarded-For header. We've lost the connecting client's port\n # information by now, so only include the host.\n- x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"ascii\")\n+ x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"latin1\")\n host = x_forwarded_for.split(\",\")[-1].strip()\n port = 0\n scope[\"client\"] = (host, port)\n", "issue": "UnicodeDecodeError when decoding bad headers\nSomeone (or some bot) was spamming my sever with requests to potential vulnerabilities.\r\n\r\nOne of the attacks is for a potential vulnerability in php, which sets the the `x-forwarded-for` header to the following value:\r\n```\r\n}__test|O:21:\"JDatabaseDriverMysqli\":3:{s:2:\"fc\";O:17:\"JSimplepieFactory\":0:{}s:21:\"\\\\0\\\\0\\\\0disconnectHandlers\";a:1:{i:0;a:2:{i:0;O:9:\"SimplePie\":5:{s:8:\"sanitize\";O:20:\"JDatabaseDriverMysql\":0:{}s:8:\"feed_url\";s:56:\"die(md5(DIRECTORY_SEPARATOR));JFactory::getConfig();exit\";s:19:\"cache_name_function\";s:6:\"assert\";s:5:\"cache\";b:1;s:11:\"cache_class\";O:20:\"JDatabaseDriverMysql\":0:{}}i:1;s:4:\"init\";}}s:13:\"\\\\0\\\\0\\\\0connection\";b:1;}\\xf0\\xfd\\xfd\\xfd, ...\r\n```\r\n\r\nThis leads to this exception:\r\n\r\n```\r\nException in ASGI application\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 385, in run_asgi\r\n result = await app(self.scope, self.receive, self.send)\r\n File \"/usr/local/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py\", line 40, in __call__\r\n x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"ascii\")\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 427: ordinal not in range(128)\r\n```\r\n\r\nAs it's due to malformed header from the client, I would expect this should be a 400 error instead?\n", "before_files": [{"content": "\"\"\"\nThis middleware can be used when a known proxy is fronting the application,\nand is trusted to be properly setting the `X-Forwarded-Proto` and\n`X-Forwarded-For` headers with the connecting client information.\n\nModifies the `client` and `scheme` information so that they reference\nthe connecting client, rather that the connecting proxy.\n\nhttps://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies\n\"\"\"\n\n\nclass ProxyHeadersMiddleware:\n def __init__(self, app, trusted_hosts=\"127.0.0.1\"):\n self.app = app\n if isinstance(trusted_hosts, str):\n self.trusted_hosts = [item.strip() for item in trusted_hosts.split(\",\")]\n else:\n self.trusted_hosts = trusted_hosts\n self.always_trust = \"*\" in self.trusted_hosts\n\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] in (\"http\", \"websocket\"):\n client_addr = scope.get(\"client\")\n client_host = client_addr[0] if client_addr else None\n\n if self.always_trust or client_host in self.trusted_hosts:\n headers = 
dict(scope[\"headers\"])\n\n if b\"x-forwarded-proto\" in headers:\n # Determine if the incoming request was http or https based on\n # the X-Forwarded-Proto header.\n x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"ascii\")\n scope[\"scheme\"] = x_forwarded_proto.strip()\n\n if b\"x-forwarded-for\" in headers:\n # Determine the client address from the last trusted IP in the\n # X-Forwarded-For header. We've lost the connecting client's port\n # information by now, so only include the host.\n x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"ascii\")\n host = x_forwarded_for.split(\",\")[-1].strip()\n port = 0\n scope[\"client\"] = (host, port)\n\n return await self.app(scope, receive, send)\n", "path": "uvicorn/middleware/proxy_headers.py"}]}
1,510
277
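Switching the decode from `ascii` to `latin1` removes the crash because Latin-1 is total over bytes: every value 0x00-0xFF maps to exactly one code point, so no header content can raise. A minimal reproduction (the payload bytes are illustrative, trimmed from the attack string in the issue):

```python
raw = b"php-payload\xf0\xfd\xfd\xfd, 203.0.113.7"  # illustrative header value

try:
    raw.decode("ascii")
except UnicodeDecodeError as exc:
    print(exc)  # 0xf0 is outside the 7-bit ASCII range

value = raw.decode("latin1")         # never raises: all 256 bytes are valid
host = value.split(",")[-1].strip()  # same parsing as the middleware
print(host)                          # 203.0.113.7
```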
gh_patches_debug_3991
rasdani/github-patches
git_diff
sublimelsp__LSP-450
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Don't move cursor at the end when populating the diagnostics panel When the diagnostics are populated the cursor in the panel is moved to the end. See [here](https://github.com/tomv564/LSP/blob/2869978d8b46d717da27eb0ac7a7e234840b218d/plugin/core/panels.py#L53-L56). Is there a reason for this? Because I can't use the `f4` keybinding to goto the next result when it is opened. Instead I need first to press the `shift + f4`, which is the backward direction. Here is a simple solution. ```diff def run(self, edit, characters): self.view.replace(edit, sublime.Region(0, self.view.size()), characters) - # Move cursor to the end + # Clear the selection selection = self.view.sel() selection.clear() - selection.add(sublime.Region(self.view.size(), self.view.size())) ``` </issue> <code> [start of plugin/core/panels.py] 1 import sublime 2 import sublime_plugin 3 4 5 OUTPUT_PANEL_SETTINGS = { 6 "auto_indent": False, 7 "draw_indent_guides": False, 8 "draw_white_space": "None", 9 "gutter": False, 10 'is_widget': True, 11 "line_numbers": False, 12 "margin": 3, 13 "match_brackets": False, 14 "scroll_past_end": False, 15 "tab_size": 4, 16 "translate_tabs_to_spaces": False, 17 "word_wrap": False 18 } 19 20 21 def create_output_panel(window: sublime.Window, name: str) -> sublime.View: 22 panel = window.create_output_panel(name) 23 settings = panel.settings() 24 for key, value in OUTPUT_PANEL_SETTINGS.items(): 25 settings.set(key, value) 26 return panel 27 28 29 def destroy_output_panels(window: sublime.Window): 30 for panel_name in ["references", "diagnostics"]: 31 window.destroy_output_panel(panel_name) 32 33 34 class LspClearPanelCommand(sublime_plugin.TextCommand): 35 """ 36 A clear_panel command to clear the error panel. 37 """ 38 39 def run(self, edit): 40 self.view.set_read_only(False) 41 self.view.erase(edit, sublime.Region(0, self.view.size())) 42 self.view.set_read_only(True) 43 44 45 class LspUpdatePanelCommand(sublime_plugin.TextCommand): 46 """ 47 A update_panel command to update the error panel with new text. 48 """ 49 50 def run(self, edit, characters): 51 self.view.replace(edit, sublime.Region(0, self.view.size()), characters) 52 53 # Move cursor to the end 54 selection = self.view.sel() 55 selection.clear() 56 selection.add(sublime.Region(self.view.size(), self.view.size())) 57 [end of plugin/core/panels.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugin/core/panels.py b/plugin/core/panels.py
--- a/plugin/core/panels.py
+++ b/plugin/core/panels.py
@@ -50,7 +50,6 @@
     def run(self, edit, characters):
         self.view.replace(edit, sublime.Region(0, self.view.size()), characters)
 
-        # Move cursor to the end
+        # Clear the selection
         selection = self.view.sel()
         selection.clear()
-        selection.add(sublime.Region(self.view.size(), self.view.size()))
{"golden_diff": "diff --git a/plugin/core/panels.py b/plugin/core/panels.py\n--- a/plugin/core/panels.py\n+++ b/plugin/core/panels.py\n@@ -50,7 +50,6 @@\n def run(self, edit, characters):\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\n \n- # Move cursor to the end\n+ # Clear the selection\n selection = self.view.sel()\n selection.clear()\n- selection.add(sublime.Region(self.view.size(), self.view.size()))\n", "issue": "Don't move cursor at the end when populating the diagnostics panel\nWhen the diagnostics are populated the cursor in the panel is moved to the end. See [here](https://github.com/tomv564/LSP/blob/2869978d8b46d717da27eb0ac7a7e234840b218d/plugin/core/panels.py#L53-L56).\r\n\r\nIs there a reason for this? \r\n\r\nBecause I can't use the `f4` keybinding to goto the next result when it is opened.\r\nInstead I need first to press the `shift + f4`, which is the backward direction.\r\n\r\n\r\nHere is a simple solution.\r\n```diff\r\n def run(self, edit, characters):\r\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\r\n \r\n- # Move cursor to the end\r\n+ # Clear the selection\r\n selection = self.view.sel()\r\n selection.clear()\r\n- selection.add(sublime.Region(self.view.size(), self.view.size()))\r\n```\r\n\n", "before_files": [{"content": "import sublime\nimport sublime_plugin\n\n\nOUTPUT_PANEL_SETTINGS = {\n \"auto_indent\": False,\n \"draw_indent_guides\": False,\n \"draw_white_space\": \"None\",\n \"gutter\": False,\n 'is_widget': True,\n \"line_numbers\": False,\n \"margin\": 3,\n \"match_brackets\": False,\n \"scroll_past_end\": False,\n \"tab_size\": 4,\n \"translate_tabs_to_spaces\": False,\n \"word_wrap\": False\n}\n\n\ndef create_output_panel(window: sublime.Window, name: str) -> sublime.View:\n panel = window.create_output_panel(name)\n settings = panel.settings()\n for key, value in OUTPUT_PANEL_SETTINGS.items():\n settings.set(key, value)\n return panel\n\n\ndef destroy_output_panels(window: sublime.Window):\n for panel_name in [\"references\", \"diagnostics\"]:\n window.destroy_output_panel(panel_name)\n\n\nclass LspClearPanelCommand(sublime_plugin.TextCommand):\n \"\"\"\n A clear_panel command to clear the error panel.\n \"\"\"\n\n def run(self, edit):\n self.view.set_read_only(False)\n self.view.erase(edit, sublime.Region(0, self.view.size()))\n self.view.set_read_only(True)\n\n\nclass LspUpdatePanelCommand(sublime_plugin.TextCommand):\n \"\"\"\n A update_panel command to update the error panel with new text.\n \"\"\"\n\n def run(self, edit, characters):\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\n\n # Move cursor to the end\n selection = self.view.sel()\n selection.clear()\n selection.add(sublime.Region(self.view.size(), self.view.size()))\n", "path": "plugin/core/panels.py"}]}
1,228
114
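The one-line removal fixes the keybinding because Sublime's next-result navigation appears to start from the caret; with the caret parked at the end of the panel, `f4` has nothing ahead of it while `shift+f4` does. A sketch of the patched command (the API calls are the stock `sublime_plugin` interface):

```python
import sublime
import sublime_plugin


class LspUpdatePanelCommand(sublime_plugin.TextCommand):
    def run(self, edit, characters):
        self.view.replace(edit, sublime.Region(0, self.view.size()), characters)
        # Clear the selection but do not re-add a region at view.size(),
        # otherwise "next result" (f4) starts searching from the end.
        self.view.sel().clear()
```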
gh_patches_debug_55627
rasdani/github-patches
git_diff
xonsh__xonsh-3527
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Weird completion issue <!--- Provide a general summary of the issue in the Title above --> <!--- If you have a question along the lines of "How do I do this Bash command in xonsh" please first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html If you don't find an answer there, please do open an issue! --> ## xonfig <details> ``` $ xonfig +------------------+-----------------+ | xonsh | 0.9.12 | | Python | 3.7.4 | | PLY | 3.11 | | have readline | True | | prompt toolkit | 2.0.9 | | shell type | prompt_toolkit2 | | pygments | 2.4.2 | | on posix | True | | on linux | False | | on darwin | True | | on windows | False | | on cygwin | False | | on msys2 | False | | is superuser | False | | default encoding | utf-8 | | xonsh encoding | utf-8 | | encoding errors | surrogateescape | +------------------+-----------------+ ``` </details> ## Expected Behavior <!--- Tell us what should happen --> Tab completion behind shell command `vim` should work ## Current Behavior <!--- Tell us what happens instead of the expected behavior --> <!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error To enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`. On Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` --> existing file is not being found by completion (see screenshot). As you can see in the lower part of the screenshot, the file `pip_packages_to_install.txt` exists in the current folder but isn't found when used behind the shell command `vim` (but does work behind `cat`). Is this maybe created by interfering completions installed elsewhere? Maybe some vim completions from homebrew? 
<img width="822" alt="Screenshot 2019-10-31 14 11 02" src="https://user-images.githubusercontent.com/69774/67982582-99090380-fbe8-11e9-839a-b6fd0536a3ed.png"> </issue> <code> [start of xonsh/completers/pip.py] 1 """Completers for pip.""" 2 # pylint: disable=invalid-name, missing-docstring, unsupported-membership-test 3 # pylint: disable=unused-argument, not-an-iterable 4 import re 5 import subprocess 6 7 import xonsh.lazyasd as xl 8 9 10 @xl.lazyobject 11 def PIP_RE(): 12 return re.compile(r"\bx?pip(?:\d|\.)*") 13 14 15 @xl.lazyobject 16 def PIP_LIST_RE(): 17 return re.compile(r"\bx?pip(?:\d|\.)* (?:uninstall|show)") 18 19 20 @xl.lazyobject 21 def ALL_COMMANDS(): 22 try: 23 help_text = str( 24 subprocess.check_output(["pip", "--help"], stderr=subprocess.DEVNULL) 25 ) 26 except FileNotFoundError: 27 return [] 28 commands = re.findall(r" (\w+) ", help_text) 29 return [c for c in commands if c not in ["completion", "help"]] 30 31 32 def complete_pip(prefix, line, begidx, endidx, ctx): 33 """Completes python's package manager pip""" 34 line_len = len(line.split()) 35 if ( 36 (line_len > 3) 37 or (line_len > 2 and line.endswith(" ")) 38 or (not PIP_RE.search(line)) 39 ): 40 return 41 if PIP_LIST_RE.search(line): 42 try: 43 items = subprocess.check_output(["pip", "list"], stderr=subprocess.DEVNULL) 44 except FileNotFoundError: 45 return set() 46 items = items.decode("utf-8").splitlines() 47 return set(i.split()[0] for i in items if i.split()[0].startswith(prefix)) 48 49 if (line_len > 1 and line.endswith(" ")) or line_len > 2: 50 # "pip show " -> no complete (note space) 51 return 52 if prefix not in ALL_COMMANDS: 53 suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)] 54 if suggestions: 55 return suggestions, len(prefix) 56 return ALL_COMMANDS, len(prefix) 57 [end of xonsh/completers/pip.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xonsh/completers/pip.py b/xonsh/completers/pip.py
--- a/xonsh/completers/pip.py
+++ b/xonsh/completers/pip.py
@@ -9,12 +9,12 @@
 
 @xl.lazyobject
 def PIP_RE():
-    return re.compile(r"\bx?pip(?:\d|\.)*")
+    return re.compile(r"\bx?pip(?:\d|\.)*\b")
 
 
 @xl.lazyobject
 def PIP_LIST_RE():
-    return re.compile(r"\bx?pip(?:\d|\.)* (?:uninstall|show)")
+    return re.compile(r"\bx?pip(?:\d|\.)*\b (?:uninstall|show)")
 
 
 @xl.lazyobject
{"golden_diff": "diff --git a/xonsh/completers/pip.py b/xonsh/completers/pip.py\n--- a/xonsh/completers/pip.py\n+++ b/xonsh/completers/pip.py\n@@ -9,12 +9,12 @@\n \n @xl.lazyobject\n def PIP_RE():\n- return re.compile(r\"\\bx?pip(?:\\d|\\.)*\")\n+ return re.compile(r\"\\bx?pip(?:\\d|\\.)*\\b\")\n \n \n @xl.lazyobject\n def PIP_LIST_RE():\n- return re.compile(r\"\\bx?pip(?:\\d|\\.)* (?:uninstall|show)\")\n+ return re.compile(r\"\\bx?pip(?:\\d|\\.)*\\b (?:uninstall|show)\")\n \n \n @xl.lazyobject\n", "issue": "Weird completion issue\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- If you have a question along the lines of \"How do I do this Bash command in xonsh\"\r\nplease first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html\r\nIf you don't find an answer there, please do open an issue! -->\r\n\r\n## xonfig\r\n\r\n<details>\r\n\r\n```\r\n$ xonfig\r\n+------------------+-----------------+\r\n| xonsh | 0.9.12 |\r\n| Python | 3.7.4 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | 2.0.9 |\r\n| shell type | prompt_toolkit2 |\r\n| pygments | 2.4.2 |\r\n| on posix | True |\r\n| on linux | False |\r\n| on darwin | True |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n+------------------+-----------------+\r\n```\r\n\r\n</details>\r\n\r\n## Expected Behavior\r\n<!--- Tell us what should happen -->\r\nTab completion behind shell command `vim` should work\r\n\r\n## Current Behavior\r\n<!--- Tell us what happens instead of the expected behavior -->\r\n<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error\r\nTo enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`.\r\nOn Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` -->\r\nexisting file is not being found by completion (see screenshot).\r\nAs you can see in the lower part of the screenshot, the file `pip_packages_to_install.txt` exists in the current folder but isn't found when used behind the shell command `vim` (but does work behind `cat`).\r\nIs this maybe created by interfering completions installed elsewhere? 
Maybe some vim completions from homebrew?\r\n\r\n\r\n<img width=\"822\" alt=\"Screenshot 2019-10-31 14 11 02\" src=\"https://user-images.githubusercontent.com/69774/67982582-99090380-fbe8-11e9-839a-b6fd0536a3ed.png\">\n", "before_files": [{"content": "\"\"\"Completers for pip.\"\"\"\n# pylint: disable=invalid-name, missing-docstring, unsupported-membership-test\n# pylint: disable=unused-argument, not-an-iterable\nimport re\nimport subprocess\n\nimport xonsh.lazyasd as xl\n\n\[email protected]\ndef PIP_RE():\n return re.compile(r\"\\bx?pip(?:\\d|\\.)*\")\n\n\[email protected]\ndef PIP_LIST_RE():\n return re.compile(r\"\\bx?pip(?:\\d|\\.)* (?:uninstall|show)\")\n\n\[email protected]\ndef ALL_COMMANDS():\n try:\n help_text = str(\n subprocess.check_output([\"pip\", \"--help\"], stderr=subprocess.DEVNULL)\n )\n except FileNotFoundError:\n return []\n commands = re.findall(r\" (\\w+) \", help_text)\n return [c for c in commands if c not in [\"completion\", \"help\"]]\n\n\ndef complete_pip(prefix, line, begidx, endidx, ctx):\n \"\"\"Completes python's package manager pip\"\"\"\n line_len = len(line.split())\n if (\n (line_len > 3)\n or (line_len > 2 and line.endswith(\" \"))\n or (not PIP_RE.search(line))\n ):\n return\n if PIP_LIST_RE.search(line):\n try:\n items = subprocess.check_output([\"pip\", \"list\"], stderr=subprocess.DEVNULL)\n except FileNotFoundError:\n return set()\n items = items.decode(\"utf-8\").splitlines()\n return set(i.split()[0] for i in items if i.split()[0].startswith(prefix))\n\n if (line_len > 1 and line.endswith(\" \")) or line_len > 2:\n # \"pip show \" -> no complete (note space)\n return\n if prefix not in ALL_COMMANDS:\n suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)]\n if suggestions:\n return suggestions, len(prefix)\n return ALL_COMMANDS, len(prefix)\n", "path": "xonsh/completers/pip.py"}]}
1,649
174
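The trailing `\b` is the whole fix: without it, `PIP_RE` also matches inside ordinary tokens that merely begin with `pip`, such as the filename in the report, so the pip completer hijacks the line. A quick check with the two patterns copied from the diff:

```python
import re

OLD = re.compile(r"\bx?pip(?:\d|\.)*")    # before the patch
NEW = re.compile(r"\bx?pip(?:\d|\.)*\b")  # after the patch

line = "vim pip_packages_to_install.txt"
print(bool(OLD.search(line)))  # True  -> pip completer takes over
print(bool(NEW.search(line)))  # False -> normal file completion works

print(bool(NEW.search("pip3 install requests")))  # True, real pip still matches
```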
gh_patches_debug_33399
rasdani/github-patches
git_diff
plotly__plotly.py-1832
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Does Plotly 4.2.0 depend on scikit-image? I failed to `import plotly.figure_factory` because plotly seems not to install `scikit-image` when running `pip install -U plotly`. After I manually installed `scikit-image`, `import plotly.figure_factory` worked. This was not a problem in version 4.1.1. But the source code shows it depends on it. https://github.com/plotly/plotly.py/blob/b7ad5433c4e0882715781fa6c4816fc7fff62965/packages/python/plotly/plotly/figure_factory/_ternary_contour.py#L11 </issue> <code> [start of packages/python/plotly/plotly/express/__init__.py] 1 """ 2 `plotly_express` is a terse, consistent, high-level wrapper around `plotly` for rapid \ 3 data exploration and figure generation. See the gallery at https://plotly.github.io/plotly_express 4 """ 5 6 from ._chart_types import ( # noqa: F401 7 scatter, 8 scatter_3d, 9 scatter_polar, 10 scatter_ternary, 11 scatter_mapbox, 12 scatter_geo, 13 line, 14 line_3d, 15 line_polar, 16 line_ternary, 17 line_mapbox, 18 line_geo, 19 area, 20 bar, 21 bar_polar, 22 violin, 23 box, 24 strip, 25 histogram, 26 scatter_matrix, 27 parallel_coordinates, 28 parallel_categories, 29 choropleth, 30 density_contour, 31 density_heatmap, 32 ) 33 34 from ._core import ( # noqa: F401 35 set_mapbox_access_token, 36 defaults, 37 get_trendline_results, 38 ) 39 40 from . import data, colors # noqa: F401 41 42 __all__ = [ 43 "scatter", 44 "scatter_3d", 45 "scatter_polar", 46 "scatter_ternary", 47 "scatter_mapbox", 48 "scatter_geo", 49 "scatter_matrix", 50 "density_contour", 51 "density_heatmap", 52 "line", 53 "line_3d", 54 "line_polar", 55 "line_ternary", 56 "line_mapbox", 57 "line_geo", 58 "parallel_coordinates", 59 "parallel_categories", 60 "area", 61 "bar", 62 "bar_polar", 63 "violin", 64 "box", 65 "strip", 66 "histogram", 67 "choropleth", 68 "data", 69 "colors", 70 "set_mapbox_access_token", 71 "get_trendline_results", 72 ] 73 [end of packages/python/plotly/plotly/express/__init__.py] [start of packages/python/plotly/plotly/figure_factory/__init__.py] 1 from __future__ import absolute_import 2 3 from plotly import optional_imports 4 5 # Require that numpy exists for figure_factory 6 np = optional_imports.get_module("numpy") 7 if np is None: 8 raise ImportError( 9 """\ 10 The figure factory module requires the numpy package""" 11 ) 12 13 14 from plotly.figure_factory._2d_density import create_2d_density 15 from plotly.figure_factory._annotated_heatmap import create_annotated_heatmap 16 from plotly.figure_factory._bullet import create_bullet 17 from plotly.figure_factory._candlestick import create_candlestick 18 from plotly.figure_factory._dendrogram import create_dendrogram 19 from plotly.figure_factory._distplot import create_distplot 20 from plotly.figure_factory._facet_grid import create_facet_grid 21 from plotly.figure_factory._gantt import create_gantt 22 from plotly.figure_factory._ohlc import create_ohlc 23 from plotly.figure_factory._quiver import create_quiver 24 from plotly.figure_factory._scatterplot import create_scatterplotmatrix 25 from plotly.figure_factory._streamline import create_streamline 26 from plotly.figure_factory._table import create_table 27 from plotly.figure_factory._ternary_contour import create_ternary_contour 28 from plotly.figure_factory._trisurf import create_trisurf 29 from plotly.figure_factory._violin import create_violin 30 31 if optional_imports.get_module("pandas") is not None: 32 from 
plotly.figure_factory._county_choropleth import create_choropleth 33 34 __all__ = [ 35 "create_2d_density", 36 "create_annotated_heatmap", 37 "create_bullet", 38 "create_candlestick", 39 "create_dendrogram", 40 "create_distplot", 41 "create_facet_grid", 42 "create_gantt", 43 "create_ohlc", 44 "create_quiver", 45 "create_scatterplotmatrix", 46 "create_streamline", 47 "create_table", 48 "create_ternary_contour", 49 "create_trisurf", 50 "create_violin", 51 ] 52 [end of packages/python/plotly/plotly/figure_factory/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/packages/python/plotly/plotly/express/__init__.py b/packages/python/plotly/plotly/express/__init__.py
--- a/packages/python/plotly/plotly/express/__init__.py
+++ b/packages/python/plotly/plotly/express/__init__.py
@@ -2,6 +2,16 @@
 `plotly_express` is a terse, consistent, high-level wrapper around `plotly` for rapid \
 data exploration and figure generation. See the gallery at https://plotly.github.io/plotly_express
 """
+from __future__ import absolute_import
+from plotly import optional_imports
+
+pd = optional_imports.get_module("pandas")
+if pd is None:
+    raise ImportError(
+        """\
+Plotly express requires pandas to be installed."""
+    )
+
 
 from ._chart_types import (  # noqa: F401
     scatter,
diff --git a/packages/python/plotly/plotly/figure_factory/__init__.py b/packages/python/plotly/plotly/figure_factory/__init__.py
--- a/packages/python/plotly/plotly/figure_factory/__init__.py
+++ b/packages/python/plotly/plotly/figure_factory/__init__.py
@@ -24,18 +24,31 @@
 from plotly.figure_factory._scatterplot import create_scatterplotmatrix
 from plotly.figure_factory._streamline import create_streamline
 from plotly.figure_factory._table import create_table
-from plotly.figure_factory._ternary_contour import create_ternary_contour
 from plotly.figure_factory._trisurf import create_trisurf
 from plotly.figure_factory._violin import create_violin
 
 if optional_imports.get_module("pandas") is not None:
     from plotly.figure_factory._county_choropleth import create_choropleth
+else:
+
+    def create_choropleth(*args, **kwargs):
+        raise ImportError("Please install pandas to use `create_choropleth`")
+
+
+if optional_imports.get_module("skimage") is not None:
+    from plotly.figure_factory._ternary_contour import create_ternary_contour
+else:
+
+    def create_ternary_contour(*args, **kwargs):
+        raise ImportError("Please install scikit-image to use `create_ternary_contour`")
+
 
 __all__ = [
     "create_2d_density",
     "create_annotated_heatmap",
     "create_bullet",
     "create_candlestick",
+    "create_choropleth",
     "create_dendrogram",
     "create_distplot",
     "create_facet_grid",
{"golden_diff": "diff --git a/packages/python/plotly/plotly/express/__init__.py b/packages/python/plotly/plotly/express/__init__.py\n--- a/packages/python/plotly/plotly/express/__init__.py\n+++ b/packages/python/plotly/plotly/express/__init__.py\n@@ -2,6 +2,16 @@\n `plotly_express` is a terse, consistent, high-level wrapper around `plotly` for rapid \\\n data exploration and figure generation. See the gallery at https://plotly.github.io/plotly_express\n \"\"\"\n+from __future__ import absolute_import\n+from plotly import optional_imports\n+\n+pd = optional_imports.get_module(\"pandas\")\n+if pd is None:\n+ raise ImportError(\n+ \"\"\"\\\n+Plotly express requires pandas to be installed.\"\"\"\n+ )\n+\n \n from ._chart_types import ( # noqa: F401\n scatter,\ndiff --git a/packages/python/plotly/plotly/figure_factory/__init__.py b/packages/python/plotly/plotly/figure_factory/__init__.py\n--- a/packages/python/plotly/plotly/figure_factory/__init__.py\n+++ b/packages/python/plotly/plotly/figure_factory/__init__.py\n@@ -24,18 +24,31 @@\n from plotly.figure_factory._scatterplot import create_scatterplotmatrix\n from plotly.figure_factory._streamline import create_streamline\n from plotly.figure_factory._table import create_table\n-from plotly.figure_factory._ternary_contour import create_ternary_contour\n from plotly.figure_factory._trisurf import create_trisurf\n from plotly.figure_factory._violin import create_violin\n \n if optional_imports.get_module(\"pandas\") is not None:\n from plotly.figure_factory._county_choropleth import create_choropleth\n+else:\n+\n+ def create_choropleth(*args, **kwargs):\n+ raise ImportError(\"Please install pandas to use `create_choropleth`\")\n+\n+\n+if optional_imports.get_module(\"skimage\") is not None:\n+ from plotly.figure_factory._ternary_contour import create_ternary_contour\n+else:\n+\n+ def create_ternary_contour(*args, **kwargs):\n+ raise ImportError(\"Please install scikit-image to use `create_ternary_contour`\")\n+\n \n __all__ = [\n \"create_2d_density\",\n \"create_annotated_heatmap\",\n \"create_bullet\",\n \"create_candlestick\",\n+ \"create_choropleth\",\n \"create_dendrogram\",\n \"create_distplot\",\n \"create_facet_grid\",\n", "issue": "Does Plotly 4.2.0 depend on scikit-image?\nI failed to `import plotly.figure_factory` because plotly seems not to install `scikit-image` when running `pip install -U plotly`. After I manually installed `scikit-image`, `import plotly.figure_factory` worked.\r\n\r\nThis was not a problem in version 4.1.1.\r\n\r\nBut the source code shows it depends on it.\r\nhttps://github.com/plotly/plotly.py/blob/b7ad5433c4e0882715781fa6c4816fc7fff62965/packages/python/plotly/plotly/figure_factory/_ternary_contour.py#L11\n", "before_files": [{"content": "\"\"\"\n`plotly_express` is a terse, consistent, high-level wrapper around `plotly` for rapid \\\ndata exploration and figure generation. See the gallery at https://plotly.github.io/plotly_express\n\"\"\"\n\nfrom ._chart_types import ( # noqa: F401\n scatter,\n scatter_3d,\n scatter_polar,\n scatter_ternary,\n scatter_mapbox,\n scatter_geo,\n line,\n line_3d,\n line_polar,\n line_ternary,\n line_mapbox,\n line_geo,\n area,\n bar,\n bar_polar,\n violin,\n box,\n strip,\n histogram,\n scatter_matrix,\n parallel_coordinates,\n parallel_categories,\n choropleth,\n density_contour,\n density_heatmap,\n)\n\nfrom ._core import ( # noqa: F401\n set_mapbox_access_token,\n defaults,\n get_trendline_results,\n)\n\nfrom . 
import data, colors # noqa: F401\n\n__all__ = [\n \"scatter\",\n \"scatter_3d\",\n \"scatter_polar\",\n \"scatter_ternary\",\n \"scatter_mapbox\",\n \"scatter_geo\",\n \"scatter_matrix\",\n \"density_contour\",\n \"density_heatmap\",\n \"line\",\n \"line_3d\",\n \"line_polar\",\n \"line_ternary\",\n \"line_mapbox\",\n \"line_geo\",\n \"parallel_coordinates\",\n \"parallel_categories\",\n \"area\",\n \"bar\",\n \"bar_polar\",\n \"violin\",\n \"box\",\n \"strip\",\n \"histogram\",\n \"choropleth\",\n \"data\",\n \"colors\",\n \"set_mapbox_access_token\",\n \"get_trendline_results\",\n]\n", "path": "packages/python/plotly/plotly/express/__init__.py"}, {"content": "from __future__ import absolute_import\n\nfrom plotly import optional_imports\n\n# Require that numpy exists for figure_factory\nnp = optional_imports.get_module(\"numpy\")\nif np is None:\n raise ImportError(\n \"\"\"\\\nThe figure factory module requires the numpy package\"\"\"\n )\n\n\nfrom plotly.figure_factory._2d_density import create_2d_density\nfrom plotly.figure_factory._annotated_heatmap import create_annotated_heatmap\nfrom plotly.figure_factory._bullet import create_bullet\nfrom plotly.figure_factory._candlestick import create_candlestick\nfrom plotly.figure_factory._dendrogram import create_dendrogram\nfrom plotly.figure_factory._distplot import create_distplot\nfrom plotly.figure_factory._facet_grid import create_facet_grid\nfrom plotly.figure_factory._gantt import create_gantt\nfrom plotly.figure_factory._ohlc import create_ohlc\nfrom plotly.figure_factory._quiver import create_quiver\nfrom plotly.figure_factory._scatterplot import create_scatterplotmatrix\nfrom plotly.figure_factory._streamline import create_streamline\nfrom plotly.figure_factory._table import create_table\nfrom plotly.figure_factory._ternary_contour import create_ternary_contour\nfrom plotly.figure_factory._trisurf import create_trisurf\nfrom plotly.figure_factory._violin import create_violin\n\nif optional_imports.get_module(\"pandas\") is not None:\n from plotly.figure_factory._county_choropleth import create_choropleth\n\n__all__ = [\n \"create_2d_density\",\n \"create_annotated_heatmap\",\n \"create_bullet\",\n \"create_candlestick\",\n \"create_dendrogram\",\n \"create_distplot\",\n \"create_facet_grid\",\n \"create_gantt\",\n \"create_ohlc\",\n \"create_quiver\",\n \"create_scatterplotmatrix\",\n \"create_streamline\",\n \"create_table\",\n \"create_ternary_contour\",\n \"create_trisurf\",\n \"create_violin\",\n]\n", "path": "packages/python/plotly/plotly/figure_factory/__init__.py"}]}
1,809
588
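
Note on the row above: its golden diff is a textbook optional-dependency guard, which imports the heavy module only when it is installed and otherwise exposes a stub that raises an actionable ImportError at call time rather than at import time. A minimal self-contained sketch of the same pattern follows; the `get_optional_module` helper and the `fancy_feature` name are illustrative stand-ins, not part of plotly's API.

```python
import importlib.util


def get_optional_module(name):
    """Return the imported module if it is installed, else None."""
    if importlib.util.find_spec(name) is None:
        return None
    return importlib.import_module(name)


if get_optional_module("skimage") is not None:
    from skimage import measure  # heavy dependency, touched only when present

    def fancy_feature(image):
        # One example call into the optional dependency: label()
        # marks connected regions in a binary image.
        return measure.label(image)
else:
    def fancy_feature(*args, **kwargs):
        # Fail at call time, not import time, with an actionable message.
        raise ImportError("Please install scikit-image to use `fancy_feature`")
```

Because the guarded branch is the only place the dependency is named, importing the module that contains this code never fails on a machine without scikit-image; only calling `fancy_feature` does.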
gh_patches_debug_33729
rasdani/github-patches
git_diff
translate__pootle-5882
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dont create project if command fails in init_fs_project atm if for some reason this command fails it leaves a project behind </issue> <code> [start of pootle/apps/pootle_fs/management/commands/init_fs_project.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 import logging 10 11 from django.core.exceptions import ValidationError 12 from django.core.management import BaseCommand, CommandError 13 14 from pootle_format.models import Format 15 from pootle_fs.utils import FSPlugin, parse_fs_url 16 from pootle_language.models import Language 17 from pootle_project.models import Project 18 19 20 logger = logging.getLogger('pootle.fs') 21 22 23 class Command(BaseCommand): 24 help = "Init a new Pootle FS project." 25 26 def add_arguments(self, parser): 27 parser.add_argument( 28 'code', 29 metavar='CODE', 30 help='Project code' 31 ) 32 parser.add_argument( 33 'fs', 34 metavar='FS_URL', 35 help='FS url "filesystem_type+/repo/path/"' 36 ) 37 parser.add_argument( 38 'translation_mapping', 39 help='Translation mapping "<language_code>/<filename>.<ext>"', 40 metavar='TRANSLATION_MAPPING' 41 ) 42 parser.add_argument( 43 '-n', '--name', 44 action='store', 45 dest='name', 46 nargs='?', 47 help='Project name', 48 ) 49 parser.add_argument( 50 '--filetypes', 51 action='append', 52 dest='filetypes', 53 help='File types', 54 ) 55 parser.add_argument( 56 '--checkstyle', 57 action='store', 58 dest='checkstyle', 59 help='Checkstyle', 60 nargs='?', 61 default='standard' 62 ) 63 parser.add_argument( 64 '-l', '--source-language', 65 action='store', 66 dest='source_language', 67 help="Code for the project's source language", 68 nargs='?', 69 default='en' 70 ) 71 parser.add_argument( 72 '--nosync', 73 action='store_false', 74 dest='sync', 75 help='Flag if sync is unnecessary', 76 default=True 77 ) 78 79 def handle(self, **options): 80 source_language_code = options['source_language'] 81 try: 82 source_language = Language.objects.get(code=source_language_code) 83 except Language.DoesNotExist as e: 84 self.stdout.write('%s: Unknown language code.' % 85 source_language_code) 86 raise CommandError(e) 87 88 fs_type, fs_url = parse_fs_url(options['fs']) 89 code = options['code'] 90 name = options['name'] or code.capitalize() 91 92 try: 93 project = Project.objects.create( 94 code=code, 95 fullname=name, 96 treestyle='pootle_fs', 97 checkstyle=options['checkstyle'], 98 source_language=source_language) 99 except ValidationError as e: 100 raise CommandError(e) 101 102 for filetype in options["filetypes"] or ["po"]: 103 try: 104 filetype = Format.objects.get(name=filetype) 105 project.filetypes.add(filetype) 106 except Format.DoesNotExist as e: 107 raise CommandError(e) 108 109 project.config['pootle_fs.fs_type'] = fs_type 110 project.config['pootle_fs.fs_url'] = fs_url 111 project.config['pootle_fs.translation_mappings'] = { 112 'default': options['translation_mapping'] 113 } 114 if options['sync']: 115 plugin = FSPlugin(project) 116 plugin.fetch() 117 plugin.add() 118 plugin.sync() 119 [end of pootle/apps/pootle_fs/management/commands/init_fs_project.py] [start of pootle/apps/pootle_fs/localfs.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 
4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 import logging 10 import uuid 11 12 import dirsync 13 14 from django import forms 15 16 from pootle.core.delegate import revision 17 from pootle_project.models import Project 18 19 from .plugin import Plugin 20 21 22 class LocalFSPlugin(Plugin): 23 24 fs_type = "localfs" 25 _pulled = False 26 27 @property 28 def latest_hash(self): 29 return revision.get(Project)( 30 self.project).get(key="pootle.fs.fs_hash") 31 32 def push(self, response): 33 dirsync.sync( 34 self.project.local_fs_path, 35 self.fs_url, 36 "sync", 37 purge=True, 38 logger=logging.getLogger(dirsync.__name__)) 39 return response 40 41 def fetch(self): 42 synced = dirsync.sync( 43 self.fs_url, 44 self.project.local_fs_path, 45 "sync", 46 create=True, 47 purge=True, 48 logger=logging.getLogger(dirsync.__name__)) 49 if synced: 50 revision.get(Project)(self.project).set( 51 keys=["pootle.fs.fs_hash"], value=uuid.uuid4().hex) 52 53 54 class LocalFSUrlValidator(object): 55 56 help_text = "Enter an absolute path to a directory on your filesystem" 57 58 def validate(self, url): 59 if not url.startswith("/"): 60 raise forms.ValidationError(self.help_text) 61 [end of pootle/apps/pootle_fs/localfs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/apps/pootle_fs/localfs.py b/pootle/apps/pootle_fs/localfs.py --- a/pootle/apps/pootle_fs/localfs.py +++ b/pootle/apps/pootle_fs/localfs.py @@ -16,6 +16,7 @@ from pootle.core.delegate import revision from pootle_project.models import Project +from .exceptions import FSFetchError from .plugin import Plugin @@ -39,13 +40,16 @@ return response def fetch(self): - synced = dirsync.sync( - self.fs_url, - self.project.local_fs_path, - "sync", - create=True, - purge=True, - logger=logging.getLogger(dirsync.__name__)) + try: + synced = dirsync.sync( + self.fs_url, + self.project.local_fs_path, + "sync", + create=True, + purge=True, + logger=logging.getLogger(dirsync.__name__)) + except ValueError as e: + raise FSFetchError(e) if synced: revision.get(Project)(self.project).set( keys=["pootle.fs.fs_hash"], value=uuid.uuid4().hex) diff --git a/pootle/apps/pootle_fs/management/commands/init_fs_project.py b/pootle/apps/pootle_fs/management/commands/init_fs_project.py --- a/pootle/apps/pootle_fs/management/commands/init_fs_project.py +++ b/pootle/apps/pootle_fs/management/commands/init_fs_project.py @@ -12,6 +12,7 @@ from django.core.management import BaseCommand, CommandError from pootle_format.models import Format +from pootle_fs.exceptions import FSFetchError from pootle_fs.utils import FSPlugin, parse_fs_url from pootle_language.models import Language from pootle_project.models import Project @@ -112,7 +113,11 @@ 'default': options['translation_mapping'] } if options['sync']: - plugin = FSPlugin(project) - plugin.fetch() - plugin.add() - plugin.sync() + try: + plugin = FSPlugin(project) + plugin.fetch() + plugin.add() + plugin.sync() + except FSFetchError as e: + project.delete() + raise CommandError(e)
{"golden_diff": "diff --git a/pootle/apps/pootle_fs/localfs.py b/pootle/apps/pootle_fs/localfs.py\n--- a/pootle/apps/pootle_fs/localfs.py\n+++ b/pootle/apps/pootle_fs/localfs.py\n@@ -16,6 +16,7 @@\n from pootle.core.delegate import revision\n from pootle_project.models import Project\n \n+from .exceptions import FSFetchError\n from .plugin import Plugin\n \n \n@@ -39,13 +40,16 @@\n return response\n \n def fetch(self):\n- synced = dirsync.sync(\n- self.fs_url,\n- self.project.local_fs_path,\n- \"sync\",\n- create=True,\n- purge=True,\n- logger=logging.getLogger(dirsync.__name__))\n+ try:\n+ synced = dirsync.sync(\n+ self.fs_url,\n+ self.project.local_fs_path,\n+ \"sync\",\n+ create=True,\n+ purge=True,\n+ logger=logging.getLogger(dirsync.__name__))\n+ except ValueError as e:\n+ raise FSFetchError(e)\n if synced:\n revision.get(Project)(self.project).set(\n keys=[\"pootle.fs.fs_hash\"], value=uuid.uuid4().hex)\ndiff --git a/pootle/apps/pootle_fs/management/commands/init_fs_project.py b/pootle/apps/pootle_fs/management/commands/init_fs_project.py\n--- a/pootle/apps/pootle_fs/management/commands/init_fs_project.py\n+++ b/pootle/apps/pootle_fs/management/commands/init_fs_project.py\n@@ -12,6 +12,7 @@\n from django.core.management import BaseCommand, CommandError\n \n from pootle_format.models import Format\n+from pootle_fs.exceptions import FSFetchError\n from pootle_fs.utils import FSPlugin, parse_fs_url\n from pootle_language.models import Language\n from pootle_project.models import Project\n@@ -112,7 +113,11 @@\n 'default': options['translation_mapping']\n }\n if options['sync']:\n- plugin = FSPlugin(project)\n- plugin.fetch()\n- plugin.add()\n- plugin.sync()\n+ try:\n+ plugin = FSPlugin(project)\n+ plugin.fetch()\n+ plugin.add()\n+ plugin.sync()\n+ except FSFetchError as e:\n+ project.delete()\n+ raise CommandError(e)\n", "issue": "Dont create project if command fails in init_fs_project\natm if for some reason this command fails it leaves a project behind\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.management import BaseCommand, CommandError\n\nfrom pootle_format.models import Format\nfrom pootle_fs.utils import FSPlugin, parse_fs_url\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\n\n\nlogger = logging.getLogger('pootle.fs')\n\n\nclass Command(BaseCommand):\n help = \"Init a new Pootle FS project.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n 'code',\n metavar='CODE',\n help='Project code'\n )\n parser.add_argument(\n 'fs',\n metavar='FS_URL',\n help='FS url \"filesystem_type+/repo/path/\"'\n )\n parser.add_argument(\n 'translation_mapping',\n help='Translation mapping \"<language_code>/<filename>.<ext>\"',\n metavar='TRANSLATION_MAPPING'\n )\n parser.add_argument(\n '-n', '--name',\n action='store',\n dest='name',\n nargs='?',\n help='Project name',\n )\n parser.add_argument(\n '--filetypes',\n action='append',\n dest='filetypes',\n help='File types',\n )\n parser.add_argument(\n '--checkstyle',\n action='store',\n dest='checkstyle',\n help='Checkstyle',\n nargs='?',\n default='standard'\n )\n parser.add_argument(\n '-l', '--source-language',\n action='store',\n dest='source_language',\n help=\"Code for the project's source language\",\n nargs='?',\n default='en'\n )\n parser.add_argument(\n '--nosync',\n action='store_false',\n dest='sync',\n help='Flag if sync is unnecessary',\n default=True\n )\n\n def handle(self, **options):\n source_language_code = options['source_language']\n try:\n source_language = Language.objects.get(code=source_language_code)\n except Language.DoesNotExist as e:\n self.stdout.write('%s: Unknown language code.' %\n source_language_code)\n raise CommandError(e)\n\n fs_type, fs_url = parse_fs_url(options['fs'])\n code = options['code']\n name = options['name'] or code.capitalize()\n\n try:\n project = Project.objects.create(\n code=code,\n fullname=name,\n treestyle='pootle_fs',\n checkstyle=options['checkstyle'],\n source_language=source_language)\n except ValidationError as e:\n raise CommandError(e)\n\n for filetype in options[\"filetypes\"] or [\"po\"]:\n try:\n filetype = Format.objects.get(name=filetype)\n project.filetypes.add(filetype)\n except Format.DoesNotExist as e:\n raise CommandError(e)\n\n project.config['pootle_fs.fs_type'] = fs_type\n project.config['pootle_fs.fs_url'] = fs_url\n project.config['pootle_fs.translation_mappings'] = {\n 'default': options['translation_mapping']\n }\n if options['sync']:\n plugin = FSPlugin(project)\n plugin.fetch()\n plugin.add()\n plugin.sync()\n", "path": "pootle/apps/pootle_fs/management/commands/init_fs_project.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport uuid\n\nimport dirsync\n\nfrom django import forms\n\nfrom pootle.core.delegate import revision\nfrom pootle_project.models import Project\n\nfrom .plugin import Plugin\n\n\nclass LocalFSPlugin(Plugin):\n\n fs_type = \"localfs\"\n _pulled = False\n\n @property\n def latest_hash(self):\n return revision.get(Project)(\n self.project).get(key=\"pootle.fs.fs_hash\")\n\n def push(self, response):\n dirsync.sync(\n self.project.local_fs_path,\n self.fs_url,\n \"sync\",\n purge=True,\n logger=logging.getLogger(dirsync.__name__))\n return response\n\n def fetch(self):\n synced = dirsync.sync(\n self.fs_url,\n self.project.local_fs_path,\n \"sync\",\n create=True,\n purge=True,\n logger=logging.getLogger(dirsync.__name__))\n if synced:\n revision.get(Project)(self.project).set(\n keys=[\"pootle.fs.fs_hash\"], value=uuid.uuid4().hex)\n\n\nclass LocalFSUrlValidator(object):\n\n help_text = \"Enter an absolute path to a directory on your filesystem\"\n\n def validate(self, url):\n if not url.startswith(\"/\"):\n raise forms.ValidationError(self.help_text)\n", "path": "pootle/apps/pootle_fs/localfs.py"}]}
2,045
526
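
Note on the row above: the fix boils down to a create-then-rollback pattern. If any post-creation step fails, the freshly created record is deleted before the error is re-raised, so a failed command leaves no orphan behind. A hedged, framework-free sketch of that pattern follows; `ProjectStore` and `SyncError` are invented stand-ins, not Pootle or Django APIs.

```python
class SyncError(Exception):
    """Raised when a post-creation setup step fails."""


class ProjectStore:
    """Toy stand-in for an ORM: create() and delete() by project code."""

    def __init__(self):
        self.projects = {}

    def create(self, code):
        self.projects[code] = {"code": code}
        return code

    def delete(self, code):
        self.projects.pop(code, None)


def init_project(store, code, sync):
    """Create a project, then sync it; roll back creation on failure."""
    project = store.create(code)
    try:
        sync(project)
    except SyncError:
        # Undo the half-finished initialization before surfacing the
        # error, so nothing is left behind on failure.
        store.delete(project)
        raise
    return project


# Usage: a failing sync must not leave the project in the store.
store = ProjectStore()

def broken_sync(project):
    raise SyncError("fetch failed")

try:
    init_project(store, "demo", broken_sync)
except SyncError:
    assert "demo" not in store.projects  # nothing left behind
```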
gh_patches_debug_34572
rasdani/github-patches
git_diff
SigmaHQ__sigma-1895
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> sigma2attack does not support collections Collections parsing happens only in [`collection.py`](tools/sigma/parser/collection.py) it seems but [`sigma2attack`](/tools/sigma/sigma2attack.py#L24) uses good old `yaml.safe_load` on his own. that leads to errors when parsing and rules being ignored ``` [snip] Ignoring rule rules\windows\other\win_tool_psexec.yml (parsing failed) Ignoring rule rules\windows\powershell\win_powershell_web_request.yml (parsing failed) Ignoring rule rules\windows\process_access\sysmon_cmstp_execution.yml (parsing failed) Ignoring rule rules\windows\process_creation\win_apt_chafer_mar18.yml (parsing failed) Ignoring rule rules\windows\process_creation\win_apt_empiremonkey.yml (parsing failed) Ignoring rule rules\windows\process_creation\win_apt_gallium.yml (parsing failed) [snip] ``` </issue> <code> [start of tools/sigma/sigma2attack.py] 1 #!/usr/bin/env python3 2 3 import argparse 4 import glob 5 import json 6 import os 7 import sys 8 9 import yaml 10 11 def main(): 12 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 13 parser.add_argument("--rules-directory", "-d", dest="rules_dir", default="rules", help="Directory to read rules from") 14 parser.add_argument("--out-file", "-o", dest="out_file", default="heatmap.json", help="File to write the JSON layer to") 15 parser.add_argument("--no-comment", dest="no_comment", action="store_true", help="Don't store rule names in comments") 16 args = parser.parse_args() 17 18 rule_files = glob.glob(os.path.join(args.rules_dir, "**/*.yml"), recursive=True) 19 techniques_to_rules = {} 20 curr_max_technique_count = 0 21 num_rules_used = 0 22 for rule_file in rule_files: 23 try: 24 rule = yaml.safe_load(open(rule_file, encoding="utf-8").read()) 25 except yaml.YAMLError: 26 sys.stderr.write("Ignoring rule " + rule_file + " (parsing failed)\n") 27 continue 28 if "tags" not in rule: 29 sys.stderr.write("Ignoring rule " + rule_file + " (no tags)\n") 30 continue 31 tags = rule["tags"] 32 for tag in tags: 33 if tag.lower().startswith("attack.t"): 34 technique_id = tag[len("attack."):].upper() 35 num_rules_used += 1 36 if technique_id not in techniques_to_rules: 37 techniques_to_rules[technique_id] = [] 38 techniques_to_rules[technique_id].append(os.path.basename(rule_file)) 39 curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id])) 40 41 42 scores = [] 43 for technique in techniques_to_rules: 44 entry = { 45 "techniqueID": technique, 46 "score": len(techniques_to_rules[technique]), 47 } 48 if not args.no_comment: 49 entry["comment"] = "\n".join(techniques_to_rules[technique]) 50 51 scores.append(entry) 52 53 output = { 54 "domain": "mitre-enterprise", 55 "name": "Sigma rules heatmap", 56 "gradient": { 57 "colors": [ 58 "#ffffff", 59 "#ff6666" 60 ], 61 "maxValue": curr_max_technique_count, 62 "minValue": 0 63 }, 64 "versions": { 65 "navigator": "4.0", 66 "layer": "4.0" 67 }, 68 "techniques": scores, 69 } 70 71 with open(args.out_file, "w") as f: 72 f.write(json.dumps(output)) 73 print("[*] Layer file written in " + args.out_file + " (" + str(num_rules_used) + " rules)") 74 75 if __name__ == "__main__": 76 main() 77 [end of tools/sigma/sigma2attack.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/sigma/sigma2attack.py b/tools/sigma/sigma2attack.py --- a/tools/sigma/sigma2attack.py +++ b/tools/sigma/sigma2attack.py @@ -8,6 +8,7 @@ import yaml + def main(): parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--rules-directory", "-d", dest="rules_dir", default="rules", help="Directory to read rules from") @@ -20,24 +21,25 @@ curr_max_technique_count = 0 num_rules_used = 0 for rule_file in rule_files: - try: - rule = yaml.safe_load(open(rule_file, encoding="utf-8").read()) - except yaml.YAMLError: - sys.stderr.write("Ignoring rule " + rule_file + " (parsing failed)\n") - continue - if "tags" not in rule: - sys.stderr.write("Ignoring rule " + rule_file + " (no tags)\n") - continue - tags = rule["tags"] - for tag in tags: - if tag.lower().startswith("attack.t"): - technique_id = tag[len("attack."):].upper() - num_rules_used += 1 - if technique_id not in techniques_to_rules: - techniques_to_rules[technique_id] = [] - techniques_to_rules[technique_id].append(os.path.basename(rule_file)) - curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id])) - + with open(rule_file,encoding='utf-8') as f: + docs = yaml.load_all(f, Loader=yaml.FullLoader) + double = False + for rule in docs: + if "tags" not in rule : + if double == False : # Only 1 warning + sys.stderr.write("Ignoring rule " + rule_file + " (no tags)\n") + double = True # action globle no tag + continue + tags = rule["tags"] + double = True + for tag in tags: + if tag.lower().startswith("attack.t"): + technique_id = tag[len("attack."):].upper() + num_rules_used += 1 + if technique_id not in techniques_to_rules: + techniques_to_rules[technique_id] = [] + techniques_to_rules[technique_id].append(os.path.basename(rule_file)) + curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id])) scores = [] for technique in techniques_to_rules:
{"golden_diff": "diff --git a/tools/sigma/sigma2attack.py b/tools/sigma/sigma2attack.py\n--- a/tools/sigma/sigma2attack.py\n+++ b/tools/sigma/sigma2attack.py\n@@ -8,6 +8,7 @@\n \n import yaml\n \n+\n def main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--rules-directory\", \"-d\", dest=\"rules_dir\", default=\"rules\", help=\"Directory to read rules from\")\n@@ -20,24 +21,25 @@\n curr_max_technique_count = 0\n num_rules_used = 0\n for rule_file in rule_files:\n- try:\n- rule = yaml.safe_load(open(rule_file, encoding=\"utf-8\").read())\n- except yaml.YAMLError:\n- sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n- continue\n- if \"tags\" not in rule:\n- sys.stderr.write(\"Ignoring rule \" + rule_file + \" (no tags)\\n\")\n- continue\n- tags = rule[\"tags\"]\n- for tag in tags:\n- if tag.lower().startswith(\"attack.t\"):\n- technique_id = tag[len(\"attack.\"):].upper()\n- num_rules_used += 1\n- if technique_id not in techniques_to_rules:\n- techniques_to_rules[technique_id] = []\n- techniques_to_rules[technique_id].append(os.path.basename(rule_file))\n- curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))\n-\n+ with open(rule_file,encoding='utf-8') as f:\n+ docs = yaml.load_all(f, Loader=yaml.FullLoader)\n+ double = False\n+ for rule in docs:\n+ if \"tags\" not in rule :\n+ if double == False : # Only 1 warning\n+ sys.stderr.write(\"Ignoring rule \" + rule_file + \" (no tags)\\n\")\n+ double = True # action globle no tag\n+ continue\n+ tags = rule[\"tags\"]\n+ double = True\n+ for tag in tags:\n+ if tag.lower().startswith(\"attack.t\"):\n+ technique_id = tag[len(\"attack.\"):].upper()\n+ num_rules_used += 1\n+ if technique_id not in techniques_to_rules:\n+ techniques_to_rules[technique_id] = []\n+ techniques_to_rules[technique_id].append(os.path.basename(rule_file))\n+ curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))\n \n scores = []\n for technique in techniques_to_rules:\n", "issue": "sigma2attack does not support collections\nCollections parsing happens only in [`collection.py`](tools/sigma/parser/collection.py) it seems but [`sigma2attack`](/tools/sigma/sigma2attack.py#L24) uses good old `yaml.safe_load` on his own.\r\n\r\nthat leads to errors when parsing and rules being ignored\r\n\r\n```\r\n[snip]\r\nIgnoring rule rules\\windows\\other\\win_tool_psexec.yml (parsing failed)\r\nIgnoring rule rules\\windows\\powershell\\win_powershell_web_request.yml (parsing failed)\r\nIgnoring rule rules\\windows\\process_access\\sysmon_cmstp_execution.yml (parsing failed)\r\nIgnoring rule rules\\windows\\process_creation\\win_apt_chafer_mar18.yml (parsing failed)\r\nIgnoring rule rules\\windows\\process_creation\\win_apt_empiremonkey.yml (parsing failed)\r\nIgnoring rule rules\\windows\\process_creation\\win_apt_gallium.yml (parsing failed)\r\n[snip]\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport glob\nimport json\nimport os\nimport sys\n\nimport yaml\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--rules-directory\", \"-d\", dest=\"rules_dir\", default=\"rules\", help=\"Directory to read rules from\")\n parser.add_argument(\"--out-file\", \"-o\", dest=\"out_file\", default=\"heatmap.json\", help=\"File to write the JSON layer to\")\n parser.add_argument(\"--no-comment\", 
dest=\"no_comment\", action=\"store_true\", help=\"Don't store rule names in comments\")\n args = parser.parse_args()\n\n rule_files = glob.glob(os.path.join(args.rules_dir, \"**/*.yml\"), recursive=True)\n techniques_to_rules = {}\n curr_max_technique_count = 0\n num_rules_used = 0\n for rule_file in rule_files:\n try:\n rule = yaml.safe_load(open(rule_file, encoding=\"utf-8\").read())\n except yaml.YAMLError:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n continue\n if \"tags\" not in rule:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (no tags)\\n\")\n continue\n tags = rule[\"tags\"]\n for tag in tags:\n if tag.lower().startswith(\"attack.t\"):\n technique_id = tag[len(\"attack.\"):].upper()\n num_rules_used += 1\n if technique_id not in techniques_to_rules:\n techniques_to_rules[technique_id] = []\n techniques_to_rules[technique_id].append(os.path.basename(rule_file))\n curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))\n\n\n scores = []\n for technique in techniques_to_rules:\n entry = {\n \"techniqueID\": technique, \n \"score\": len(techniques_to_rules[technique]), \n }\n if not args.no_comment:\n entry[\"comment\"] = \"\\n\".join(techniques_to_rules[technique])\n\n scores.append(entry)\n\n output = {\n \"domain\": \"mitre-enterprise\",\n \"name\": \"Sigma rules heatmap\",\n \"gradient\": {\n \"colors\": [\n \"#ffffff\",\n \"#ff6666\"\n ],\n \"maxValue\": curr_max_technique_count,\n \"minValue\": 0\n },\n \"versions\": {\n \"navigator\": \"4.0\",\n \"layer\": \"4.0\"\n },\n \"techniques\": scores,\n }\n\n with open(args.out_file, \"w\") as f:\n f.write(json.dumps(output))\n print(\"[*] Layer file written in \" + args.out_file + \" (\" + str(num_rules_used) + \" rules)\")\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/sigma/sigma2attack.py"}]}
1,521
594
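
Note on the row above: the root cause is that `yaml.safe_load` refuses a stream containing more than one `---`-separated YAML document, which is exactly what a Sigma rule collection is. The golden diff switches to `yaml.load_all(f, Loader=yaml.FullLoader)`; the sketch below uses `yaml.safe_load_all`, the safe variant of that multi-document API. The two inline rules are made up for illustration.

```python
import yaml

collection = """\
title: First rule
tags: [attack.t1059]
---
title: Second rule
tags: [attack.t1047]
"""

# yaml.safe_load(collection) would raise a ComposerError on this input;
# safe_load_all() instead yields one parsed dict per document.
for doc in yaml.safe_load_all(collection):
    for tag in doc.get("tags", []):
        if tag.lower().startswith("attack.t"):
            print(doc["title"], "->", tag[len("attack."):].upper())
```

Running this prints `First rule -> T1059` and `Second rule -> T1047`, mirroring how the patched script walks each document in a collection instead of discarding the whole file.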