problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-9.01k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 465-11.3k) | num_tokens_prompt (int64 557-2.05k) | num_tokens_diff (int64 48-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_16517 | rasdani/github-patches | git_diff | ultrabug__py3status-113 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in the keyboard_layout module
`xbklayout` function should be `xkblayout`, i.e. "kb" instead of "bk". This typo appears 3 times in total.
The rest of the code uses "kb" so I assumed what I found was a typo and decided to report it since it already caught my eye.
</issue>
<code>
[start of py3status/modules/keyboard_layout.py]
1 # -*- coding: utf-8 -*-
2 """
3 Display the current keyboard layout.
4
5 Configuration parameters:
6 - cache_timeout: check for keyboard layout change every seconds
7
8 Requires:
9 - xkblayout-state
10 or
11 - setxkbmap
12
13 @author shadowprince
14 @license Eclipse Public License
15 """
16
17 from subprocess import check_output
18 from time import time
19 import shlex
20 import re
21
22 # colors of layouts, check your command's output to match keys
23 LANG_COLORS = {
24 'fr': '#268BD2', # solarized blue
25 'ru': '#F75252', # red
26 'ua': '#FCE94F', # yellow
27 'us': '#729FCF', # light blue
28 }
29
30 LAYOUT_RE = re.compile(r".*layout:\s*(\w+).*", flags=re.DOTALL)
31
32
33 def xbklayout():
34 """
35 check using xkblayout-state (preferred method)
36 """
37 return check_output(
38 ["xkblayout-state", "print", "%s"]
39 ).decode('utf-8')
40
41
42 def setxkbmap():
43 """
44 check using setxkbmap >= 1.3.0
45
46 Please read issue 33 for more information :
47 https://github.com/ultrabug/py3status/pull/33
48 """
49 out = check_output(shlex.split("setxkbmap -query")).decode("utf-8")
50
51 return re.match(LAYOUT_RE, out).group(1)
52
53
54 class Py3status:
55 """
56 """
57 # available configuration parameters
58 cache_timeout = 10
59 color = ''
60
61 def __init__(self):
62 """
63 find the best implementation to get the keyboard's layout
64 """
65 try:
66 xbklayout()
67 except:
68 self.command = setxkbmap
69 else:
70 self.command = xbklayout
71
72 def keyboard_layout(self, i3s_output_list, i3s_config):
73 response = {
74 'cached_until': time() + self.cache_timeout,
75 'full_text': ''
76 }
77
78 lang = self.command().strip()
79 lang_color = self.color if self.color else LANG_COLORS.get(lang)
80
81 response['full_text'] = lang or '??'
82 if lang_color:
83 response['color'] = lang_color
84
85 return response
86
87 if __name__ == "__main__":
88 """
89 Test this module by calling it directly.
90 """
91 from time import sleep
92 x = Py3status()
93 config = {
94 'color_good': '#00FF00',
95 'color_bad': '#FF0000',
96 }
97 while True:
98 print(x.keyboard_layout([], config))
99 sleep(1)
100
[end of py3status/modules/keyboard_layout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py3status/modules/keyboard_layout.py b/py3status/modules/keyboard_layout.py
--- a/py3status/modules/keyboard_layout.py
+++ b/py3status/modules/keyboard_layout.py
@@ -30,7 +30,7 @@
LAYOUT_RE = re.compile(r".*layout:\s*(\w+).*", flags=re.DOTALL)
-def xbklayout():
+def xkblayout():
"""
check using xkblayout-state (preferred method)
"""
@@ -63,11 +63,11 @@
find the best implementation to get the keyboard's layout
"""
try:
- xbklayout()
+ xkblayout()
except:
self.command = setxkbmap
else:
- self.command = xbklayout
+ self.command = xkblayout
def keyboard_layout(self, i3s_output_list, i3s_config):
response = {
| {"golden_diff": "diff --git a/py3status/modules/keyboard_layout.py b/py3status/modules/keyboard_layout.py\n--- a/py3status/modules/keyboard_layout.py\n+++ b/py3status/modules/keyboard_layout.py\n@@ -30,7 +30,7 @@\n LAYOUT_RE = re.compile(r\".*layout:\\s*(\\w+).*\", flags=re.DOTALL)\n \n \n-def xbklayout():\n+def xkblayout():\n \"\"\"\n check using xkblayout-state (preferred method)\n \"\"\"\n@@ -63,11 +63,11 @@\n find the best implementation to get the keyboard's layout\n \"\"\"\n try:\n- xbklayout()\n+ xkblayout()\n except:\n self.command = setxkbmap\n else:\n- self.command = xbklayout\n+ self.command = xkblayout\n \n def keyboard_layout(self, i3s_output_list, i3s_config):\n response = {\n", "issue": "Typo in the keyboard_layout module\n`xbklayout` function should be `xkblayout`, i.e. \"kb\" instead of \"bk\". This typo appears 3 times in total.\n\nThe rest of the code uses \"kb\" so I assumed what I found was a typo and decided to report it since it already caught my eye.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay the current keyboard layout.\n\nConfiguration parameters:\n - cache_timeout: check for keyboard layout change every seconds\n\nRequires:\n - xkblayout-state\n or\n - setxkbmap\n\n@author shadowprince\n@license Eclipse Public License\n\"\"\"\n\nfrom subprocess import check_output\nfrom time import time\nimport shlex\nimport re\n\n# colors of layouts, check your command's output to match keys\nLANG_COLORS = {\n 'fr': '#268BD2', # solarized blue\n 'ru': '#F75252', # red\n 'ua': '#FCE94F', # yellow\n 'us': '#729FCF', # light blue\n}\n\nLAYOUT_RE = re.compile(r\".*layout:\\s*(\\w+).*\", flags=re.DOTALL)\n\n\ndef xbklayout():\n \"\"\"\n check using xkblayout-state (preferred method)\n \"\"\"\n return check_output(\n [\"xkblayout-state\", \"print\", \"%s\"]\n ).decode('utf-8')\n\n\ndef setxkbmap():\n \"\"\"\n check using setxkbmap >= 1.3.0\n\n Please read issue 33 for more information :\n https://github.com/ultrabug/py3status/pull/33\n \"\"\"\n out = check_output(shlex.split(\"setxkbmap -query\")).decode(\"utf-8\")\n\n return re.match(LAYOUT_RE, out).group(1)\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 10\n color = ''\n\n def __init__(self):\n \"\"\"\n find the best implementation to get the keyboard's layout\n \"\"\"\n try:\n xbklayout()\n except:\n self.command = setxkbmap\n else:\n self.command = xbklayout\n\n def keyboard_layout(self, i3s_output_list, i3s_config):\n response = {\n 'cached_until': time() + self.cache_timeout,\n 'full_text': ''\n }\n\n lang = self.command().strip()\n lang_color = self.color if self.color else LANG_COLORS.get(lang)\n\n response['full_text'] = lang or '??'\n if lang_color:\n response['color'] = lang_color\n\n return response\n\nif __name__ == \"__main__\":\n \"\"\"\n Test this module by calling it directly.\n \"\"\"\n from time import sleep\n x = Py3status()\n config = {\n 'color_good': '#00FF00',\n 'color_bad': '#FF0000',\n }\n while True:\n print(x.keyboard_layout([], config))\n sleep(1)\n", "path": "py3status/modules/keyboard_layout.py"}]} | 1,410 | 209 |
gh_patches_debug_62154 | rasdani/github-patches | git_diff | Parsl__parsl-258 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`globals` should be an optional config field
Running over a config without `globals`, I see:
```
def make_rundir(config=None, path=None):
"""When a path has not been specified, make the run directory.
Creates a rundir with the following hierarchy:
./runinfo <- Home of all run directories
|----000
|----001 <- Directories for each run
| ....
|----NNN
Kwargs:
- path (str): String path to a specific run dir
Default : None.
"""
try:
if not path:
path = None
> elif config["globals"].get('runDir', None):
E KeyError: 'globals'
../dataflow/rundirs.py:25: KeyError
```
</issue>
<code>
[start of parsl/dataflow/rundirs.py]
1 import os
2 from glob import glob
3 import logging
4
5 logger = logging.getLogger(__name__)
6
7
8 def make_rundir(config=None, path=None):
9 """When a path has not been specified, make the run directory.
10
11 Creates a rundir with the following hierarchy:
12 ./runinfo <- Home of all run directories
13 |----000
14 |----001 <- Directories for each run
15 | ....
16 |----NNN
17
18 Kwargs:
19 - path (str): String path to a specific run dir
20 Default : None.
21 """
22 try:
23 if not path:
24 path = None
25 elif config["globals"].get('runDir', None):
26 path = config["globals"]['runDir']
27
28 if not path:
29 path = "./runinfo"
30
31 if not os.path.exists(path):
32 os.makedirs(path)
33
34 prev_rundirs = glob(os.path.join(path, "[0-9]*"))
35
36 current_rundir = os.path.join(path, '000')
37
38 if prev_rundirs:
39 # Since we globbed on files named as 0-9
40 x = sorted([int(os.path.basename(x)) for x in prev_rundirs])[-1]
41 current_rundir = os.path.join(path, '{0:03}'.format(x + 1))
42
43 os.makedirs(current_rundir)
44 logger.debug("Parsl run initializing in rundir:{0}".format(current_rundir))
45 return os.path.abspath(current_rundir)
46
47 except Exception as e:
48 logger.error("Failed to create a run directory")
49 logger.error("Error: {0}".format(e))
50 exit(-1)
51
[end of parsl/dataflow/rundirs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/dataflow/rundirs.py b/parsl/dataflow/rundirs.py
--- a/parsl/dataflow/rundirs.py
+++ b/parsl/dataflow/rundirs.py
@@ -22,7 +22,7 @@
try:
if not path:
path = None
- elif config["globals"].get('runDir', None):
+ elif config.get("globals", {}).get('runDir'):
path = config["globals"]['runDir']
if not path:
| {"golden_diff": "diff --git a/parsl/dataflow/rundirs.py b/parsl/dataflow/rundirs.py\n--- a/parsl/dataflow/rundirs.py\n+++ b/parsl/dataflow/rundirs.py\n@@ -22,7 +22,7 @@\n try:\n if not path:\n path = None\n- elif config[\"globals\"].get('runDir', None):\n+ elif config.get(\"globals\", {}).get('runDir'):\n path = config[\"globals\"]['runDir']\n \n if not path:\n", "issue": "`globals` should be an optional config field\nRunning over a config without `globals`, I see:\r\n```\r\n def make_rundir(config=None, path=None):\r\n \"\"\"When a path has not been specified, make the run directory.\r\n\r\n Creates a rundir with the following hierarchy:\r\n ./runinfo <- Home of all run directories\r\n |----000\r\n |----001 <- Directories for each run\r\n | ....\r\n |----NNN\r\n\r\n Kwargs:\r\n - path (str): String path to a specific run dir\r\n Default : None.\r\n \"\"\"\r\n try:\r\n if not path:\r\n path = None\r\n> elif config[\"globals\"].get('runDir', None):\r\nE KeyError: 'globals'\r\n\r\n../dataflow/rundirs.py:25: KeyError\r\n```\n", "before_files": [{"content": "import os\nfrom glob import glob\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_rundir(config=None, path=None):\n \"\"\"When a path has not been specified, make the run directory.\n\n Creates a rundir with the following hierarchy:\n ./runinfo <- Home of all run directories\n |----000\n |----001 <- Directories for each run\n | ....\n |----NNN\n\n Kwargs:\n - path (str): String path to a specific run dir\n Default : None.\n \"\"\"\n try:\n if not path:\n path = None\n elif config[\"globals\"].get('runDir', None):\n path = config[\"globals\"]['runDir']\n\n if not path:\n path = \"./runinfo\"\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n prev_rundirs = glob(os.path.join(path, \"[0-9]*\"))\n\n current_rundir = os.path.join(path, '000')\n\n if prev_rundirs:\n # Since we globbed on files named as 0-9\n x = sorted([int(os.path.basename(x)) for x in prev_rundirs])[-1]\n current_rundir = os.path.join(path, '{0:03}'.format(x + 1))\n\n os.makedirs(current_rundir)\n logger.debug(\"Parsl run initializing in rundir:{0}\".format(current_rundir))\n return os.path.abspath(current_rundir)\n\n except Exception as e:\n logger.error(\"Failed to create a run directory\")\n logger.error(\"Error: {0}\".format(e))\n exit(-1)\n", "path": "parsl/dataflow/rundirs.py"}]} | 1,178 | 119 |
gh_patches_debug_15618 | rasdani/github-patches | git_diff | opsdroid__opsdroid-930 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add time to crontab log message
When the cron parser is triggered it emits a debug log saying `Running crontab skills`.
It would be more useful if it included the time that opsdroid thinks it is. This would help when trying to debug issues where skills are triggered at the wrong time due to opsdroid having the wrong timezone.
The line which needs updating is [here](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/parsers/crontab.py#L17).
</issue>
<code>
[start of opsdroid/parsers/crontab.py]
1 """A helper function for parsing and executing crontab skills."""
2
3 import asyncio
4 import logging
5
6 import arrow
7 import pycron
8
9
10 _LOGGER = logging.getLogger(__name__)
11
12
13 async def parse_crontab(opsdroid):
14 """Parse all crontab skills against the current time."""
15 while opsdroid.eventloop.is_running():
16 await asyncio.sleep(60 - arrow.now().time().second)
17 _LOGGER.debug(_("Running crontab skills"))
18 for skill in opsdroid.skills:
19 for matcher in skill.matchers:
20 if "crontab" in matcher:
21 if matcher["timezone"] is not None:
22 timezone = matcher["timezone"]
23 else:
24 timezone = opsdroid.config.get("timezone", "UTC")
25 if pycron.is_now(matcher["crontab"],
26 arrow.now(tz=timezone)):
27 await opsdroid.run_skill(skill,
28 skill.config,
29 None)
30
[end of opsdroid/parsers/crontab.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py
--- a/opsdroid/parsers/crontab.py
+++ b/opsdroid/parsers/crontab.py
@@ -1,5 +1,5 @@
"""A helper function for parsing and executing crontab skills."""
-
+import time
import asyncio
import logging
@@ -14,7 +14,7 @@
"""Parse all crontab skills against the current time."""
while opsdroid.eventloop.is_running():
await asyncio.sleep(60 - arrow.now().time().second)
- _LOGGER.debug(_("Running crontab skills"))
+ _LOGGER.debug(_("Running crontab skills at %s "), time.asctime())
for skill in opsdroid.skills:
for matcher in skill.matchers:
if "crontab" in matcher:
| {"golden_diff": "diff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py\n--- a/opsdroid/parsers/crontab.py\n+++ b/opsdroid/parsers/crontab.py\n@@ -1,5 +1,5 @@\n \"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n-\n+import time\n import asyncio\n import logging\n \n@@ -14,7 +14,7 @@\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - arrow.now().time().second)\n- _LOGGER.debug(_(\"Running crontab skills\"))\n+ _LOGGER.debug(_(\"Running crontab skills at %s \"), time.asctime())\n for skill in opsdroid.skills:\n for matcher in skill.matchers:\n if \"crontab\" in matcher:\n", "issue": "Add time to crontab log message\nWhen the cron parser is triggered it emits a debug log saying `Running crontab skills`.\r\n\r\nIt would be more useful if it included the time that opsdroid thinks it is. This would help when trying to debug issues where skills are triggered at the wrong time due to opsdroid having the wrong timezone.\r\n\r\nThe line which needs updating is [here](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/parsers/crontab.py#L17). \n", "before_files": [{"content": "\"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n\nimport asyncio\nimport logging\n\nimport arrow\nimport pycron\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_crontab(opsdroid):\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - arrow.now().time().second)\n _LOGGER.debug(_(\"Running crontab skills\"))\n for skill in opsdroid.skills:\n for matcher in skill.matchers:\n if \"crontab\" in matcher:\n if matcher[\"timezone\"] is not None:\n timezone = matcher[\"timezone\"]\n else:\n timezone = opsdroid.config.get(\"timezone\", \"UTC\")\n if pycron.is_now(matcher[\"crontab\"],\n arrow.now(tz=timezone)):\n await opsdroid.run_skill(skill,\n skill.config,\n None)\n", "path": "opsdroid/parsers/crontab.py"}]} | 907 | 195 |
gh_patches_debug_8828 | rasdani/github-patches | git_diff | mozmeao__snippets-service-1238 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Just published jobs with global limits get prematurely completed due to missing metrics.
</issue>
<code>
[start of snippets/base/management/commands/update_jobs.py]
1 from datetime import datetime, timedelta
2
3 from django.contrib.auth import get_user_model
4 from django.core.management.base import BaseCommand
5 from django.db import transaction
6 from django.db.models import F, Q
7
8 from snippets.base.models import Job
9
10
11 class Command(BaseCommand):
12 args = "(no args)"
13 help = "Update Jobs"
14
15 @transaction.atomic
16 def handle(self, *args, **options):
17 now = datetime.utcnow()
18 user = get_user_model().objects.get_or_create(username='snippets_bot')[0]
19 count_total_completed = 0
20
21 # Publish Scheduled Jobs with `publish_start` before now or without
22 # publish_start.
23 jobs = Job.objects.filter(status=Job.SCHEDULED).filter(
24 Q(publish_start__lte=now) | Q(publish_start=None)
25 )
26 count_published = jobs.count()
27 for job in jobs:
28 job.change_status(
29 status=Job.PUBLISHED,
30 user=user,
31 reason='Published start date reached.',
32 )
33
34 # Disable Published Jobs with `publish_end` before now.
35 jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)
36 count_publication_end = jobs.count()
37 count_total_completed += count_publication_end
38
39 for job in jobs:
40 job.change_status(
41 status=Job.COMPLETED,
42 user=user,
43 reason='Publication end date reached.',
44 )
45
46 # Disable Jobs that reached Impression, Click or Block limits.
47 count_limit = {}
48 for limit in ['impressions', 'clicks', 'blocks']:
49 jobs = (Job.objects
50 .filter(status=Job.PUBLISHED)
51 .exclude(**{f'limit_{limit}': 0})
52 .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))
53 for job in jobs:
54 job.change_status(
55 status=Job.COMPLETED,
56 user=user,
57 reason=f'Limit reached: {limit}.',
58 )
59
60 count_limit[limit] = jobs.count()
61 count_total_completed += count_limit[limit]
62
63 # Disable Jobs that have Impression, Click or Block limits but don't
64 # have metrics data for at least 24h. This is to handle cases where the
65 # Metrics Pipeline is broken.
66 yesterday = datetime.utcnow() - timedelta(days=1)
67 jobs = (Job.objects
68 .filter(status=Job.PUBLISHED)
69 .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)
70 .filter(metric_last_update__lt=yesterday))
71 for job in jobs:
72 job.change_status(
73 status=Job.COMPLETED,
74 user=user,
75 reason=f'Premature termination due to missing metrics.',
76 )
77 count_premature_termination = jobs.count()
78 count_total_completed += count_premature_termination
79
80 count_running = Job.objects.filter(status=Job.PUBLISHED).count()
81
82 self.stdout.write(
83 f'Jobs Published: {count_published}\n'
84 f'Jobs Completed: {count_total_completed}\n'
85 f' - Reached Publication End Date: {count_publication_end}\n'
86 f' - Reached Impressions Limit: {count_limit["impressions"]}\n'
87 f' - Reached Clicks Limit: {count_limit["clicks"]}\n'
88 f' - Reached Blocks Limit: {count_limit["blocks"]}\n'
89 f' - Premature Termination due to missing metrics: {count_premature_termination}\n'
90 f'Total Jobs Running: {count_running}\n'
91 )
92
[end of snippets/base/management/commands/update_jobs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/snippets/base/management/commands/update_jobs.py b/snippets/base/management/commands/update_jobs.py
--- a/snippets/base/management/commands/update_jobs.py
+++ b/snippets/base/management/commands/update_jobs.py
@@ -67,6 +67,8 @@
jobs = (Job.objects
.filter(status=Job.PUBLISHED)
.exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)
+ # Exclude Jobs with limits which haven't been updated once yet.
+ .exclude(metric_last_update='1970-01-01')
.filter(metric_last_update__lt=yesterday))
for job in jobs:
job.change_status(
| {"golden_diff": "diff --git a/snippets/base/management/commands/update_jobs.py b/snippets/base/management/commands/update_jobs.py\n--- a/snippets/base/management/commands/update_jobs.py\n+++ b/snippets/base/management/commands/update_jobs.py\n@@ -67,6 +67,8 @@\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n+ # Exclude Jobs with limits which haven't been updated once yet.\n+ .exclude(metric_last_update='1970-01-01')\n .filter(metric_last_update__lt=yesterday))\n for job in jobs:\n job.change_status(\n", "issue": "Just published jobs with global limits get prematurely completed due to missing metrics.\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.db.models import F, Q\n\nfrom snippets.base.models import Job\n\n\nclass Command(BaseCommand):\n args = \"(no args)\"\n help = \"Update Jobs\"\n\n @transaction.atomic\n def handle(self, *args, **options):\n now = datetime.utcnow()\n user = get_user_model().objects.get_or_create(username='snippets_bot')[0]\n count_total_completed = 0\n\n # Publish Scheduled Jobs with `publish_start` before now or without\n # publish_start.\n jobs = Job.objects.filter(status=Job.SCHEDULED).filter(\n Q(publish_start__lte=now) | Q(publish_start=None)\n )\n count_published = jobs.count()\n for job in jobs:\n job.change_status(\n status=Job.PUBLISHED,\n user=user,\n reason='Published start date reached.',\n )\n\n # Disable Published Jobs with `publish_end` before now.\n jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)\n count_publication_end = jobs.count()\n count_total_completed += count_publication_end\n\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason='Publication end date reached.',\n )\n\n # Disable Jobs that reached Impression, Click or Block limits.\n count_limit = {}\n for limit in ['impressions', 'clicks', 'blocks']:\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(**{f'limit_{limit}': 0})\n .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Limit reached: {limit}.',\n )\n\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n\n # Disable Jobs that have Impression, Click or Block limits but don't\n # have metrics data for at least 24h. 
This is to handle cases where the\n # Metrics Pipeline is broken.\n yesterday = datetime.utcnow() - timedelta(days=1)\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n .filter(metric_last_update__lt=yesterday))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Premature termination due to missing metrics.',\n )\n count_premature_termination = jobs.count()\n count_total_completed += count_premature_termination\n\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n\n self.stdout.write(\n f'Jobs Published: {count_published}\\n'\n f'Jobs Completed: {count_total_completed}\\n'\n f' - Reached Publication End Date: {count_publication_end}\\n'\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n f' - Premature Termination due to missing metrics: {count_premature_termination}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "path": "snippets/base/management/commands/update_jobs.py"}]} | 1,508 | 158 |
gh_patches_debug_8533 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-2112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cherrypy >= 6.1.0 fails tests
From the cherrypy [changelog](https://github.com/cherrypy/cherrypy/blob/master/CHANGES.txt):
```
6.1.0
-----
* Combined wsgiserver2 and wsgiserver3 modules into a
single module, ``cherrypy.wsgiserver``.
```
</issue>
<code>
[start of PyInstaller/hooks/hook-cherrypy.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2015-2016, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 #
10 # CherryPy is a minimalist Python web framework.
11 #
12 # http://www.cherrypy.org/
13 #
14 # Tested with CherryPy 5.0.1
15
16
17 from PyInstaller.utils.hooks import collect_submodules
18
19
20 hiddenimports = collect_submodules('cherrypy.wsgiserver')
[end of PyInstaller/hooks/hook-cherrypy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-cherrypy.py b/PyInstaller/hooks/hook-cherrypy.py
deleted file mode 100644
--- a/PyInstaller/hooks/hook-cherrypy.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#-----------------------------------------------------------------------------
-# Copyright (c) 2015-2016, PyInstaller Development Team.
-#
-# Distributed under the terms of the GNU General Public License with exception
-# for distributing bootloader.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-#
-# CherryPy is a minimalist Python web framework.
-#
-# http://www.cherrypy.org/
-#
-# Tested with CherryPy 5.0.1
-
-
-from PyInstaller.utils.hooks import collect_submodules
-
-
-hiddenimports = collect_submodules('cherrypy.wsgiserver')
\ No newline at end of file
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-cherrypy.py b/PyInstaller/hooks/hook-cherrypy.py\ndeleted file mode 100644\n--- a/PyInstaller/hooks/hook-cherrypy.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-#-----------------------------------------------------------------------------\n-# Copyright (c) 2015-2016, PyInstaller Development Team.\n-#\n-# Distributed under the terms of the GNU General Public License with exception\n-# for distributing bootloader.\n-#\n-# The full license is in the file COPYING.txt, distributed with this software.\n-#-----------------------------------------------------------------------------\n-#\n-# CherryPy is a minimalist Python web framework.\n-#\n-# http://www.cherrypy.org/\n-#\n-# Tested with CherryPy 5.0.1\n-\n-\n-from PyInstaller.utils.hooks import collect_submodules\n-\n-\n-hiddenimports = collect_submodules('cherrypy.wsgiserver')\n\\ No newline at end of file\n", "issue": "cherrypy >= 6.1.0 fails tests\nFrom the cherrypy [changelog](https://github.com/cherrypy/cherrypy/blob/master/CHANGES.txt):\n\n```\n6.1.0\n-----\n\n* Combined wsgiserver2 and wsgiserver3 modules into a\n single module, ``cherrypy.wsgiserver``.\n```\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2015-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n#\n# CherryPy is a minimalist Python web framework.\n#\n# http://www.cherrypy.org/\n#\n# Tested with CherryPy 5.0.1\n\n\nfrom PyInstaller.utils.hooks import collect_submodules\n\n\nhiddenimports = collect_submodules('cherrypy.wsgiserver')", "path": "PyInstaller/hooks/hook-cherrypy.py"}]} | 783 | 215 |
gh_patches_debug_25371 | rasdani/github-patches | git_diff | vispy__vispy-463 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug when running Vispy offline for the first time
There appears to be a bug when you run Vispy offline and you don't have the freetype thing already downloaded. Not completely sure about the exact conditions responsible for the crash, require some testing...
</issue>
<code>
[start of vispy/util/fonts/_freetype.py]
1 # -*- coding: utf-8 -*-
2 # -----------------------------------------------------------------------------
3 # Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
4 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
5 # -----------------------------------------------------------------------------
6
7 # Use freetype to get glyph bitmaps
8
9 import sys
10 import numpy as np
11
12 from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,
13 FT_LOAD_NO_AUTOHINT, Face)
14
15
16 # Convert face to filename
17 from ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename
18 if sys.platform.startswith('linux'):
19 from ...ext.fontconfig import find_font
20 elif sys.platform.startswith('win'):
21 from ._win32 import find_font # noqa, analysis:ignore
22 else:
23 raise NotImplementedError
24
25 _font_dict = {}
26
27
28 def _load_font(face, bold, italic):
29 key = '%s-%s-%s' % (face, bold, italic)
30 if key in _font_dict:
31 return _font_dict[key]
32 if face in _vispy_fonts:
33 fname = _get_vispy_font_filename(face, bold, italic)
34 else:
35 fname = find_font(face, bold, italic)
36 font = Face(fname)
37 _font_dict[key] = font
38 return font
39
40
41 def _load_glyph(f, char, glyphs_dict):
42 """Load glyph from font into dict"""
43 flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT
44 face = _load_font(f['face'], f['bold'], f['italic'])
45 face.set_char_size(f['size'] * 64)
46 # get the character of interest
47 face.load_char(char, flags)
48 bitmap = face.glyph.bitmap
49 width = face.glyph.bitmap.width
50 height = face.glyph.bitmap.rows
51 bitmap = np.array(bitmap.buffer)
52 w0 = bitmap.size // height if bitmap.size > 0 else 0
53 bitmap.shape = (height, w0)
54 bitmap = bitmap[:, :width].astype(np.ubyte)
55
56 left = face.glyph.bitmap_left
57 top = face.glyph.bitmap_top
58 advance = face.glyph.advance.x / 64.
59 glyph = dict(char=char, offset=(left, top), bitmap=bitmap,
60 advance=advance, kerning={})
61 glyphs_dict[char] = glyph
62 # Generate kerning
63 for other_char, other_glyph in glyphs_dict.items():
64 kerning = face.get_kerning(other_char, char)
65 glyph['kerning'][other_char] = kerning.x / 64.
66 kerning = face.get_kerning(char, other_char)
67 other_glyph['kerning'][char] = kerning.x / 64.
68
[end of vispy/util/fonts/_freetype.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vispy/util/fonts/_freetype.py b/vispy/util/fonts/_freetype.py
--- a/vispy/util/fonts/_freetype.py
+++ b/vispy/util/fonts/_freetype.py
@@ -9,9 +9,6 @@
import sys
import numpy as np
-from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,
- FT_LOAD_NO_AUTOHINT, Face)
-
# Convert face to filename
from ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename
@@ -25,7 +22,11 @@
_font_dict = {}
+# Nest freetype imports in case someone doesn't have freetype on their system
+# and isn't using fonts (Windows)
+
def _load_font(face, bold, italic):
+ from ...ext.freetype import Face
key = '%s-%s-%s' % (face, bold, italic)
if key in _font_dict:
return _font_dict[key]
@@ -40,6 +41,8 @@
def _load_glyph(f, char, glyphs_dict):
"""Load glyph from font into dict"""
+ from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,
+ FT_LOAD_NO_AUTOHINT)
flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT
face = _load_font(f['face'], f['bold'], f['italic'])
face.set_char_size(f['size'] * 64)
| {"golden_diff": "diff --git a/vispy/util/fonts/_freetype.py b/vispy/util/fonts/_freetype.py\n--- a/vispy/util/fonts/_freetype.py\n+++ b/vispy/util/fonts/_freetype.py\n@@ -9,9 +9,6 @@\n import sys\n import numpy as np\n \n-from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n- FT_LOAD_NO_AUTOHINT, Face)\n-\n \n # Convert face to filename\n from ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename\n@@ -25,7 +22,11 @@\n _font_dict = {}\n \n \n+# Nest freetype imports in case someone doesn't have freetype on their system\n+# and isn't using fonts (Windows)\n+\n def _load_font(face, bold, italic):\n+ from ...ext.freetype import Face\n key = '%s-%s-%s' % (face, bold, italic)\n if key in _font_dict:\n return _font_dict[key]\n@@ -40,6 +41,8 @@\n \n def _load_glyph(f, char, glyphs_dict):\n \"\"\"Load glyph from font into dict\"\"\"\n+ from ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n+ FT_LOAD_NO_AUTOHINT)\n flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT\n face = _load_font(f['face'], f['bold'], f['italic'])\n face.set_char_size(f['size'] * 64)\n", "issue": "Bug when running Vispy offline for the first time\nThere appears to be a bug when you run Vispy offline and you don't have the freetype thing already downloaded. Not completely sure about the exact conditions responsible for the crash, require some testing...\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\n# Use freetype to get glyph bitmaps\n\nimport sys\nimport numpy as np\n\nfrom ...ext.freetype import (FT_LOAD_RENDER, FT_LOAD_NO_HINTING,\n FT_LOAD_NO_AUTOHINT, Face)\n\n\n# Convert face to filename\nfrom ._vispy_fonts import _vispy_fonts, _get_vispy_font_filename\nif sys.platform.startswith('linux'):\n from ...ext.fontconfig import find_font\nelif sys.platform.startswith('win'):\n from ._win32 import find_font # noqa, analysis:ignore\nelse:\n raise NotImplementedError\n\n_font_dict = {}\n\n\ndef _load_font(face, bold, italic):\n key = '%s-%s-%s' % (face, bold, italic)\n if key in _font_dict:\n return _font_dict[key]\n if face in _vispy_fonts:\n fname = _get_vispy_font_filename(face, bold, italic)\n else:\n fname = find_font(face, bold, italic)\n font = Face(fname)\n _font_dict[key] = font\n return font\n\n\ndef _load_glyph(f, char, glyphs_dict):\n \"\"\"Load glyph from font into dict\"\"\"\n flags = FT_LOAD_RENDER | FT_LOAD_NO_HINTING | FT_LOAD_NO_AUTOHINT\n face = _load_font(f['face'], f['bold'], f['italic'])\n face.set_char_size(f['size'] * 64)\n # get the character of interest\n face.load_char(char, flags)\n bitmap = face.glyph.bitmap\n width = face.glyph.bitmap.width\n height = face.glyph.bitmap.rows\n bitmap = np.array(bitmap.buffer)\n w0 = bitmap.size // height if bitmap.size > 0 else 0\n bitmap.shape = (height, w0)\n bitmap = bitmap[:, :width].astype(np.ubyte)\n\n left = face.glyph.bitmap_left\n top = face.glyph.bitmap_top\n advance = face.glyph.advance.x / 64.\n glyph = dict(char=char, offset=(left, top), bitmap=bitmap,\n advance=advance, kerning={})\n glyphs_dict[char] = glyph\n # Generate kerning\n for other_char, other_glyph in glyphs_dict.items():\n kerning = face.get_kerning(other_char, char)\n glyph['kerning'][other_char] = kerning.x / 64.\n kerning = 
face.get_kerning(char, other_char)\n other_glyph['kerning'][char] = kerning.x / 64.\n", "path": "vispy/util/fonts/_freetype.py"}]} | 1,317 | 340 |
gh_patches_debug_16503 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1061 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dockerfile scan fails when in directory and used -f
**Describe the bug**
When running directory scan checkov shows Dockerfile failed checks. When scanning file no errors are shown.
**To Reproduce**
Create Dockerfile in directory `test` with content:
```
FROM debian:buster
ENV CHECKOV_VERSION 1.0.775
RUN export DEBIAN_FRONTEND=noninteractive && \
apt-get -y update && \
apt-get -y --no-install-recommends install wget unzip ca-certificates git python3 python3-pip python3-setuptools python3-wheel && \
pip3 install -U checkov=="${CHECKOV_VERSION}"
```
`checkov -f test/Dockerfile` won't show errors
`checkov -d test` will show error
**Expected behavior**
Show error in both cases.
**Screenshots**
<img width="892" alt="Screenshot 2021-04-10 at 09 39 21" src="https://user-images.githubusercontent.com/672767/114262507-a54dde80-99e0-11eb-9e9e-3e3f5d2d2a7f.png">
**Desktop (please complete the following information):**
- OS: MacOS 11.2.3
- Python: 3.9.4
- Checkov Version 2.0.27
</issue>
<code>
[start of checkov/dockerfile/runner.py]
1 import logging
2 import os
3 from dockerfile_parse.constants import DOCKERFILE_FILENAME
4
5 from checkov.common.output.record import Record
6 from checkov.common.output.report import Report
7 from checkov.common.runners.base_runner import BaseRunner, filter_ignored_directories
8 from checkov.dockerfile.parser import parse, collect_skipped_checks
9 from checkov.dockerfile.registry import registry
10 from checkov.runner_filter import RunnerFilter
11
12 DOCKER_FILE_MASK = [DOCKERFILE_FILENAME]
13
14
15 class Runner(BaseRunner):
16 check_type = "dockerfile"
17
18 def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
19 collect_skip_comments=True):
20 report = Report(self.check_type)
21 definitions = {}
22 definitions_raw = {}
23 parsing_errors = {}
24 files_list = []
25 if external_checks_dir:
26 for directory in external_checks_dir:
27 registry.load_external_checks(directory)
28
29 if files:
30 for file in files:
31 if file in DOCKER_FILE_MASK:
32 (definitions[file], definitions_raw[file]) = parse(file)
33
34 if root_folder:
35 for root, d_names, f_names in os.walk(root_folder):
36 filter_ignored_directories(d_names)
37 for file in f_names:
38 if file in DOCKER_FILE_MASK:
39 files_list.append(os.path.join(root, file))
40
41 for file in files_list:
42 relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'
43 try:
44 (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)
45 except TypeError:
46 logging.info(f'Dockerfile skipping {file} as it is not a valid dockerfile template')
47
48 for docker_file_path in definitions.keys():
49
50 # There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
51 # or there will be no leading slash; root_folder will always be none.
52 # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).
53 # The goal here is simply to get a valid path to the file (which docker_file_path does not always give).
54 if docker_file_path[0] == '/':
55 path_to_convert = (root_folder + docker_file_path) if root_folder else docker_file_path
56 else:
57 path_to_convert = (os.path.join(root_folder, docker_file_path)) if root_folder else docker_file_path
58
59 file_abs_path = os.path.abspath(path_to_convert)
60 skipped_checks = collect_skipped_checks(definitions[docker_file_path])
61 instructions = definitions[docker_file_path]
62
63 results = registry.scan(docker_file_path, instructions, skipped_checks,
64 runner_filter)
65 for check, check_result in results.items():
66 result_configuration = check_result['results_configuration']
67 startline = 0
68 endline = 0
69 result_instruction = ""
70 if result_configuration:
71 startline = result_configuration['startline']
72 endline = result_configuration['endline']
73 result_instruction = result_configuration["instruction"]
74
75 codeblock = []
76 self.calc_record_codeblock(codeblock, definitions_raw, docker_file_path, endline, startline)
77 record = Record(check_id=check.id, check_name=check.name, check_result=check_result,
78 code_block=codeblock,
79 file_path=docker_file_path,
80 file_line_range=[startline,
81 endline],
82 resource="{}.{}".format(docker_file_path,
83 result_instruction,
84 startline),
85 evaluations=None, check_class=check.__class__.__module__,
86 file_abs_path=file_abs_path, entity_tags=None)
87 report.add_record(record=record)
88
89 return report
90
91
92 def calc_record_codeblock(self, codeblock, definitions_raw, docker_file_path, endline, startline):
93 for line in range(startline, endline + 1):
94 codeblock.append((line, definitions_raw[docker_file_path][line]))
95
[end of checkov/dockerfile/runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/dockerfile/runner.py b/checkov/dockerfile/runner.py
--- a/checkov/dockerfile/runner.py
+++ b/checkov/dockerfile/runner.py
@@ -15,7 +15,7 @@
class Runner(BaseRunner):
check_type = "dockerfile"
- def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
+ def run(self, root_folder=None, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
collect_skip_comments=True):
report = Report(self.check_type)
definitions = {}
@@ -28,7 +28,7 @@
if files:
for file in files:
- if file in DOCKER_FILE_MASK:
+ if os.path.basename(file) in DOCKER_FILE_MASK:
(definitions[file], definitions_raw[file]) = parse(file)
if root_folder:
| {"golden_diff": "diff --git a/checkov/dockerfile/runner.py b/checkov/dockerfile/runner.py\n--- a/checkov/dockerfile/runner.py\n+++ b/checkov/dockerfile/runner.py\n@@ -15,7 +15,7 @@\n class Runner(BaseRunner):\n check_type = \"dockerfile\"\n \n- def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n+ def run(self, root_folder=None, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n collect_skip_comments=True):\n report = Report(self.check_type)\n definitions = {}\n@@ -28,7 +28,7 @@\n \n if files:\n for file in files:\n- if file in DOCKER_FILE_MASK:\n+ if os.path.basename(file) in DOCKER_FILE_MASK:\n (definitions[file], definitions_raw[file]) = parse(file)\n \n if root_folder:\n", "issue": "Dockerfile scan fails when in directory and used -f\n**Describe the bug**\r\nWhen running directory scan checkov shows Dockerfile failed checks. When scanning file no errors are shown.\r\n\r\n**To Reproduce**\r\nCreate Dockerfile in directory `test` with content:\r\n```\r\nFROM debian:buster\r\n\r\nENV CHECKOV_VERSION 1.0.775\r\n\r\nRUN export DEBIAN_FRONTEND=noninteractive && \\\r\n apt-get -y update && \\\r\n apt-get -y --no-install-recommends install wget unzip ca-certificates git python3 python3-pip python3-setuptools python3-wheel && \\\r\n pip3 install -U checkov==\"${CHECKOV_VERSION}\"\r\n```\r\n\r\n`checkov -f test/Dockerfile` won't show errors\r\n`checkov -d test` will show error\r\n\r\n**Expected behavior**\r\nShow error in both cases.\r\n\r\n**Screenshots**\r\n<img width=\"892\" alt=\"Screenshot 2021-04-10 at 09 39 21\" src=\"https://user-images.githubusercontent.com/672767/114262507-a54dde80-99e0-11eb-9e9e-3e3f5d2d2a7f.png\">\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 11.2.3\r\n - Python: 3.9.4\r\n - Checkov Version 2.0.27\r\n\n", "before_files": [{"content": "import logging\nimport os\nfrom dockerfile_parse.constants import DOCKERFILE_FILENAME\n\nfrom checkov.common.output.record import Record\nfrom checkov.common.output.report import Report\nfrom checkov.common.runners.base_runner import BaseRunner, filter_ignored_directories\nfrom checkov.dockerfile.parser import parse, collect_skipped_checks\nfrom checkov.dockerfile.registry import registry\nfrom checkov.runner_filter import RunnerFilter\n\nDOCKER_FILE_MASK = [DOCKERFILE_FILENAME]\n\n\nclass Runner(BaseRunner):\n check_type = \"dockerfile\"\n\n def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n collect_skip_comments=True):\n report = Report(self.check_type)\n definitions = {}\n definitions_raw = {}\n parsing_errors = {}\n files_list = []\n if external_checks_dir:\n for directory in external_checks_dir:\n registry.load_external_checks(directory)\n\n if files:\n for file in files:\n if file in DOCKER_FILE_MASK:\n (definitions[file], definitions_raw[file]) = parse(file)\n\n if root_folder:\n for root, d_names, f_names in os.walk(root_folder):\n filter_ignored_directories(d_names)\n for file in f_names:\n if file in DOCKER_FILE_MASK:\n files_list.append(os.path.join(root, file))\n\n for file in files_list:\n relative_file_path = f'/{os.path.relpath(file, os.path.commonprefix((root_folder, file)))}'\n try:\n (definitions[relative_file_path], definitions_raw[relative_file_path]) = parse(file)\n except TypeError:\n logging.info(f'Dockerfile skipping {file} as it is not a valid dockerfile template')\n\n for docker_file_path in definitions.keys():\n\n # There are a few cases here. 
If -f was used, there could be a leading / because it's an absolute path,\n # or there will be no leading slash; root_folder will always be none.\n # If -d is used, root_folder will be the value given, and -f will start with a / (hardcoded above).\n # The goal here is simply to get a valid path to the file (which docker_file_path does not always give).\n if docker_file_path[0] == '/':\n path_to_convert = (root_folder + docker_file_path) if root_folder else docker_file_path\n else:\n path_to_convert = (os.path.join(root_folder, docker_file_path)) if root_folder else docker_file_path\n\n file_abs_path = os.path.abspath(path_to_convert)\n skipped_checks = collect_skipped_checks(definitions[docker_file_path])\n instructions = definitions[docker_file_path]\n\n results = registry.scan(docker_file_path, instructions, skipped_checks,\n runner_filter)\n for check, check_result in results.items():\n result_configuration = check_result['results_configuration']\n startline = 0\n endline = 0\n result_instruction = \"\"\n if result_configuration:\n startline = result_configuration['startline']\n endline = result_configuration['endline']\n result_instruction = result_configuration[\"instruction\"]\n\n codeblock = []\n self.calc_record_codeblock(codeblock, definitions_raw, docker_file_path, endline, startline)\n record = Record(check_id=check.id, check_name=check.name, check_result=check_result,\n code_block=codeblock,\n file_path=docker_file_path,\n file_line_range=[startline,\n endline],\n resource=\"{}.{}\".format(docker_file_path,\n result_instruction,\n startline),\n evaluations=None, check_class=check.__class__.__module__,\n file_abs_path=file_abs_path, entity_tags=None)\n report.add_record(record=record)\n\n return report\n\n\n def calc_record_codeblock(self, codeblock, definitions_raw, docker_file_path, endline, startline):\n for line in range(startline, endline + 1):\n codeblock.append((line, definitions_raw[docker_file_path][line]))\n", "path": "checkov/dockerfile/runner.py"}]} | 1,889 | 198 |
gh_patches_debug_41075 | rasdani/github-patches | git_diff | vyperlang__vyper-828 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Solidity Compatible ERC20 broken
The [Solidity compatible ERC20 token](https://github.com/ethereum/vyper/blob/master/examples/tokens/ERC20_solidity_compatible/ERC20.v.py) no longer compiles, since it was not updated after the removal of separate uint256 math functions. This is a super easy fix. I can do it later in the week if no one gets to it before then.
</issue>
<code>
[start of examples/tokens/ERC20_solidity_compatible/ERC20.v.py]
1 # Solidity-Compatible EIP20/ERC20 Token
2 # Implements https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md
3 # Author: Phil Daian
4
5 # The use of the uint256 datatype as in this token is not
6 # recommended, as it can pose security risks.
7
8 # This token is intended as a proof of concept towards
9 # language interoperability and not for production use.
10
11 # Events issued by the contract
12 Transfer: event({_from: indexed(address), _to: indexed(address), _value: uint256})
13 Approval: event({_owner: indexed(address), _spender: indexed(address), _value: uint256})
14
15 balances: uint256[address]
16 allowances: (uint256[address])[address]
17 num_issued: uint256
18
19 @public
20 @payable
21 def deposit():
22 _value: uint256 = convert(msg.value, 'uint256')
23 _sender: address = msg.sender
24 self.balances[_sender] = uint256_add(self.balances[_sender], _value)
25 self.num_issued = uint256_add(self.num_issued, _value)
26 # Fire deposit event as transfer from 0x0
27 log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)
28
29 @public
30 def withdraw(_value : uint256) -> bool:
31 _sender: address = msg.sender
32 # Make sure sufficient funds are present, op will not underflow supply
33 # implicitly through overflow protection
34 self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
35 self.num_issued = uint256_sub(self.num_issued, _value)
36 send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))
37 # Fire withdraw event as transfer to 0x0
38 log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)
39 return true
40
41 @public
42 @constant
43 def totalSupply() -> uint256:
44 return self.num_issued
45
46 @public
47 @constant
48 def balanceOf(_owner : address) -> uint256:
49 return self.balances[_owner]
50
51 @public
52 def transfer(_to : address, _value : uint256) -> bool:
53 _sender: address = msg.sender
54 # Make sure sufficient funds are present implicitly through overflow protection
55 self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
56 self.balances[_to] = uint256_add(self.balances[_to], _value)
57 # Fire transfer event
58 log.Transfer(_sender, _to, _value)
59 return true
60
61 @public
62 def transferFrom(_from : address, _to : address, _value : uint256) -> bool:
63 _sender: address = msg.sender
64 allowance: uint256 = self.allowances[_from][_sender]
65 # Make sure sufficient funds/allowance are present implicitly through overflow protection
66 self.balances[_from] = uint256_sub(self.balances[_from], _value)
67 self.balances[_to] = uint256_add(self.balances[_to], _value)
68 self.allowances[_from][_sender] = uint256_sub(allowance, _value)
69 # Fire transfer event
70 log.Transfer(_from, _to, _value)
71 return true
72
73 @public
74 def approve(_spender : address, _value : uint256) -> bool:
75 _sender: address = msg.sender
76 self.allowances[_sender][_spender] = _value
77 # Fire approval event
78 log.Approval(_sender, _spender, _value)
79 return true
80
81 @public
82 @constant
83 def allowance(_owner : address, _spender : address) -> uint256:
84 return self.allowances[_owner][_spender]
85
86
[end of examples/tokens/ERC20_solidity_compatible/ERC20.v.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py
--- a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py
+++ b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py
@@ -21,8 +21,8 @@
def deposit():
_value: uint256 = convert(msg.value, 'uint256')
_sender: address = msg.sender
- self.balances[_sender] = uint256_add(self.balances[_sender], _value)
- self.num_issued = uint256_add(self.num_issued, _value)
+ self.balances[_sender] = self.balances[_sender] + _value
+ self.num_issued = self.num_issued + _value
# Fire deposit event as transfer from 0x0
log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)
@@ -31,12 +31,12 @@
_sender: address = msg.sender
# Make sure sufficient funds are present, op will not underflow supply
# implicitly through overflow protection
- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
- self.num_issued = uint256_sub(self.num_issued, _value)
+ self.balances[_sender] = self.balances[_sender] - _value
+ self.num_issued = self.num_issued - _value
send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))
# Fire withdraw event as transfer to 0x0
log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)
- return true
+ return True
@public
@constant
@@ -52,23 +52,23 @@
def transfer(_to : address, _value : uint256) -> bool:
_sender: address = msg.sender
# Make sure sufficient funds are present implicitly through overflow protection
- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)
- self.balances[_to] = uint256_add(self.balances[_to], _value)
+ self.balances[_sender] = self.balances[_sender] - _value
+ self.balances[_to] = self.balances[_to] + _value
# Fire transfer event
log.Transfer(_sender, _to, _value)
- return true
+ return True
@public
def transferFrom(_from : address, _to : address, _value : uint256) -> bool:
_sender: address = msg.sender
allowance: uint256 = self.allowances[_from][_sender]
# Make sure sufficient funds/allowance are present implicitly through overflow protection
- self.balances[_from] = uint256_sub(self.balances[_from], _value)
- self.balances[_to] = uint256_add(self.balances[_to], _value)
- self.allowances[_from][_sender] = uint256_sub(allowance, _value)
+ self.balances[_from] = self.balances[_from] - _value
+ self.balances[_to] = self.balances[_to] + _value
+ self.allowances[_from][_sender] = allowance - _value
# Fire transfer event
log.Transfer(_from, _to, _value)
- return true
+ return True
@public
def approve(_spender : address, _value : uint256) -> bool:
@@ -76,7 +76,7 @@
self.allowances[_sender][_spender] = _value
# Fire approval event
log.Approval(_sender, _spender, _value)
- return true
+ return True
@public
@constant
| {"golden_diff": "diff --git a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py\n--- a/examples/tokens/ERC20_solidity_compatible/ERC20.v.py\n+++ b/examples/tokens/ERC20_solidity_compatible/ERC20.v.py\n@@ -21,8 +21,8 @@\n def deposit():\n _value: uint256 = convert(msg.value, 'uint256')\n _sender: address = msg.sender\n- self.balances[_sender] = uint256_add(self.balances[_sender], _value)\n- self.num_issued = uint256_add(self.num_issued, _value)\n+ self.balances[_sender] = self.balances[_sender] + _value\n+ self.num_issued = self.num_issued + _value\n # Fire deposit event as transfer from 0x0\n log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)\n \n@@ -31,12 +31,12 @@\n _sender: address = msg.sender\n # Make sure sufficient funds are present, op will not underflow supply\n # implicitly through overflow protection\n- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n- self.num_issued = uint256_sub(self.num_issued, _value)\n+ self.balances[_sender] = self.balances[_sender] - _value\n+ self.num_issued = self.num_issued - _value\n send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))\n # Fire withdraw event as transfer to 0x0\n log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)\n- return true\n+ return True\n \n @public\n @constant\n@@ -52,23 +52,23 @@\n def transfer(_to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present implicitly through overflow protection\n- self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n- self.balances[_to] = uint256_add(self.balances[_to], _value)\n+ self.balances[_sender] = self.balances[_sender] - _value\n+ self.balances[_to] = self.balances[_to] + _value\n # Fire transfer event\n log.Transfer(_sender, _to, _value)\n- return true\n+ return True\n \n @public\n def transferFrom(_from : address, _to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n allowance: uint256 = self.allowances[_from][_sender]\n # Make sure sufficient funds/allowance are present implicitly through overflow protection\n- self.balances[_from] = uint256_sub(self.balances[_from], _value)\n- self.balances[_to] = uint256_add(self.balances[_to], _value)\n- self.allowances[_from][_sender] = uint256_sub(allowance, _value)\n+ self.balances[_from] = self.balances[_from] - _value\n+ self.balances[_to] = self.balances[_to] + _value\n+ self.allowances[_from][_sender] = allowance - _value\n # Fire transfer event\n log.Transfer(_from, _to, _value)\n- return true\n+ return True\n \n @public\n def approve(_spender : address, _value : uint256) -> bool:\n@@ -76,7 +76,7 @@\n self.allowances[_sender][_spender] = _value\n # Fire approval event\n log.Approval(_sender, _spender, _value)\n- return true\n+ return True\n \n @public\n @constant\n", "issue": "Solidity Compatible ERC20 broken\nThe [Solidity compatible ERC20 token](https://github.com/ethereum/vyper/blob/master/examples/tokens/ERC20_solidity_compatible/ERC20.v.py) no longer compiles, since it was not updated after the removal of separate uint256 math functions. This is a super easy fix. I can do it later in the week if no one gets to it before then. 
\n", "before_files": [{"content": "# Solidity-Compatible EIP20/ERC20 Token\n# Implements https://github.com/ethereum/EIPs/blob/master/EIPS/eip-20-token-standard.md\n# Author: Phil Daian\n\n# The use of the uint256 datatype as in this token is not\n# recommended, as it can pose security risks.\n\n# This token is intended as a proof of concept towards\n# language interoperability and not for production use.\n\n# Events issued by the contract\nTransfer: event({_from: indexed(address), _to: indexed(address), _value: uint256})\nApproval: event({_owner: indexed(address), _spender: indexed(address), _value: uint256})\n\nbalances: uint256[address]\nallowances: (uint256[address])[address]\nnum_issued: uint256\n\n@public\n@payable\ndef deposit():\n _value: uint256 = convert(msg.value, 'uint256')\n _sender: address = msg.sender\n self.balances[_sender] = uint256_add(self.balances[_sender], _value)\n self.num_issued = uint256_add(self.num_issued, _value)\n # Fire deposit event as transfer from 0x0\n log.Transfer(0x0000000000000000000000000000000000000000, _sender, _value)\n\n@public\ndef withdraw(_value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present, op will not underflow supply\n # implicitly through overflow protection\n self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n self.num_issued = uint256_sub(self.num_issued, _value)\n send(_sender, as_wei_value(convert(_value, 'int128'), 'wei'))\n # Fire withdraw event as transfer to 0x0\n log.Transfer(_sender, 0x0000000000000000000000000000000000000000, _value)\n return true\n\n@public\n@constant\ndef totalSupply() -> uint256:\n return self.num_issued\n\n@public\n@constant\ndef balanceOf(_owner : address) -> uint256:\n return self.balances[_owner]\n\n@public\ndef transfer(_to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n # Make sure sufficient funds are present implicitly through overflow protection\n self.balances[_sender] = uint256_sub(self.balances[_sender], _value)\n self.balances[_to] = uint256_add(self.balances[_to], _value)\n # Fire transfer event\n log.Transfer(_sender, _to, _value)\n return true\n\n@public\ndef transferFrom(_from : address, _to : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n allowance: uint256 = self.allowances[_from][_sender]\n # Make sure sufficient funds/allowance are present implicitly through overflow protection\n self.balances[_from] = uint256_sub(self.balances[_from], _value)\n self.balances[_to] = uint256_add(self.balances[_to], _value)\n self.allowances[_from][_sender] = uint256_sub(allowance, _value)\n # Fire transfer event\n log.Transfer(_from, _to, _value)\n return true\n\n@public\ndef approve(_spender : address, _value : uint256) -> bool:\n _sender: address = msg.sender\n self.allowances[_sender][_spender] = _value\n # Fire approval event\n log.Approval(_sender, _spender, _value)\n return true\n\n@public\n@constant\ndef allowance(_owner : address, _spender : address) -> uint256:\n return self.allowances[_owner][_spender]\n\n", "path": "examples/tokens/ERC20_solidity_compatible/ERC20.v.py"}]} | 1,765 | 990 |
gh_patches_debug_29690 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1455 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MA: committee scraper for 2017
State: MA
The scraper says that it is skipping every page; I believe the site was rewritten and so it will need a complete rewrite.
</issue>
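For context on why every page is "skipped": the old scraper bails out whenever its `committeeShortName` selector comes back empty, which is what happens on the redesigned site. A minimal sketch of that failure mode, and of reading the name from `<title>` instead (the direction the fix below takes), is shown here; it is illustrative only, and `committee_name` is an invented helper name.

```python
import lxml.html

def committee_name(html_text):
    """Return a committee name, tolerating the redesigned markup."""
    doc = lxml.html.fromstring(html_text)
    # Pre-redesign markup exposed the name in a dedicated span:
    name = doc.xpath('//span[@class="committeeShortName"]/text()')
    if not name:
        # On the rewritten site that span no longer exists, so the old
        # scraper logged "Had to skip this malformed page." for every
        # committee; the new pages still carry the name in <title>.
        name = doc.xpath('//title/text()')
    return name[0] if name else None
```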
<code>
[start of openstates/ma/committees.py]
1 from billy.scrape.committees import CommitteeScraper, Committee
2
3 import lxml.html
4
5
6 class MACommitteeScraper(CommitteeScraper):
7 jurisdiction = 'ma'
8
9 def scrape(self, term, chambers):
10 page_types = []
11 if 'upper' in chambers:
12 page_types += ['Senate', 'Joint']
13 if 'lower' in chambers:
14 page_types += ['House']
15 chamber_mapping = {'Senate': 'upper',
16 'House': 'lower',
17 'Joint': 'joint'}
18
19 foundComms = []
20
21 for page_type in page_types:
22 url = 'http://www.malegislature.gov/Committees/' + page_type
23
24 html = self.get(url, verify=False).text
25 doc = lxml.html.fromstring(html)
26 doc.make_links_absolute('http://www.malegislature.gov')
27
28 for com_url in doc.xpath('//ul[@class="committeeList"]/li/a/@href'):
29 chamber = chamber_mapping[page_type]
30 self.scrape_committee(chamber, com_url)
31
32 def scrape_committee(self, chamber, url):
33 html = self.get(url, verify=False).text
34 doc = lxml.html.fromstring(html)
35
36 name = doc.xpath('//span[@class="committeeShortName"]/text()')
37 if len(name) == 0:
38 self.warning("Had to skip this malformed page.")
39 return
40 # Because of http://www.malegislature.gov/Committees/Senate/S29 this
41 # XXX: hack had to be pushed in. Remove me ASAP. This just skips
42 # malformed pages.
43
44 name = name[0]
45 com = Committee(chamber, name)
46 com.add_source(url)
47
48 # get both titles and names, order is consistent
49 titles = doc.xpath('//p[@class="rankingMemberTitle"]/text()')
50 names = doc.xpath('//p[@class="rankingMemberName"]/a/text()')
51
52 for title, name in zip(titles, names):
53 com.add_member(name, title)
54
55 for member in doc.xpath('//div[@class="committeeRegularMembers"]//a/text()'):
56 com.add_member(member)
57
58 if com['members']:
59 self.save_committee(com)
60
[end of openstates/ma/committees.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/ma/committees.py b/openstates/ma/committees.py
--- a/openstates/ma/committees.py
+++ b/openstates/ma/committees.py
@@ -16,8 +16,6 @@
'House': 'lower',
'Joint': 'joint'}
- foundComms = []
-
for page_type in page_types:
url = 'http://www.malegislature.gov/Committees/' + page_type
@@ -33,27 +31,15 @@
html = self.get(url, verify=False).text
doc = lxml.html.fromstring(html)
- name = doc.xpath('//span[@class="committeeShortName"]/text()')
- if len(name) == 0:
- self.warning("Had to skip this malformed page.")
- return
- # Because of http://www.malegislature.gov/Committees/Senate/S29 this
- # XXX: hack had to be pushed in. Remove me ASAP. This just skips
- # malformed pages.
-
- name = name[0]
+ name = doc.xpath('//title/text()')[0]
com = Committee(chamber, name)
com.add_source(url)
- # get both titles and names, order is consistent
- titles = doc.xpath('//p[@class="rankingMemberTitle"]/text()')
- names = doc.xpath('//p[@class="rankingMemberName"]/a/text()')
-
- for title, name in zip(titles, names):
- com.add_member(name, title)
-
- for member in doc.xpath('//div[@class="committeeRegularMembers"]//a/text()'):
- com.add_member(member)
+ members = doc.xpath('//a[contains(@href, "/Legislators/Profile")]')
+ for member in members:
+ title = member.xpath('../span')
+ role = title[0].text.lower() if title else 'member'
+ com.add_member(member.text, role)
if com['members']:
self.save_committee(com)
| {"golden_diff": "diff --git a/openstates/ma/committees.py b/openstates/ma/committees.py\n--- a/openstates/ma/committees.py\n+++ b/openstates/ma/committees.py\n@@ -16,8 +16,6 @@\n 'House': 'lower',\n 'Joint': 'joint'}\n \n- foundComms = []\n-\n for page_type in page_types:\n url = 'http://www.malegislature.gov/Committees/' + page_type\n \n@@ -33,27 +31,15 @@\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n \n- name = doc.xpath('//span[@class=\"committeeShortName\"]/text()')\n- if len(name) == 0:\n- self.warning(\"Had to skip this malformed page.\")\n- return\n- # Because of http://www.malegislature.gov/Committees/Senate/S29 this\n- # XXX: hack had to be pushed in. Remove me ASAP. This just skips\n- # malformed pages.\n-\n- name = name[0]\n+ name = doc.xpath('//title/text()')[0]\n com = Committee(chamber, name)\n com.add_source(url)\n \n- # get both titles and names, order is consistent\n- titles = doc.xpath('//p[@class=\"rankingMemberTitle\"]/text()')\n- names = doc.xpath('//p[@class=\"rankingMemberName\"]/a/text()')\n-\n- for title, name in zip(titles, names):\n- com.add_member(name, title)\n-\n- for member in doc.xpath('//div[@class=\"committeeRegularMembers\"]//a/text()'):\n- com.add_member(member)\n+ members = doc.xpath('//a[contains(@href, \"/Legislators/Profile\")]')\n+ for member in members:\n+ title = member.xpath('../span')\n+ role = title[0].text.lower() if title else 'member'\n+ com.add_member(member.text, role)\n \n if com['members']:\n self.save_committee(com)\n", "issue": "MA: committee scraper for 2017\nState: MA\r\n\r\nsays that it is skipping every page, I believe the site was rewritten and so will need a complete rewrite\n", "before_files": [{"content": "from billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\nclass MACommitteeScraper(CommitteeScraper):\n jurisdiction = 'ma'\n\n def scrape(self, term, chambers):\n page_types = []\n if 'upper' in chambers:\n page_types += ['Senate', 'Joint']\n if 'lower' in chambers:\n page_types += ['House']\n chamber_mapping = {'Senate': 'upper',\n 'House': 'lower',\n 'Joint': 'joint'}\n\n foundComms = []\n\n for page_type in page_types:\n url = 'http://www.malegislature.gov/Committees/' + page_type\n\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute('http://www.malegislature.gov')\n\n for com_url in doc.xpath('//ul[@class=\"committeeList\"]/li/a/@href'):\n chamber = chamber_mapping[page_type]\n self.scrape_committee(chamber, com_url)\n\n def scrape_committee(self, chamber, url):\n html = self.get(url, verify=False).text\n doc = lxml.html.fromstring(html)\n\n name = doc.xpath('//span[@class=\"committeeShortName\"]/text()')\n if len(name) == 0:\n self.warning(\"Had to skip this malformed page.\")\n return\n # Because of http://www.malegislature.gov/Committees/Senate/S29 this\n # XXX: hack had to be pushed in. Remove me ASAP. This just skips\n # malformed pages.\n\n name = name[0]\n com = Committee(chamber, name)\n com.add_source(url)\n\n # get both titles and names, order is consistent\n titles = doc.xpath('//p[@class=\"rankingMemberTitle\"]/text()')\n names = doc.xpath('//p[@class=\"rankingMemberName\"]/a/text()')\n\n for title, name in zip(titles, names):\n com.add_member(name, title)\n\n for member in doc.xpath('//div[@class=\"committeeRegularMembers\"]//a/text()'):\n com.add_member(member)\n\n if com['members']:\n self.save_committee(com)\n", "path": "openstates/ma/committees.py"}]} | 1,173 | 457 |
gh_patches_debug_23348 | rasdani/github-patches | git_diff | wagtail__wagtail-8210 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tags over 100 characters
Found a bug? Please fill out the sections below. 👍
### Issue Summary
When adding a tag while using the ClusterTaggableManager class, if the tag name is greater than the character limit for the database column no validation error is given.
### Steps to Reproduce
1. login to admin and edit a page with a tag content panel
2. create a tag with more than 100 characters
3. save, or publish the page
### Technical details
* Python version: Python 3.5.1
* Django version: 1.11.13
* Wagtail version: 1.13.1
Tags over 100 characters
Found a bug? Please fill out the sections below. 👍
### Issue Summary
When adding a tag while using the ClusterTaggableManager class, if the tag name is greater than the character limit for the database column no validation error is given.
### Steps to Reproduce
1. login to admin and edit a page with a tag content panel
2. create a tag with more than 100 characters
3. save, or publish the page
### Technical details
* Python version: Python 3.5.1
* Django version: 1.11.13
* Wagtail version: 1.13.1
</issue>
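The column limit being exceeded here is the `max_length` of django-taggit's `Tag.name` field (100 characters on a stock install). A minimal sketch of the missing form-level check is below; it only illustrates the direction a fix can take (the patch in this record adds essentially this logic inside `TagField.clean()`), and `validate_tag_names` is a made-up helper name.

```python
from django.core.exceptions import ValidationError
from taggit.models import Tag


def validate_tag_names(names):
    # Length limit of the tag name column (100 on stock django-taggit).
    max_len = Tag.name.field.max_length
    too_long = [name for name in names if len(name) > max_len]
    if too_long:
        raise ValidationError(
            "Tag(s) %s are over %d characters" % (", ".join(too_long), max_len)
        )
```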
<code>
[start of wagtail/admin/forms/tags.py]
1 from taggit.forms import TagField as TaggitTagField
2 from taggit.models import Tag
3
4 from wagtail.admin.widgets import AdminTagWidget
5
6
7 class TagField(TaggitTagField):
8 """
9 Extends taggit's TagField with the option to prevent creating tags that do not already exist
10 """
11
12 widget = AdminTagWidget
13
14 def __init__(self, *args, **kwargs):
15 self.tag_model = kwargs.pop("tag_model", None)
16 self.free_tagging = kwargs.pop("free_tagging", None)
17
18 super().__init__(*args, **kwargs)
19
20 # pass on tag_model and free_tagging kwargs to the widget,
21 # if (and only if) they have been passed explicitly here.
22 # Otherwise, set default values for clean() to use
23 if self.tag_model is None:
24 self.tag_model = Tag
25 else:
26 self.widget.tag_model = self.tag_model
27
28 if self.free_tagging is None:
29 self.free_tagging = getattr(self.tag_model, "free_tagging", True)
30 else:
31 self.widget.free_tagging = self.free_tagging
32
33 def clean(self, value):
34 value = super().clean(value)
35
36 if not self.free_tagging:
37 # filter value to just the tags that already exist in tag_model
38 value = list(
39 self.tag_model.objects.filter(name__in=value).values_list(
40 "name", flat=True
41 )
42 )
43
44 return value
45
[end of wagtail/admin/forms/tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/admin/forms/tags.py b/wagtail/admin/forms/tags.py
--- a/wagtail/admin/forms/tags.py
+++ b/wagtail/admin/forms/tags.py
@@ -1,3 +1,5 @@
+from django.core.exceptions import ValidationError
+from django.utils.translation import gettext_lazy as _
from taggit.forms import TagField as TaggitTagField
from taggit.models import Tag
@@ -31,8 +33,27 @@
self.widget.free_tagging = self.free_tagging
def clean(self, value):
+
value = super().clean(value)
+ max_tag_length = self.tag_model.name.field.max_length
+ value_too_long = ""
+ for val in value:
+ if len(val) > max_tag_length:
+ if value_too_long:
+ value_too_long += ", "
+ value_too_long += val
+ if value_too_long:
+ raise ValidationError(
+ _(
+ "Tag(s) %(value_too_long)s are over %(max_tag_length)d characters"
+ % {
+ "value_too_long": value_too_long,
+ "max_tag_length": max_tag_length,
+ }
+ )
+ )
+
if not self.free_tagging:
# filter value to just the tags that already exist in tag_model
value = list(
| {"golden_diff": "diff --git a/wagtail/admin/forms/tags.py b/wagtail/admin/forms/tags.py\n--- a/wagtail/admin/forms/tags.py\n+++ b/wagtail/admin/forms/tags.py\n@@ -1,3 +1,5 @@\n+from django.core.exceptions import ValidationError\n+from django.utils.translation import gettext_lazy as _\n from taggit.forms import TagField as TaggitTagField\n from taggit.models import Tag\n \n@@ -31,8 +33,27 @@\n self.widget.free_tagging = self.free_tagging\n \n def clean(self, value):\n+\n value = super().clean(value)\n \n+ max_tag_length = self.tag_model.name.field.max_length\n+ value_too_long = \"\"\n+ for val in value:\n+ if len(val) > max_tag_length:\n+ if value_too_long:\n+ value_too_long += \", \"\n+ value_too_long += val\n+ if value_too_long:\n+ raise ValidationError(\n+ _(\n+ \"Tag(s) %(value_too_long)s are over %(max_tag_length)d characters\"\n+ % {\n+ \"value_too_long\": value_too_long,\n+ \"max_tag_length\": max_tag_length,\n+ }\n+ )\n+ )\n+\n if not self.free_tagging:\n # filter value to just the tags that already exist in tag_model\n value = list(\n", "issue": "Tags over 100 characters\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n\r\n### Issue Summary\r\n\r\nWhen adding a tag while using the ClusterTaggableManager class, if the tag name is greater than the character limit for the database column no validation error is given.\r\n\r\n### Steps to Reproduce\r\n\r\n1. login to admin and edit a page with a tag content panel\r\n2. create a tag with more than 100 characters\r\n3. save, or publish the page \r\n\r\n### Technical details\r\n\r\n* Python version: Python 3.5.1\r\n* Django version: 1.11.13\r\n* Wagtail version: 1.13.1\nTags over 100 characters\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n\r\n### Issue Summary\r\n\r\nWhen adding a tag while using the ClusterTaggableManager class, if the tag name is greater than the character limit for the database column no validation error is given.\r\n\r\n### Steps to Reproduce\r\n\r\n1. login to admin and edit a page with a tag content panel\r\n2. create a tag with more than 100 characters\r\n3. 
save, or publish the page \r\n\r\n### Technical details\r\n\r\n* Python version: Python 3.5.1\r\n* Django version: 1.11.13\r\n* Wagtail version: 1.13.1\n", "before_files": [{"content": "from taggit.forms import TagField as TaggitTagField\nfrom taggit.models import Tag\n\nfrom wagtail.admin.widgets import AdminTagWidget\n\n\nclass TagField(TaggitTagField):\n \"\"\"\n Extends taggit's TagField with the option to prevent creating tags that do not already exist\n \"\"\"\n\n widget = AdminTagWidget\n\n def __init__(self, *args, **kwargs):\n self.tag_model = kwargs.pop(\"tag_model\", None)\n self.free_tagging = kwargs.pop(\"free_tagging\", None)\n\n super().__init__(*args, **kwargs)\n\n # pass on tag_model and free_tagging kwargs to the widget,\n # if (and only if) they have been passed explicitly here.\n # Otherwise, set default values for clean() to use\n if self.tag_model is None:\n self.tag_model = Tag\n else:\n self.widget.tag_model = self.tag_model\n\n if self.free_tagging is None:\n self.free_tagging = getattr(self.tag_model, \"free_tagging\", True)\n else:\n self.widget.free_tagging = self.free_tagging\n\n def clean(self, value):\n value = super().clean(value)\n\n if not self.free_tagging:\n # filter value to just the tags that already exist in tag_model\n value = list(\n self.tag_model.objects.filter(name__in=value).values_list(\n \"name\", flat=True\n )\n )\n\n return value\n", "path": "wagtail/admin/forms/tags.py"}]} | 1,226 | 301 |
gh_patches_debug_1417 | rasdani/github-patches | git_diff | getmoto__moto-1400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mock_xray_client cannot be used as a context manager
PR #1255 added support for `aws_xray_sdk` which is great.
But there is a problem with it: `moto.mock_xray_client` is *only* a function decorator, and unlike all other `mock_*` methods it cannot be used as a context manager or directly with `start()`...`stop()`.
As a result, it is not possible to write a `py.test` fixture which would add support for mocking `xray_client`.
Also, `mock_xray_client` does not return the result of the function it decorates. Given it is meant to be used to decorate test functions, it is most likely not a big issue, but I think it is still worth fixing.
I will prepare a PR for the return value issue soon.
Also, I am thinking about refactoring `mock_xray_client` to base it on the existing infrastructure (`BaseBackend`, `base_decorator`) but am not yet familiar enough with `moto` internals to be sure which would be the best way to implement it.
Installed version: `moto-ext==1.1.25`
The problem seemingly persists in current `master` branch.
</issue>
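For comparison, the usage styles the other moto mocks already support are sketched below against `mock_s3`; the commented-out X-Ray variant is the usage being requested here and does not work with the implementation shown in this record, where `mock_xray_client` is a plain function decorator.

```python
import moto

# Existing mocks work as a decorator, a context manager, or via start()/stop():
with moto.mock_s3():
    pass                      # boto3 S3 calls are intercepted in this block

s3_mock = moto.mock_s3()
s3_mock.start()
s3_mock.stop()

# Desired (hypothetical) equivalent for X-Ray -- not currently possible,
# which is what this issue is about:
# with moto.mock_xray_client():
#     ...
```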
<code>
[start of moto/xray/mock_client.py]
1 from functools import wraps
2 import os
3 from moto.xray import xray_backends
4 import aws_xray_sdk.core
5 from aws_xray_sdk.core.context import Context as AWSContext
6 from aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter
7
8
9 class MockEmitter(UDPEmitter):
10 """
11 Replaces the code that sends UDP to local X-Ray daemon
12 """
13 def __init__(self, daemon_address='127.0.0.1:2000'):
14 address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)
15 self._ip, self._port = self._parse_address(address)
16
17 def _xray_backend(self, region):
18 return xray_backends[region]
19
20 def send_entity(self, entity):
21 # Hack to get region
22 # region = entity.subsegments[0].aws['region']
23 # xray = self._xray_backend(region)
24
25 # TODO store X-Ray data, pretty sure X-Ray needs refactor for this
26 pass
27
28 def _send_data(self, data):
29 raise RuntimeError('Should not be running this')
30
31
32 def mock_xray_client(f):
33 """
34 Mocks the X-Ray sdk by pwning its evil singleton with our methods
35
36 The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.
37 This means the Context() will be very unhappy if an env var isnt present, so we set that, save
38 the old context, then supply our new context.
39 We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing
40 that itno the recorder instance.
41 """
42 @wraps(f)
43 def _wrapped(*args, **kwargs):
44 print("Starting X-Ray Patch")
45
46 old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')
47 os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'
48 old_xray_context = aws_xray_sdk.core.xray_recorder._context
49 old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter
50 aws_xray_sdk.core.xray_recorder._context = AWSContext()
51 aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()
52
53 try:
54 f(*args, **kwargs)
55 finally:
56
57 if old_xray_context_var is None:
58 del os.environ['AWS_XRAY_CONTEXT_MISSING']
59 else:
60 os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var
61
62 aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter
63 aws_xray_sdk.core.xray_recorder._context = old_xray_context
64
65 return _wrapped
66
67
68 class XRaySegment(object):
69 """
70 XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark
71 the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated
72 by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop
73 the segment, thus causing it to be emitted via UDP.
74
75 During testing we're going to have to control the start and end of a segment via context managers.
76 """
77 def __enter__(self):
78 aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)
79
80 return self
81
82 def __exit__(self, exc_type, exc_val, exc_tb):
83 aws_xray_sdk.core.xray_recorder.end_segment()
84
[end of moto/xray/mock_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py
--- a/moto/xray/mock_client.py
+++ b/moto/xray/mock_client.py
@@ -51,7 +51,7 @@
aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()
try:
- f(*args, **kwargs)
+ return f(*args, **kwargs)
finally:
if old_xray_context_var is None:
| {"golden_diff": "diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py\n--- a/moto/xray/mock_client.py\n+++ b/moto/xray/mock_client.py\n@@ -51,7 +51,7 @@\n aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()\n \n try:\n- f(*args, **kwargs)\n+ return f(*args, **kwargs)\n finally:\n \n if old_xray_context_var is None:\n", "issue": "mock_xray_client cannot be used as a context manager\nPR #1255 added support for `aws_xray_sdk` which is great.\r\nBut there is a problem with it: `moto.mock_xray_client` is *only* a function decorator, and unlike all other `mock_*` methods it cannot be used as a context manager or directly with `start()`...`stop()`.\r\nAs a result, it is not possible to write a `py.test` fixture which would add support for mocking `xray_client`.\r\n\r\nAlso, `mock_xray_client` does not return the result of the function it decorates. Given it is meant to be used to decorate test functions it is most likely not a big issue, but I think it is still worth fixing.\r\n\r\nI will prepare a PR for the return value issue soon.\r\nAlso I am thinking about refactoring `mock_xray_client` to base it on the existing infrastructure (`BaseBackend`, `base_decorator`) but am not yet enough familiar with `moto` internals to be sure which would be the best way to implement it.\r\n\r\nInstalled version: `moto-ext==1.1.25`\r\nThe problem seemingly persists in current `master` branch.\n", "before_files": [{"content": "from functools import wraps\nimport os\nfrom moto.xray import xray_backends\nimport aws_xray_sdk.core\nfrom aws_xray_sdk.core.context import Context as AWSContext\nfrom aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter\n\n\nclass MockEmitter(UDPEmitter):\n \"\"\"\n Replaces the code that sends UDP to local X-Ray daemon\n \"\"\"\n def __init__(self, daemon_address='127.0.0.1:2000'):\n address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)\n self._ip, self._port = self._parse_address(address)\n\n def _xray_backend(self, region):\n return xray_backends[region]\n\n def send_entity(self, entity):\n # Hack to get region\n # region = entity.subsegments[0].aws['region']\n # xray = self._xray_backend(region)\n\n # TODO store X-Ray data, pretty sure X-Ray needs refactor for this\n pass\n\n def _send_data(self, data):\n raise RuntimeError('Should not be running this')\n\n\ndef mock_xray_client(f):\n \"\"\"\n Mocks the X-Ray sdk by pwning its evil singleton with our methods\n\n The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.\n This means the Context() will be very unhappy if an env var isnt present, so we set that, save\n the old context, then supply our new context.\n We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing\n that itno the recorder instance.\n \"\"\"\n @wraps(f)\n def _wrapped(*args, **kwargs):\n print(\"Starting X-Ray Patch\")\n\n old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'\n old_xray_context = aws_xray_sdk.core.xray_recorder._context\n old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter\n aws_xray_sdk.core.xray_recorder._context = AWSContext()\n aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()\n\n try:\n f(*args, **kwargs)\n finally:\n\n if old_xray_context_var is None:\n del os.environ['AWS_XRAY_CONTEXT_MISSING']\n else:\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var\n\n aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter\n 
aws_xray_sdk.core.xray_recorder._context = old_xray_context\n\n return _wrapped\n\n\nclass XRaySegment(object):\n \"\"\"\n XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark\n the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated\n by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop\n the segment, thus causing it to be emitted via UDP.\n\n During testing we're going to have to control the start and end of a segment via context managers.\n \"\"\"\n def __enter__(self):\n aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n aws_xray_sdk.core.xray_recorder.end_segment()\n", "path": "moto/xray/mock_client.py"}]} | 1,740 | 107 |
gh_patches_debug_57933 | rasdani/github-patches | git_diff | scrapy__scrapy-3668 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
about the signal retry_complete
I didn't find the signal in the signal list; how can I use it?
</issue>
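For reference, connecting an extension to a signal that actually exists looks roughly like the sketch below (`RetryStatsExtension` is an invented name). There is no `retry_complete` in `scrapy.signals`, despite the docstring in `retry.py` quoted below, which is why it cannot be found in the signal list; the fix in this record simply drops that sentence from the docstring.

```python
from scrapy import signals


class RetryStatsExtension:
    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        # spider_closed is a real signal; "retry_complete" is never sent,
        # so there is nothing to connect to for it.
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        return ext

    def spider_closed(self, spider):
        spider.logger.info("retry stats: %s",
                           spider.crawler.stats.get_value("retry/count"))
```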
<code>
[start of scrapy/downloadermiddlewares/retry.py]
1 """
2 An extension to retry failed requests that are potentially caused by temporary
3 problems such as a connection timeout or HTTP 500 error.
4
5 You can change the behaviour of this middleware by modifing the scraping settings:
6 RETRY_TIMES - how many times to retry a failed page
7 RETRY_HTTP_CODES - which HTTP response codes to retry
8
9 Failed pages are collected on the scraping process and rescheduled at the end,
10 once the spider has finished crawling all regular (non failed) pages. Once
11 there is no more failed pages to retry this middleware sends a signal
12 (retry_complete), so other extensions could connect to that signal.
13 """
14 import logging
15
16 from twisted.internet import defer
17 from twisted.internet.error import TimeoutError, DNSLookupError, \
18 ConnectionRefusedError, ConnectionDone, ConnectError, \
19 ConnectionLost, TCPTimedOutError
20 from twisted.web.client import ResponseFailed
21
22 from scrapy.exceptions import NotConfigured
23 from scrapy.utils.response import response_status_message
24 from scrapy.core.downloader.handlers.http11 import TunnelError
25 from scrapy.utils.python import global_object_name
26
27 logger = logging.getLogger(__name__)
28
29
30 class RetryMiddleware(object):
31
32 # IOError is raised by the HttpCompression middleware when trying to
33 # decompress an empty response
34 EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,
35 ConnectionRefusedError, ConnectionDone, ConnectError,
36 ConnectionLost, TCPTimedOutError, ResponseFailed,
37 IOError, TunnelError)
38
39 def __init__(self, settings):
40 if not settings.getbool('RETRY_ENABLED'):
41 raise NotConfigured
42 self.max_retry_times = settings.getint('RETRY_TIMES')
43 self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))
44 self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')
45
46 @classmethod
47 def from_crawler(cls, crawler):
48 return cls(crawler.settings)
49
50 def process_response(self, request, response, spider):
51 if request.meta.get('dont_retry', False):
52 return response
53 if response.status in self.retry_http_codes:
54 reason = response_status_message(response.status)
55 return self._retry(request, reason, spider) or response
56 return response
57
58 def process_exception(self, request, exception, spider):
59 if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \
60 and not request.meta.get('dont_retry', False):
61 return self._retry(request, exception, spider)
62
63 def _retry(self, request, reason, spider):
64 retries = request.meta.get('retry_times', 0) + 1
65
66 retry_times = self.max_retry_times
67
68 if 'max_retry_times' in request.meta:
69 retry_times = request.meta['max_retry_times']
70
71 stats = spider.crawler.stats
72 if retries <= retry_times:
73 logger.debug("Retrying %(request)s (failed %(retries)d times): %(reason)s",
74 {'request': request, 'retries': retries, 'reason': reason},
75 extra={'spider': spider})
76 retryreq = request.copy()
77 retryreq.meta['retry_times'] = retries
78 retryreq.dont_filter = True
79 retryreq.priority = request.priority + self.priority_adjust
80
81 if isinstance(reason, Exception):
82 reason = global_object_name(reason.__class__)
83
84 stats.inc_value('retry/count')
85 stats.inc_value('retry/reason_count/%s' % reason)
86 return retryreq
87 else:
88 stats.inc_value('retry/max_reached')
89 logger.debug("Gave up retrying %(request)s (failed %(retries)d times): %(reason)s",
90 {'request': request, 'retries': retries, 'reason': reason},
91 extra={'spider': spider})
92
[end of scrapy/downloadermiddlewares/retry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/downloadermiddlewares/retry.py b/scrapy/downloadermiddlewares/retry.py
--- a/scrapy/downloadermiddlewares/retry.py
+++ b/scrapy/downloadermiddlewares/retry.py
@@ -7,9 +7,7 @@
RETRY_HTTP_CODES - which HTTP response codes to retry
Failed pages are collected on the scraping process and rescheduled at the end,
-once the spider has finished crawling all regular (non failed) pages. Once
-there is no more failed pages to retry this middleware sends a signal
-(retry_complete), so other extensions could connect to that signal.
+once the spider has finished crawling all regular (non failed) pages.
"""
import logging
| {"golden_diff": "diff --git a/scrapy/downloadermiddlewares/retry.py b/scrapy/downloadermiddlewares/retry.py\n--- a/scrapy/downloadermiddlewares/retry.py\n+++ b/scrapy/downloadermiddlewares/retry.py\n@@ -7,9 +7,7 @@\n RETRY_HTTP_CODES - which HTTP response codes to retry\n \n Failed pages are collected on the scraping process and rescheduled at the end,\n-once the spider has finished crawling all regular (non failed) pages. Once\n-there is no more failed pages to retry this middleware sends a signal\n-(retry_complete), so other extensions could connect to that signal.\n+once the spider has finished crawling all regular (non failed) pages.\n \"\"\"\n import logging\n", "issue": "about the signal retry_complete\nI didn't find the singnal in the singnal list,how can I use it\n", "before_files": [{"content": "\"\"\"\nAn extension to retry failed requests that are potentially caused by temporary\nproblems such as a connection timeout or HTTP 500 error.\n\nYou can change the behaviour of this middleware by modifing the scraping settings:\nRETRY_TIMES - how many times to retry a failed page\nRETRY_HTTP_CODES - which HTTP response codes to retry\n\nFailed pages are collected on the scraping process and rescheduled at the end,\nonce the spider has finished crawling all regular (non failed) pages. Once\nthere is no more failed pages to retry this middleware sends a signal\n(retry_complete), so other extensions could connect to that signal.\n\"\"\"\nimport logging\n\nfrom twisted.internet import defer\nfrom twisted.internet.error import TimeoutError, DNSLookupError, \\\n ConnectionRefusedError, ConnectionDone, ConnectError, \\\n ConnectionLost, TCPTimedOutError\nfrom twisted.web.client import ResponseFailed\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.utils.response import response_status_message\nfrom scrapy.core.downloader.handlers.http11 import TunnelError\nfrom scrapy.utils.python import global_object_name\n\nlogger = logging.getLogger(__name__)\n\n\nclass RetryMiddleware(object):\n\n # IOError is raised by the HttpCompression middleware when trying to\n # decompress an empty response\n EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,\n ConnectionRefusedError, ConnectionDone, ConnectError,\n ConnectionLost, TCPTimedOutError, ResponseFailed,\n IOError, TunnelError)\n\n def __init__(self, settings):\n if not settings.getbool('RETRY_ENABLED'):\n raise NotConfigured\n self.max_retry_times = settings.getint('RETRY_TIMES')\n self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))\n self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_retry', False):\n return response\n if response.status in self.retry_http_codes:\n reason = response_status_message(response.status)\n return self._retry(request, reason, spider) or response\n return response\n\n def process_exception(self, request, exception, spider):\n if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \\\n and not request.meta.get('dont_retry', False):\n return self._retry(request, exception, spider)\n\n def _retry(self, request, reason, spider):\n retries = request.meta.get('retry_times', 0) + 1\n\n retry_times = self.max_retry_times\n\n if 'max_retry_times' in request.meta:\n retry_times = request.meta['max_retry_times']\n\n stats = spider.crawler.stats\n if retries <= retry_times:\n logger.debug(\"Retrying 
%(request)s (failed %(retries)d times): %(reason)s\",\n {'request': request, 'retries': retries, 'reason': reason},\n extra={'spider': spider})\n retryreq = request.copy()\n retryreq.meta['retry_times'] = retries\n retryreq.dont_filter = True\n retryreq.priority = request.priority + self.priority_adjust\n\n if isinstance(reason, Exception):\n reason = global_object_name(reason.__class__)\n\n stats.inc_value('retry/count')\n stats.inc_value('retry/reason_count/%s' % reason)\n return retryreq\n else:\n stats.inc_value('retry/max_reached')\n logger.debug(\"Gave up retrying %(request)s (failed %(retries)d times): %(reason)s\",\n {'request': request, 'retries': retries, 'reason': reason},\n extra={'spider': spider})\n", "path": "scrapy/downloadermiddlewares/retry.py"}]} | 1,537 | 146 |
gh_patches_debug_23607 | rasdani/github-patches | git_diff | vaexio__vaex-217 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pandas dependency
We now depend on Pandas:
https://github.com/vaexio/vaex/blob/255ccbc192d54c619a273de21a05f919da8ffadf/packages/vaex-core/vaex/formatting.py
Introduced in https://github.com/vaexio/vaex/pull/192
We should not depend on pandas; it is not a dependency of vaex-core and should not become one. We might also grow too large to run on AWS Lambda.
</issue>
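A pandas-free way to render the `datetime64`/`timedelta64` cases is sketched below using only numpy and the standard library; it is an illustration of the idea, not necessarily byte-for-byte what vaex ends up doing (the actual replacement is in the diff later in this record).

```python
import datetime
import numpy as np


def format_datetime64(value):
    if np.isnat(value):
        return "NaT"
    return np.datetime_as_string(value, unit="s").replace("T", " ")


def format_timedelta64(value):
    if np.isnat(value):
        return "NaT"
    seconds = value / np.timedelta64(1, "s")
    return str(datetime.timedelta(seconds=seconds))
```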
<code>
[start of packages/vaex-core/vaex/formatting.py]
1 import numpy as np
2 import numbers
3 import six
4 import pandas as pd
5
6
7 MAX_LENGTH = 50
8
9
10 def _format_value(value):
11 if isinstance(value, six.string_types):
12 value = str(value)
13 elif isinstance(value, bytes):
14 value = repr(value)
15 elif isinstance(value, np.ma.core.MaskedConstant):
16 value = str(value)
17 if isinstance(value, np.datetime64):
18 value = str(pd.to_datetime(value))
19 if isinstance(value, np.timedelta64):
20 value = str(pd.to_timedelta(value))
21 elif not isinstance(value, numbers.Number):
22 value = str(value)
23 if isinstance(value, float):
24 value = repr(value)
25 if isinstance(value, (str, bytes)):
26 if len(value) > MAX_LENGTH:
27 value = repr(value[:MAX_LENGTH-3])[:-1] + '...'
28 return value
29
[end of packages/vaex-core/vaex/formatting.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/vaex-core/vaex/formatting.py b/packages/vaex-core/vaex/formatting.py
--- a/packages/vaex-core/vaex/formatting.py
+++ b/packages/vaex-core/vaex/formatting.py
@@ -1,7 +1,7 @@
import numpy as np
import numbers
import six
-import pandas as pd
+import datetime
MAX_LENGTH = 50
@@ -15,9 +15,24 @@
elif isinstance(value, np.ma.core.MaskedConstant):
value = str(value)
if isinstance(value, np.datetime64):
- value = str(pd.to_datetime(value))
+ if np.isnat(value):
+ value = 'NaT'
+ else:
+ value = ' '.join(str(value).split('T'))
if isinstance(value, np.timedelta64):
- value = str(pd.to_timedelta(value))
+ if np.isnat(value):
+ value = 'NaT'
+ else:
+ tmp = datetime.timedelta(seconds=value / np.timedelta64(1, 's'))
+ ms = tmp.microseconds
+ s = np.mod(tmp.seconds, 60)
+ m = np.mod(tmp.seconds//60, 60)
+ h = tmp.seconds // 3600
+ d = tmp.days
+ if ms:
+ value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))
+ else:
+ value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))
elif not isinstance(value, numbers.Number):
value = str(value)
if isinstance(value, float):
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/formatting.py b/packages/vaex-core/vaex/formatting.py\n--- a/packages/vaex-core/vaex/formatting.py\n+++ b/packages/vaex-core/vaex/formatting.py\n@@ -1,7 +1,7 @@\n import numpy as np\n import numbers\n import six\n-import pandas as pd\n+import datetime\n \n \n MAX_LENGTH = 50\n@@ -15,9 +15,24 @@\n elif isinstance(value, np.ma.core.MaskedConstant):\n value = str(value)\n if isinstance(value, np.datetime64):\n- value = str(pd.to_datetime(value))\n+ if np.isnat(value):\n+ value = 'NaT'\n+ else:\n+ value = ' '.join(str(value).split('T'))\n if isinstance(value, np.timedelta64):\n- value = str(pd.to_timedelta(value))\n+ if np.isnat(value):\n+ value = 'NaT'\n+ else:\n+ tmp = datetime.timedelta(seconds=value / np.timedelta64(1, 's'))\n+ ms = tmp.microseconds\n+ s = np.mod(tmp.seconds, 60)\n+ m = np.mod(tmp.seconds//60, 60)\n+ h = tmp.seconds // 3600\n+ d = tmp.days\n+ if ms:\n+ value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))\n+ else:\n+ value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))\n elif not isinstance(value, numbers.Number):\n value = str(value)\n if isinstance(value, float):\n", "issue": "Pandas dependency\nWe now depends on Pandas:\r\nhttps://github.com/vaexio/vaex/blob/255ccbc192d54c619a273de21a05f919da8ffadf/packages/vaex-core/vaex/formatting.py\r\n\r\nIntroduced in https://github.com/vaexio/vaex/pull/192\r\n\r\nWe should not depend on pandas, it is not a dependency of vaex-core and should not become, we might also grow to large to run on AWS Lambda.\n", "before_files": [{"content": "import numpy as np\nimport numbers\nimport six\nimport pandas as pd\n\n\nMAX_LENGTH = 50\n\n\ndef _format_value(value):\n if isinstance(value, six.string_types):\n value = str(value)\n elif isinstance(value, bytes):\n value = repr(value)\n elif isinstance(value, np.ma.core.MaskedConstant):\n value = str(value)\n if isinstance(value, np.datetime64):\n value = str(pd.to_datetime(value))\n if isinstance(value, np.timedelta64):\n value = str(pd.to_timedelta(value))\n elif not isinstance(value, numbers.Number):\n value = str(value)\n if isinstance(value, float):\n value = repr(value)\n if isinstance(value, (str, bytes)):\n if len(value) > MAX_LENGTH:\n value = repr(value[:MAX_LENGTH-3])[:-1] + '...'\n return value\n", "path": "packages/vaex-core/vaex/formatting.py"}]} | 905 | 388 |
gh_patches_debug_8623 | rasdani/github-patches | git_diff | archlinux__archinstall-262 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Awesome profile installation failed with no such file or directory on xdg-mime


Resolve #261 and related issues
Closes #262.
🚨 PR Guidelines:
# New features *(v2.2.0)*
Merge new features into `torxed-v2.2.0`.<br>
This branch is designated for potential breaking changes, added complexity and new functionality.
# Bug fixes *(v2.1.4)*
Merge against `master` for bug fixes and anything that improves stability and quality of life.<br>
This excludes:
* New functionality
* Added complexity
* Breaking changes
Any changes to `master` automatically gets pulled in to `torxed-v2.2.0` to avoid merge hell.
# Describe your PR
If the changes have been discussed in an Issue, please tag it so we can backtrace from the Issue later on.<br>
If the PR is larger than ~20 lines, please describe it here unless described in an issue.
# Testing
Any new feature or stability improvement should be tested if possible.
Please follow the test instructions at the bottom of the README.
*These PR guidelines will change after 2021-05-01, which is when `v2.1.4` gets onto the new ISO*
</issue>
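The failure in the screenshots is `xdg-mime` not being found while the awesome profile runs its post-install steps; on Arch that binary is shipped by the `xdg-utils` package, so the direction of the fix (visible in the diff further down in this record) is simply to add it to the shared desktop package list, roughly:

```python
# profiles/desktop.py -- packages installed for every desktop profile,
# with xdg-utils added so xdg-mime exists when the awesome profile needs it.
__packages__ = [
    'nano', 'vim', 'openssh', 'htop', 'wget', 'iwd',
    'wireless_tools', 'wpa_supplicant', 'smartmontools',
    'xdg-utils',
]
```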
<code>
[start of profiles/desktop.py]
1 # A desktop environment selector.
2
3 import archinstall, os
4
5 is_top_level_profile = True
6
7 # New way of defining packages for a profile, which is iterable and can be used out side
8 # of the profile to get a list of "what packages will be installed".
9 __packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']
10
11 def _prep_function(*args, **kwargs):
12 """
13 Magic function called by the importing installer
14 before continuing any further. It also avoids executing any
15 other code in this stage. So it's a safe way to ask the user
16 for more input before any other installer steps start.
17 """
18
19 supported_desktops = ['gnome', 'kde', 'awesome', 'sway', 'cinnamon', 'xfce4', 'lxqt', 'i3', 'budgie']
20 desktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')
21
22 # Temporarily store the selected desktop profile
23 # in a session-safe location, since this module will get reloaded
24 # the next time it gets executed.
25 archinstall.storage['_desktop_profile'] = desktop
26
27 profile = archinstall.Profile(None, desktop)
28 # Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.
29 with profile.load_instructions(namespace=f"{desktop}.py") as imported:
30 if hasattr(imported, '_prep_function'):
31 return imported._prep_function()
32 else:
33 print(f"Deprecated (??): {desktop} profile has no _prep_function() anymore")
34
35 if __name__ == 'desktop':
36 """
37 This "profile" is a meta-profile.
38 There are no desktop-specific steps, it simply routes
39 the installer to whichever desktop environment/window manager was chosen.
40
41 Maybe in the future, a network manager or similar things *could* be added here.
42 We should honor that Arch Linux does not officially endorse a desktop-setup, nor is
43 it trying to be a turn-key desktop distribution.
44
45 There are plenty of desktop-turn-key-solutions based on Arch Linux,
46 this is therefore just a helper to get started
47 """
48
49 # Install common packages for all desktop environments
50 installation.add_additional_packages(__packages__)
51
52 # TODO: Remove magic variable 'installation' and place it
53 # in archinstall.storage or archinstall.session/archinstall.installation
54 installation.install_profile(archinstall.storage['_desktop_profile'])
55
56
[end of profiles/desktop.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/profiles/desktop.py b/profiles/desktop.py
--- a/profiles/desktop.py
+++ b/profiles/desktop.py
@@ -6,7 +6,7 @@
# New way of defining packages for a profile, which is iterable and can be used out side
# of the profile to get a list of "what packages will be installed".
-__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']
+__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools', 'xdg-utils']
def _prep_function(*args, **kwargs):
"""
| {"golden_diff": "diff --git a/profiles/desktop.py b/profiles/desktop.py\n--- a/profiles/desktop.py\n+++ b/profiles/desktop.py\n@@ -6,7 +6,7 @@\n \n # New way of defining packages for a profile, which is iterable and can be used out side\n # of the profile to get a list of \"what packages will be installed\".\n-__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']\n+__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools', 'xdg-utils']\n \n def _prep_function(*args, **kwargs):\n \t\"\"\"\n", "issue": "Awesome profile installation failed with no such file or directory on xdg-mime\n\r\n\r\n\nResolve #261 and related issues\nCloses #262.\r\n\r\n\ud83d\udea8 PR Guidelines:\r\n\r\n# New features *(v2.2.0)*\r\n\r\nMerge new features in to `torxed-v2.2.0`.<br>\r\nThis branch is designated for potential breaking changes, added complexity and new functionality.\r\n\r\n# Bug fixes *(v2.1.4)*\r\n\r\nMerge against `master` for bug fixes and anything that improves stability and quality of life.<br>\r\nThis excludes:\r\n * New functionality\r\n * Added complexity\r\n * Breaking changes\r\n\r\nAny changes to `master` automatically gets pulled in to `torxed-v2.2.0` to avoid merge hell.\r\n\r\n# Describe your PR\r\n\r\nIf the changes has been discussed in an Issue, please tag it so we can backtrace from the Issue later on.<br>\r\nIf the PR is larger than ~20 lines, please describe it here unless described in an issue.\r\n\r\n# Testing\r\n\r\nAny new feature or stability improvement should be tested if possible.\r\nPlease follow the test instructions at the bottom of the README.\r\n\r\n*These PR guidelines will change after 2021-05-01, which is when `v2.1.4` gets onto the new ISO*\r\n\n", "before_files": [{"content": "# A desktop environment selector.\n\nimport archinstall, os\n\nis_top_level_profile = True\n\n# New way of defining packages for a profile, which is iterable and can be used out side\n# of the profile to get a list of \"what packages will be installed\".\n__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tsupported_desktops = ['gnome', 'kde', 'awesome', 'sway', 'cinnamon', 'xfce4', 'lxqt', 'i3', 'budgie']\n\tdesktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')\n\t\n\t# Temporarily store the selected desktop profile\n\t# in a session-safe location, since this module will get reloaded\n\t# the next time it gets executed.\n\tarchinstall.storage['_desktop_profile'] = desktop\n\n\tprofile = archinstall.Profile(None, desktop)\n\t# Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.\n\twith profile.load_instructions(namespace=f\"{desktop}.py\") as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint(f\"Deprecated (??): {desktop} profile has no _prep_function() anymore\")\n\nif __name__ == 'desktop':\n\t\"\"\"\n\tThis \"profile\" is a meta-profile.\n\tThere are no desktop-specific steps, it simply routes\n\tthe installer to whichever desktop environment/window manager was chosen.\n\n\tMaybe in the future, a network manager or similar things *could* be added here.\n\tWe should honor that Arch Linux does not officially endorse a desktop-setup, nor is\n\tit trying to be a turn-key desktop distribution.\n\n\tThere are plenty of desktop-turn-key-solutions based on Arch Linux,\n\tthis is therefore just a helper to get started\n\t\"\"\"\n\t\n\t# Install common packages for all desktop environments\n\tinstallation.add_additional_packages(__packages__)\n\n\t# TODO: Remove magic variable 'installation' and place it\n\t# in archinstall.storage or archinstall.session/archinstall.installation\n\tinstallation.install_profile(archinstall.storage['_desktop_profile'])\n\n", "path": "profiles/desktop.py"}]} | 1,642 | 179 |
gh_patches_debug_31470 | rasdani/github-patches | git_diff | cowrie__cowrie-1093 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
greynoise should catch timeout error
```2019-04-08T03:12:05.460833Z [twisted.internet.defer#critical] Unhandled error in Deferred:
2019-04-08T03:12:05.462257Z [twisted.internet.defer#critical]
Traceback (most recent call last):
--- <exception caught here> ---
File "/home/cowrie/cowrie/src/cowrie/output/greynoise.py", line 65, in scanip
headers=headers)
twisted.internet.error.TimeoutError: User timeout caused connection failure.
```
</issue>
<code>
[start of src/cowrie/output/greynoise.py]
1 """
2 Send attackers IP to GreyNoise
3 """
4
5 from __future__ import absolute_import, division
6
7 import treq
8
9 from twisted.internet import defer
10 from twisted.python import log
11
12 import cowrie.core.output
13 from cowrie.core.config import CONFIG
14
15 COWRIE_USER_AGENT = 'Cowrie Honeypot'
16 GNAPI_URL = 'http://api.greynoise.io:8888/v1/'
17
18
19 class Output(cowrie.core.output.Output):
20
21 def __init__(self):
22 self.apiKey = CONFIG.get('output_greynoise', 'api_key', fallback=None)
23 self.tags = CONFIG.get('output_greynoise', 'tags', fallback="all").split(",")
24 self.debug = CONFIG.getboolean('output_greynoise', 'debug', fallback=False)
25 cowrie.core.output.Output.__init__(self)
26
27 def start(self):
28 """
29 Start output plugin
30 """
31
32 def stop(self):
33 """
34 Stop output plugin
35 """
36 pass
37
38 def write(self, entry):
39 if entry['eventid'] == "cowrie.session.connect":
40 self.scanip(entry)
41
42 @defer.inlineCallbacks
43 def scanip(self, entry):
44 """
45 Scan IP againt Greynoise API
46 """
47 def message(query):
48 log.msg(
49 eventid='cowrie.greynoise.result',
50 format='greynoise: Scan for %(IP)s with %(tag)s have %(conf)s confidence'
51 ' along with the following %(meta)s metadata',
52 IP=entry['src_ip'],
53 tag=query['name'],
54 conf=query['confidence'],
55 meta=query['metadata']
56 )
57
58 gnUrl = '{0}query/ip'.format(GNAPI_URL).encode('utf8')
59 headers = ({'User-Agent': [COWRIE_USER_AGENT]})
60 fields = {'key': self.apiKey, 'ip': entry['src_ip']}
61
62 response = yield treq.post(
63 url=gnUrl,
64 data=fields,
65 headers=headers)
66
67 if response.code != 200:
68 rsp = yield response.text()
69 log.error("greynoise: got error {}".format(rsp))
70 return
71
72 j = yield response.json()
73 if self.debug:
74 log.msg("greynoise: debug: "+repr(j))
75 if j['status'] == "ok":
76 if "all" not in self.tags:
77 for query in j['records']:
78 if query['name'] in self.tags:
79 message(query)
80 else:
81 for query in j['records']:
82 message(query)
83 else:
84 log.msg("greynoise: no results for for IP {0}".format(entry['src_ip']))
85
[end of src/cowrie/output/greynoise.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cowrie/output/greynoise.py b/src/cowrie/output/greynoise.py
--- a/src/cowrie/output/greynoise.py
+++ b/src/cowrie/output/greynoise.py
@@ -6,7 +6,7 @@
import treq
-from twisted.internet import defer
+from twisted.internet import defer, error
from twisted.python import log
import cowrie.core.output
@@ -59,10 +59,15 @@
headers = ({'User-Agent': [COWRIE_USER_AGENT]})
fields = {'key': self.apiKey, 'ip': entry['src_ip']}
- response = yield treq.post(
- url=gnUrl,
- data=fields,
- headers=headers)
+ try:
+ response = yield treq.post(
+ url=gnUrl,
+ data=fields,
+ headers=headers,
+ timeout=10)
+ except (defer.CancelledError, error.ConnectingCancelledError, error.DNSLookupError):
+ log.msg("GreyNoise requests timeout")
+ return
if response.code != 200:
rsp = yield response.text()
@@ -72,13 +77,14 @@
j = yield response.json()
if self.debug:
log.msg("greynoise: debug: "+repr(j))
- if j['status'] == "ok":
- if "all" not in self.tags:
- for query in j['records']:
- if query['name'] in self.tags:
- message(query)
- else:
- for query in j['records']:
+
+ if j['status'] == "ok":
+ if "all" not in self.tags:
+ for query in j['records']:
+ if query['name'] in self.tags:
message(query)
else:
- log.msg("greynoise: no results for for IP {0}".format(entry['src_ip']))
+ for query in j['records']:
+ message(query)
+ else:
+ log.msg("greynoise: no results for for IP {0}".format(entry['src_ip']))
| {"golden_diff": "diff --git a/src/cowrie/output/greynoise.py b/src/cowrie/output/greynoise.py\n--- a/src/cowrie/output/greynoise.py\n+++ b/src/cowrie/output/greynoise.py\n@@ -6,7 +6,7 @@\n \n import treq\n \n-from twisted.internet import defer\n+from twisted.internet import defer, error\n from twisted.python import log\n \n import cowrie.core.output\n@@ -59,10 +59,15 @@\n headers = ({'User-Agent': [COWRIE_USER_AGENT]})\n fields = {'key': self.apiKey, 'ip': entry['src_ip']}\n \n- response = yield treq.post(\n- url=gnUrl,\n- data=fields,\n- headers=headers)\n+ try:\n+ response = yield treq.post(\n+ url=gnUrl,\n+ data=fields,\n+ headers=headers,\n+ timeout=10)\n+ except (defer.CancelledError, error.ConnectingCancelledError, error.DNSLookupError):\n+ log.msg(\"GreyNoise requests timeout\")\n+ return\n \n if response.code != 200:\n rsp = yield response.text()\n@@ -72,13 +77,14 @@\n j = yield response.json()\n if self.debug:\n log.msg(\"greynoise: debug: \"+repr(j))\n- if j['status'] == \"ok\":\n- if \"all\" not in self.tags:\n- for query in j['records']:\n- if query['name'] in self.tags:\n- message(query)\n- else:\n- for query in j['records']:\n+\n+ if j['status'] == \"ok\":\n+ if \"all\" not in self.tags:\n+ for query in j['records']:\n+ if query['name'] in self.tags:\n message(query)\n else:\n- log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n+ for query in j['records']:\n+ message(query)\n+ else:\n+ log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n", "issue": "greynoise should catch timeout error\n```2019-04-08T03:12:05.460833Z [twisted.internet.defer#critical] Unhandled error in Deferred:\r\n2019-04-08T03:12:05.462257Z [twisted.internet.defer#critical]\r\n Traceback (most recent call last):\r\n --- <exception caught here> ---\r\n File \"/home/cowrie/cowrie/src/cowrie/output/greynoise.py\", line 65, in scanip\r\n headers=headers)\r\n twisted.internet.error.TimeoutError: User timeout caused connection failure.\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nSend attackers IP to GreyNoise\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport treq\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CONFIG\n\nCOWRIE_USER_AGENT = 'Cowrie Honeypot'\nGNAPI_URL = 'http://api.greynoise.io:8888/v1/'\n\n\nclass Output(cowrie.core.output.Output):\n\n def __init__(self):\n self.apiKey = CONFIG.get('output_greynoise', 'api_key', fallback=None)\n self.tags = CONFIG.get('output_greynoise', 'tags', fallback=\"all\").split(\",\")\n self.debug = CONFIG.getboolean('output_greynoise', 'debug', fallback=False)\n cowrie.core.output.Output.__init__(self)\n\n def start(self):\n \"\"\"\n Start output plugin\n \"\"\"\n\n def stop(self):\n \"\"\"\n Stop output plugin\n \"\"\"\n pass\n\n def write(self, entry):\n if entry['eventid'] == \"cowrie.session.connect\":\n self.scanip(entry)\n\n @defer.inlineCallbacks\n def scanip(self, entry):\n \"\"\"\n Scan IP againt Greynoise API\n \"\"\"\n def message(query):\n log.msg(\n eventid='cowrie.greynoise.result',\n format='greynoise: Scan for %(IP)s with %(tag)s have %(conf)s confidence'\n ' along with the following %(meta)s metadata',\n IP=entry['src_ip'],\n tag=query['name'],\n conf=query['confidence'],\n meta=query['metadata']\n )\n\n gnUrl = '{0}query/ip'.format(GNAPI_URL).encode('utf8')\n headers = ({'User-Agent': [COWRIE_USER_AGENT]})\n fields = {'key': self.apiKey, 'ip': entry['src_ip']}\n\n response = yield treq.post(\n 
url=gnUrl,\n data=fields,\n headers=headers)\n\n if response.code != 200:\n rsp = yield response.text()\n log.error(\"greynoise: got error {}\".format(rsp))\n return\n\n j = yield response.json()\n if self.debug:\n log.msg(\"greynoise: debug: \"+repr(j))\n if j['status'] == \"ok\":\n if \"all\" not in self.tags:\n for query in j['records']:\n if query['name'] in self.tags:\n message(query)\n else:\n for query in j['records']:\n message(query)\n else:\n log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n", "path": "src/cowrie/output/greynoise.py"}]} | 1,440 | 475 |
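The core of the fix in the record above is giving the outbound request a timeout and catching connection-level failures so they cannot surface as unhandled errors in a Deferred. A stripped-down sketch of that guarded call follows; the URL, fields, and log text are placeholders, and only the exception tuple and the `timeout` argument mirror the patch:

```python
import treq
from twisted.internet import defer, error
from twisted.python import log


@defer.inlineCallbacks
def post_with_guard(url, fields, headers):
    try:
        response = yield treq.post(url=url, data=fields, headers=headers,
                                   timeout=10)
    except (defer.CancelledError, error.ConnectingCancelledError,
            error.DNSLookupError):
        # A timeout cancels the pending connection attempt, so it lands here
        # instead of propagating out of the Deferred unhandled.
        log.msg("request timed out or could not connect")
        return
    if response.code != 200:
        text = yield response.text()
        log.msg("got error {}".format(text))
        return
    payload = yield response.json()
    defer.returnValue(payload)
```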
gh_patches_debug_3627 | rasdani/github-patches | git_diff | ethereum__web3.py-912 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade to or add support for websockets v5
### What was wrong?
We are currently using the `websockets` library's v4 line. The v5 line is out.
### How can it be fixed?
Look into adding support for both v4 and v5.
If this is too cumbersome, we can simply upgrade to requiring `>=v5`
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from setuptools import (
4 find_packages,
5 setup,
6 )
7
8
9 setup(
10 name='web3',
11 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
12 version='4.3.0',
13 description="""Web3.py""",
14 long_description_markdown_filename='README.md',
15 author='Piper Merriam',
16 author_email='[email protected]',
17 url='https://github.com/ethereum/web3.py',
18 include_package_data=True,
19 install_requires=[
20 "toolz>=0.9.0,<1.0.0;implementation_name=='pypy'",
21 "cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'",
22 "eth-abi>=1.1.1,<2",
23 "eth-account>=0.2.1,<0.3.0",
24 "eth-utils>=1.0.1,<2.0.0",
25 "hexbytes>=0.1.0,<1.0.0",
26 "lru-dict>=1.1.6,<2.0.0",
27 "eth-hash[pycryptodome]",
28 "requests>=2.16.0,<3.0.0",
29 "websockets>=4.0.1,<5.0.0",
30 "pypiwin32>=223;platform_system=='Windows'",
31 ],
32 setup_requires=['setuptools-markdown'],
33 python_requires='>=3.5, <4',
34 extras_require={
35 'tester': [
36 "eth-tester[py-evm]==0.1.0-beta.26",
37 "py-geth>=2.0.1,<3.0.0",
38 ],
39 'testrpc': ["eth-testrpc>=1.3.3,<2.0.0"],
40 'linter': [
41 "flake8==3.4.1",
42 "isort>=4.2.15,<5",
43 ],
44 },
45 py_modules=['web3', 'ens'],
46 license="MIT",
47 zip_safe=False,
48 keywords='ethereum',
49 packages=find_packages(exclude=["tests", "tests.*"]),
50 classifiers=[
51 'Development Status :: 5 - Production/Stable',
52 'Intended Audience :: Developers',
53 'License :: OSI Approved :: MIT License',
54 'Natural Language :: English',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.5',
57 'Programming Language :: Python :: 3.6',
58 ],
59 )
60
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@
"lru-dict>=1.1.6,<2.0.0",
"eth-hash[pycryptodome]",
"requests>=2.16.0,<3.0.0",
- "websockets>=4.0.1,<5.0.0",
+ "websockets>=5.0.1,<6.0.0",
"pypiwin32>=223;platform_system=='Windows'",
],
setup_requires=['setuptools-markdown'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n- \"websockets>=4.0.1,<5.0.0\",\n+ \"websockets>=5.0.1,<6.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n", "issue": "Upgrade to or add support for websockets v5\n### What was wrong?\r\n\r\nWe are currently using the `websockets` library's v4 line. The v5 line is out.\r\n\r\n### How can it be fixed?\r\n\r\nLook into adding support for both v4 and v5.\r\n\r\nIf this is too cumbersome, we can simply upgrade to requiring `>=v5`\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='4.3.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"toolz>=0.9.0,<1.0.0;implementation_name=='pypy'\",\n \"cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'\",\n \"eth-abi>=1.1.1,<2\",\n \"eth-account>=0.2.1,<0.3.0\",\n \"eth-utils>=1.0.1,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n \"websockets>=4.0.1,<5.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5, <4',\n extras_require={\n 'tester': [\n \"eth-tester[py-evm]==0.1.0-beta.26\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n 'linter': [\n \"flake8==3.4.1\",\n \"isort>=4.2.15,<5\",\n ],\n },\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]} | 1,285 | 143 |
gh_patches_debug_30860 | rasdani/github-patches | git_diff | TheAlgorithms__Python-2032 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mergesort Update Variable Names
I was looking over the mergesort.py file in the divide_and_conquer directory when I saw that all of the variable names are a single letter and there is not much documentation. Does anyone know enough about this file to improve the variable names and make the code more understandable?
</issue>
<code>
[start of divide_and_conquer/mergesort.py]
1 def merge(a, b, m, e):
2 l = a[b : m + 1] # noqa: E741
3 r = a[m + 1 : e + 1]
4 k = b
5 i = 0
6 j = 0
7 while i < len(l) and j < len(r):
8 # change sign for Descending order
9 if l[i] < r[j]:
10 a[k] = l[i]
11 i += 1
12 else:
13 a[k] = r[j]
14 j += 1
15 k += 1
16 while i < len(l):
17 a[k] = l[i]
18 i += 1
19 k += 1
20 while j < len(r):
21 a[k] = r[j]
22 j += 1
23 k += 1
24 return a
25
26
27 def mergesort(a, b, e):
28 """
29 >>> mergesort([3,2,1],0,2)
30 [1, 2, 3]
31 >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)
32 [0, 1, 1, 2, 2, 3, 3, 4, 5]
33 """
34 if b < e:
35 m = (b + e) // 2
36 # print("ms1",a,b,m)
37 mergesort(a, b, m)
38 # print("ms2",a,m+1,e)
39 mergesort(a, m + 1, e)
40 # print("m",a,b,m,e)
41 merge(a, b, m, e)
42 return a
43
44
45 if __name__ == "__main__":
46 import doctest
47
48 doctest.testmod()
49
[end of divide_and_conquer/mergesort.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/divide_and_conquer/mergesort.py b/divide_and_conquer/mergesort.py
--- a/divide_and_conquer/mergesort.py
+++ b/divide_and_conquer/mergesort.py
@@ -1,45 +1,48 @@
-def merge(a, b, m, e):
- l = a[b : m + 1] # noqa: E741
- r = a[m + 1 : e + 1]
- k = b
+def merge(arr, left, mid, right):
+ # overall array will divided into 2 array
+ # left_arr contains the left portion of array from left to mid
+ # right_arr contains the right portion of array from mid + 1 to right
+ left_arr = arr[left : mid + 1]
+ right_arr = arr[mid + 1 : right + 1]
+ k = left
i = 0
j = 0
- while i < len(l) and j < len(r):
+ while i < len(left_arr) and j < len(right_arr):
# change sign for Descending order
- if l[i] < r[j]:
- a[k] = l[i]
+ if left_arr[i] < right_arr[j]:
+ arr[k] = left_arr[i]
i += 1
else:
- a[k] = r[j]
+ arr[k] = right_arr[j]
j += 1
k += 1
- while i < len(l):
- a[k] = l[i]
+ while i < len(left_arr):
+ arr[k] = left_arr[i]
i += 1
k += 1
- while j < len(r):
- a[k] = r[j]
+ while j < len(right_arr):
+ arr[k] = right_arr[j]
j += 1
k += 1
- return a
+ return arr
-def mergesort(a, b, e):
+def mergesort(arr, left, right):
"""
- >>> mergesort([3,2,1],0,2)
+ >>> mergesort([3, 2, 1], 0, 2)
[1, 2, 3]
- >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)
+ >>> mergesort([3, 2, 1, 0, 1, 2, 3, 5, 4], 0, 8)
[0, 1, 1, 2, 2, 3, 3, 4, 5]
"""
- if b < e:
- m = (b + e) // 2
+ if left < right:
+ mid = (left + right) // 2
# print("ms1",a,b,m)
- mergesort(a, b, m)
+ mergesort(arr, left, mid)
# print("ms2",a,m+1,e)
- mergesort(a, m + 1, e)
+ mergesort(arr, mid + 1, right)
# print("m",a,b,m,e)
- merge(a, b, m, e)
- return a
+ merge(arr, left, mid, right)
+ return arr
if __name__ == "__main__":
| {"golden_diff": "diff --git a/divide_and_conquer/mergesort.py b/divide_and_conquer/mergesort.py\n--- a/divide_and_conquer/mergesort.py\n+++ b/divide_and_conquer/mergesort.py\n@@ -1,45 +1,48 @@\n-def merge(a, b, m, e):\n- l = a[b : m + 1] # noqa: E741\n- r = a[m + 1 : e + 1]\n- k = b\n+def merge(arr, left, mid, right):\n+ # overall array will divided into 2 array\n+ # left_arr contains the left portion of array from left to mid\n+ # right_arr contains the right portion of array from mid + 1 to right\n+ left_arr = arr[left : mid + 1]\n+ right_arr = arr[mid + 1 : right + 1]\n+ k = left\n i = 0\n j = 0\n- while i < len(l) and j < len(r):\n+ while i < len(left_arr) and j < len(right_arr):\n # change sign for Descending order\n- if l[i] < r[j]:\n- a[k] = l[i]\n+ if left_arr[i] < right_arr[j]:\n+ arr[k] = left_arr[i]\n i += 1\n else:\n- a[k] = r[j]\n+ arr[k] = right_arr[j]\n j += 1\n k += 1\n- while i < len(l):\n- a[k] = l[i]\n+ while i < len(left_arr):\n+ arr[k] = left_arr[i]\n i += 1\n k += 1\n- while j < len(r):\n- a[k] = r[j]\n+ while j < len(right_arr):\n+ arr[k] = right_arr[j]\n j += 1\n k += 1\n- return a\n+ return arr\n \n \n-def mergesort(a, b, e):\n+def mergesort(arr, left, right):\n \"\"\"\n- >>> mergesort([3,2,1],0,2)\n+ >>> mergesort([3, 2, 1], 0, 2)\n [1, 2, 3]\n- >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)\n+ >>> mergesort([3, 2, 1, 0, 1, 2, 3, 5, 4], 0, 8)\n [0, 1, 1, 2, 2, 3, 3, 4, 5]\n \"\"\"\n- if b < e:\n- m = (b + e) // 2\n+ if left < right:\n+ mid = (left + right) // 2\n # print(\"ms1\",a,b,m)\n- mergesort(a, b, m)\n+ mergesort(arr, left, mid)\n # print(\"ms2\",a,m+1,e)\n- mergesort(a, m + 1, e)\n+ mergesort(arr, mid + 1, right)\n # print(\"m\",a,b,m,e)\n- merge(a, b, m, e)\n- return a\n+ merge(arr, left, mid, right)\n+ return arr\n \n \n if __name__ == \"__main__\":\n", "issue": "Mergesort Update Variable Names\nI was looking over the mergesort.py file in the divide_and_conquer directory when I saw that all of the variable names are a single letter and there is not much documentation. Does anyone know enough about this file to improve the variable names and make the code more understandable?\n", "before_files": [{"content": "def merge(a, b, m, e):\n l = a[b : m + 1] # noqa: E741\n r = a[m + 1 : e + 1]\n k = b\n i = 0\n j = 0\n while i < len(l) and j < len(r):\n # change sign for Descending order\n if l[i] < r[j]:\n a[k] = l[i]\n i += 1\n else:\n a[k] = r[j]\n j += 1\n k += 1\n while i < len(l):\n a[k] = l[i]\n i += 1\n k += 1\n while j < len(r):\n a[k] = r[j]\n j += 1\n k += 1\n return a\n\n\ndef mergesort(a, b, e):\n \"\"\"\n >>> mergesort([3,2,1],0,2)\n [1, 2, 3]\n >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)\n [0, 1, 1, 2, 2, 3, 3, 4, 5]\n \"\"\"\n if b < e:\n m = (b + e) // 2\n # print(\"ms1\",a,b,m)\n mergesort(a, b, m)\n # print(\"ms2\",a,m+1,e)\n mergesort(a, m + 1, e)\n # print(\"m\",a,b,m,e)\n merge(a, b, m, e)\n return a\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "divide_and_conquer/mergesort.py"}]} | 1,088 | 774 |
gh_patches_debug_13420 | rasdani/github-patches | git_diff | comic__grand-challenge.org-786 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Partial update not working correctly because of custom validation
The validation rule in `app/grandchallenge/annotations/serializers.py:31` is breaking the partial update functionality.
If you try to do a partial update PATCH request to the endpoint, it will try to find the `annotation_set` attribute in the request data. If this is not present it will throw a KeyError.
This should be fixed by first checking if the key exists in the request data and only then running the validation check. The validation check is not needed if the key does not exist, because the value will then either not change (for a partial update request) or trigger a `field is required` validation error (for every other type of request).
I will fix this and add a test for it.
</issue>
<code>
[start of app/grandchallenge/annotations/serializers.py]
1 from rest_framework import serializers
2
3 from .models import (
4 ETDRSGridAnnotation,
5 MeasurementAnnotation,
6 BooleanClassificationAnnotation,
7 PolygonAnnotationSet,
8 SinglePolygonAnnotation,
9 LandmarkAnnotationSet,
10 SingleLandmarkAnnotation,
11 )
12 from .validators import validate_grader_is_current_retina_user
13
14
15 class AbstractAnnotationSerializer(serializers.ModelSerializer):
16 def validate_grader(self, value):
17 """
18 Validate that grader is the user creating the object for retina_graders group
19 """
20 validate_grader_is_current_retina_user(value, self.context)
21 return value
22
23 class Meta:
24 abstract = True
25
26
27 class AbstractSingleAnnotationSerializer(serializers.ModelSerializer):
28 def validate(self, data):
29 """
30 Validate that the user that is creating this object equals the annotation_set.grader for retina_graders
31 """
32 validate_grader_is_current_retina_user(
33 data["annotation_set"].grader, self.context
34 )
35 return data
36
37 class Meta:
38 abstract = True
39
40
41 class ETDRSGridAnnotationSerializer(AbstractAnnotationSerializer):
42 class Meta:
43 model = ETDRSGridAnnotation
44 fields = ("grader", "created", "image", "fovea", "optic_disk")
45
46
47 class MeasurementAnnotationSerializer(AbstractAnnotationSerializer):
48 class Meta:
49 model = MeasurementAnnotation
50 fields = ("image", "grader", "created", "start_voxel", "end_voxel")
51
52
53 class BooleanClassificationAnnotationSerializer(AbstractAnnotationSerializer):
54 class Meta:
55 model = BooleanClassificationAnnotation
56 fields = ("image", "grader", "created", "name", "value")
57
58
59 class SinglePolygonAnnotationSerializer(AbstractSingleAnnotationSerializer):
60 annotation_set = serializers.PrimaryKeyRelatedField(
61 queryset=PolygonAnnotationSet.objects.all()
62 )
63
64 class Meta:
65 model = SinglePolygonAnnotation
66 fields = ("id", "value", "annotation_set")
67
68
69 class PolygonAnnotationSetSerializer(AbstractAnnotationSerializer):
70 singlepolygonannotation_set = SinglePolygonAnnotationSerializer(
71 many=True, read_only=True
72 )
73
74 class Meta:
75 model = PolygonAnnotationSet
76 fields = (
77 "id",
78 "image",
79 "grader",
80 "created",
81 "name",
82 "singlepolygonannotation_set",
83 )
84
85
86 class LandmarkAnnotationSetSerializer(AbstractAnnotationSerializer):
87 class Meta:
88 model = LandmarkAnnotationSet
89 fields = ("grader", "created")
90
91
92 class SingleLandmarkAnnotationSerializer(AbstractSingleAnnotationSerializer):
93 class Meta:
94 model = SingleLandmarkAnnotation
95 fields = ("image", "annotation_set", "landmarks")
96
[end of app/grandchallenge/annotations/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/annotations/serializers.py b/app/grandchallenge/annotations/serializers.py
--- a/app/grandchallenge/annotations/serializers.py
+++ b/app/grandchallenge/annotations/serializers.py
@@ -27,11 +27,14 @@
class AbstractSingleAnnotationSerializer(serializers.ModelSerializer):
def validate(self, data):
"""
- Validate that the user that is creating this object equals the annotation_set.grader for retina_graders
+ Validate that the user that is creating this object equals the
+ annotation_set.grader for retina_graders
"""
- validate_grader_is_current_retina_user(
- data["annotation_set"].grader, self.context
- )
+ if data.get("annotation_set") is None:
+ return data
+
+ grader = data["annotation_set"].grader
+ validate_grader_is_current_retina_user(grader, self.context)
return data
class Meta:
| {"golden_diff": "diff --git a/app/grandchallenge/annotations/serializers.py b/app/grandchallenge/annotations/serializers.py\n--- a/app/grandchallenge/annotations/serializers.py\n+++ b/app/grandchallenge/annotations/serializers.py\n@@ -27,11 +27,14 @@\n class AbstractSingleAnnotationSerializer(serializers.ModelSerializer):\n def validate(self, data):\n \"\"\"\n- Validate that the user that is creating this object equals the annotation_set.grader for retina_graders\n+ Validate that the user that is creating this object equals the\n+ annotation_set.grader for retina_graders\n \"\"\"\n- validate_grader_is_current_retina_user(\n- data[\"annotation_set\"].grader, self.context\n- )\n+ if data.get(\"annotation_set\") is None:\n+ return data\n+\n+ grader = data[\"annotation_set\"].grader\n+ validate_grader_is_current_retina_user(grader, self.context)\n return data\n \n class Meta:\n", "issue": "Partial update not working correctly because of custom validation\nThe validation rule in `app/grandchallenge/annotations/serializers.py:31` is breaking the partial update functionality.\r\nIf you try to do a partial update PATCH request to the endpoint, it will try to find the `annotation_set` attribute in the request data. If this is not present it will throw a KeyError. \r\n\r\nThis should be fixed by first checking if the key exists in the request data and only then running the validation check. The validation check is not needed if the key does not exist because it will then either not change (for partial update request) or throw a `field is required` validation error (for every other type of request).\r\n\r\nI will fix this and add a test for it.\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom .models import (\n ETDRSGridAnnotation,\n MeasurementAnnotation,\n BooleanClassificationAnnotation,\n PolygonAnnotationSet,\n SinglePolygonAnnotation,\n LandmarkAnnotationSet,\n SingleLandmarkAnnotation,\n)\nfrom .validators import validate_grader_is_current_retina_user\n\n\nclass AbstractAnnotationSerializer(serializers.ModelSerializer):\n def validate_grader(self, value):\n \"\"\"\n Validate that grader is the user creating the object for retina_graders group\n \"\"\"\n validate_grader_is_current_retina_user(value, self.context)\n return value\n\n class Meta:\n abstract = True\n\n\nclass AbstractSingleAnnotationSerializer(serializers.ModelSerializer):\n def validate(self, data):\n \"\"\"\n Validate that the user that is creating this object equals the annotation_set.grader for retina_graders\n \"\"\"\n validate_grader_is_current_retina_user(\n data[\"annotation_set\"].grader, self.context\n )\n return data\n\n class Meta:\n abstract = True\n\n\nclass ETDRSGridAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = ETDRSGridAnnotation\n fields = (\"grader\", \"created\", \"image\", \"fovea\", \"optic_disk\")\n\n\nclass MeasurementAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = MeasurementAnnotation\n fields = (\"image\", \"grader\", \"created\", \"start_voxel\", \"end_voxel\")\n\n\nclass BooleanClassificationAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = BooleanClassificationAnnotation\n fields = (\"image\", \"grader\", \"created\", \"name\", \"value\")\n\n\nclass SinglePolygonAnnotationSerializer(AbstractSingleAnnotationSerializer):\n annotation_set = serializers.PrimaryKeyRelatedField(\n queryset=PolygonAnnotationSet.objects.all()\n )\n\n class Meta:\n model = SinglePolygonAnnotation\n fields = 
(\"id\", \"value\", \"annotation_set\")\n\n\nclass PolygonAnnotationSetSerializer(AbstractAnnotationSerializer):\n singlepolygonannotation_set = SinglePolygonAnnotationSerializer(\n many=True, read_only=True\n )\n\n class Meta:\n model = PolygonAnnotationSet\n fields = (\n \"id\",\n \"image\",\n \"grader\",\n \"created\",\n \"name\",\n \"singlepolygonannotation_set\",\n )\n\n\nclass LandmarkAnnotationSetSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = LandmarkAnnotationSet\n fields = (\"grader\", \"created\")\n\n\nclass SingleLandmarkAnnotationSerializer(AbstractSingleAnnotationSerializer):\n class Meta:\n model = SingleLandmarkAnnotation\n fields = (\"image\", \"annotation_set\", \"landmarks\")\n", "path": "app/grandchallenge/annotations/serializers.py"}]} | 1,436 | 215 |
gh_patches_debug_21085 | rasdani/github-patches | git_diff | google__flax-541 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PPO example does not terminate properly
### Configuration
Running the PPO example for a small number of frames in order to reproduce as fast as possible on a cloud VM with a V100 GPU. Config: python 3.7, flax 0.2.2, jax 0.2.1, jaxlib 0.1.55.
Command run:
`python ppo_main.py --config.game=Qbert --config.total_frames=4000`
### Problem you have encountered:
The program does not exit. One can `print('Done')` after `ppo_lib.train` in `ppo_main`, but there is an open thread and the program can't exit (even after adding `raise SystemExit`).
### Extra comments
Added an extra line in `main`, ` tf.config.experimental.set_visible_devices([],'GPU')`, in order for the program to run properly with `tensorflow-gpu`; this is common in other `flax/examples`.
</issue>
<code>
[start of examples/ppo/ppo_main.py]
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 from absl import flags
17 from absl import app
18 import jax
19 import jax.random
20 from ml_collections import config_flags
21
22 import ppo_lib
23 import models
24 import env_utils
25
26 FLAGS = flags.FLAGS
27
28 flags.DEFINE_string(
29 'logdir', default='/tmp/ppo_training',
30 help=('Directory to save checkpoints and logging info.'))
31
32 config_flags.DEFINE_config_file(
33 'config', os.path.join(os.path.dirname(__file__), 'default_config.py'),
34 'File path to the default configuration file.')
35
36 def main(argv):
37 config = FLAGS.config
38 game = config.game + 'NoFrameskip-v4'
39 num_actions = env_utils.get_num_actions(game)
40 print(f'Playing {game} with {num_actions} actions')
41 key = jax.random.PRNGKey(0)
42 key, subkey = jax.random.split(key)
43 model = models.create_model(subkey, num_outputs=num_actions)
44 optimizer = models.create_optimizer(model, learning_rate=config.learning_rate)
45 del model
46 optimizer = ppo_lib.train(optimizer, config, FLAGS.logdir)
47
48 if __name__ == '__main__':
49 app.run(main)
50
[end of examples/ppo/ppo_main.py]
[start of examples/ppo/agent.py]
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Agent utilities, incl. choosing the move and running in separate process."""
16
17 import multiprocessing
18 import collections
19 import jax
20 import numpy as onp
21
22 import env_utils
23
24 @jax.jit
25 def policy_action(model, state):
26 """Forward pass of the network."""
27 out = model(state)
28 return out
29
30
31 ExpTuple = collections.namedtuple(
32 'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done'])
33
34
35 class RemoteSimulator:
36 """Wrap functionality for an agent emulating Atari in a separate process.
37
38 An object of this class is created for every agent.
39 """
40
41 def __init__(self, game: str):
42 """Start the remote process and create Pipe() to communicate with it."""
43 parent_conn, child_conn = multiprocessing.Pipe()
44 self.proc = multiprocessing.Process(
45 target=rcv_action_send_exp, args=(child_conn, game))
46 self.conn = parent_conn
47 self.proc.start()
48
49
50 def rcv_action_send_exp(conn, game: str):
51 """Run the remote agents.
52
53 Receive action from the main learner, perform one step of simulation and
54 send back collected experience.
55 """
56 env = env_utils.create_env(game, clip_rewards=True)
57 while True:
58 obs = env.reset()
59 done = False
60 # Observations fetched from Atari env need additional batch dimension.
61 state = obs[None, ...]
62 while not done:
63 conn.send(state)
64 action = conn.recv()
65 obs, reward, done, _ = env.step(action)
66 next_state = obs[None, ...] if not done else None
67 experience = (state, action, reward, done)
68 conn.send(experience)
69 if done:
70 break
71 state = next_state
72
[end of examples/ppo/agent.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/ppo/agent.py b/examples/ppo/agent.py
--- a/examples/ppo/agent.py
+++ b/examples/ppo/agent.py
@@ -43,6 +43,7 @@
parent_conn, child_conn = multiprocessing.Pipe()
self.proc = multiprocessing.Process(
target=rcv_action_send_exp, args=(child_conn, game))
+ self.proc.daemon = True
self.conn = parent_conn
self.proc.start()
diff --git a/examples/ppo/ppo_main.py b/examples/ppo/ppo_main.py
--- a/examples/ppo/ppo_main.py
+++ b/examples/ppo/ppo_main.py
@@ -19,6 +19,8 @@
import jax.random
from ml_collections import config_flags
+import tensorflow as tf
+
import ppo_lib
import models
import env_utils
@@ -34,6 +36,9 @@
'File path to the default configuration file.')
def main(argv):
+ # Make sure tf does not allocate gpu memory.
+ tf.config.experimental.set_visible_devices([], 'GPU')
+
config = FLAGS.config
game = config.game + 'NoFrameskip-v4'
num_actions = env_utils.get_num_actions(game)
| {"golden_diff": "diff --git a/examples/ppo/agent.py b/examples/ppo/agent.py\n--- a/examples/ppo/agent.py\n+++ b/examples/ppo/agent.py\n@@ -43,6 +43,7 @@\n parent_conn, child_conn = multiprocessing.Pipe()\n self.proc = multiprocessing.Process(\n target=rcv_action_send_exp, args=(child_conn, game))\n+ self.proc.daemon = True\n self.conn = parent_conn\n self.proc.start()\n \ndiff --git a/examples/ppo/ppo_main.py b/examples/ppo/ppo_main.py\n--- a/examples/ppo/ppo_main.py\n+++ b/examples/ppo/ppo_main.py\n@@ -19,6 +19,8 @@\n import jax.random\n from ml_collections import config_flags\n \n+import tensorflow as tf\n+\n import ppo_lib\n import models\n import env_utils\n@@ -34,6 +36,9 @@\n 'File path to the default configuration file.')\n \n def main(argv):\n+ # Make sure tf does not allocate gpu memory.\n+ tf.config.experimental.set_visible_devices([], 'GPU')\n+\n config = FLAGS.config\n game = config.game + 'NoFrameskip-v4'\n num_actions = env_utils.get_num_actions(game)\n", "issue": "PPO example does not terminate properly\n### Configuration\r\n\r\nRunning the PPO example for a short number of frames in order to reproduce as fast as possible on a cloud VM with a V100 GPU. Config python3.7, flax 0.2.2, jax 0.2.1, jaxlib 0.1.55 .\r\n\r\nCommand run:\r\n`python ppo_main.py --config.game=Qbert --config.total_frames=4000`\r\n\r\n### Problem you have encountered:\r\n\r\nProgram does not exit. One can `print('Done')` after `ppo_lib.train` in `ppo_main` but there is an open thread and program can't exit (even after adding `raise SystemExit`).\r\n\r\n### Extra comments\r\n\r\nAdded extra line in `main` ` tf.config.experimental.set_visible_devices([],'GPU')` in order for the program to run properly with `tensorflow-gpu`, this is common in other `flax/examples`. \n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom absl import flags\nfrom absl import app\nimport jax\nimport jax.random\nfrom ml_collections import config_flags\n\nimport ppo_lib\nimport models\nimport env_utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'logdir', default='/tmp/ppo_training',\n help=('Directory to save checkpoints and logging info.'))\n\nconfig_flags.DEFINE_config_file(\n 'config', os.path.join(os.path.dirname(__file__), 'default_config.py'),\n 'File path to the default configuration file.')\n\ndef main(argv):\n config = FLAGS.config\n game = config.game + 'NoFrameskip-v4'\n num_actions = env_utils.get_num_actions(game)\n print(f'Playing {game} with {num_actions} actions')\n key = jax.random.PRNGKey(0)\n key, subkey = jax.random.split(key)\n model = models.create_model(subkey, num_outputs=num_actions)\n optimizer = models.create_optimizer(model, learning_rate=config.learning_rate)\n del model\n optimizer = ppo_lib.train(optimizer, config, FLAGS.logdir)\n\nif __name__ == '__main__':\n app.run(main)\n", "path": "examples/ppo/ppo_main.py"}, {"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 
(the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Agent utilities, incl. choosing the move and running in separate process.\"\"\"\n\nimport multiprocessing\nimport collections\nimport jax\nimport numpy as onp\n\nimport env_utils\n\[email protected]\ndef policy_action(model, state):\n \"\"\"Forward pass of the network.\"\"\"\n out = model(state)\n return out\n\n\nExpTuple = collections.namedtuple(\n 'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done'])\n\n\nclass RemoteSimulator:\n \"\"\"Wrap functionality for an agent emulating Atari in a separate process.\n\n An object of this class is created for every agent.\n \"\"\"\n\n def __init__(self, game: str):\n \"\"\"Start the remote process and create Pipe() to communicate with it.\"\"\"\n parent_conn, child_conn = multiprocessing.Pipe()\n self.proc = multiprocessing.Process(\n target=rcv_action_send_exp, args=(child_conn, game))\n self.conn = parent_conn\n self.proc.start()\n\n\ndef rcv_action_send_exp(conn, game: str):\n \"\"\"Run the remote agents.\n\n Receive action from the main learner, perform one step of simulation and\n send back collected experience.\n \"\"\"\n env = env_utils.create_env(game, clip_rewards=True)\n while True:\n obs = env.reset()\n done = False\n # Observations fetched from Atari env need additional batch dimension.\n state = obs[None, ...]\n while not done:\n conn.send(state)\n action = conn.recv()\n obs, reward, done, _ = env.step(action)\n next_state = obs[None, ...] if not done else None\n experience = (state, action, reward, done)\n conn.send(experience)\n if done:\n break\n state = next_state\n", "path": "examples/ppo/agent.py"}]} | 1,886 | 270 |
gh_patches_debug_30444 | rasdani/github-patches | git_diff | dask__dask-618 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Base.to_graphviz
Add function to return `graphviz` instance created from dask graph for below reasons:
- When using IPython, `.visualize` outputs unnecessary image file
- Sometimes we want to modify graphviz instance directly
</issue>
<code>
[start of dask/dot.py]
1 from __future__ import absolute_import, division, print_function
2
3 import re
4 from subprocess import check_call, CalledProcessError
5
6 from graphviz import Digraph
7
8 from .core import istask, get_dependencies, ishashable
9
10
11 def task_label(task):
12 """Label for a task on a dot graph.
13
14 Examples
15 --------
16 >>> from operator import add
17 >>> task_label((add, 1, 2))
18 'add'
19 >>> task_label((add, (add, 1, 2), 3))
20 'add(...)'
21 """
22 func = task[0]
23 if hasattr(func, 'funcs'):
24 if len(func.funcs) > 1:
25 return '{0}(...)'.format(funcname(func.funcs[0]))
26 else:
27 head = funcname(func.funcs[0])
28 else:
29 head = funcname(task[0])
30 if any(has_sub_tasks(i) for i in task[1:]):
31 return '{0}(...)'.format(head)
32 else:
33 return head
34
35
36 def has_sub_tasks(task):
37 """Returns True if the task has sub tasks"""
38 if istask(task):
39 return True
40 elif isinstance(task, list):
41 return any(has_sub_tasks(i) for i in task)
42 else:
43 return False
44
45
46 def funcname(func):
47 """Get the name of a function."""
48 while hasattr(func, 'func'):
49 func = func.func
50 return func.__name__
51
52
53 def name(x):
54 try:
55 return str(hash(x))
56 except TypeError:
57 return str(hash(str(x)))
58
59
60 _HASHPAT = re.compile('([0-9a-z]{32})')
61
62
63 def label(x, cache=None):
64 """
65
66 >>> label('x')
67 'x'
68
69 >>> label(('x', 1))
70 "('x', 1)"
71
72 >>> from hashlib import md5
73 >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()
74 >>> x
75 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'
76
77 >>> label(x)
78 'x-#-hello'
79 """
80 s = str(x)
81 m = re.search(_HASHPAT, s)
82 if m is not None:
83 for h in m.groups():
84 if cache is not None:
85 n = cache.get(h, len(cache))
86 label = '#{0}'.format(n)
87 # cache will be overwritten destructively
88 cache[h] = n
89 else:
90 label = '#'
91 s = s.replace(h, label)
92 return s
93
94
95 def to_graphviz(dsk, data_attributes=None, function_attributes=None):
96 if data_attributes is None:
97 data_attributes = {}
98 if function_attributes is None:
99 function_attributes = {}
100
101 g = Digraph(graph_attr={'rankdir': 'BT'})
102
103 seen = set()
104 cache = {}
105
106 for k, v in dsk.items():
107 k_name = name(k)
108 if k_name not in seen:
109 seen.add(k_name)
110 g.node(k_name, label=label(k, cache=cache), shape='box',
111 **data_attributes.get(k, {}))
112
113 if istask(v):
114 func_name = name((k, 'function'))
115 if func_name not in seen:
116 seen.add(func_name)
117 g.node(func_name, label=task_label(v), shape='circle',
118 **function_attributes.get(k, {}))
119 g.edge(func_name, k_name)
120
121 for dep in get_dependencies(dsk, k):
122 dep_name = name(dep)
123 if dep_name not in seen:
124 seen.add(dep_name)
125 g.node(dep_name, label=label(dep, cache=cache), shape='box',
126 **data_attributes.get(dep, {}))
127 g.edge(dep_name, func_name)
128 elif ishashable(v) and v in dsk:
129 g.edge(name(v), k_name)
130 return g
131
132
133 def dot_graph(dsk, filename='mydask', **kwargs):
134 g = to_graphviz(dsk, **kwargs)
135 g.save(filename + '.dot')
136
137 try:
138 check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)
139 check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)
140 except CalledProcessError:
141 raise RuntimeError(
142 "Please install The `dot` utility from graphviz:\n"
143 " Debian: sudo apt-get install graphviz\n"
144 " Mac OSX: brew install graphviz\n"
145 " Windows: http://www.graphviz.org/Download..php") # pragma: no cover
146 try:
147 from IPython.display import Image
148 return Image(filename + '.png')
149 except ImportError:
150 pass
151
[end of dask/dot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dask/dot.py b/dask/dot.py
--- a/dask/dot.py
+++ b/dask/dot.py
@@ -6,6 +6,7 @@
from graphviz import Digraph
from .core import istask, get_dependencies, ishashable
+from .compatibility import BytesIO
def task_label(task):
@@ -132,19 +133,35 @@
def dot_graph(dsk, filename='mydask', **kwargs):
g = to_graphviz(dsk, **kwargs)
- g.save(filename + '.dot')
- try:
- check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)
- check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)
- except CalledProcessError:
- raise RuntimeError(
- "Please install The `dot` utility from graphviz:\n"
- " Debian: sudo apt-get install graphviz\n"
- " Mac OSX: brew install graphviz\n"
- " Windows: http://www.graphviz.org/Download..php") # pragma: no cover
- try:
- from IPython.display import Image
- return Image(filename + '.png')
- except ImportError:
- pass
+ if filename is not None:
+ g.save(filename + '.dot')
+
+ try:
+ check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename),
+ shell=True)
+ check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename),
+ shell=True)
+
+ except CalledProcessError:
+ msg = ("Please install The `dot` utility from graphviz:\n"
+ " Debian: sudo apt-get install graphviz\n"
+ " Mac OSX: brew install graphviz\n"
+ " Windows: http://www.graphviz.org/Download..php")
+ raise RuntimeError(msg) # pragma: no cover
+
+ try:
+ from IPython.display import Image
+ return Image(filename + '.png')
+ except ImportError:
+ pass
+
+ else:
+ try:
+ from IPython.display import Image
+ s = BytesIO()
+ s.write(g.pipe(format='png'))
+ s.seek(0)
+ return Image(s.read())
+ except ImportError:
+ pass
| {"golden_diff": "diff --git a/dask/dot.py b/dask/dot.py\n--- a/dask/dot.py\n+++ b/dask/dot.py\n@@ -6,6 +6,7 @@\n from graphviz import Digraph\n \n from .core import istask, get_dependencies, ishashable\n+from .compatibility import BytesIO\n \n \n def task_label(task):\n@@ -132,19 +133,35 @@\n \n def dot_graph(dsk, filename='mydask', **kwargs):\n g = to_graphviz(dsk, **kwargs)\n- g.save(filename + '.dot')\n \n- try:\n- check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)\n- check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)\n- except CalledProcessError:\n- raise RuntimeError(\n- \"Please install The `dot` utility from graphviz:\\n\"\n- \" Debian: sudo apt-get install graphviz\\n\"\n- \" Mac OSX: brew install graphviz\\n\"\n- \" Windows: http://www.graphviz.org/Download..php\") # pragma: no cover\n- try:\n- from IPython.display import Image\n- return Image(filename + '.png')\n- except ImportError:\n- pass\n+ if filename is not None:\n+ g.save(filename + '.dot')\n+\n+ try:\n+ check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename),\n+ shell=True)\n+ check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename),\n+ shell=True)\n+\n+ except CalledProcessError:\n+ msg = (\"Please install The `dot` utility from graphviz:\\n\"\n+ \" Debian: sudo apt-get install graphviz\\n\"\n+ \" Mac OSX: brew install graphviz\\n\"\n+ \" Windows: http://www.graphviz.org/Download..php\")\n+ raise RuntimeError(msg) # pragma: no cover\n+\n+ try:\n+ from IPython.display import Image\n+ return Image(filename + '.png')\n+ except ImportError:\n+ pass\n+\n+ else:\n+ try:\n+ from IPython.display import Image\n+ s = BytesIO()\n+ s.write(g.pipe(format='png'))\n+ s.seek(0)\n+ return Image(s.read())\n+ except ImportError:\n+ pass\n", "issue": "Add Base.to_graphviz\nAdd function to return `graphviz` instance created from dask graph for below reasons:\n- When using IPython, `.visualize` outputs unnecessary image file\n- Sometimes we want to modify graphviz instance directly\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport re\nfrom subprocess import check_call, CalledProcessError\n\nfrom graphviz import Digraph\n\nfrom .core import istask, get_dependencies, ishashable\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if hasattr(func, 'funcs'):\n if len(func.funcs) > 1:\n return '{0}(...)'.format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(task[0])\n if any(has_sub_tasks(i) for i in task[1:]):\n return '{0}(...)'.format(head)\n else:\n return head\n\n\ndef has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef funcname(func):\n \"\"\"Get the name of a function.\"\"\"\n while hasattr(func, 'func'):\n func = func.func\n return func.__name__\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile('([0-9a-z]{32})')\n\n\ndef label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 
'x-#-hello'\n \"\"\"\n s = str(x)\n m = re.search(_HASHPAT, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = '#{0}'.format(n)\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = '#'\n s = s.replace(h, label)\n return s\n\n\ndef to_graphviz(dsk, data_attributes=None, function_attributes=None):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n g = Digraph(graph_attr={'rankdir': 'BT'})\n\n seen = set()\n cache = {}\n\n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n g.node(k_name, label=label(k, cache=cache), shape='box',\n **data_attributes.get(k, {}))\n\n if istask(v):\n func_name = name((k, 'function'))\n if func_name not in seen:\n seen.add(func_name)\n g.node(func_name, label=task_label(v), shape='circle',\n **function_attributes.get(k, {}))\n g.edge(func_name, k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n g.node(dep_name, label=label(dep, cache=cache), shape='box',\n **data_attributes.get(dep, {}))\n g.edge(dep_name, func_name)\n elif ishashable(v) and v in dsk:\n g.edge(name(v), k_name)\n return g\n\n\ndef dot_graph(dsk, filename='mydask', **kwargs):\n g = to_graphviz(dsk, **kwargs)\n g.save(filename + '.dot')\n\n try:\n check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)\n check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)\n except CalledProcessError:\n raise RuntimeError(\n \"Please install The `dot` utility from graphviz:\\n\"\n \" Debian: sudo apt-get install graphviz\\n\"\n \" Mac OSX: brew install graphviz\\n\"\n \" Windows: http://www.graphviz.org/Download..php\") # pragma: no cover\n try:\n from IPython.display import Image\n return Image(filename + '.png')\n except ImportError:\n pass\n", "path": "dask/dot.py"}]} | 1,991 | 552 |
gh_patches_debug_18332 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-569 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[INF] Make requirements.txt smaller
Follow-up from #257
The idea is to have feature-specific requirements.txt. Such as for biology, specifically requires biopython.
so we can install the package per feature as needed, such as with extra biology. It goes `pip install "pyjanitor[biology]"`
The example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies
[INF] Make requirements.txt smaller
Follow-up from #257
The idea is to have feature-specific requirements.txt. Such as for biology, specifically requires biopython.
so we can install the package per feature as needed, such as with extra biology. It goes `pip install "pyjanitor[biology]"`
The example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies
</issue>
<code>
[start of setup.py]
1 import re
2 from pathlib import Path
3
4 from setuptools import setup
5
6
7 def requirements():
8 with open("requirements.txt", "r+") as f:
9 return f.read()
10
11
12 def generate_long_description() -> str:
13 """
14 Extra chunks from README for PyPI description.
15
16 Target chunks must be contained within `.. pypi-doc` pair comments,
17 so there must be an even number of comments in README.
18
19 :returns: Extracted description from README
20
21 """
22 # Read the contents of README file
23 this_directory = Path(__file__).parent
24 with open(this_directory / "README.rst", encoding="utf-8") as f:
25 readme = f.read()
26
27 # Find pypi-doc comments in README
28 indices = [m.start() for m in re.finditer(".. pypi-doc", readme)]
29 if len(indices) % 2 != 0:
30 raise Exception("Odd number of `.. pypi-doc` comments in README")
31
32 # Loop through pairs of comments and save text between pairs
33 long_description = ""
34 for i in range(0, len(indices), 2):
35 start_index = indices[i] + 11
36 end_index = indices[i + 1]
37 long_description += readme[start_index:end_index]
38 return long_description
39
40
41 setup(
42 name="pyjanitor",
43 version="0.18.2",
44 description="Tools for cleaning pandas DataFrames",
45 author="Eric J. Ma",
46 author_email="[email protected]",
47 url="https://github.com/ericmjl/pyjanitor",
48 packages=["janitor"],
49 install_requires=requirements(),
50 python_requires=">=3.6",
51 long_description=generate_long_description(),
52 long_description_content_type="text/x-rst",
53 )
54
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -38,6 +38,12 @@
return long_description
+extra_spark = ["pyspark"]
+extra_biology = ["biopython"]
+extra_chemistry = ["rdkit"]
+extra_engineering = ["unyt"]
+extra_all = extra_biology + extra_engineering + extra_spark
+
setup(
name="pyjanitor",
version="0.18.2",
@@ -47,6 +53,14 @@
url="https://github.com/ericmjl/pyjanitor",
packages=["janitor"],
install_requires=requirements(),
+ extras_require={
+ "all": extra_all,
+ "biology": extra_biology,
+ # "chemistry": extra_chemistry, should be inserted once rdkit
+ # fixes https://github.com/rdkit/rdkit/issues/1812
+ "engineering": extra_engineering,
+ "spark": extra_spark,
+ },
python_requires=">=3.6",
long_description=generate_long_description(),
long_description_content_type="text/x-rst",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -38,6 +38,12 @@\n return long_description\n \n \n+extra_spark = [\"pyspark\"]\n+extra_biology = [\"biopython\"]\n+extra_chemistry = [\"rdkit\"]\n+extra_engineering = [\"unyt\"]\n+extra_all = extra_biology + extra_engineering + extra_spark\n+\n setup(\n name=\"pyjanitor\",\n version=\"0.18.2\",\n@@ -47,6 +53,14 @@\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n+ extras_require={\n+ \"all\": extra_all,\n+ \"biology\": extra_biology,\n+ # \"chemistry\": extra_chemistry, should be inserted once rdkit\n+ # fixes https://github.com/rdkit/rdkit/issues/1812\n+ \"engineering\": extra_engineering,\n+ \"spark\": extra_spark,\n+ },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n", "issue": "[INF] Make requirements.txt smaller\nFollow-up from #257 \r\n\r\nThe idea is to have feature-specific requirements.txt. Such as for biology, specifically requires biopython.\r\n\r\nso we can install the package per feature as needed, such as with extra biology. It goes `pip install \"pyjanitor[biology]\"`\r\n\r\nThe example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies\n[INF] Make requirements.txt smaller\nFollow-up from #257 \r\n\r\nThe idea is to have feature-specific requirements.txt. Such as for biology, specifically requires biopython.\r\n\r\nso we can install the package per feature as needed, such as with extra biology. It goes `pip install \"pyjanitor[biology]\"`\r\n\r\nThe example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies\n", "before_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README\n\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.18.2\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}]} | 1,239 | 264 |
gh_patches_debug_36545 | rasdani/github-patches | git_diff | translate__pootle-6680 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Try simpler language code as fallback before settings.LANGUAGE_CODE
In https://github.com/translate/pootle/blob/10913224/pootle/i18n/override.py#L87-L101 if the language code `it-IT` (for example) is tried and eventually falls back to `settings.LANGUAGE_CODE`, but it makes sense to first try `it` (simpler version of `it-IT`) before falling back to `settings.LANGUAGE_CODE`.
</issue>
<code>
[start of pootle/i18n/override.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 """Overrides and support functions for arbitrary locale support."""
10
11 import os
12
13 from translate.lang import data
14
15 from django.utils import translation
16 from django.utils.translation import LANGUAGE_SESSION_KEY, trans_real
17
18 from pootle.i18n import gettext
19
20
21 def find_languages(locale_path):
22 """Generate supported languages list from the :param:`locale_path`
23 directory.
24 """
25 dirs = os.listdir(locale_path)
26 langs = []
27 for lang in dirs:
28 if (data.langcode_re.match(lang) and
29 os.path.isdir(os.path.join(locale_path, lang))):
30 langs.append((trans_real.to_language(lang),
31 data.languages.get(lang, (lang,))[0]))
32 return langs
33
34
35 def supported_langs():
36 """Returns a list of supported locales."""
37 from django.conf import settings
38 return settings.LANGUAGES
39
40
41 def get_lang_from_session(request, supported):
42 if hasattr(request, 'session'):
43 lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)
44 if lang_code and lang_code in supported:
45 return lang_code
46
47 return None
48
49
50 def get_lang_from_cookie(request, supported):
51 """See if the user's browser sent a cookie with a preferred language."""
52 from django.conf import settings
53 lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
54
55 if lang_code and lang_code in supported:
56 return lang_code
57
58 return None
59
60
61 def get_lang_from_http_header(request, supported):
62 """If the user's browser sends a list of preferred languages in the
63 HTTP_ACCEPT_LANGUAGE header, parse it into a list. Then walk through
64 the list, and for each entry, we check whether we have a matching
65 pootle translation project. If so, we return it.
66
67 If nothing is found, return None.
68 """
69 accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
70 for accept_lang, __ in trans_real.parse_accept_lang_header(accept):
71 if accept_lang == '*':
72 return None
73
74 normalized = data.normalize_code(data.simplify_to_common(accept_lang))
75 if normalized in ['en-us', 'en']:
76 return None
77 if normalized in supported:
78 return normalized
79
80 # FIXME: horribly slow way of dealing with languages with @ in them
81 for lang in supported.keys():
82 if normalized == data.normalize_code(lang):
83 return lang
84 return None
85
86
87 def get_language_from_request(request, check_path=False):
88 """Try to get the user's preferred language by first checking the
89 cookie and then by checking the HTTP language headers.
90
91 If all fails, try fall back to default language.
92 """
93 supported = dict(supported_langs())
94 for lang_getter in (get_lang_from_session,
95 get_lang_from_cookie,
96 get_lang_from_http_header):
97 lang = lang_getter(request, supported)
98 if lang is not None:
99 return lang
100 from django.conf import settings
101 return settings.LANGUAGE_CODE
102
103
104 def get_language_bidi():
105 """Override for Django's get_language_bidi that's aware of more RTL
106 languages.
107 """
108 return gettext.language_dir(translation.get_language()) == 'rtl'
109
110
111 def hijack_translation():
112 """Sabotage Django's fascist linguistical regime."""
113 # Override functions that check if language is known to Django
114 translation.check_for_language = lambda lang_code: True
115 trans_real.check_for_language = lambda lang_code: True
116 translation.get_language_from_request = get_language_from_request
117
118 # Override django's inadequate bidi detection
119 translation.get_language_bidi = get_language_bidi
120
[end of pootle/i18n/override.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/i18n/override.py b/pootle/i18n/override.py
--- a/pootle/i18n/override.py
+++ b/pootle/i18n/override.py
@@ -38,24 +38,35 @@
return settings.LANGUAGES
-def get_lang_from_session(request, supported):
- if hasattr(request, 'session'):
- lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)
- if lang_code and lang_code in supported:
- return lang_code
+def get_language_supported(lang_code, supported):
+ normalized = data.normalize_code(data.simplify_to_common(lang_code))
+ if normalized in supported:
+ return normalized
+
+ # FIXME: horribly slow way of dealing with languages with @ in them
+ for lang in supported.keys():
+ if normalized == data.normalize_code(lang):
+ return lang
return None
+def get_lang_from_session(request, supported):
+ if not hasattr(request, 'session'):
+ return None
+ lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)
+ if not lang_code:
+ return None
+ return get_language_supported(lang_code, supported)
+
+
def get_lang_from_cookie(request, supported):
"""See if the user's browser sent a cookie with a preferred language."""
from django.conf import settings
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
-
- if lang_code and lang_code in supported:
- return lang_code
-
- return None
+ if not lang_code:
+ return None
+ return get_language_supported(lang_code, supported)
def get_lang_from_http_header(request, supported):
@@ -70,17 +81,9 @@
for accept_lang, __ in trans_real.parse_accept_lang_header(accept):
if accept_lang == '*':
return None
-
- normalized = data.normalize_code(data.simplify_to_common(accept_lang))
- if normalized in ['en-us', 'en']:
- return None
- if normalized in supported:
- return normalized
-
- # FIXME: horribly slow way of dealing with languages with @ in them
- for lang in supported.keys():
- if normalized == data.normalize_code(lang):
- return lang
+ supported_lang = get_language_supported(accept_lang, supported)
+ if supported_lang:
+ return supported_lang
return None
@@ -98,7 +101,9 @@
if lang is not None:
return lang
from django.conf import settings
- return settings.LANGUAGE_CODE
+ if settings.LANGUAGE_CODE in supported:
+ return settings.LANGUAGE_CODE
+ return 'en-us'
def get_language_bidi():
| {"golden_diff": "diff --git a/pootle/i18n/override.py b/pootle/i18n/override.py\n--- a/pootle/i18n/override.py\n+++ b/pootle/i18n/override.py\n@@ -38,24 +38,35 @@\n return settings.LANGUAGES\n \n \n-def get_lang_from_session(request, supported):\n- if hasattr(request, 'session'):\n- lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n- if lang_code and lang_code in supported:\n- return lang_code\n+def get_language_supported(lang_code, supported):\n+ normalized = data.normalize_code(data.simplify_to_common(lang_code))\n+ if normalized in supported:\n+ return normalized\n+\n+ # FIXME: horribly slow way of dealing with languages with @ in them\n+ for lang in supported.keys():\n+ if normalized == data.normalize_code(lang):\n+ return lang\n \n return None\n \n \n+def get_lang_from_session(request, supported):\n+ if not hasattr(request, 'session'):\n+ return None\n+ lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n+ if not lang_code:\n+ return None\n+ return get_language_supported(lang_code, supported)\n+\n+\n def get_lang_from_cookie(request, supported):\n \"\"\"See if the user's browser sent a cookie with a preferred language.\"\"\"\n from django.conf import settings\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n-\n- if lang_code and lang_code in supported:\n- return lang_code\n-\n- return None\n+ if not lang_code:\n+ return None\n+ return get_language_supported(lang_code, supported)\n \n \n def get_lang_from_http_header(request, supported):\n@@ -70,17 +81,9 @@\n for accept_lang, __ in trans_real.parse_accept_lang_header(accept):\n if accept_lang == '*':\n return None\n-\n- normalized = data.normalize_code(data.simplify_to_common(accept_lang))\n- if normalized in ['en-us', 'en']:\n- return None\n- if normalized in supported:\n- return normalized\n-\n- # FIXME: horribly slow way of dealing with languages with @ in them\n- for lang in supported.keys():\n- if normalized == data.normalize_code(lang):\n- return lang\n+ supported_lang = get_language_supported(accept_lang, supported)\n+ if supported_lang:\n+ return supported_lang\n return None\n \n \n@@ -98,7 +101,9 @@\n if lang is not None:\n return lang\n from django.conf import settings\n- return settings.LANGUAGE_CODE\n+ if settings.LANGUAGE_CODE in supported:\n+ return settings.LANGUAGE_CODE\n+ return 'en-us'\n \n \n def get_language_bidi():\n", "issue": "Try simpler language code as fallback before settings.LANGUAGE_CODE\nIn https://github.com/translate/pootle/blob/10913224/pootle/i18n/override.py#L87-L101 if the language code `it-IT` (for example) is tried and eventually falls back to `settings.LANGUAGE_CODE`, but it makes sense to first try `it` (simpler version of `it-IT`) before falling back to `settings.LANGUAGE_CODE`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\"\"\"Overrides and support functions for arbitrary locale support.\"\"\"\n\nimport os\n\nfrom translate.lang import data\n\nfrom django.utils import translation\nfrom django.utils.translation import LANGUAGE_SESSION_KEY, trans_real\n\nfrom pootle.i18n import gettext\n\n\ndef find_languages(locale_path):\n \"\"\"Generate supported languages list from the :param:`locale_path`\n directory.\n \"\"\"\n dirs = os.listdir(locale_path)\n langs = []\n for lang in dirs:\n if (data.langcode_re.match(lang) and\n os.path.isdir(os.path.join(locale_path, lang))):\n langs.append((trans_real.to_language(lang),\n data.languages.get(lang, (lang,))[0]))\n return langs\n\n\ndef supported_langs():\n \"\"\"Returns a list of supported locales.\"\"\"\n from django.conf import settings\n return settings.LANGUAGES\n\n\ndef get_lang_from_session(request, supported):\n if hasattr(request, 'session'):\n lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n if lang_code and lang_code in supported:\n return lang_code\n\n return None\n\n\ndef get_lang_from_cookie(request, supported):\n \"\"\"See if the user's browser sent a cookie with a preferred language.\"\"\"\n from django.conf import settings\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n\n if lang_code and lang_code in supported:\n return lang_code\n\n return None\n\n\ndef get_lang_from_http_header(request, supported):\n \"\"\"If the user's browser sends a list of preferred languages in the\n HTTP_ACCEPT_LANGUAGE header, parse it into a list. Then walk through\n the list, and for each entry, we check whether we have a matching\n pootle translation project. If so, we return it.\n\n If nothing is found, return None.\n \"\"\"\n accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')\n for accept_lang, __ in trans_real.parse_accept_lang_header(accept):\n if accept_lang == '*':\n return None\n\n normalized = data.normalize_code(data.simplify_to_common(accept_lang))\n if normalized in ['en-us', 'en']:\n return None\n if normalized in supported:\n return normalized\n\n # FIXME: horribly slow way of dealing with languages with @ in them\n for lang in supported.keys():\n if normalized == data.normalize_code(lang):\n return lang\n return None\n\n\ndef get_language_from_request(request, check_path=False):\n \"\"\"Try to get the user's preferred language by first checking the\n cookie and then by checking the HTTP language headers.\n\n If all fails, try fall back to default language.\n \"\"\"\n supported = dict(supported_langs())\n for lang_getter in (get_lang_from_session,\n get_lang_from_cookie,\n get_lang_from_http_header):\n lang = lang_getter(request, supported)\n if lang is not None:\n return lang\n from django.conf import settings\n return settings.LANGUAGE_CODE\n\n\ndef get_language_bidi():\n \"\"\"Override for Django's get_language_bidi that's aware of more RTL\n languages.\n \"\"\"\n return gettext.language_dir(translation.get_language()) == 'rtl'\n\n\ndef hijack_translation():\n \"\"\"Sabotage Django's fascist linguistical regime.\"\"\"\n # Override functions that check if language is known to Django\n translation.check_for_language = lambda lang_code: True\n trans_real.check_for_language = lambda lang_code: True\n translation.get_language_from_request = get_language_from_request\n\n # Override django's inadequate bidi detection\n translation.get_language_bidi = get_language_bidi\n", "path": "pootle/i18n/override.py"}]} | 
1,743 | 608 |
gh_patches_debug_8007 | rasdani/github-patches | git_diff | medtagger__MedTagger-401 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error indicator when logging in or registering went wrong
## Current Behaviour
- currently, only error icon is displayed when something went wrong during logging in or registering new account
## Expected Behaviour
- an error message should be displayed next to the error icon, so that user knows what went wrong
</issue>
<code>
[start of backend/medtagger/api/auth/business.py]
1 """Module responsible for business logic in all Auth endpoint."""
2 from medtagger.api import InvalidArgumentsException
3 from medtagger.api.security import hash_password, verify_user_password, generate_auth_token
4 from medtagger.database.models import User
5 from medtagger.repositories import roles as RolesRepository, users as UsersRepository
6
7
8 def create_user(email: str, password: str, first_name: str, last_name: str) -> int:
9 """Create user with the given user information. Password is being hashed.
10
11 :param email: user email in string format
12 :param password: user password in string format
13 :param first_name: user first name in string format
14 :param last_name: user last name in string format
15
16 :return: id of the new user
17 """
18 user = UsersRepository.get_user_by_email(email)
19 if user:
20 raise InvalidArgumentsException('User with this email already exist')
21 password_hash = hash_password(password)
22 new_user = User(email, password_hash, first_name, last_name)
23 role = RolesRepository.get_role_with_name('volunteer')
24 if not role:
25 raise InvalidArgumentsException('Role does not exist.')
26 new_user.roles.append(role)
27 return UsersRepository.add_new_user(new_user)
28
29
30 def sign_in_user(email: str, password: str) -> str:
31 """Sign in user using given username and password.
32
33 :param email: user email in string format
34 :param password: user password in string format
35
36 :return: authentication token
37 """
38 user = UsersRepository.get_user_by_email(email)
39 if not user:
40 raise InvalidArgumentsException('User does not exist.')
41 if not verify_user_password(user, password):
42 raise InvalidArgumentsException('Password does not match.')
43 return generate_auth_token(user)
44
[end of backend/medtagger/api/auth/business.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/medtagger/api/auth/business.py b/backend/medtagger/api/auth/business.py
--- a/backend/medtagger/api/auth/business.py
+++ b/backend/medtagger/api/auth/business.py
@@ -17,7 +17,7 @@
"""
user = UsersRepository.get_user_by_email(email)
if user:
- raise InvalidArgumentsException('User with this email already exist')
+ raise InvalidArgumentsException('User with this email already exists')
password_hash = hash_password(password)
new_user = User(email, password_hash, first_name, last_name)
role = RolesRepository.get_role_with_name('volunteer')
| {"golden_diff": "diff --git a/backend/medtagger/api/auth/business.py b/backend/medtagger/api/auth/business.py\n--- a/backend/medtagger/api/auth/business.py\n+++ b/backend/medtagger/api/auth/business.py\n@@ -17,7 +17,7 @@\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if user:\n- raise InvalidArgumentsException('User with this email already exist')\n+ raise InvalidArgumentsException('User with this email already exists')\n password_hash = hash_password(password)\n new_user = User(email, password_hash, first_name, last_name)\n role = RolesRepository.get_role_with_name('volunteer')\n", "issue": "Error indicator when logging in or registering went wrong\n## Current Behaviour\r\n - currently, only error icon is displayed when something went wrong during logging in or registering new account\r\n\r\n## Expected Behaviour \r\n - an error message should be displayed next to the error icon, so that user knows what went wrong\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for business logic in all Auth endpoint.\"\"\"\nfrom medtagger.api import InvalidArgumentsException\nfrom medtagger.api.security import hash_password, verify_user_password, generate_auth_token\nfrom medtagger.database.models import User\nfrom medtagger.repositories import roles as RolesRepository, users as UsersRepository\n\n\ndef create_user(email: str, password: str, first_name: str, last_name: str) -> int:\n \"\"\"Create user with the given user information. Password is being hashed.\n\n :param email: user email in string format\n :param password: user password in string format\n :param first_name: user first name in string format\n :param last_name: user last name in string format\n\n :return: id of the new user\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if user:\n raise InvalidArgumentsException('User with this email already exist')\n password_hash = hash_password(password)\n new_user = User(email, password_hash, first_name, last_name)\n role = RolesRepository.get_role_with_name('volunteer')\n if not role:\n raise InvalidArgumentsException('Role does not exist.')\n new_user.roles.append(role)\n return UsersRepository.add_new_user(new_user)\n\n\ndef sign_in_user(email: str, password: str) -> str:\n \"\"\"Sign in user using given username and password.\n\n :param email: user email in string format\n :param password: user password in string format\n\n :return: authentication token\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if not user:\n raise InvalidArgumentsException('User does not exist.')\n if not verify_user_password(user, password):\n raise InvalidArgumentsException('Password does not match.')\n return generate_auth_token(user)\n", "path": "backend/medtagger/api/auth/business.py"}]} | 1,064 | 143 |
gh_patches_debug_53690 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-2180 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Elasticdl client crashes with invalid args
```
$ elasticdl -v
Traceback (most recent call last):
File "/usr/local/bin/elasticdl", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/elasticdl_client/main.py", line 97, in main
args, _ = parser.parse_known_args()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py", line 1787, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py", line 2022, in _parse_known_args
', '.join(required_actions))
TypeError: sequence item 0: expected str instance, NoneType found
```
</issue>
<code>
[start of elasticdl_client/main.py]
1 # Copyright 2020 The ElasticDL Authors. All rights reserved.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import argparse
15 import sys
16
17 from elasticdl_client.api import (
18 build_zoo,
19 evaluate,
20 init_zoo,
21 predict,
22 push_zoo,
23 train,
24 )
25 from elasticdl_client.common import args
26
27
28 def build_argument_parser():
29 parser = argparse.ArgumentParser()
30 subparsers = parser.add_subparsers()
31 subparsers.required = True
32
33 # Initialize the parser for the `elasticdl zoo` commands
34 zoo_parser = subparsers.add_parser(
35 "zoo",
36 help="Initialize | Build | Push a docker image for the model zoo.",
37 )
38 zoo_subparsers = zoo_parser.add_subparsers()
39 zoo_subparsers.required = True
40
41 # elasticdl zoo init
42 zoo_init_parser = zoo_subparsers.add_parser(
43 "init", help="Initialize the model zoo."
44 )
45 zoo_init_parser.set_defaults(func=init_zoo)
46 args.add_zoo_init_params(zoo_init_parser)
47
48 # elasticdl zoo build
49 zoo_build_parser = zoo_subparsers.add_parser(
50 "build", help="Build a docker image for the model zoo."
51 )
52 zoo_build_parser.set_defaults(func=build_zoo)
53 args.add_zoo_build_params(zoo_build_parser)
54
55 # elasticdl zoo push
56 zoo_push_parser = zoo_subparsers.add_parser(
57 "push",
58 help="Push the docker image to a remote registry for the distributed"
59 "ElasticDL job.",
60 )
61 zoo_push_parser.set_defaults(func=push_zoo)
62 args.add_zoo_push_params(zoo_push_parser)
63
64 # elasticdl train
65 train_parser = subparsers.add_parser(
66 "train", help="Submit a ElasticDL distributed training job"
67 )
68 train_parser.set_defaults(func=train)
69 args.add_common_params(train_parser)
70 args.add_train_params(train_parser)
71
72 # elasticdl evaluate
73 evaluate_parser = subparsers.add_parser(
74 "evaluate", help="Submit a ElasticDL distributed evaluation job"
75 )
76 evaluate_parser.set_defaults(func=evaluate)
77 args.add_common_params(evaluate_parser)
78 args.add_evaluate_params(evaluate_parser)
79
80 # elasticdl predict
81 predict_parser = subparsers.add_parser(
82 "predict", help="Submit a ElasticDL distributed prediction job"
83 )
84 predict_parser.set_defaults(func=predict)
85 args.add_common_params(predict_parser)
86 args.add_predict_params(predict_parser)
87
88 return parser
89
90
91 def main():
92 parser = build_argument_parser()
93 if len(sys.argv) == 1:
94 parser.print_help(sys.stderr)
95 sys.exit(1)
96
97 args, _ = parser.parse_known_args()
98 args.func(args)
99
100
101 if __name__ == "__main__":
102 main()
103
[end of elasticdl_client/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl_client/main.py b/elasticdl_client/main.py
--- a/elasticdl_client/main.py
+++ b/elasticdl_client/main.py
@@ -94,7 +94,12 @@
parser.print_help(sys.stderr)
sys.exit(1)
- args, _ = parser.parse_known_args()
+ try:
+ args, _ = parser.parse_known_args()
+ except TypeError:
+ parser.print_help(sys.stderr)
+ sys.exit(1)
+
args.func(args)
| {"golden_diff": "diff --git a/elasticdl_client/main.py b/elasticdl_client/main.py\n--- a/elasticdl_client/main.py\n+++ b/elasticdl_client/main.py\n@@ -94,7 +94,12 @@\n parser.print_help(sys.stderr)\n sys.exit(1)\n \n- args, _ = parser.parse_known_args()\n+ try:\n+ args, _ = parser.parse_known_args()\n+ except TypeError:\n+ parser.print_help(sys.stderr)\n+ sys.exit(1)\n+\n args.func(args)\n", "issue": "Elasticdl client crashes with invalid args\n```\r\n$ elasticdl -v\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/elasticdl\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.7/site-packages/elasticdl_client/main.py\", line 97, in main\r\n args, _ = parser.parse_known_args()\r\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py\", line 1787, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py\", line 2022, in _parse_known_args\r\n ', '.join(required_actions))\r\nTypeError: sequence item 0: expected str instance, NoneType found\r\n```\n", "before_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport sys\n\nfrom elasticdl_client.api import (\n build_zoo,\n evaluate,\n init_zoo,\n predict,\n push_zoo,\n train,\n)\nfrom elasticdl_client.common import args\n\n\ndef build_argument_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n subparsers.required = True\n\n # Initialize the parser for the `elasticdl zoo` commands\n zoo_parser = subparsers.add_parser(\n \"zoo\",\n help=\"Initialize | Build | Push a docker image for the model zoo.\",\n )\n zoo_subparsers = zoo_parser.add_subparsers()\n zoo_subparsers.required = True\n\n # elasticdl zoo init\n zoo_init_parser = zoo_subparsers.add_parser(\n \"init\", help=\"Initialize the model zoo.\"\n )\n zoo_init_parser.set_defaults(func=init_zoo)\n args.add_zoo_init_params(zoo_init_parser)\n\n # elasticdl zoo build\n zoo_build_parser = zoo_subparsers.add_parser(\n \"build\", help=\"Build a docker image for the model zoo.\"\n )\n zoo_build_parser.set_defaults(func=build_zoo)\n args.add_zoo_build_params(zoo_build_parser)\n\n # elasticdl zoo push\n zoo_push_parser = zoo_subparsers.add_parser(\n \"push\",\n help=\"Push the docker image to a remote registry for the distributed\"\n \"ElasticDL job.\",\n )\n zoo_push_parser.set_defaults(func=push_zoo)\n args.add_zoo_push_params(zoo_push_parser)\n\n # elasticdl train\n train_parser = subparsers.add_parser(\n \"train\", help=\"Submit a ElasticDL distributed training job\"\n )\n train_parser.set_defaults(func=train)\n args.add_common_params(train_parser)\n args.add_train_params(train_parser)\n\n # elasticdl evaluate\n evaluate_parser = subparsers.add_parser(\n \"evaluate\", help=\"Submit a ElasticDL distributed evaluation job\"\n )\n 
evaluate_parser.set_defaults(func=evaluate)\n args.add_common_params(evaluate_parser)\n args.add_evaluate_params(evaluate_parser)\n\n # elasticdl predict\n predict_parser = subparsers.add_parser(\n \"predict\", help=\"Submit a ElasticDL distributed prediction job\"\n )\n predict_parser.set_defaults(func=predict)\n args.add_common_params(predict_parser)\n args.add_predict_params(predict_parser)\n\n return parser\n\n\ndef main():\n parser = build_argument_parser()\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n args, _ = parser.parse_known_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl_client/main.py"}]} | 1,663 | 116 |
gh_patches_debug_7059 | rasdani/github-patches | git_diff | modin-project__modin-6283 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor ci.yml to reduce the amount of copy-pasting
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 import versioneer
3
4 with open("README.md", "r", encoding="utf-8") as fh:
5 long_description = fh.read()
6
7 dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
8 ray_deps = ["ray[default]>=1.13.0", "pyarrow"]
9 unidist_deps = ["unidist[mpi]>=0.2.1"]
10 remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
11 spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
12 sql_deps = ["dfsql>=0.4.2", "pyparsing<=2.4.7"]
13 all_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps
14
15 # Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.
16 # This file provides the "import pandas before Ray init" feature if specific
17 # environment variable is set (see https://github.com/modin-project/modin/issues/4564).
18 cmdclass = versioneer.get_cmdclass()
19 extra_files = ["modin-autoimport-pandas.pth"]
20
21
22 class AddPthFileBuild(cmdclass["build_py"]):
23 def _get_data_files(self):
24 return (super()._get_data_files() or []) + [
25 (".", ".", self.build_lib, extra_files)
26 ]
27
28
29 class AddPthFileSDist(cmdclass["sdist"]):
30 def make_distribution(self):
31 self.filelist.extend(extra_files)
32 return super().make_distribution()
33
34
35 cmdclass["build_py"] = AddPthFileBuild
36 cmdclass["sdist"] = AddPthFileSDist
37
38 setup(
39 name="modin",
40 version=versioneer.get_version(),
41 cmdclass=cmdclass,
42 description="Modin: Make your pandas code run faster by changing one line of code.",
43 packages=find_packages(exclude=["scripts", "scripts.*"]),
44 include_package_data=True,
45 license="Apache 2",
46 url="https://github.com/modin-project/modin",
47 long_description=long_description,
48 long_description_content_type="text/markdown",
49 install_requires=[
50 "pandas>=2,<2.1",
51 "packaging",
52 "numpy>=1.18.5",
53 "fsspec",
54 "psutil",
55 ],
56 extras_require={
57 # can be installed by pip install modin[dask]
58 "dask": dask_deps,
59 "ray": ray_deps,
60 "unidist": unidist_deps,
61 "remote": remote_deps,
62 "spreadsheet": spreadsheet_deps,
63 "sql": sql_deps,
64 "all": all_deps,
65 },
66 python_requires=">=3.8",
67 )
68
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,8 @@
long_description = fh.read()
dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
-ray_deps = ["ray[default]>=1.13.0", "pyarrow"]
+# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100
+ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow"]
unidist_deps = ["unidist[mpi]>=0.2.1"]
remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,7 +5,8 @@\n long_description = fh.read()\n \n dask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n-ray_deps = [\"ray[default]>=1.13.0\", \"pyarrow\"]\n+# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\n+ray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\n unidist_deps = [\"unidist[mpi]>=0.2.1\"]\n remote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\n spreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\n", "issue": "Refactor ci.yml to reduce the amount of copy-pasting\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\nray_deps = [\"ray[default]>=1.13.0\", \"pyarrow\"]\nunidist_deps = [\"unidist[mpi]>=0.2.1\"]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"pandas>=2,<2.1\",\n \"packaging\",\n \"numpy>=1.18.5\",\n \"fsspec\",\n \"psutil\",\n ],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"unidist\": unidist_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}]} | 1,279 | 196 |
gh_patches_debug_1143 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3132 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When logged in landing page should be "myRSR"
</issue>
<code>
[start of akvo/rsr/views/__init__.py]
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the Akvo RSR module.
6 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9 from django.core.urlresolvers import reverse
10 from django.http import HttpResponseRedirect
11
12
13 def index(request):
14 """."""
15 return HttpResponseRedirect(reverse('project-directory', args=[]))
16
[end of akvo/rsr/views/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/views/__init__.py b/akvo/rsr/views/__init__.py
--- a/akvo/rsr/views/__init__.py
+++ b/akvo/rsr/views/__init__.py
@@ -11,5 +11,7 @@
def index(request):
- """."""
- return HttpResponseRedirect(reverse('project-directory', args=[]))
+ """Redirect user to project directory or My RSR."""
+
+ redirect_url = 'project-directory' if request.user.is_anonymous() else 'my_rsr'
+ return HttpResponseRedirect(reverse(redirect_url, args=[]))
| {"golden_diff": "diff --git a/akvo/rsr/views/__init__.py b/akvo/rsr/views/__init__.py\n--- a/akvo/rsr/views/__init__.py\n+++ b/akvo/rsr/views/__init__.py\n@@ -11,5 +11,7 @@\n \n \n def index(request):\n- \"\"\".\"\"\"\n- return HttpResponseRedirect(reverse('project-directory', args=[]))\n+ \"\"\"Redirect user to project directory or My RSR.\"\"\"\n+\n+ redirect_url = 'project-directory' if request.user.is_anonymous() else 'my_rsr'\n+ return HttpResponseRedirect(reverse(redirect_url, args=[]))\n", "issue": "When logged in landing page should be \"myRSR\"\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\n\ndef index(request):\n \"\"\".\"\"\"\n return HttpResponseRedirect(reverse('project-directory', args=[]))\n", "path": "akvo/rsr/views/__init__.py"}]} | 683 | 134 |
gh_patches_debug_4446 | rasdani/github-patches | git_diff | zenml-io__zenml-317 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Repeated Paragraph in the documentation for `core-concepts`
In the file `core-concepts.md`, the section on [`Pipeline`](https://github.com/zenml-io/zenml/blob/b94dff83f0e7c8ab29e99d6b42a0c906a3512b63/docs/book/introduction/core-concepts.md?plain=1#L27-L41) includes a repeated paragraph. The first paragraph in the pipeline section is repeated in the 3rd paragraph of the same section.
```markdown
Within your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML
pipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored
within the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a
step. The standard pipelines within ZenML are designed to have easy interfaces to add pre-decided steps, with the
order also pre-decided. Other sorts of pipelines can be created as well from scratch.
Pipelines are designed as simple functions. They are created by using decorators appropriate to the specific use case
you have. The moment it is `run`, a pipeline is compiled and passed directly to the orchestrator, to be run in the
orchestrator environment.
Within your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML
pipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored
within the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a
step. The standard pipelines (like `TrainingPipeline`) within ZenML are designed to have easy interfaces to add
pre-decided steps, with the order also pre-decided. Other sorts of pipelines can be created as well from scratch.
```
</issue>
<code>
[start of src/zenml/materializers/built_in_materializer.py]
1 # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at:
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
12 # or implied. See the License for the specific language governing
13 # permissions and limitations under the License.
14 import os
15 from typing import Any, Type
16
17 from zenml.artifacts import DataAnalysisArtifact, DataArtifact
18 from zenml.logger import get_logger
19 from zenml.materializers.base_materializer import BaseMaterializer
20 from zenml.utils import yaml_utils
21
22 logger = get_logger(__name__)
23 DEFAULT_FILENAME = "data.json"
24
25
26 class BuiltInMaterializer(BaseMaterializer):
27 """Read/Write JSON files."""
28
29 # TODO [LOW]: consider adding typing.Dict and typing.List
30 # since these are the 'correct' way to annotate these types.
31
32 ASSOCIATED_ARTIFACT_TYPES = [
33 DataArtifact,
34 DataAnalysisArtifact,
35 ]
36 ASSOCIATED_TYPES = [
37 int,
38 str,
39 bytes,
40 dict,
41 float,
42 list,
43 tuple,
44 bool,
45 ]
46
47 def handle_input(self, data_type: Type[Any]) -> Any:
48 """Reads basic primitive types from json."""
49 super().handle_input(data_type)
50 filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)
51 contents = yaml_utils.read_json(filepath)
52 if type(contents) != data_type:
53 # TODO [ENG-142]: Raise error or try to coerce
54 logger.debug(
55 f"Contents {contents} was type {type(contents)} but expected "
56 f"{data_type}"
57 )
58 return contents
59
60 def handle_return(self, data: Any) -> None:
61 """Handles basic built-in types and stores them as json"""
62 super().handle_return(data)
63 filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)
64 yaml_utils.write_json(filepath, data)
65
[end of src/zenml/materializers/built_in_materializer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/zenml/materializers/built_in_materializer.py b/src/zenml/materializers/built_in_materializer.py
--- a/src/zenml/materializers/built_in_materializer.py
+++ b/src/zenml/materializers/built_in_materializer.py
@@ -26,7 +26,7 @@
 class BuiltInMaterializer(BaseMaterializer):
     """Read/Write JSON files."""
 
-    # TODO [LOW]: consider adding typing.Dict and typing.List
+    # TODO [ENG-322]: consider adding typing.Dict and typing.List
     # since these are the 'correct' way to annotate these types.
 
     ASSOCIATED_ARTIFACT_TYPES = [
| {"golden_diff": "diff --git a/src/zenml/materializers/built_in_materializer.py b/src/zenml/materializers/built_in_materializer.py\n--- a/src/zenml/materializers/built_in_materializer.py\n+++ b/src/zenml/materializers/built_in_materializer.py\n@@ -26,7 +26,7 @@\n class BuiltInMaterializer(BaseMaterializer):\n \"\"\"Read/Write JSON files.\"\"\"\n \n- # TODO [LOW]: consider adding typing.Dict and typing.List\n+ # TODO [ENG-322]: consider adding typing.Dict and typing.List\n # since these are the 'correct' way to annotate these types.\n \n ASSOCIATED_ARTIFACT_TYPES = [\n", "issue": "Repeated Paragraph in the documentation for `core-concepts`\nIn the file `core-concepts.md`, the section on [`Pipeline`](https://github.com/zenml-io/zenml/blob/b94dff83f0e7c8ab29e99d6b42a0c906a3512b63/docs/book/introduction/core-concepts.md?plain=1#L27-L41) includes a repeated paragraph. The first paragraph in the the pipeline section is repeated in the 3rd paragraph of the same section. \r\n\r\n```markdown\r\nWithin your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML \r\npipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored \r\nwithin the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a \r\nstep. The standard pipelines within ZenML are designed to have easy interfaces to add pre-decided steps, with the \r\norder also pre-decided. Other sorts of pipelines can be created as well from scratch.\r\n\r\nPipelines are designed as simple functions. They are created by using decorators appropriate to the specific use case \r\nyou have. The moment it is `run`, a pipeline is compiled and passed directly to the orchestrator, to be run in the \r\norchestrator environment.\r\n\r\nWithin your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML \r\npipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored \r\nwithin the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a \r\nstep. The standard pipelines (like `TrainingPipeline`) within ZenML are designed to have easy interfaces to add \r\npre-decided steps, with the order also pre-decided. Other sorts of pipelines can be created as well from scratch.\r\n```\n", "before_files": [{"content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\nimport os\nfrom typing import Any, Type\n\nfrom zenml.artifacts import DataAnalysisArtifact, DataArtifact\nfrom zenml.logger import get_logger\nfrom zenml.materializers.base_materializer import BaseMaterializer\nfrom zenml.utils import yaml_utils\n\nlogger = get_logger(__name__)\nDEFAULT_FILENAME = \"data.json\"\n\n\nclass BuiltInMaterializer(BaseMaterializer):\n \"\"\"Read/Write JSON files.\"\"\"\n\n # TODO [LOW]: consider adding typing.Dict and typing.List\n # since these are the 'correct' way to annotate these types.\n\n ASSOCIATED_ARTIFACT_TYPES = [\n DataArtifact,\n DataAnalysisArtifact,\n ]\n ASSOCIATED_TYPES = [\n int,\n str,\n bytes,\n dict,\n float,\n list,\n tuple,\n bool,\n ]\n\n def handle_input(self, data_type: Type[Any]) -> Any:\n \"\"\"Reads basic primitive types from json.\"\"\"\n super().handle_input(data_type)\n filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)\n contents = yaml_utils.read_json(filepath)\n if type(contents) != data_type:\n # TODO [ENG-142]: Raise error or try to coerce\n logger.debug(\n f\"Contents {contents} was type {type(contents)} but expected \"\n f\"{data_type}\"\n )\n return contents\n\n def handle_return(self, data: Any) -> None:\n \"\"\"Handles basic built-in types and stores them as json\"\"\"\n super().handle_return(data)\n filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)\n yaml_utils.write_json(filepath, data)\n", "path": "src/zenml/materializers/built_in_materializer.py"}]} | 1,568 | 151 |
gh_patches_debug_37926 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5842 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Samsonite spider finds dealers, not official stores
This spider is wrong, e.g., the stores in Norway are not official Samsonite stores but dealers carrying the Samsonite brand.
E.g., this is Chillout Travel Store, not a Samsonite store:
https://www.alltheplaces.xyz/map/#15.79/59.920398/10.757257
The website does list official stores and dealers separately, so it should be possible to import the right type?
https://www.samsonite.no/samsonite-store/?search=dealer&city=&country=no&lat=59.920469259204786&lng=10.755597088646583&radius=20
_Originally posted by @eisams in https://github.com/alltheplaces/alltheplaces/issues/4385#issuecomment-1586255246_
</issue>
<code>
[start of locations/spiders/samsonite_eu.py]
1 import scrapy
2 import xmltodict
3
4 from locations.dict_parser import DictParser
5
6
7 class SamsoniteEuSpider(scrapy.Spider):
8 name = "samsonite_eu"
9 item_attributes = {
10 "brand": "Samsonite",
11 "brand_wikidata": "Q1203426",
12 }
13 allowed_domains = ["samsonite.com"]
14
15 def start_requests(self):
16 country_eu = [
17 "AL",
18 "CZ",
19 "DE",
20 "DK",
21 "CY",
22 "AT",
23 "BE",
24 "BG",
25 "CH",
26 "EE",
27 "EL",
28 "ES",
29 "FI",
30 "FR",
31 "HR",
32 "HU",
33 "IE",
34 "IS",
35 "IT",
36 "LT",
37 "LU",
38 "NL",
39 "NO",
40 "LV",
41 "ME",
42 "MT",
43 "MK",
44 "LI",
45 "PL",
46 "SI",
47 "SK",
48 "TR",
49 "UK",
50 "RS",
51 "SE",
52 "PT",
53 "RO",
54 ]
55 template = "https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000"
56 for country in country_eu:
57 yield scrapy.Request(url=template.format(country), callback=self.parse)
58
59 def parse(self, response):
60 data = xmltodict.parse(response.text)
61 if data.get("dealers"):
62 stores = data.get("dealers", {}).get("dealer")
63 stores = stores if type(stores) == list else [stores]
64 for store in stores:
65 item = DictParser.parse(store)
66 item["ref"] = store.get("fld_Deal_Id")
67 item["street_address"] = store.get("fld_Deal_Address1")
68 item["city"] = store.get("fld_Deal_City1")
69 item["postcode"] = store.get("fld_Deal_Zip")
70 item["country"] = store.get("fld_Coun_Name")
71 item["phone"] = store.get("fld_Deal_Phone")
72 item["email"] = store.get("fld_Deal_Email")
73
74 yield item
75
[end of locations/spiders/samsonite_eu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/samsonite_eu.py b/locations/spiders/samsonite_eu.py
--- a/locations/spiders/samsonite_eu.py
+++ b/locations/spiders/samsonite_eu.py
@@ -1,15 +1,13 @@
import scrapy
import xmltodict
-from locations.dict_parser import DictParser
+from locations.items import Feature, add_social_media
class SamsoniteEuSpider(scrapy.Spider):
name = "samsonite_eu"
- item_attributes = {
- "brand": "Samsonite",
- "brand_wikidata": "Q1203426",
- }
+ CHIC_ACCENT = {"brand": "Chic Accent"}
+ SAMSONITE = {"brand": "Samsonite", "brand_wikidata": "Q1203426"}
allowed_domains = ["samsonite.com"]
def start_requests(self):
@@ -51,6 +49,7 @@
"SE",
"PT",
"RO",
+ "GB",
]
template = "https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000"
for country in country_eu:
@@ -62,13 +61,31 @@
stores = data.get("dealers", {}).get("dealer")
stores = stores if type(stores) == list else [stores]
for store in stores:
- item = DictParser.parse(store)
+ if store["fld_Deal_DeCl_ID"] != "9":
+ continue
+ item = Feature()
+ item["lat"] = store["Latitude"]
+ item["lon"] = store["Longitude"]
item["ref"] = store.get("fld_Deal_Id")
item["street_address"] = store.get("fld_Deal_Address1")
item["city"] = store.get("fld_Deal_City1")
item["postcode"] = store.get("fld_Deal_Zip")
item["country"] = store.get("fld_Coun_Name")
- item["phone"] = store.get("fld_Deal_Phone")
- item["email"] = store.get("fld_Deal_Email")
+ item["email"] = store.get("fld_Deal_Email") or ""
+ item["website"] = store["fld_Deal_DetailPageUrl"]
+
+ if "chicaccent.com" in item["email"]:
+ item.update(self.CHIC_ACCENT)
+ else:
+ item.update(self.SAMSONITE)
+
+ if phone := store.get("fld_Deal_Phone"):
+ phone = store["fld_Deal_Prefix"] + phone.lower()
+
+ if "whatsapp" in phone:
+ phone, whats_app = phone.split("whatsapp")
+ add_social_media(item, "WhatsApp", whats_app.strip(" :"))
+
+ item["phone"] = phone
yield item
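The decisive line in the hunk above is the early `continue` on `fld_Deal_DeCl_ID != "9"`: judging from the issue, classification code "9" is assumed to mark official Samsonite stores, so plain dealers are skipped instead of emitted. Note the patch also uses the walrus operator (`phone := ...`), so it presumes Python 3.8+. A hedged, stand-alone sketch of the same filter (field name taken from the diff, sample values invented):

    # Hedged sketch: keep only feed entries whose dealer classification marks an official store.
    def official_stores(dealers, official_code="9"):
        return [d for d in dealers if d.get("fld_Deal_DeCl_ID") == official_code]

    sample = [
        {"fld_Deal_DeCl_ID": "9", "name": "Samsonite Store Oslo"},
        {"fld_Deal_DeCl_ID": "2", "name": "Chillout Travel Store"},  # dealer, should be dropped
    ]
    assert [d["name"] for d in official_stores(sample)] == ["Samsonite Store Oslo"]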
| {"golden_diff": "diff --git a/locations/spiders/samsonite_eu.py b/locations/spiders/samsonite_eu.py\n--- a/locations/spiders/samsonite_eu.py\n+++ b/locations/spiders/samsonite_eu.py\n@@ -1,15 +1,13 @@\n import scrapy\n import xmltodict\n \n-from locations.dict_parser import DictParser\n+from locations.items import Feature, add_social_media\n \n \n class SamsoniteEuSpider(scrapy.Spider):\n name = \"samsonite_eu\"\n- item_attributes = {\n- \"brand\": \"Samsonite\",\n- \"brand_wikidata\": \"Q1203426\",\n- }\n+ CHIC_ACCENT = {\"brand\": \"Chic Accent\"}\n+ SAMSONITE = {\"brand\": \"Samsonite\", \"brand_wikidata\": \"Q1203426\"}\n allowed_domains = [\"samsonite.com\"]\n \n def start_requests(self):\n@@ -51,6 +49,7 @@\n \"SE\",\n \"PT\",\n \"RO\",\n+ \"GB\",\n ]\n template = \"https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000\"\n for country in country_eu:\n@@ -62,13 +61,31 @@\n stores = data.get(\"dealers\", {}).get(\"dealer\")\n stores = stores if type(stores) == list else [stores]\n for store in stores:\n- item = DictParser.parse(store)\n+ if store[\"fld_Deal_DeCl_ID\"] != \"9\":\n+ continue\n+ item = Feature()\n+ item[\"lat\"] = store[\"Latitude\"]\n+ item[\"lon\"] = store[\"Longitude\"]\n item[\"ref\"] = store.get(\"fld_Deal_Id\")\n item[\"street_address\"] = store.get(\"fld_Deal_Address1\")\n item[\"city\"] = store.get(\"fld_Deal_City1\")\n item[\"postcode\"] = store.get(\"fld_Deal_Zip\")\n item[\"country\"] = store.get(\"fld_Coun_Name\")\n- item[\"phone\"] = store.get(\"fld_Deal_Phone\")\n- item[\"email\"] = store.get(\"fld_Deal_Email\")\n+ item[\"email\"] = store.get(\"fld_Deal_Email\") or \"\"\n+ item[\"website\"] = store[\"fld_Deal_DetailPageUrl\"]\n+\n+ if \"chicaccent.com\" in item[\"email\"]:\n+ item.update(self.CHIC_ACCENT)\n+ else:\n+ item.update(self.SAMSONITE)\n+\n+ if phone := store.get(\"fld_Deal_Phone\"):\n+ phone = store[\"fld_Deal_Prefix\"] + phone.lower()\n+\n+ if \"whatsapp\" in phone:\n+ phone, whats_app = phone.split(\"whatsapp\")\n+ add_social_media(item, \"WhatsApp\", whats_app.strip(\" :\"))\n+\n+ item[\"phone\"] = phone\n \n yield item\n", "issue": "Samsonite spider finds dealers, not official stores\nThis spider is wrong, e.g., the stores in Norway are not official Samsonite stores but dealers carrying the Samsonite brand\r\n\r\nE.g., this is Chillout Travel Store, not a Samsonite store\r\nhttps://www.alltheplaces.xyz/map/#15.79/59.920398/10.757257\r\n\r\nThe website does list official stores and dealers separately, so it should be possible to import the right type?\r\nhttps://www.samsonite.no/samsonite-store/?search=dealer&city=&country=no&lat=59.920469259204786&lng=10.755597088646583&radius=20\r\n\r\n_Originally posted by @eisams in https://github.com/alltheplaces/alltheplaces/issues/4385#issuecomment-1586255246_\r\n \n", "before_files": [{"content": "import scrapy\nimport xmltodict\n\nfrom locations.dict_parser import DictParser\n\n\nclass SamsoniteEuSpider(scrapy.Spider):\n name = \"samsonite_eu\"\n item_attributes = {\n \"brand\": \"Samsonite\",\n \"brand_wikidata\": \"Q1203426\",\n }\n allowed_domains = [\"samsonite.com\"]\n\n def start_requests(self):\n country_eu = [\n \"AL\",\n \"CZ\",\n \"DE\",\n \"DK\",\n \"CY\",\n \"AT\",\n \"BE\",\n \"BG\",\n \"CH\",\n \"EE\",\n \"EL\",\n \"ES\",\n \"FI\",\n \"FR\",\n \"HR\",\n \"HU\",\n \"IE\",\n \"IS\",\n \"IT\",\n \"LT\",\n \"LU\",\n \"NL\",\n \"NO\",\n \"LV\",\n \"ME\",\n \"MT\",\n \"MK\",\n \"LI\",\n 
\"PL\",\n \"SI\",\n \"SK\",\n \"TR\",\n \"UK\",\n \"RS\",\n \"SE\",\n \"PT\",\n \"RO\",\n ]\n template = \"https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000\"\n for country in country_eu:\n yield scrapy.Request(url=template.format(country), callback=self.parse)\n\n def parse(self, response):\n data = xmltodict.parse(response.text)\n if data.get(\"dealers\"):\n stores = data.get(\"dealers\", {}).get(\"dealer\")\n stores = stores if type(stores) == list else [stores]\n for store in stores:\n item = DictParser.parse(store)\n item[\"ref\"] = store.get(\"fld_Deal_Id\")\n item[\"street_address\"] = store.get(\"fld_Deal_Address1\")\n item[\"city\"] = store.get(\"fld_Deal_City1\")\n item[\"postcode\"] = store.get(\"fld_Deal_Zip\")\n item[\"country\"] = store.get(\"fld_Coun_Name\")\n item[\"phone\"] = store.get(\"fld_Deal_Phone\")\n item[\"email\"] = store.get(\"fld_Deal_Email\")\n\n yield item\n", "path": "locations/spiders/samsonite_eu.py"}]} | 1,431 | 700 |
gh_patches_debug_25598 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3459 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider public_storage is broken
During the global build at 2021-08-04-14-42-45, spider **public_storage** failed with **834 features** and **1879 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/logs/public_storage.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson))
</issue>
<code>
[start of locations/spiders/public_storage.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6 from locations.hours import OpeningHours
7
8
9 class PublicStorageSpider(scrapy.Spider):
10 name = "public_storage"
11 item_attributes = { 'brand': "Public Storage" }
12 allowed_domains = ["www.publicstorage.com"]
13 start_urls = (
14 'https://www.publicstorage.com/sitemap_plp.xml',
15 )
16
17 def parse(self, response):
18 response.selector.remove_namespaces()
19 city_urls = response.xpath('//url/loc/text()').extract()
20 for path in city_urls:
21 yield scrapy.Request(
22 path.strip(),
23 callback=self.parse_store,
24 )
25
26 def parse_hours(self, hours):
27 opening_hours = OpeningHours()
28
29 for hour in hours:
30 for day in hour['dayOfWeek']:
31 opening_hours.add_range(
32 day=day[:2],
33 open_time=hour["opens"],
34 close_time=hour["closes"],
35 )
36
37 return opening_hours.as_opening_hours()
38
39 def parse_store(self, response):
40 data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
41 data = data['@graph'][0]
42
43 properties = {
44 "ref": data['@id'],
45 "opening_hours": self.parse_hours(data['openingHoursSpecification']),
46 "addr_full": data['address']['streetAddress'],
47 "city": data['address']['addressLocality'],
48 "state": data['address']['addressRegion'],
49 "postcode": data['address']['postalCode'],
50 "phone": data['telephone'],
51 "lat": data['geo']['latitude'],
52 "lon": data['geo']['longitude'],
53 }
54
55 yield GeojsonPointItem(**properties)
56
[end of locations/spiders/public_storage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/public_storage.py b/locations/spiders/public_storage.py
--- a/locations/spiders/public_storage.py
+++ b/locations/spiders/public_storage.py
@@ -20,9 +20,13 @@
for path in city_urls:
yield scrapy.Request(
path.strip(),
- callback=self.parse_store,
+ callback=self.load_store,
)
+ def load_store(self, response):
+ ldjson = response.xpath('//link[@type="application/ld+json"]/@href').get()
+ yield scrapy.Request(response.urljoin(ldjson), callback=self.parse_store)
+
def parse_hours(self, hours):
opening_hours = OpeningHours()
@@ -37,11 +41,11 @@
return opening_hours.as_opening_hours()
def parse_store(self, response):
- data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
- data = data['@graph'][0]
+ data = response.json()['@graph'][0]
properties = {
"ref": data['@id'],
+ "website": data['url'],
"opening_hours": self.parse_hours(data['openingHoursSpecification']),
"addr_full": data['address']['streetAddress'],
"city": data['address']['addressLocality'],
| {"golden_diff": "diff --git a/locations/spiders/public_storage.py b/locations/spiders/public_storage.py\n--- a/locations/spiders/public_storage.py\n+++ b/locations/spiders/public_storage.py\n@@ -20,9 +20,13 @@\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n- callback=self.parse_store,\n+ callback=self.load_store,\n )\n \n+ def load_store(self, response):\n+ ldjson = response.xpath('//link[@type=\"application/ld+json\"]/@href').get()\n+ yield scrapy.Request(response.urljoin(ldjson), callback=self.parse_store)\n+\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n \n@@ -37,11 +41,11 @@\n return opening_hours.as_opening_hours()\n \n def parse_store(self, response):\n- data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n- data = data['@graph'][0]\n+ data = response.json()['@graph'][0]\n \n properties = {\n \"ref\": data['@id'],\n+ \"website\": data['url'],\n \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n \"addr_full\": data['address']['streetAddress'],\n \"city\": data['address']['addressLocality'],\n", "issue": "Spider public_storage is broken\nDuring the global build at 2021-08-04-14-42-45, spider **public_storage** failed with **834 features** and **1879 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/logs/public_storage.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass PublicStorageSpider(scrapy.Spider):\n name = \"public_storage\"\n item_attributes = { 'brand': \"Public Storage\" }\n allowed_domains = [\"www.publicstorage.com\"]\n start_urls = (\n 'https://www.publicstorage.com/sitemap_plp.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n for day in hour['dayOfWeek']:\n opening_hours.add_range(\n day=day[:2],\n open_time=hour[\"opens\"],\n close_time=hour[\"closes\"],\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n data = data['@graph'][0]\n\n properties = {\n \"ref\": data['@id'],\n \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n \"addr_full\": data['address']['streetAddress'],\n \"city\": data['address']['addressLocality'],\n \"state\": data['address']['addressRegion'],\n \"postcode\": data['address']['postalCode'],\n \"phone\": data['telephone'],\n \"lat\": data['geo']['latitude'],\n \"lon\": data['geo']['longitude'],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/public_storage.py"}]} | 1,204 | 293 |
gh_patches_debug_22879 | rasdani/github-patches | git_diff | techmatters__terraso-backend-1223 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add ability to change user password from Django admin
## Description
Add ability to change user password from Django admin.
</issue>
<code>
[start of terraso_backend/apps/core/admin.py]
1 # Copyright © 2021-2023 Technology Matters
2 #
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License as published
5 # by the Free Software Foundation, either version 3 of the License, or
6 # (at your option) any later version.
7 #
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU Affero General Public License for more details.
12 #
13 # You should have received a copy of the GNU Affero General Public License
14 # along with this program. If not, see https://www.gnu.org/licenses/.
15
16 from django.contrib import admin
17
18 from .models import (
19 Group,
20 Landscape,
21 LandscapeDevelopmentStrategy,
22 LandscapeGroup,
23 SharedResource,
24 TaxonomyTerm,
25 User,
26 UserPreference,
27 )
28
29
30 @admin.register(Group)
31 class GroupAdmin(admin.ModelAdmin):
32 list_display = ("name", "slug", "website", "created_at")
33
34 def get_queryset(self, request):
35 qs = super().get_queryset(request)
36 return qs.exclude(associated_landscapes__is_default_landscape_group=True)
37
38
39 @admin.register(Landscape)
40 class LandscapeAdmin(admin.ModelAdmin):
41 list_display = ("name", "slug", "location", "website", "created_at")
42 raw_id_fields = ("membership_list",)
43
44
45 class LandscapeDefaultGroup(Group):
46 class Meta:
47 proxy = True
48
49
50 @admin.register(LandscapeGroup)
51 class LandscapeGroupAdmin(admin.ModelAdmin):
52 list_display = ("landscape", "group")
53
54
55 class UserPreferenceInline(admin.TabularInline):
56 model = UserPreference
57
58
59 @admin.register(User)
60 class UserAdmin(admin.ModelAdmin):
61 list_display = ("email", "first_name", "last_name", "created_at", "is_staff")
62 inlines = [UserPreferenceInline]
63
64
65 @admin.register(TaxonomyTerm)
66 class TaxonomyTermAdmin(admin.ModelAdmin):
67 list_display = ("value_original", "type", "value_en", "value_es")
68
69
70 @admin.register(LandscapeDevelopmentStrategy)
71 class LandscapeDevelopmentStrategyAdmin(admin.ModelAdmin):
72 list_display = ("id", "landscape")
73
74
75 @admin.register(SharedResource)
76 class SharedResourceAdmin(admin.ModelAdmin):
77 list_display = ("id", "share_uuid", "share_access")
78
[end of terraso_backend/apps/core/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/terraso_backend/apps/core/admin.py b/terraso_backend/apps/core/admin.py
--- a/terraso_backend/apps/core/admin.py
+++ b/terraso_backend/apps/core/admin.py
@@ -14,6 +14,7 @@
# along with this program. If not, see https://www.gnu.org/licenses/.
from django.contrib import admin
+from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from .models import (
Group,
@@ -57,9 +58,28 @@
@admin.register(User)
-class UserAdmin(admin.ModelAdmin):
+class UserAdmin(DjangoUserAdmin):
+ ordering = ("email",)
list_display = ("email", "first_name", "last_name", "created_at", "is_staff")
+ search_fields = ("email", "first_name", "last_name")
inlines = [UserPreferenceInline]
+ fieldsets = (
+ (None, {"fields": ("email", "password")}),
+ ("Personal info", {"fields": ("first_name", "last_name")}),
+ (
+ "Permissions",
+ {
+ "fields": (
+ "is_active",
+ "is_staff",
+ "is_superuser",
+ "groups",
+ "user_permissions",
+ ),
+ },
+ ),
+ ("Important dates", {"fields": ("last_login", "date_joined")}),
+ )
@admin.register(TaxonomyTerm)
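Worth spelling out why this answers the original request: swapping the base class from `admin.ModelAdmin` to `django.contrib.auth.admin.UserAdmin` is what pulls in the admin's password machinery — the hashed password is rendered read-only with a link to the dedicated change-password form. A hedged sketch of the bare pattern as it would sit in any app's `admin.py` (model lookup and registration details here are illustrative, not Terraso's exact code):

    # Hedged sketch: extending Django's stock UserAdmin is the usual way to get
    # the admin change-password form for a custom user model.
    from django.contrib import admin
    from django.contrib.auth import get_user_model
    from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin

    @admin.register(get_user_model())
    class UserAdmin(DjangoUserAdmin):
        ordering = ("email",)
        search_fields = ("email", "first_name", "last_name")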
| {"golden_diff": "diff --git a/terraso_backend/apps/core/admin.py b/terraso_backend/apps/core/admin.py\n--- a/terraso_backend/apps/core/admin.py\n+++ b/terraso_backend/apps/core/admin.py\n@@ -14,6 +14,7 @@\n # along with this program. If not, see https://www.gnu.org/licenses/.\n \n from django.contrib import admin\n+from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin\n \n from .models import (\n Group,\n@@ -57,9 +58,28 @@\n \n \n @admin.register(User)\n-class UserAdmin(admin.ModelAdmin):\n+class UserAdmin(DjangoUserAdmin):\n+ ordering = (\"email\",)\n list_display = (\"email\", \"first_name\", \"last_name\", \"created_at\", \"is_staff\")\n+ search_fields = (\"email\", \"first_name\", \"last_name\")\n inlines = [UserPreferenceInline]\n+ fieldsets = (\n+ (None, {\"fields\": (\"email\", \"password\")}),\n+ (\"Personal info\", {\"fields\": (\"first_name\", \"last_name\")}),\n+ (\n+ \"Permissions\",\n+ {\n+ \"fields\": (\n+ \"is_active\",\n+ \"is_staff\",\n+ \"is_superuser\",\n+ \"groups\",\n+ \"user_permissions\",\n+ ),\n+ },\n+ ),\n+ (\"Important dates\", {\"fields\": (\"last_login\", \"date_joined\")}),\n+ )\n \n \n @admin.register(TaxonomyTerm)\n", "issue": "Add ability to change user password from Django admin\n## Description\r\nAdd ability to change user password from Django admin.\n", "before_files": [{"content": "# Copyright \u00a9 2021-2023 Technology Matters\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see https://www.gnu.org/licenses/.\n\nfrom django.contrib import admin\n\nfrom .models import (\n Group,\n Landscape,\n LandscapeDevelopmentStrategy,\n LandscapeGroup,\n SharedResource,\n TaxonomyTerm,\n User,\n UserPreference,\n)\n\n\[email protected](Group)\nclass GroupAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"website\", \"created_at\")\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.exclude(associated_landscapes__is_default_landscape_group=True)\n\n\[email protected](Landscape)\nclass LandscapeAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"location\", \"website\", \"created_at\")\n raw_id_fields = (\"membership_list\",)\n\n\nclass LandscapeDefaultGroup(Group):\n class Meta:\n proxy = True\n\n\[email protected](LandscapeGroup)\nclass LandscapeGroupAdmin(admin.ModelAdmin):\n list_display = (\"landscape\", \"group\")\n\n\nclass UserPreferenceInline(admin.TabularInline):\n model = UserPreference\n\n\[email protected](User)\nclass UserAdmin(admin.ModelAdmin):\n list_display = (\"email\", \"first_name\", \"last_name\", \"created_at\", \"is_staff\")\n inlines = [UserPreferenceInline]\n\n\[email protected](TaxonomyTerm)\nclass TaxonomyTermAdmin(admin.ModelAdmin):\n list_display = (\"value_original\", \"type\", \"value_en\", \"value_es\")\n\n\[email protected](LandscapeDevelopmentStrategy)\nclass LandscapeDevelopmentStrategyAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"landscape\")\n\n\[email protected](SharedResource)\nclass SharedResourceAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"share_uuid\", \"share_access\")\n", "path": "terraso_backend/apps/core/admin.py"}]} | 1,222 | 322 |
gh_patches_debug_27671 | rasdani/github-patches | git_diff | ocadotechnology__codeforlife-portal-417 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
From the Django administration page, in Portal, can't access Teachers or Students
Trying to access a Student or Teacher from the administration page leads to an error:
Failed to load resource: the server responded with a status of 500 (OK)
</issue>
<code>
[start of portal/admin.py]
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2016, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 from django.contrib import admin
38 from django.contrib.auth.models import User
39 from django.contrib.auth.admin import UserAdmin
40
41
42 from portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification
43
44
45 class ClassAdmin(admin.ModelAdmin):
46 search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']
47 list_filter = ['teacher']
48
49
50 class SchoolAdmin(admin.ModelAdmin):
51 search_fields = ['name', 'country', 'postcode', 'town']
52 list_filter = ['postcode', 'country']
53
54
55 class StudentAdmin(admin.ModelAdmin):
56 search_fields = ['new_user__first_name', 'new_user__last_name']
57 list_filter = ['class_field', 'class_field__teacher']
58
59
60 class TeacherAdmin(admin.ModelAdmin):
61 search_fields = ['new_user__first_name', 'new_user__last_name']
62 list_filter = ['school']
63
64
65 class UserProfileAdmin(admin.ModelAdmin):
66 search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
67 list_filter = ['user__date_joined']
68 list_display = ['user', 'joined_recently']
69
70
71 class EmailVerificationAdmin(admin.ModelAdmin):
72 search_fields = ['new_user']
73
74
75 UserAdmin.list_display += ('date_joined',)
76 UserAdmin.list_filter += ('date_joined',)
77
78
79 admin.site.register(Class, ClassAdmin)
80 admin.site.register(Student, StudentAdmin)
81 admin.site.register(Guardian)
82 admin.site.register(Teacher, TeacherAdmin)
83 admin.site.register(School, SchoolAdmin)
84 admin.site.unregister(User)
85 admin.site.register(User, UserAdmin)
86 admin.site.register(UserProfile, UserProfileAdmin)
87 admin.site.register(FrontPageNews)
88 admin.site.register(EmailVerification, EmailVerificationAdmin)
89
[end of portal/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/portal/admin.py b/portal/admin.py
--- a/portal/admin.py
+++ b/portal/admin.py
@@ -45,6 +45,7 @@
class ClassAdmin(admin.ModelAdmin):
search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']
list_filter = ['teacher']
+ readonly_fields = ['teacher']
class SchoolAdmin(admin.ModelAdmin):
@@ -55,17 +56,22 @@
class StudentAdmin(admin.ModelAdmin):
search_fields = ['new_user__first_name', 'new_user__last_name']
list_filter = ['class_field', 'class_field__teacher']
+ readonly_fields = ['user', 'new_user']
+ raw_id_fields = ['class_field', 'pending_class_request']
class TeacherAdmin(admin.ModelAdmin):
search_fields = ['new_user__first_name', 'new_user__last_name']
list_filter = ['school']
+ readonly_fields = ['user', 'new_user']
+ raw_id_fields = ['school', 'pending_join_request']
class UserProfileAdmin(admin.ModelAdmin):
search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
list_filter = ['user__date_joined']
list_display = ['user', 'joined_recently']
+ readonly_fields = ['user']
class EmailVerificationAdmin(admin.ModelAdmin):
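A likely reading of the 500s, given the fix above: the default change form renders a `<select>` for every foreign key, and building those dropdowns over large Student/Teacher/Class tables can exhaust memory or time out. `raw_id_fields` swaps the dropdown for a plain ID lookup widget and `readonly_fields` skips the form field entirely, so the pages load. This is an inference from the diff, not stated in the issue. The pattern, as a hedged generic sketch:

    # Hedged sketch: avoid rendering huge FK dropdowns in a Django ModelAdmin.
    from django.contrib import admin

    class StudentAdmin(admin.ModelAdmin):
        readonly_fields = ['user', 'new_user']                     # display only, no widget
        raw_id_fields = ['class_field', 'pending_class_request']   # ID box instead of a select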
| {"golden_diff": "diff --git a/portal/admin.py b/portal/admin.py\n--- a/portal/admin.py\n+++ b/portal/admin.py\n@@ -45,6 +45,7 @@\n class ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n+ readonly_fields = ['teacher']\n \n \n class SchoolAdmin(admin.ModelAdmin):\n@@ -55,17 +56,22 @@\n class StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n+ readonly_fields = ['user', 'new_user']\n+ raw_id_fields = ['class_field', 'pending_class_request']\n \n \n class TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n+ readonly_fields = ['user', 'new_user']\n+ raw_id_fields = ['school', 'pending_join_request']\n \n \n class UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n+ readonly_fields = ['user']\n \n \n class EmailVerificationAdmin(admin.ModelAdmin):\n", "issue": "From django administration page, in Portal, can't access Teachers or Students\nTrying to access a Student or Teacher from the administration page leads to an error:\nFailed to load resource: the server responded with a status of 500 (OK)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2016, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nfrom portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification\n\n\nclass ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n\n\nclass SchoolAdmin(admin.ModelAdmin):\n search_fields = ['name', 'country', 'postcode', 'town']\n list_filter = ['postcode', 'country']\n\n\nclass StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n\n\nclass TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n\n\nclass EmailVerificationAdmin(admin.ModelAdmin):\n search_fields = ['new_user']\n\n\nUserAdmin.list_display += ('date_joined',)\nUserAdmin.list_filter += ('date_joined',)\n\n\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Guardian)\nadmin.site.register(Teacher, TeacherAdmin)\nadmin.site.register(School, SchoolAdmin)\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(FrontPageNews)\nadmin.site.register(EmailVerification, EmailVerificationAdmin)\n", "path": "portal/admin.py"}]} | 1,541 | 304 |
gh_patches_debug_27236 | rasdani/github-patches | git_diff | redis__redis-py-2324 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for WITHSUFFIXTRIE to FT.CREATE
RediSearch now supports another option (WITHSUFFIXTRIE) during index creation. We need to extend the [FT.CREATE](https://sourcegraph.com/github.com/RediSearch/RediSearch/-/blob/docs/commands/ft.create.md) calls to support this.
</issue>
<code>
[start of redis/commands/search/field.py]
1 from typing import List
2
3 from redis import DataError
4
5
6 class Field:
7
8 NUMERIC = "NUMERIC"
9 TEXT = "TEXT"
10 WEIGHT = "WEIGHT"
11 GEO = "GEO"
12 TAG = "TAG"
13 VECTOR = "VECTOR"
14 SORTABLE = "SORTABLE"
15 NOINDEX = "NOINDEX"
16 AS = "AS"
17
18 def __init__(
19 self,
20 name: str,
21 args: List[str] = None,
22 sortable: bool = False,
23 no_index: bool = False,
24 as_name: str = None,
25 ):
26 if args is None:
27 args = []
28 self.name = name
29 self.args = args
30 self.args_suffix = list()
31 self.as_name = as_name
32
33 if sortable:
34 self.args_suffix.append(Field.SORTABLE)
35 if no_index:
36 self.args_suffix.append(Field.NOINDEX)
37
38 if no_index and not sortable:
39 raise ValueError("Non-Sortable non-Indexable fields are ignored")
40
41 def append_arg(self, value):
42 self.args.append(value)
43
44 def redis_args(self):
45 args = [self.name]
46 if self.as_name:
47 args += [self.AS, self.as_name]
48 args += self.args
49 args += self.args_suffix
50 return args
51
52
53 class TextField(Field):
54 """
55 TextField is used to define a text field in a schema definition
56 """
57
58 NOSTEM = "NOSTEM"
59 PHONETIC = "PHONETIC"
60
61 def __init__(
62 self,
63 name: str,
64 weight: float = 1.0,
65 no_stem: bool = False,
66 phonetic_matcher: str = None,
67 **kwargs,
68 ):
69 Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
70
71 if no_stem:
72 Field.append_arg(self, self.NOSTEM)
73 if phonetic_matcher and phonetic_matcher in [
74 "dm:en",
75 "dm:fr",
76 "dm:pt",
77 "dm:es",
78 ]:
79 Field.append_arg(self, self.PHONETIC)
80 Field.append_arg(self, phonetic_matcher)
81
82
83 class NumericField(Field):
84 """
85 NumericField is used to define a numeric field in a schema definition
86 """
87
88 def __init__(self, name: str, **kwargs):
89 Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)
90
91
92 class GeoField(Field):
93 """
94 GeoField is used to define a geo-indexing field in a schema definition
95 """
96
97 def __init__(self, name: str, **kwargs):
98 Field.__init__(self, name, args=[Field.GEO], **kwargs)
99
100
101 class TagField(Field):
102 """
103 TagField is a tag-indexing field with simpler compression and tokenization.
104 See http://redisearch.io/Tags/
105 """
106
107 SEPARATOR = "SEPARATOR"
108 CASESENSITIVE = "CASESENSITIVE"
109
110 def __init__(
111 self, name: str, separator: str = ",", case_sensitive: bool = False, **kwargs
112 ):
113 args = [Field.TAG, self.SEPARATOR, separator]
114 if case_sensitive:
115 args.append(self.CASESENSITIVE)
116
117 Field.__init__(self, name, args=args, **kwargs)
118
119
120 class VectorField(Field):
121 """
122 Allows vector similarity queries against the value in this attribute.
123 See https://oss.redis.com/redisearch/Vectors/#vector_fields.
124 """
125
126 def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
127 """
128 Create Vector Field. Notice that Vector cannot have sortable or no_index tag,
129 although it's also a Field.
130
131 ``name`` is the name of the field.
132
133 ``algorithm`` can be "FLAT" or "HNSW".
134
135 ``attributes`` each algorithm can have specific attributes. Some of them
136 are mandatory and some of them are optional. See
137 https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm
138 for more information.
139 """
140 sort = kwargs.get("sortable", False)
141 noindex = kwargs.get("no_index", False)
142
143 if sort or noindex:
144 raise DataError("Cannot set 'sortable' or 'no_index' in Vector fields.")
145
146 if algorithm.upper() not in ["FLAT", "HNSW"]:
147 raise DataError(
148 "Realtime vector indexing supporting 2 Indexing Methods:"
149 "'FLAT' and 'HNSW'."
150 )
151
152 attr_li = []
153
154 for key, value in attributes.items():
155 attr_li.extend([key, value])
156
157 Field.__init__(
158 self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs
159 )
160
[end of redis/commands/search/field.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -64,6 +64,7 @@
weight: float = 1.0,
no_stem: bool = False,
phonetic_matcher: str = None,
+ withsuffixtrie: bool = False,
**kwargs,
):
Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
@@ -78,6 +79,8 @@
]:
Field.append_arg(self, self.PHONETIC)
Field.append_arg(self, phonetic_matcher)
+ if withsuffixtrie:
+ Field.append_arg(self, "WITHSUFFIXTRIE")
class NumericField(Field):
@@ -108,11 +111,18 @@
CASESENSITIVE = "CASESENSITIVE"
def __init__(
- self, name: str, separator: str = ",", case_sensitive: bool = False, **kwargs
+ self,
+ name: str,
+ separator: str = ",",
+ case_sensitive: bool = False,
+ withsuffixtrie: bool = False,
+ **kwargs,
):
args = [Field.TAG, self.SEPARATOR, separator]
if case_sensitive:
args.append(self.CASESENSITIVE)
+ if withsuffixtrie:
+ args.append("WITHSUFFIXTRIE")
Field.__init__(self, name, args=args, **kwargs)
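With the change above, suffix-trie indexing becomes an opt-in per field. A hedged usage sketch of the new keyword (the index name, key prefix, and a server build that supports WITHSUFFIXTRIE — RediSearch 2.6+ is the assumption — are ours, not part of the patch):

    # Hedged sketch: creating an index with the new withsuffixtrie flag from redis-py.
    from redis import Redis
    from redis.commands.search.field import TagField, TextField
    from redis.commands.search.indexDefinition import IndexDefinition

    r = Redis()
    r.ft("idx:products").create_index(
        (
            TextField("title", withsuffixtrie=True),  # speeds up suffix/contains matching
            TagField("sku", withsuffixtrie=True),
        ),
        definition=IndexDefinition(prefix=["product:"]),
    )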
| {"golden_diff": "diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py\n--- a/redis/commands/search/field.py\n+++ b/redis/commands/search/field.py\n@@ -64,6 +64,7 @@\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n+ withsuffixtrie: bool = False,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n@@ -78,6 +79,8 @@\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n+ if withsuffixtrie:\n+ Field.append_arg(self, \"WITHSUFFIXTRIE\")\n \n \n class NumericField(Field):\n@@ -108,11 +111,18 @@\n CASESENSITIVE = \"CASESENSITIVE\"\n \n def __init__(\n- self, name: str, separator: str = \",\", case_sensitive: bool = False, **kwargs\n+ self,\n+ name: str,\n+ separator: str = \",\",\n+ case_sensitive: bool = False,\n+ withsuffixtrie: bool = False,\n+ **kwargs,\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n+ if withsuffixtrie:\n+ args.append(\"WITHSUFFIXTRIE\")\n \n Field.__init__(self, name, args=args, **kwargs)\n", "issue": "Add support for WITHSUFFIXTRIE to FT.CREATE \nRediSearch now supports another option (WITHSUFFIXTRIE) during index creation. We need to extend the [FT.CREATE](https://sourcegraph.com/github.com/RediSearch/RediSearch/-/blob/docs/commands/ft.create.md) calls to support this\n", "before_files": [{"content": "from typing import List\n\nfrom redis import DataError\n\n\nclass Field:\n\n NUMERIC = \"NUMERIC\"\n TEXT = \"TEXT\"\n WEIGHT = \"WEIGHT\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n VECTOR = \"VECTOR\"\n SORTABLE = \"SORTABLE\"\n NOINDEX = \"NOINDEX\"\n AS = \"AS\"\n\n def __init__(\n self,\n name: str,\n args: List[str] = None,\n sortable: bool = False,\n no_index: bool = False,\n as_name: str = None,\n ):\n if args is None:\n args = []\n self.name = name\n self.args = args\n self.args_suffix = list()\n self.as_name = as_name\n\n if sortable:\n self.args_suffix.append(Field.SORTABLE)\n if no_index:\n self.args_suffix.append(Field.NOINDEX)\n\n if no_index and not sortable:\n raise ValueError(\"Non-Sortable non-Indexable fields are ignored\")\n\n def append_arg(self, value):\n self.args.append(value)\n\n def redis_args(self):\n args = [self.name]\n if self.as_name:\n args += [self.AS, self.as_name]\n args += self.args\n args += self.args_suffix\n return args\n\n\nclass TextField(Field):\n \"\"\"\n TextField is used to define a text field in a schema definition\n \"\"\"\n\n NOSTEM = \"NOSTEM\"\n PHONETIC = \"PHONETIC\"\n\n def __init__(\n self,\n name: str,\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n\n if no_stem:\n Field.append_arg(self, self.NOSTEM)\n if phonetic_matcher and phonetic_matcher in [\n \"dm:en\",\n \"dm:fr\",\n \"dm:pt\",\n \"dm:es\",\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n\n\nclass NumericField(Field):\n \"\"\"\n NumericField is used to define a numeric field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)\n\n\nclass GeoField(Field):\n \"\"\"\n GeoField is used to define a geo-indexing field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.GEO], **kwargs)\n\n\nclass TagField(Field):\n \"\"\"\n TagField is a tag-indexing field with 
simpler compression and tokenization.\n See http://redisearch.io/Tags/\n \"\"\"\n\n SEPARATOR = \"SEPARATOR\"\n CASESENSITIVE = \"CASESENSITIVE\"\n\n def __init__(\n self, name: str, separator: str = \",\", case_sensitive: bool = False, **kwargs\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n\n Field.__init__(self, name, args=args, **kwargs)\n\n\nclass VectorField(Field):\n \"\"\"\n Allows vector similarity queries against the value in this attribute.\n See https://oss.redis.com/redisearch/Vectors/#vector_fields.\n \"\"\"\n\n def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):\n \"\"\"\n Create Vector Field. Notice that Vector cannot have sortable or no_index tag,\n although it's also a Field.\n\n ``name`` is the name of the field.\n\n ``algorithm`` can be \"FLAT\" or \"HNSW\".\n\n ``attributes`` each algorithm can have specific attributes. Some of them\n are mandatory and some of them are optional. See\n https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm\n for more information.\n \"\"\"\n sort = kwargs.get(\"sortable\", False)\n noindex = kwargs.get(\"no_index\", False)\n\n if sort or noindex:\n raise DataError(\"Cannot set 'sortable' or 'no_index' in Vector fields.\")\n\n if algorithm.upper() not in [\"FLAT\", \"HNSW\"]:\n raise DataError(\n \"Realtime vector indexing supporting 2 Indexing Methods:\"\n \"'FLAT' and 'HNSW'.\"\n )\n\n attr_li = []\n\n for key, value in attributes.items():\n attr_li.extend([key, value])\n\n Field.__init__(\n self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs\n )\n", "path": "redis/commands/search/field.py"}]} | 2,044 | 354 |
gh_patches_debug_22042 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-2706 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Idea: Enhance Amazon Polly support
Amazon Polly works well using standard voices; I have it running perfectly under the latest Picroft image. However, there is currently no support for 'neural' engine voices, nor for 'conversational' style SSML. Both of these provide exceptionally high-quality text-to-speech audio, and it would be nice to be able to use them with Mycroft.
This [post](https://community.mycroft.ai/t/regarding-polly-tts-support/8722/10) on the community forums goes into a little more detail on it.
Thanks!
</issue>
<code>
[start of mycroft/tts/polly_tts.py]
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from mycroft.tts.tts import TTS, TTSValidator
16 from mycroft.configuration import Configuration
17
18
19 class PollyTTS(TTS):
20 def __init__(self, lang="en-us", config=None):
21 import boto3
22 config = config or Configuration.get().get("tts", {}).get("polly", {})
23 super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),
24 audio_ext="mp3",
25 ssml_tags=["speak", "say-as", "voice",
26 "prosody", "break",
27 "emphasis", "sub", "lang",
28 "phoneme", "w", "whisper",
29 "amazon:auto-breaths",
30 "p", "s", "amazon:effect",
31 "mark"])
32
33 self.voice = self.config.get("voice", "Matthew")
34 self.key_id = self.config.get("access_key_id", '')
35 self.key = self.config.get("secret_access_key", '')
36 self.region = self.config.get("region", 'us-east-1')
37 self.polly = boto3.Session(aws_access_key_id=self.key_id,
38 aws_secret_access_key=self.key,
39 region_name=self.region).client('polly')
40
41 def get_tts(self, sentence, wav_file):
42 text_type = "text"
43 if self.remove_ssml(sentence) != sentence:
44 text_type = "ssml"
45 sentence = sentence \
46 .replace("\\whispered", "/amazon:effect") \
47 .replace("whispered", "amazon:effect name=\"whispered\"")
48 response = self.polly.synthesize_speech(
49 OutputFormat=self.audio_ext,
50 Text=sentence,
51 TextType=text_type,
52 VoiceId=self.voice)
53
54 with open(wav_file, 'wb') as f:
55 f.write(response['AudioStream'].read())
56 return (wav_file, None) # No phonemes
57
58 def describe_voices(self, language_code="en-US"):
59 if language_code.islower():
60 a, b = language_code.split("-")
61 b = b.upper()
62 language_code = "-".join([a, b])
63 # example 'it-IT' useful to retrieve voices
64 voices = self.polly.describe_voices(LanguageCode=language_code)
65
66 return voices
67
68
69 class PollyTTSValidator(TTSValidator):
70 def __init__(self, tts):
71 super(PollyTTSValidator, self).__init__(tts)
72
73 def validate_lang(self):
74 # TODO
75 pass
76
77 def validate_dependencies(self):
78 try:
79 from boto3 import Session
80 except ImportError:
81 raise Exception(
82 'PollyTTS dependencies not installed, please run pip install '
83 'boto3 ')
84
85 def validate_connection(self):
86 try:
87 if not self.tts.voice:
88 raise Exception("Polly TTS Voice not configured")
89 output = self.tts.describe_voices()
90 except TypeError:
91 raise Exception(
92 'PollyTTS server could not be verified. Please check your '
93 'internet connection and credentials.')
94
95 def get_tts_class(self):
96 return PollyTTS
97
[end of mycroft/tts/polly_tts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mycroft/tts/polly_tts.py b/mycroft/tts/polly_tts.py
--- a/mycroft/tts/polly_tts.py
+++ b/mycroft/tts/polly_tts.py
@@ -34,6 +34,7 @@
self.key_id = self.config.get("access_key_id", '')
self.key = self.config.get("secret_access_key", '')
self.region = self.config.get("region", 'us-east-1')
+ self.engine = self.config.get("engine", "standard")
self.polly = boto3.Session(aws_access_key_id=self.key_id,
aws_secret_access_key=self.key,
region_name=self.region).client('polly')
@@ -49,7 +50,8 @@
OutputFormat=self.audio_ext,
Text=sentence,
TextType=text_type,
- VoiceId=self.voice)
+ VoiceId=self.voice,
+ Engine=self.engine)
with open(wav_file, 'wb') as f:
f.write(response['AudioStream'].read())
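For reference, a minimal sketch of the boto3 call that the patched `get_tts` boils down to once a user sets `"engine": "neural"` under the `polly` section of the TTS configuration (the same section the module reads via `Configuration.get().get("tts", {}).get("polly", {})`). The region, voice, sample text and output path are illustrative assumptions, not values taken from the patch, and neural voices are only offered for certain voice/region combinations:

```python
# Illustrative sketch only: what the patched PollyTTS ends up calling with engine="neural".
import boto3

# Credentials are resolved from the usual AWS config/env in this sketch.
polly = boto3.Session(region_name="us-east-1").client("polly")

response = polly.synthesize_speech(
    OutputFormat="mp3",
    Text="Hello from Mycroft",
    TextType="text",
    VoiceId="Matthew",
    Engine="neural",          # keyword enabled by the patch; "standard" remains the default
)

with open("hello.mp3", "wb") as f:
    f.write(response["AudioStream"].read())
```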
| {"golden_diff": "diff --git a/mycroft/tts/polly_tts.py b/mycroft/tts/polly_tts.py\n--- a/mycroft/tts/polly_tts.py\n+++ b/mycroft/tts/polly_tts.py\n@@ -34,6 +34,7 @@\n self.key_id = self.config.get(\"access_key_id\", '')\n self.key = self.config.get(\"secret_access_key\", '')\n self.region = self.config.get(\"region\", 'us-east-1')\n+ self.engine = self.config.get(\"engine\", \"standard\")\n self.polly = boto3.Session(aws_access_key_id=self.key_id,\n aws_secret_access_key=self.key,\n region_name=self.region).client('polly')\n@@ -49,7 +50,8 @@\n OutputFormat=self.audio_ext,\n Text=sentence,\n TextType=text_type,\n- VoiceId=self.voice)\n+ VoiceId=self.voice,\n+ Engine=self.engine)\n \n with open(wav_file, 'wb') as f:\n f.write(response['AudioStream'].read())\n", "issue": "Idea: Enhance Amazon Polly support\nAmazon Polly works well using standard voices, I have it running perfectly under the latest Picroft image. However, there is no current support for 'neural' engine voices, as well as 'conversational' style SSML. Both of these provide exceptionally high quality text-to-speech audio and would be nice to have the ability to use with Mycroft.\r\n\r\nThis [post](https://community.mycroft.ai/t/regarding-polly-tts-support/8722/10) on the community forums goes in to a little more detail on it.\r\n\r\nThanks!\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom mycroft.tts.tts import TTS, TTSValidator\nfrom mycroft.configuration import Configuration\n\n\nclass PollyTTS(TTS):\n def __init__(self, lang=\"en-us\", config=None):\n import boto3\n config = config or Configuration.get().get(\"tts\", {}).get(\"polly\", {})\n super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),\n audio_ext=\"mp3\",\n ssml_tags=[\"speak\", \"say-as\", \"voice\",\n \"prosody\", \"break\",\n \"emphasis\", \"sub\", \"lang\",\n \"phoneme\", \"w\", \"whisper\",\n \"amazon:auto-breaths\",\n \"p\", \"s\", \"amazon:effect\",\n \"mark\"])\n\n self.voice = self.config.get(\"voice\", \"Matthew\")\n self.key_id = self.config.get(\"access_key_id\", '')\n self.key = self.config.get(\"secret_access_key\", '')\n self.region = self.config.get(\"region\", 'us-east-1')\n self.polly = boto3.Session(aws_access_key_id=self.key_id,\n aws_secret_access_key=self.key,\n region_name=self.region).client('polly')\n\n def get_tts(self, sentence, wav_file):\n text_type = \"text\"\n if self.remove_ssml(sentence) != sentence:\n text_type = \"ssml\"\n sentence = sentence \\\n .replace(\"\\\\whispered\", \"/amazon:effect\") \\\n .replace(\"whispered\", \"amazon:effect name=\\\"whispered\\\"\")\n response = self.polly.synthesize_speech(\n OutputFormat=self.audio_ext,\n Text=sentence,\n TextType=text_type,\n VoiceId=self.voice)\n\n with open(wav_file, 'wb') as f:\n f.write(response['AudioStream'].read())\n return (wav_file, None) # No phonemes\n\n def describe_voices(self, language_code=\"en-US\"):\n if language_code.islower():\n a, b = language_code.split(\"-\")\n b = 
b.upper()\n language_code = \"-\".join([a, b])\n # example 'it-IT' useful to retrieve voices\n voices = self.polly.describe_voices(LanguageCode=language_code)\n\n return voices\n\n\nclass PollyTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(PollyTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n # TODO\n pass\n\n def validate_dependencies(self):\n try:\n from boto3 import Session\n except ImportError:\n raise Exception(\n 'PollyTTS dependencies not installed, please run pip install '\n 'boto3 ')\n\n def validate_connection(self):\n try:\n if not self.tts.voice:\n raise Exception(\"Polly TTS Voice not configured\")\n output = self.tts.describe_voices()\n except TypeError:\n raise Exception(\n 'PollyTTS server could not be verified. Please check your '\n 'internet connection and credentials.')\n\n def get_tts_class(self):\n return PollyTTS\n", "path": "mycroft/tts/polly_tts.py"}]} | 1,667 | 228 |
gh_patches_debug_22555 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-967 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gh: prefix doesn't work anymore
* Cookiecutter version: 1.5.1
* Template project url: `gh:*`
* Python version: 2.7.13
* Operating System: Linux
### Description:
cookiecutter does not honor prefixes anymore.
### What I've run:
Simply testing the example from the README doesn't work as expected:
``` bash
$ cookiecutter gh:audreyr/cookiecutter-pypackage
A valid repository for "gh:audreyr/cookiecutter-pypackage" could not be found in the following locations:
gh:audreyr/cookiecutter-pypackage
/home/me/.cookiecutters/gh:audreyr/cookiecutter-pypackage
```
The same commands using the full repository path works as expected:
```bash
$ cookiecutter https://github.com/audreyr/cookiecutter-pypackage
```
</issue>
<code>
[start of cookiecutter/config.py]
1 # -*- coding: utf-8 -*-
2
3 """Global configuration handling."""
4
5 from __future__ import unicode_literals
6 import copy
7 import logging
8 import os
9 import io
10
11 import poyo
12
13 from .exceptions import ConfigDoesNotExistException
14 from .exceptions import InvalidConfiguration
15
16
17 logger = logging.getLogger(__name__)
18
19 USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')
20
21 BUILTIN_ABBREVIATIONS = {
22 'gh': 'https://github.com/{0}.git',
23 'gl': 'https://gitlab.com/{0}.git',
24 'bb': 'https://bitbucket.org/{0}',
25 }
26
27 DEFAULT_CONFIG = {
28 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
29 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),
30 'default_context': {},
31 'abbreviations': BUILTIN_ABBREVIATIONS,
32 }
33
34
35 def _expand_path(path):
36 """Expand both environment variables and user home in the given path."""
37 path = os.path.expandvars(path)
38 path = os.path.expanduser(path)
39 return path
40
41
42 def get_config(config_path):
43 """Retrieve the config from the specified path, returning a config dict."""
44 if not os.path.exists(config_path):
45 raise ConfigDoesNotExistException
46
47 logger.debug('config_path is {0}'.format(config_path))
48 with io.open(config_path, encoding='utf-8') as file_handle:
49 try:
50 yaml_dict = poyo.parse_string(file_handle.read())
51 except poyo.exceptions.PoyoException as e:
52 raise InvalidConfiguration(
53 'Unable to parse YAML file {}. Error: {}'
54 ''.format(config_path, e)
55 )
56
57 config_dict = copy.copy(DEFAULT_CONFIG)
58 config_dict.update(yaml_dict)
59
60 raw_replay_dir = config_dict['replay_dir']
61 config_dict['replay_dir'] = _expand_path(raw_replay_dir)
62
63 raw_cookies_dir = config_dict['cookiecutters_dir']
64 config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)
65
66 return config_dict
67
68
69 def get_user_config(config_file=None, default_config=False):
70 """Return the user config as a dict.
71
72 If ``default_config`` is True, ignore ``config_file`` and return default
73 values for the config parameters.
74
75 If a path to a ``config_file`` is given, that is different from the default
76 location, load the user config from that.
77
78 Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``
79 environment variable. If set, load the config from this path. This will
80 raise an error if the specified path is not valid.
81
82 If the environment variable is not set, try the default config file path
83 before falling back to the default config values.
84 """
85 # Do NOT load a config. Return defaults instead.
86 if default_config:
87 return copy.copy(DEFAULT_CONFIG)
88
89 # Load the given config file
90 if config_file and config_file is not USER_CONFIG_PATH:
91 return get_config(config_file)
92
93 try:
94 # Does the user set up a config environment variable?
95 env_config_file = os.environ['COOKIECUTTER_CONFIG']
96 except KeyError:
97 # Load an optional user config if it exists
98 # otherwise return the defaults
99 if os.path.exists(USER_CONFIG_PATH):
100 return get_config(USER_CONFIG_PATH)
101 else:
102 return copy.copy(DEFAULT_CONFIG)
103 else:
104 # There is a config environment variable. Try to load it.
105 # Do not check for existence, so invalid file paths raise an error.
106 return get_config(env_config_file)
107
[end of cookiecutter/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/config.py b/cookiecutter/config.py
--- a/cookiecutter/config.py
+++ b/cookiecutter/config.py
@@ -39,6 +39,25 @@
return path
+def merge_configs(default, overwrite):
+ """Recursively update a dict with the key/value pair of another.
+
+ Dict values that are dictionaries themselves will be updated, whilst
+ preserving existing keys.
+ """
+ new_config = copy.deepcopy(default)
+
+ for k, v in overwrite.items():
+ # Make sure to preserve existing items in
+ # nested dicts, for example `abbreviations`
+ if isinstance(v, dict):
+ new_config[k] = merge_configs(default[k], v)
+ else:
+ new_config[k] = v
+
+ return new_config
+
+
def get_config(config_path):
"""Retrieve the config from the specified path, returning a config dict."""
if not os.path.exists(config_path):
@@ -54,8 +73,7 @@
''.format(config_path, e)
)
- config_dict = copy.copy(DEFAULT_CONFIG)
- config_dict.update(yaml_dict)
+ config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)
raw_replay_dir = config_dict['replay_dir']
config_dict['replay_dir'] = _expand_path(raw_replay_dir)
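To make the effect of the patch concrete, here is a small sketch contrasting the old `dict.update` behaviour with the recursive merge. The user-config dict below is a made-up example (not taken from the report): with `update`, a user config that defines its own `abbreviations` wipes out the built-in `gh`/`gl`/`bb` prefixes, which is one way the reported `gh:` failure can arise.

```python
import copy

def merge_configs(default, overwrite):      # condensed copy of the helper added above
    new = copy.deepcopy(default)
    for k, v in overwrite.items():
        new[k] = merge_configs(default[k], v) if isinstance(v, dict) else v
    return new

DEFAULTS = {"abbreviations": {"gh": "https://github.com/{0}.git",
                              "bb": "https://bitbucket.org/{0}"}}
user_rc = {"abbreviations": {"corp": "https://git.example.com/{0}.git"}}  # hypothetical ~/.cookiecutterrc

old = copy.copy(DEFAULTS)
old.update(user_rc)
assert "gh" not in old["abbreviations"]     # built-in prefix lost -> "gh:..." no longer resolves

new = merge_configs(DEFAULTS, user_rc)
assert "gh" in new["abbreviations"] and "corp" in new["abbreviations"]
```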
| {"golden_diff": "diff --git a/cookiecutter/config.py b/cookiecutter/config.py\n--- a/cookiecutter/config.py\n+++ b/cookiecutter/config.py\n@@ -39,6 +39,25 @@\n return path\n \n \n+def merge_configs(default, overwrite):\n+ \"\"\"Recursively update a dict with the key/value pair of another.\n+\n+ Dict values that are dictionaries themselves will be updated, whilst\n+ preserving existing keys.\n+ \"\"\"\n+ new_config = copy.deepcopy(default)\n+\n+ for k, v in overwrite.items():\n+ # Make sure to preserve existing items in\n+ # nested dicts, for example `abbreviations`\n+ if isinstance(v, dict):\n+ new_config[k] = merge_configs(default[k], v)\n+ else:\n+ new_config[k] = v\n+\n+ return new_config\n+\n+\n def get_config(config_path):\n \"\"\"Retrieve the config from the specified path, returning a config dict.\"\"\"\n if not os.path.exists(config_path):\n@@ -54,8 +73,7 @@\n ''.format(config_path, e)\n )\n \n- config_dict = copy.copy(DEFAULT_CONFIG)\n- config_dict.update(yaml_dict)\n+ config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)\n \n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n", "issue": "gh: prefix doesn't work anymore\n* Cookiecutter version: 1.5.1\r\n* Template project url: `gh:*`\r\n* Python version: 2.7.13\r\n* Operating System: Linux\r\n\r\n### Description:\r\n\r\ncookiecutter does not honor prefixes anymore.\r\n\r\n### What I've run:\r\n\r\nSimply testing the example from the README doesn't work as expected:\r\n\r\n``` bash\r\n$ cookiecutter gh:audreyr/cookiecutter-pypackage\r\nA valid repository for \"gh:audreyr/cookiecutter-pypackage\" could not be found in the following locations:\r\ngh:audreyr/cookiecutter-pypackage\r\n/home/me/.cookiecutters/gh:audreyr/cookiecutter-pypackage\r\n```\r\nThe same commands using the full repository path works as expected:\r\n\r\n```bash\r\n$ cookiecutter https://github.com/audreyr/cookiecutter-pypackage\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Global configuration handling.\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport poyo\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nUSER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\nBUILTIN_ABBREVIATIONS = {\n 'gh': 'https://github.com/{0}.git',\n 'gl': 'https://gitlab.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {},\n 'abbreviations': BUILTIN_ABBREVIATIONS,\n}\n\n\ndef _expand_path(path):\n \"\"\"Expand both environment variables and user home in the given path.\"\"\"\n path = os.path.expandvars(path)\n path = os.path.expanduser(path)\n return path\n\n\ndef get_config(config_path):\n \"\"\"Retrieve the config from the specified path, returning a config dict.\"\"\"\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is {0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = poyo.parse_string(file_handle.read())\n except poyo.exceptions.PoyoException as e:\n raise InvalidConfiguration(\n 'Unable to parse YAML file {}. 
Error: {}'\n ''.format(config_path, e)\n )\n\n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n\n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n\n raw_cookies_dir = config_dict['cookiecutters_dir']\n config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)\n\n return config_dict\n\n\ndef get_user_config(config_file=None, default_config=False):\n \"\"\"Return the user config as a dict.\n\n If ``default_config`` is True, ignore ``config_file`` and return default\n values for the config parameters.\n\n If a path to a ``config_file`` is given, that is different from the default\n location, load the user config from that.\n\n Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``\n environment variable. If set, load the config from this path. This will\n raise an error if the specified path is not valid.\n\n If the environment variable is not set, try the default config file path\n before falling back to the default config values.\n \"\"\"\n # Do NOT load a config. Return defaults instead.\n if default_config:\n return copy.copy(DEFAULT_CONFIG)\n\n # Load the given config file\n if config_file and config_file is not USER_CONFIG_PATH:\n return get_config(config_file)\n\n try:\n # Does the user set up a config environment variable?\n env_config_file = os.environ['COOKIECUTTER_CONFIG']\n except KeyError:\n # Load an optional user config if it exists\n # otherwise return the defaults\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n else:\n return copy.copy(DEFAULT_CONFIG)\n else:\n # There is a config environment variable. Try to load it.\n # Do not check for existence, so invalid file paths raise an error.\n return get_config(env_config_file)\n", "path": "cookiecutter/config.py"}]} | 1,729 | 306 |
gh_patches_debug_4279 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-3345 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reddit Movie Dialog no longer exists
**Bug description**
[Reddit Movie Dialog](https://parl.ai/docs/tasks.html#movie-dialog-reddit) no longer exists.
**Reproduction steps**
```
TrainModel.main(
# similar to before
task='empathetic_dialogues,blended_skill_talk,movie_dialog_reddit,convai2,persona_chat',
model='transformer/generator',
model_file='from_pretrained/model',
# initialize with a pretrained model
init_model='zoo:tutorial_transformer_generator/model',
# arguments we get from the pretrained model.
# Unfortunately, these must be looked up separately for each model.
n_heads=16, n_layers=8, n_positions=512, text_truncate=512,
label_truncate=128, ffn_size=2048, embedding_size=512,
activation='gelu', variant='xlm',
dict_lower=True, dict_tokenizer='bpe',
dict_file='zoo:tutorial_transformer_generator/model.dict',
learn_positional_embeddings=True,
# some training arguments, specific to this fine-tuning
# use a small learning rate with ADAM optimizer
lr=1e-5, optimizer='adam',
warmup_updates=100,
# early stopping on perplexity
validation_metric='ppl',
# train at most 10 minutes, and validate every 0.25 epochs
max_train_time=600, validation_every_n_epochs=0.25,
# depend on your gpu. If you have a V100, this is good
batchsize=12, fp16=True, fp16_impl='mem_efficient',
# speeds up validation
skip_generation=True,
# helps us cram more examples into our gpu at a time
dynamic_batching='full',
)
```
**Logs**
Please paste the command line output:
```
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-39-ff3044de39fe> in <module>()
36
37 # helps us cram more examples into our gpu at a time
---> 38 dynamic_batching='full',
39 )
15 frames
/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
ModuleNotFoundError: No module named 'parlai.tasks.movie_dialog_reddit'
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
---------------------------------------------------------------------------```
</issue>
<code>
[start of docs/source/generate_task_list.py]
1 #!/usr/bin/env python3
2 # Copyright (c) Facebook, Inc. and its affiliates.
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 from parlai.tasks.task_list import task_list
7
8 MASTER = "https://github.com/facebookresearch/ParlAI/tree/master"
9
10 category_order = ['QA', 'Cloze', 'Goal', 'ChitChat', 'Negotiation', 'Visual', 'decanlp']
11 category_task_list = {x: [] for x in category_order}
12
13 fout = open('task_list.inc', 'w')
14
15 s = "They consist of: "
16 for t in category_order:
17 fout.write(f"1. {t} tasks\n")
18 fout.write("\n")
19
20 for task_dict in task_list:
21 tags = task_dict.get('tags', None)
22 for tag in tags:
23 if tag in category_task_list:
24 category_task_list[tag].append(task_dict)
25
26 for num_category, (category, tl) in enumerate(category_task_list.items()):
27 if num_category != 0:
28 fout.write("\n-----\n\n")
29
30 fout.write(f'## {category} Tasks\n')
31
32 for task_dict in tl:
33 id = task_dict.get('id', None)
34 display_name = task_dict.get('display_name', None)
35 task = task_dict.get('task', None)
36 tags = task_dict.get('tags', None)
37 description = task_dict.get('description', None)
38 notes = task_dict.get('notes', None)
39 code_urlend = task[: max(task.find(':'), len(task))]
40 code_url = f"{MASTER}/parlai/tasks/{code_urlend}"
41 links = task_dict.get("links", {})
42 assert isinstance(links, dict), f"task {id} is poorly formatted"
43 urls = [(k, v) for k, v in links.items()]
44 urls.append(("code", code_url))
45
46 urls_md = ", ".join(f"[{k}]({v})" for k, v in urls)
47 fout.write(f"### {display_name}\n")
48 fout.write(f"_Links_: {urls_md}\n\n")
49 if description:
50 fout.write(description + "\n")
51 if notes:
52 fout.write(":::{admonition,note} Notes\n")
53 fout.write(notes + "\n")
54 fout.write(":::\n")
55 fout.write("\n\n")
56
57 fout.close()
58
[end of docs/source/generate_task_list.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/source/generate_task_list.py b/docs/source/generate_task_list.py
--- a/docs/source/generate_task_list.py
+++ b/docs/source/generate_task_list.py
@@ -45,6 +45,7 @@
urls_md = ", ".join(f"[{k}]({v})" for k, v in urls)
fout.write(f"### {display_name}\n")
+ fout.write(f"_Usage_: `--task {task}`\n\n")
fout.write(f"_Links_: {urls_md}\n\n")
if description:
fout.write(description + "\n")
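For illustration, a rough sketch of the markdown fragment the patched loop now emits for a single entry, so readers of the generated task list can copy the exact `--task` string instead of guessing a module name. The display name, task value and URL below are invented examples, not entries from ParlAI's real task list:

```python
# Hypothetical single entry; real values come from parlai.tasks.task_list.task_list.
display_name, task = "Example Chit-Chat", "example_chitchat"
urls_md = "[code](https://github.com/facebookresearch/ParlAI/tree/master/parlai/tasks/example_chitchat)"

fragment = (
    f"### {display_name}\n"
    f"_Usage_: `--task {task}`\n\n"   # the line added by the patch
    f"_Links_: {urls_md}\n\n"
)
print(fragment)
```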
| {"golden_diff": "diff --git a/docs/source/generate_task_list.py b/docs/source/generate_task_list.py\n--- a/docs/source/generate_task_list.py\n+++ b/docs/source/generate_task_list.py\n@@ -45,6 +45,7 @@\n \n urls_md = \", \".join(f\"[{k}]({v})\" for k, v in urls)\n fout.write(f\"### {display_name}\\n\")\n+ fout.write(f\"_Usage_: `--task {task}`\\n\\n\")\n fout.write(f\"_Links_: {urls_md}\\n\\n\")\n if description:\n fout.write(description + \"\\n\")\n", "issue": "Reddit Movie Dialog no longer exists\n**Bug description**\r\n[Reddit Movie Dialog](https://parl.ai/docs/tasks.html#movie-dialog-reddit) no longer exists.\r\n\r\n**Reproduction steps**\r\n```\r\nTrainModel.main(\r\n # similar to before\r\n task='empathetic_dialogues,blended_skill_talk,movie_dialog_reddit,convai2,persona_chat', \r\n model='transformer/generator',\r\n model_file='from_pretrained/model',\r\n \r\n # initialize with a pretrained model\r\n init_model='zoo:tutorial_transformer_generator/model',\r\n \r\n # arguments we get from the pretrained model.\r\n # Unfortunately, these must be looked up separately for each model.\r\n n_heads=16, n_layers=8, n_positions=512, text_truncate=512,\r\n label_truncate=128, ffn_size=2048, embedding_size=512,\r\n activation='gelu', variant='xlm',\r\n dict_lower=True, dict_tokenizer='bpe',\r\n dict_file='zoo:tutorial_transformer_generator/model.dict',\r\n learn_positional_embeddings=True,\r\n \r\n # some training arguments, specific to this fine-tuning\r\n # use a small learning rate with ADAM optimizer\r\n lr=1e-5, optimizer='adam',\r\n warmup_updates=100,\r\n # early stopping on perplexity\r\n validation_metric='ppl',\r\n # train at most 10 minutes, and validate every 0.25 epochs\r\n max_train_time=600, validation_every_n_epochs=0.25,\r\n \r\n # depend on your gpu. If you have a V100, this is good\r\n batchsize=12, fp16=True, fp16_impl='mem_efficient',\r\n \r\n # speeds up validation\r\n skip_generation=True,\r\n \r\n # helps us cram more examples into our gpu at a time\r\n dynamic_batching='full',\r\n)\r\n```\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nModuleNotFoundError Traceback (most recent call last)\r\n<ipython-input-39-ff3044de39fe> in <module>()\r\n 36 \r\n 37 # helps us cram more examples into our gpu at a time\r\n---> 38 dynamic_batching='full',\r\n 39 )\r\n\r\n15 frames\r\n/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)\r\n\r\nModuleNotFoundError: No module named 'parlai.tasks.movie_dialog_reddit'\r\n\r\n---------------------------------------------------------------------------\r\nNOTE: If your import is failing due to a missing package, you can\r\nmanually install dependencies using either !pip or !apt.\r\n\r\nTo view examples of installing some common dependencies, click the\r\n\"Open Examples\" button below.\r\n---------------------------------------------------------------------------```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom parlai.tasks.task_list import task_list\n\nMASTER = \"https://github.com/facebookresearch/ParlAI/tree/master\"\n\ncategory_order = ['QA', 'Cloze', 'Goal', 'ChitChat', 'Negotiation', 'Visual', 'decanlp']\ncategory_task_list = {x: [] for x in category_order}\n\nfout = open('task_list.inc', 'w')\n\ns = \"They consist of: \"\nfor t in category_order:\n fout.write(f\"1. {t} tasks\\n\")\nfout.write(\"\\n\")\n\nfor task_dict in task_list:\n tags = task_dict.get('tags', None)\n for tag in tags:\n if tag in category_task_list:\n category_task_list[tag].append(task_dict)\n\nfor num_category, (category, tl) in enumerate(category_task_list.items()):\n if num_category != 0:\n fout.write(\"\\n-----\\n\\n\")\n\n fout.write(f'## {category} Tasks\\n')\n\n for task_dict in tl:\n id = task_dict.get('id', None)\n display_name = task_dict.get('display_name', None)\n task = task_dict.get('task', None)\n tags = task_dict.get('tags', None)\n description = task_dict.get('description', None)\n notes = task_dict.get('notes', None)\n code_urlend = task[: max(task.find(':'), len(task))]\n code_url = f\"{MASTER}/parlai/tasks/{code_urlend}\"\n links = task_dict.get(\"links\", {})\n assert isinstance(links, dict), f\"task {id} is poorly formatted\"\n urls = [(k, v) for k, v in links.items()]\n urls.append((\"code\", code_url))\n\n urls_md = \", \".join(f\"[{k}]({v})\" for k, v in urls)\n fout.write(f\"### {display_name}\\n\")\n fout.write(f\"_Links_: {urls_md}\\n\\n\")\n if description:\n fout.write(description + \"\\n\")\n if notes:\n fout.write(\":::{admonition,note} Notes\\n\")\n fout.write(notes + \"\\n\")\n fout.write(\":::\\n\")\n fout.write(\"\\n\\n\")\n\nfout.close()\n", "path": "docs/source/generate_task_list.py"}]} | 1,780 | 135 |
gh_patches_debug_4443 | rasdani/github-patches | git_diff | pytorch__text-145 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'list' object has no attribute 'rstrip'
Hi all, torchtext previously worked for me when I was running Anaconda Python. However, now that I have uninstalled my Anaconda Python, it has stopped working.
It gives me the following error:
```
File "/Library/Python/2.7/site-packages/torchtext/data/example.py", line 59, in fromlist
setattr(ex, name, field.preprocess(val.rstrip('\n')))
AttributeError: 'list' object has no attribute 'rstrip'
```
Thanks!
</issue>
<code>
[start of torchtext/data/example.py]
1 import csv
2 import json
3
4 import six
5
6
7 class Example(object):
8 """Defines a single training or test example.
9
10 Stores each column of the example as an attribute.
11 """
12
13 @classmethod
14 def fromJSON(cls, data, fields):
15 return cls.fromdict(json.loads(data), fields)
16
17 @classmethod
18 def fromdict(cls, data, fields):
19 ex = cls()
20 for key, vals in fields.items():
21 if key not in data:
22 raise ValueError("Specified key {} was not found in "
23 "the input data".format(key))
24 if vals is not None:
25 if not isinstance(vals, list):
26 vals = [vals]
27 for val in vals:
28 name, field = val
29 setattr(ex, name, field.preprocess(data[key]))
30 return ex
31
32 @classmethod
33 def fromTSV(cls, data, fields):
34 return cls.fromlist(data.split('\t'), fields)
35
36 @classmethod
37 def fromCSV(cls, data, fields):
38 data = data.rstrip("\n")
39 # If Python 2, encode to utf-8 since CSV doesn't take unicode input
40 if six.PY2:
41 data = data.encode('utf-8')
42 # Use Python CSV module to parse the CSV line
43 parsed_csv_lines = csv.reader([data])
44
45 # If Python 2, decode back to unicode (the original input format).
46 if six.PY2:
47 for line in parsed_csv_lines:
48 parsed_csv_line = [six.text_type(col, 'utf-8') for col in line]
49 break
50 else:
51 parsed_csv_line = list(parsed_csv_lines)[0]
52 return cls.fromlist(parsed_csv_line, fields)
53
54 @classmethod
55 def fromlist(cls, data, fields):
56 ex = cls()
57 for (name, field), val in zip(fields, data):
58 if field is not None:
59 setattr(ex, name, field.preprocess(val.rstrip('\n')))
60 return ex
61
62 @classmethod
63 def fromtree(cls, data, fields, subtrees=False):
64 try:
65 from nltk.tree import Tree
66 except ImportError:
67 print("Please install NLTK. "
68 "See the docs at http://nltk.org for more information.")
69 raise
70 tree = Tree.fromstring(data)
71 if subtrees:
72 return [cls.fromlist(
73 [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]
74 return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)
75
[end of torchtext/data/example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchtext/data/example.py b/torchtext/data/example.py
--- a/torchtext/data/example.py
+++ b/torchtext/data/example.py
@@ -56,7 +56,9 @@
ex = cls()
for (name, field), val in zip(fields, data):
if field is not None:
- setattr(ex, name, field.preprocess(val.rstrip('\n')))
+ if isinstance(val, six.string_types):
+ val = val.rstrip('\n')
+ setattr(ex, name, field.preprocess(val))
return ex
@classmethod
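A small sketch of why the type guard matters: callers can hand `fromlist` values that are already lists (for example pre-tokenized text), and only plain strings should have a trailing newline stripped. The field class below is a stand-in object for illustration, not the real `torchtext.data.Field`:

```python
import six

class FakeField:                    # stand-in for torchtext.data.Field, illustration only
    def preprocess(self, x):
        return x

ex_fields = [("text", FakeField()), ("tokens", FakeField())]
row = ["a sentence\n", ["already", "tokenized"]]   # second value is a list, not a str

for (name, field), val in zip(ex_fields, row):
    if isinstance(val, six.string_types):          # the check added by the patch
        val = val.rstrip("\n")
    print(name, field.preprocess(val))             # no AttributeError on the list value
```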
| {"golden_diff": "diff --git a/torchtext/data/example.py b/torchtext/data/example.py\n--- a/torchtext/data/example.py\n+++ b/torchtext/data/example.py\n@@ -56,7 +56,9 @@\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n- setattr(ex, name, field.preprocess(val.rstrip('\\n')))\n+ if isinstance(val, six.string_types):\n+ val = val.rstrip('\\n')\n+ setattr(ex, name, field.preprocess(val))\n return ex\n \n @classmethod\n", "issue": "AttributeError: 'list' object has no attribute 'rstrip'\nHi all, previously torchtext works for me when I'm running anaconda python. However, now, when i uninstalled my anaconda python. It stops working.\r\n\r\nIt gives me the following error: \r\n\r\n```\r\nFile \"/Library/Python/2.7/site-packages/torchtext/data/example.py\", line 59, in fromlist\r\n setattr(ex, name, field.preprocess(val.rstrip('\\n')))\r\nAttributeError: 'list' object has no attribute 'rstrip'\r\n\r\n```\r\n\r\nThanks!\n", "before_files": [{"content": "import csv\nimport json\n\nimport six\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key not in data:\n raise ValueError(\"Specified key {} was not found in \"\n \"the input data\".format(key))\n if vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromTSV(cls, data, fields):\n return cls.fromlist(data.split('\\t'), fields)\n\n @classmethod\n def fromCSV(cls, data, fields):\n data = data.rstrip(\"\\n\")\n # If Python 2, encode to utf-8 since CSV doesn't take unicode input\n if six.PY2:\n data = data.encode('utf-8')\n # Use Python CSV module to parse the CSV line\n parsed_csv_lines = csv.reader([data])\n\n # If Python 2, decode back to unicode (the original input format).\n if six.PY2:\n for line in parsed_csv_lines:\n parsed_csv_line = [six.text_type(col, 'utf-8') for col in line]\n break\n else:\n parsed_csv_line = list(parsed_csv_lines)[0]\n return cls.fromlist(parsed_csv_line, fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n setattr(ex, name, field.preprocess(val.rstrip('\\n')))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print(\"Please install NLTK. \"\n \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)\n", "path": "torchtext/data/example.py"}]} | 1,336 | 129 |
gh_patches_debug_13914 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2083 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
store window: district tile announces more results than there are if I click on them
The store window tile shows ALL projects of the district; if I click through to the project overview, the default filter takes out all old projects and plans without participation (Beteiligung).
Can we count only running projects with participation?
</issue>
<code>
[start of meinberlin/apps/cms/models/storefronts.py]
1 import random
2
3 from django.db import models
4 from django.utils.functional import cached_property
5 from modelcluster.fields import ParentalKey
6 from modelcluster.models import ClusterableModel
7 from wagtail.admin import edit_handlers
8 from wagtail.admin.edit_handlers import FieldPanel
9 from wagtail.images.edit_handlers import ImageChooserPanel
10 from wagtail.snippets.models import register_snippet
11
12 from adhocracy4.comments.models import Comment
13 from adhocracy4.modules.models import Item
14 from adhocracy4.projects.models import Project
15 from meinberlin.apps.projects import get_project_type
16
17
18 class StorefrontItem(models.Model):
19 district = models.ForeignKey(
20 'a4administrative_districts.AdministrativeDistrict',
21 related_name='+',
22 null=True,
23 blank=True
24 )
25 project = models.ForeignKey(
26 'a4projects.Project',
27 related_name='+',
28 null=True,
29 blank=True
30 )
31 quote = models.TextField(
32 blank=True,
33 max_length=150
34 )
35
36 def __str__(self):
37 return str(self.pk)
38
39 @cached_property
40 def item_type(self):
41 if get_project_type(self.project) in ('external', 'bplan'):
42 return 'external'
43 return 'project'
44
45 @cached_property
46 def project_url(self):
47 if self.item_type == 'external':
48 return self.project.externalproject.url
49 return self.project.get_absolute_url()
50
51 @cached_property
52 def district_project_count(self):
53 return Project.objects\
54 .filter(administrative_district=self.district,
55 is_draft=False,
56 is_public=True,
57 is_archived=False
58 ).count()
59
60 panels = [
61 FieldPanel('district'),
62 FieldPanel('project'),
63 FieldPanel('quote'),
64 ]
65
66
67 @register_snippet
68 class Storefront(ClusterableModel):
69 title = models.CharField(max_length=255, null=False, blank=False)
70 image = models.ForeignKey(
71 'meinberlin_cms.CustomImage',
72 null=True,
73 blank=True,
74 on_delete=models.SET_NULL,
75 related_name='+'
76 )
77 teaser = models.CharField(max_length=100)
78
79 def __str__(self):
80 return self.title
81
82 @cached_property
83 def num_entries(self):
84 num_comments = Comment.objects.all().count()
85 num_items = Item.objects.all().count()
86 return num_comments + num_items
87
88 @cached_property
89 def num_projects(self):
90 projects = Project.objects.all()\
91 .filter(is_draft=False, is_archived=False, is_public=True)
92 active_project_count = 0
93 for project in projects:
94 if project.active_phase or project.future_phases:
95 active_project_count += 1
96 return active_project_count
97
98 @cached_property
99 def random_items(self):
100 items = self.items.all()
101 if items.count() > 3:
102 items_list = items.values_list('id', flat=True)
103 random_items = random.sample(list(items_list), 3)
104 return StorefrontItem.objects.filter(id__in=random_items)
105 else:
106 return items
107
108 title_panel = [
109 edit_handlers.FieldPanel('title')
110 ]
111
112 image_tile_panel = [
113 ImageChooserPanel('image'),
114 edit_handlers.FieldPanel('teaser')
115 ]
116
117 project_tiles_panel = [
118 edit_handlers.InlinePanel('items', min_num=3)
119 ]
120
121 edit_handler = edit_handlers.TabbedInterface([
122 edit_handlers.ObjectList(title_panel, heading='Title'),
123 edit_handlers.ObjectList(image_tile_panel, heading='Image Tile'),
124 edit_handlers.ObjectList(project_tiles_panel, heading='Project Tiles')
125 ])
126
127
128 class StorefrontCollection(StorefrontItem):
129 parent = ParentalKey('meinberlin_cms.Storefront', related_name='items')
130
[end of meinberlin/apps/cms/models/storefronts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/cms/models/storefronts.py b/meinberlin/apps/cms/models/storefronts.py
--- a/meinberlin/apps/cms/models/storefronts.py
+++ b/meinberlin/apps/cms/models/storefronts.py
@@ -50,12 +50,17 @@
@cached_property
def district_project_count(self):
- return Project.objects\
+ projects = Project.objects\
.filter(administrative_district=self.district,
is_draft=False,
is_public=True,
is_archived=False
- ).count()
+ )
+ active_project_count = 0
+ for project in projects:
+ if project.active_phase or project.future_phases:
+ active_project_count += 1
+ return active_project_count
panels = [
FieldPanel('district'),
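As a sanity check on the intent of the change, a rough sketch of the counting rule before and after. `SimpleNamespace` objects stand in for projects and expose only the two attributes the patch inspects; they are not the real Django model:

```python
from types import SimpleNamespace

projects = [
    SimpleNamespace(active_phase=True, future_phases=[]),     # participation running
    SimpleNamespace(active_phase=None, future_phases=["p"]),  # participation planned
    SimpleNamespace(active_phase=None, future_phases=[]),     # old project, no participation
]

old_count = len(projects)                                          # what the tile showed before
new_count = sum(1 for p in projects if p.active_phase or p.future_phases)
print(old_count, new_count)   # 3 vs. 2: finished projects no longer inflate the tile
```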
| {"golden_diff": "diff --git a/meinberlin/apps/cms/models/storefronts.py b/meinberlin/apps/cms/models/storefronts.py\n--- a/meinberlin/apps/cms/models/storefronts.py\n+++ b/meinberlin/apps/cms/models/storefronts.py\n@@ -50,12 +50,17 @@\n \n @cached_property\n def district_project_count(self):\n- return Project.objects\\\n+ projects = Project.objects\\\n .filter(administrative_district=self.district,\n is_draft=False,\n is_public=True,\n is_archived=False\n- ).count()\n+ )\n+ active_project_count = 0\n+ for project in projects:\n+ if project.active_phase or project.future_phases:\n+ active_project_count += 1\n+ return active_project_count\n \n panels = [\n FieldPanel('district'),\n", "issue": "store window: district tile announces more results than there are if I click on them\nstore window tile shows ALL project of district, if I click and get to project overview, the default filter takes out all old projects and plans without beteiligung.\r\n\r\ncan we only count running projects with participation?\n", "before_files": [{"content": "import random\n\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.admin.edit_handlers import FieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.snippets.models import register_snippet\n\nfrom adhocracy4.comments.models import Comment\nfrom adhocracy4.modules.models import Item\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\n\nclass StorefrontItem(models.Model):\n district = models.ForeignKey(\n 'a4administrative_districts.AdministrativeDistrict',\n related_name='+',\n null=True,\n blank=True\n )\n project = models.ForeignKey(\n 'a4projects.Project',\n related_name='+',\n null=True,\n blank=True\n )\n quote = models.TextField(\n blank=True,\n max_length=150\n )\n\n def __str__(self):\n return str(self.pk)\n\n @cached_property\n def item_type(self):\n if get_project_type(self.project) in ('external', 'bplan'):\n return 'external'\n return 'project'\n\n @cached_property\n def project_url(self):\n if self.item_type == 'external':\n return self.project.externalproject.url\n return self.project.get_absolute_url()\n\n @cached_property\n def district_project_count(self):\n return Project.objects\\\n .filter(administrative_district=self.district,\n is_draft=False,\n is_public=True,\n is_archived=False\n ).count()\n\n panels = [\n FieldPanel('district'),\n FieldPanel('project'),\n FieldPanel('quote'),\n ]\n\n\n@register_snippet\nclass Storefront(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n image = models.ForeignKey(\n 'meinberlin_cms.CustomImage',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n teaser = models.CharField(max_length=100)\n\n def __str__(self):\n return self.title\n\n @cached_property\n def num_entries(self):\n num_comments = Comment.objects.all().count()\n num_items = Item.objects.all().count()\n return num_comments + num_items\n\n @cached_property\n def num_projects(self):\n projects = Project.objects.all()\\\n .filter(is_draft=False, is_archived=False, is_public=True)\n active_project_count = 0\n for project in projects:\n if project.active_phase or project.future_phases:\n active_project_count += 1\n return active_project_count\n\n @cached_property\n def random_items(self):\n items = self.items.all()\n if 
items.count() > 3:\n items_list = items.values_list('id', flat=True)\n random_items = random.sample(list(items_list), 3)\n return StorefrontItem.objects.filter(id__in=random_items)\n else:\n return items\n\n title_panel = [\n edit_handlers.FieldPanel('title')\n ]\n\n image_tile_panel = [\n ImageChooserPanel('image'),\n edit_handlers.FieldPanel('teaser')\n ]\n\n project_tiles_panel = [\n edit_handlers.InlinePanel('items', min_num=3)\n ]\n\n edit_handler = edit_handlers.TabbedInterface([\n edit_handlers.ObjectList(title_panel, heading='Title'),\n edit_handlers.ObjectList(image_tile_panel, heading='Image Tile'),\n edit_handlers.ObjectList(project_tiles_panel, heading='Project Tiles')\n ])\n\n\nclass StorefrontCollection(StorefrontItem):\n parent = ParentalKey('meinberlin_cms.Storefront', related_name='items')\n", "path": "meinberlin/apps/cms/models/storefronts.py"}]} | 1,694 | 186 |
gh_patches_debug_15530 | rasdani/github-patches | git_diff | ibis-project__ibis-6950 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug(bigquery): memtable and string literals not escaping `\n` or `\` which results in invalid syntax
### What happened?
Code:
```python
import ibis
ibis_client = ibis.bigquery.connect()
table = ibis.memtable(
{
"col1": ["a\tb\nc", "d e f", "g'e\"h"],
}
)
print(ibis_client.compile(table))
```
Output:
```
SELECT t0.*
FROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b
c' AS col1), STRUCT('d e f' AS col1), STRUCT('g\'e"h' AS col1)]) t0
```
Note, the following SQL works as expected:
```
SELECT t0.*
FROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\'e"h' AS col1)]) t0
```
Therefore, we should really be escaping `\n` in addition to `'`. Though, perhaps there are other characters that could break BigQuery syntax? See: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals
Alternatively, using triple-quoted strings allows for newline characters in the string literal itself.
### What version of ibis are you using?
6.1.0
also tested on latest commit: 15f8d9575
### What backend(s) are you using, if any?
BigQuery
### Relevant log output
```sh
BigQuery API: Syntax error: Unclosed string literal at [2:47]
```
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
<code>
[start of ibis/backends/base/sql/registry/literal.py]
1 from __future__ import annotations
2
3 import datetime
4 import math
5
6 import ibis.expr.types as ir
7
8
9 def _set_literal_format(translator, expr):
10 value_type = expr.type().value_type
11
12 formatted = [
13 translator.translate(ir.literal(x, type=value_type)) for x in expr.op().value
14 ]
15
16 return "(" + ", ".join(formatted) + ")"
17
18
19 def _boolean_literal_format(translator, op):
20 return "TRUE" if op.value else "FALSE"
21
22
23 def _string_literal_format(translator, op):
24 return "'{}'".format(op.value.replace("'", "\\'"))
25
26
27 def _number_literal_format(translator, op):
28 if math.isfinite(op.value):
29 formatted = repr(op.value)
30 else:
31 if math.isnan(op.value):
32 formatted_val = "NaN"
33 elif math.isinf(op.value):
34 if op.value > 0:
35 formatted_val = "Infinity"
36 else:
37 formatted_val = "-Infinity"
38 formatted = f"CAST({formatted_val!r} AS DOUBLE)"
39
40 return formatted
41
42
43 def _interval_literal_format(translator, op):
44 return f"INTERVAL {op.value} {op.dtype.resolution.upper()}"
45
46
47 def _date_literal_format(translator, op):
48 value = op.value
49 if isinstance(value, datetime.date):
50 value = value.strftime("%Y-%m-%d")
51
52 return repr(value)
53
54
55 def _timestamp_literal_format(translator, op):
56 value = op.value
57 if isinstance(value, datetime.datetime):
58 value = value.isoformat()
59
60 return repr(value)
61
62
63 literal_formatters = {
64 "boolean": _boolean_literal_format,
65 "number": _number_literal_format,
66 "string": _string_literal_format,
67 "interval": _interval_literal_format,
68 "timestamp": _timestamp_literal_format,
69 "date": _date_literal_format,
70 "set": _set_literal_format,
71 }
72
73
74 def literal(translator, op):
75 """Return the expression as its literal value."""
76
77 dtype = op.dtype
78
79 if op.value is None:
80 return "NULL"
81
82 if dtype.is_boolean():
83 typeclass = "boolean"
84 elif dtype.is_string():
85 typeclass = "string"
86 elif dtype.is_date():
87 typeclass = "date"
88 elif dtype.is_numeric():
89 typeclass = "number"
90 elif dtype.is_timestamp():
91 typeclass = "timestamp"
92 elif dtype.is_interval():
93 typeclass = "interval"
94 else:
95 raise NotImplementedError(f"Unsupported type: {dtype!r}")
96
97 return literal_formatters[typeclass](translator, op)
98
[end of ibis/backends/base/sql/registry/literal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ibis/backends/base/sql/registry/literal.py b/ibis/backends/base/sql/registry/literal.py
--- a/ibis/backends/base/sql/registry/literal.py
+++ b/ibis/backends/base/sql/registry/literal.py
@@ -21,7 +21,22 @@
def _string_literal_format(translator, op):
- return "'{}'".format(op.value.replace("'", "\\'"))
+ return "'{}'".format(
+ op.value
+ # Escape \ first so we don't double escape other characters.
+ .replace("\\", "\\\\")
+ # Escape ' since we're using those for the string literal.
+ .replace("'", "\\'")
+ # ASCII escape sequences that are recognized in Python:
+ # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
+ .replace("\a", "\\a") # Bell
+ .replace("\b", "\\b") # Backspace
+ .replace("\f", "\\f") # Formfeed
+ .replace("\n", "\\n") # Newline / Linefeed
+ .replace("\r", "\\r") # Carriage return
+ .replace("\t", "\\t") # Tab
+ .replace("\v", "\\v") # Vertical tab
+ )
def _number_literal_format(translator, op):
| {"golden_diff": "diff --git a/ibis/backends/base/sql/registry/literal.py b/ibis/backends/base/sql/registry/literal.py\n--- a/ibis/backends/base/sql/registry/literal.py\n+++ b/ibis/backends/base/sql/registry/literal.py\n@@ -21,7 +21,22 @@\n \n \n def _string_literal_format(translator, op):\n- return \"'{}'\".format(op.value.replace(\"'\", \"\\\\'\"))\n+ return \"'{}'\".format(\n+ op.value\n+ # Escape \\ first so we don't double escape other characters.\n+ .replace(\"\\\\\", \"\\\\\\\\\")\n+ # Escape ' since we're using those for the string literal.\n+ .replace(\"'\", \"\\\\'\")\n+ # ASCII escape sequences that are recognized in Python:\n+ # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals\n+ .replace(\"\\a\", \"\\\\a\") # Bell\n+ .replace(\"\\b\", \"\\\\b\") # Backspace\n+ .replace(\"\\f\", \"\\\\f\") # Formfeed\n+ .replace(\"\\n\", \"\\\\n\") # Newline / Linefeed\n+ .replace(\"\\r\", \"\\\\r\") # Carriage return\n+ .replace(\"\\t\", \"\\\\t\") # Tab\n+ .replace(\"\\v\", \"\\\\v\") # Vertical tab\n+ )\n \n \n def _number_literal_format(translator, op):\n", "issue": "bug(bigquery): memtable and string literals not escaping `\\n` or `\\` which results in invalid syntax\n### What happened?\n\nCode:\r\n\r\n```python\r\nimport ibis\r\n\r\nibis_client = ibis.bigquery.connect()\r\ntable = ibis.memtable(\r\n {\r\n \"col1\": [\"a\\tb\\nc\", \"d e f\", \"g'e\\\"h\"],\r\n }\r\n)\r\nprint(ibis_client.compile(table))\r\n```\r\n\r\nOutput:\r\n\r\n```\r\nSELECT t0.*\r\nFROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\r\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\\'e\"h' AS col1)]) t0\r\n```\r\n\r\nNote, the following SQL works as expected:\r\n\r\n```\r\nSELECT t0.*\r\nFROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\\'e\"h' AS col1)]) t0\r\n```\r\n\r\nTherefore, we should really be escaping `\\n` in addition to `'`. Though, perhaps there are other characters that could break BigQuery syntax? 
See: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals\r\n\r\nAlternatively, using triple-quoted strings allows for newline characters in the string literal itself.\n\n### What version of ibis are you using?\n\n6.1.0\r\n\r\nalso tested on latest commit: 15f8d9575\n\n### What backend(s) are you using, if any?\n\nBigQuery\n\n### Relevant log output\n\n```sh\nBigQuery API: Syntax error: Unclosed string literal at [2:47]\n```\n\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "from __future__ import annotations\n\nimport datetime\nimport math\n\nimport ibis.expr.types as ir\n\n\ndef _set_literal_format(translator, expr):\n value_type = expr.type().value_type\n\n formatted = [\n translator.translate(ir.literal(x, type=value_type)) for x in expr.op().value\n ]\n\n return \"(\" + \", \".join(formatted) + \")\"\n\n\ndef _boolean_literal_format(translator, op):\n return \"TRUE\" if op.value else \"FALSE\"\n\n\ndef _string_literal_format(translator, op):\n return \"'{}'\".format(op.value.replace(\"'\", \"\\\\'\"))\n\n\ndef _number_literal_format(translator, op):\n if math.isfinite(op.value):\n formatted = repr(op.value)\n else:\n if math.isnan(op.value):\n formatted_val = \"NaN\"\n elif math.isinf(op.value):\n if op.value > 0:\n formatted_val = \"Infinity\"\n else:\n formatted_val = \"-Infinity\"\n formatted = f\"CAST({formatted_val!r} AS DOUBLE)\"\n\n return formatted\n\n\ndef _interval_literal_format(translator, op):\n return f\"INTERVAL {op.value} {op.dtype.resolution.upper()}\"\n\n\ndef _date_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.date):\n value = value.strftime(\"%Y-%m-%d\")\n\n return repr(value)\n\n\ndef _timestamp_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.datetime):\n value = value.isoformat()\n\n return repr(value)\n\n\nliteral_formatters = {\n \"boolean\": _boolean_literal_format,\n \"number\": _number_literal_format,\n \"string\": _string_literal_format,\n \"interval\": _interval_literal_format,\n \"timestamp\": _timestamp_literal_format,\n \"date\": _date_literal_format,\n \"set\": _set_literal_format,\n}\n\n\ndef literal(translator, op):\n \"\"\"Return the expression as its literal value.\"\"\"\n\n dtype = op.dtype\n\n if op.value is None:\n return \"NULL\"\n\n if dtype.is_boolean():\n typeclass = \"boolean\"\n elif dtype.is_string():\n typeclass = \"string\"\n elif dtype.is_date():\n typeclass = \"date\"\n elif dtype.is_numeric():\n typeclass = \"number\"\n elif dtype.is_timestamp():\n typeclass = \"timestamp\"\n elif dtype.is_interval():\n typeclass = \"interval\"\n else:\n raise NotImplementedError(f\"Unsupported type: {dtype!r}\")\n\n return literal_formatters[typeclass](translator, op)\n", "path": "ibis/backends/base/sql/registry/literal.py"}]} | 1,680 | 317 |
gh_patches_debug_18800 | rasdani/github-patches | git_diff | DistrictDataLabs__yellowbrick-407 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ClassificationScoreVisualizers should return accuracy
See #358 and #213 -- classification score visualizers should return accuracy when `score()` is called. If F1 or accuracy is not already shown in the figure, it should be added to the figure as well.
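
A rough usage sketch of the requested behaviour, with `ClassificationReport` standing in for any classification score visualizer (hypothetical example, not code from the library):

```python
# hypothetical usage sketch of the desired score() behaviour
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import ClassificationReport

X, y = make_classification(n_samples=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

viz = ClassificationReport(LogisticRegression())
viz.fit(X_train, y_train)
accuracy = viz.score(X_test, y_test)  # desired: returns the estimator's accuracy
```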
</issue>
<code>
[start of yellowbrick/classifier/base.py]
1 # yellowbrick.classifier.base
2 # API for classification visualizer hierarchy.
3 #
4 # Author: Rebecca Bilbro <[email protected]>
5 # Author: Benjamin Bengfort <[email protected]>
6 # Author: Neal Humphrey
7 # Created: Wed May 18 12:39:40 2016 -0400
8 #
9 # Copyright (C) 2016 District Data Labs
10 # For license information, see LICENSE.txt
11 #
12 # ID: base.py [5388065] [email protected] $
13
14 """
15 API for classification visualizer hierarchy.
16 """
17
18 ##########################################################################
19 ## Imports
20 ##########################################################################
21
22 import numpy as np
23
24 from ..utils import isclassifier
25 from ..base import ScoreVisualizer
26 from ..style.palettes import color_palette
27 from ..exceptions import YellowbrickTypeError
28
29
30 ##########################################################################
31 ## Base Classification Visualizer
32 ##########################################################################
33
34 class ClassificationScoreVisualizer(ScoreVisualizer):
35
36 def __init__(self, model, ax=None, classes=None, **kwargs):
37 """
38 Check to see if model is an instance of a classifer.
39 Should return an error if it isn't.
40
41 .. todo:: document this class.
42 .. tood:: accept as input classes as all visualizers need this.
43 """
44 # A bit of type checking
45 if not isclassifier(model):
46 raise YellowbrickTypeError(
47 "This estimator is not a classifier; "
48 "try a regression or clustering score visualizer instead!"
49 )
50
51 # Initialize the super method.
52 super(ClassificationScoreVisualizer, self).__init__(model, ax=ax, **kwargs)
53
54 # Convert to array if necessary to match estimator.classes_
55 if classes is not None:
56 classes = np.array(classes)
57
58 # Set up classifier score visualization properties
59 if classes is not None:
60 n_colors = len(classes)
61 else:
62 n_colors = None
63
64 self.colors = color_palette(kwargs.pop('colors', None), n_colors)
65 self.classes_ = classes
66
67 @property
68 def classes_(self):
69 """
70 Proxy property to smartly access the classes from the estimator or
71 stored locally on the score visualizer for visualization.
72 """
73 if self.__classes is None:
74 try:
75 return self.estimator.classes_
76 except AttributeError:
77 return None
78 return self.__classes
79
80 @classes_.setter
81 def classes_(self, value):
82 self.__classes = value
83
84 def fit(self, X, y=None, **kwargs):
85 """
86 Parameters
87 ----------
88
89 X : ndarray or DataFrame of shape n x m
90 A matrix of n instances with m features
91
92 y : ndarray or Series of length n
93 An array or series of target or class values
94
95 kwargs: keyword arguments passed to Scikit-Learn API.
96
97 Returns
98 -------
99 self : instance
100 Returns the instance of the classification score visualizer
101
102 """
103 # Fit the inner estimator
104 self.estimator.fit(X, y)
105
106 # Extract the classes from the estimator
107 if self.classes_ is None:
108 self.classes_ = self.estimator.classes_
109
110 # Always return self from fit
111 return self
112
113 #TODO during refactoring this can be used to generalize ClassBalance
114 def class_counts(self, y):
115 unique, counts = np.unique(y, return_counts=True)
116 return dict(zip(unique, counts))
117
[end of yellowbrick/classifier/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/yellowbrick/classifier/base.py b/yellowbrick/classifier/base.py
--- a/yellowbrick/classifier/base.py
+++ b/yellowbrick/classifier/base.py
@@ -110,6 +110,28 @@
# Always return self from fit
return self
+
+ def score(self, X, y, **kwargs):
+ """
+ The score function is the hook for visual interaction. Pass in test
+ data and the visualizer will create predictions on the data and
+ evaluate them with respect to the test values. The evaluation will
+ then be passed to draw() and the result of the estimator score will
+ be returned.
+ Parameters
+ ----------
+ X : array-like
+ X (also X_test) are the dependent variables of test set to predict
+ y : array-like
+ y (also y_test) is the independent actual variables to score against
+ Returns
+ -------
+ score : float
+ """
+ self.score_ = self.estimator.score(X, y, **kwargs)
+
+ return self.score_
+
#TODO during refactoring this can be used to generalize ClassBalance
def class_counts(self, y):
unique, counts = np.unique(y, return_counts=True)
| {"golden_diff": "diff --git a/yellowbrick/classifier/base.py b/yellowbrick/classifier/base.py\n--- a/yellowbrick/classifier/base.py\n+++ b/yellowbrick/classifier/base.py\n@@ -110,6 +110,28 @@\n # Always return self from fit\n return self\n \n+\n+ def score(self, X, y, **kwargs):\n+ \"\"\"\n+ The score function is the hook for visual interaction. Pass in test\n+ data and the visualizer will create predictions on the data and\n+ evaluate them with respect to the test values. The evaluation will\n+ then be passed to draw() and the result of the estimator score will\n+ be returned.\n+ Parameters\n+ ----------\n+ X : array-like\n+ X (also X_test) are the dependent variables of test set to predict\n+ y : array-like\n+ y (also y_test) is the independent actual variables to score against\n+ Returns\n+ -------\n+ score : float\n+ \"\"\"\n+ self.score_ = self.estimator.score(X, y, **kwargs)\n+\n+ return self.score_\n+\n #TODO during refactoring this can be used to generalize ClassBalance\n def class_counts(self, y):\n unique, counts = np.unique(y, return_counts=True)\n", "issue": "ClassificationScoreVisualizers should return accuracy\nSee #358 and #213 -- classification score visualizers should return accuracy when `score()` is called. If F1 or accuracy is not in the figure it should also be included in the figure. \n", "before_files": [{"content": "# yellowbrick.classifier.base\n# API for classification visualizer hierarchy.\n#\n# Author: Rebecca Bilbro <[email protected]>\n# Author: Benjamin Bengfort <[email protected]>\n# Author: Neal Humphrey\n# Created: Wed May 18 12:39:40 2016 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: base.py [5388065] [email protected] $\n\n\"\"\"\nAPI for classification visualizer hierarchy.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport numpy as np\n\nfrom ..utils import isclassifier\nfrom ..base import ScoreVisualizer\nfrom ..style.palettes import color_palette\nfrom ..exceptions import YellowbrickTypeError\n\n\n##########################################################################\n## Base Classification Visualizer\n##########################################################################\n\nclass ClassificationScoreVisualizer(ScoreVisualizer):\n\n def __init__(self, model, ax=None, classes=None, **kwargs):\n \"\"\"\n Check to see if model is an instance of a classifer.\n Should return an error if it isn't.\n\n .. todo:: document this class.\n .. 
tood:: accept as input classes as all visualizers need this.\n \"\"\"\n # A bit of type checking\n if not isclassifier(model):\n raise YellowbrickTypeError(\n \"This estimator is not a classifier; \"\n \"try a regression or clustering score visualizer instead!\"\n )\n\n # Initialize the super method.\n super(ClassificationScoreVisualizer, self).__init__(model, ax=ax, **kwargs)\n\n # Convert to array if necessary to match estimator.classes_\n if classes is not None:\n classes = np.array(classes)\n\n # Set up classifier score visualization properties\n if classes is not None:\n n_colors = len(classes)\n else:\n n_colors = None\n\n self.colors = color_palette(kwargs.pop('colors', None), n_colors)\n self.classes_ = classes\n\n @property\n def classes_(self):\n \"\"\"\n Proxy property to smartly access the classes from the estimator or\n stored locally on the score visualizer for visualization.\n \"\"\"\n if self.__classes is None:\n try:\n return self.estimator.classes_\n except AttributeError:\n return None\n return self.__classes\n\n @classes_.setter\n def classes_(self, value):\n self.__classes = value\n\n def fit(self, X, y=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n X : ndarray or DataFrame of shape n x m\n A matrix of n instances with m features\n\n y : ndarray or Series of length n\n An array or series of target or class values\n\n kwargs: keyword arguments passed to Scikit-Learn API.\n\n Returns\n -------\n self : instance\n Returns the instance of the classification score visualizer\n\n \"\"\"\n # Fit the inner estimator\n self.estimator.fit(X, y)\n\n # Extract the classes from the estimator\n if self.classes_ is None:\n self.classes_ = self.estimator.classes_\n\n # Always return self from fit\n return self\n\n #TODO during refactoring this can be used to generalize ClassBalance\n def class_counts(self, y):\n unique, counts = np.unique(y, return_counts=True)\n return dict(zip(unique, counts))\n", "path": "yellowbrick/classifier/base.py"}]} | 1,581 | 287 |
gh_patches_debug_8883 | rasdani/github-patches | git_diff | python-pillow__Pillow-906 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot from PIL import ImageGrab
Does Pillow 2.5.3's ImageGrab still not support any OS other than Windows?
If it doesn't, why can't it?
---
```
/Library/Python/2.7/site-packages/Pillow-2.5.3-py2.7-macosx-10.9-intel.egg/PIL/__init__.py

Python 2.7.5 (default, Mar 9 2014, 22:15:05)
[GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.0.68)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> from PIL import ImageGrab
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "build/bdist.macosx-10.9-intel/egg/PIL/ImageGrab.py", line 26, in <module>
ImportError: No module named _grabscreen
```
</issue>
<code>
[start of PIL/ImageGrab.py]
1 #
2 # The Python Imaging Library
3 # $Id$
4 #
5 # screen grabber (windows only)
6 #
7 # History:
8 # 2001-04-26 fl created
9 # 2001-09-17 fl use builtin driver, if present
10 # 2002-11-19 fl added grabclipboard support
11 #
12 # Copyright (c) 2001-2002 by Secret Labs AB
13 # Copyright (c) 2001-2002 by Fredrik Lundh
14 #
15 # See the README file for information on usage and redistribution.
16 #
17
18 from PIL import Image
19
20
21 try:
22 # built-in driver (1.1.3 and later)
23 grabber = Image.core.grabscreen
24 except AttributeError:
25 # stand-alone driver (pil plus)
26 import _grabscreen
27 grabber = _grabscreen.grab
28
29
30 def grab(bbox=None):
31 size, data = grabber()
32 im = Image.frombytes(
33 "RGB", size, data,
34 # RGB, 32-bit line padding, origo in lower left corner
35 "raw", "BGR", (size[0]*3 + 3) & -4, -1
36 )
37 if bbox:
38 im = im.crop(bbox)
39 return im
40
41
42 def grabclipboard():
43 debug = 0 # temporary interface
44 data = Image.core.grabclipboard(debug)
45 if isinstance(data, bytes):
46 from PIL import BmpImagePlugin
47 import io
48 return BmpImagePlugin.DibImageFile(io.BytesIO(data))
49 return data
50
[end of PIL/ImageGrab.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PIL/ImageGrab.py b/PIL/ImageGrab.py
--- a/PIL/ImageGrab.py
+++ b/PIL/ImageGrab.py
@@ -17,6 +17,9 @@
from PIL import Image
+import sys
+if sys.platform != "win32":
+ raise ImportError("ImageGrab is Windows only")
try:
# built-in driver (1.1.3 and later)
@@ -40,7 +43,7 @@
def grabclipboard():
- debug = 0 # temporary interface
+ debug = 0 # temporary interface
data = Image.core.grabclipboard(debug)
if isinstance(data, bytes):
from PIL import BmpImagePlugin
| {"golden_diff": "diff --git a/PIL/ImageGrab.py b/PIL/ImageGrab.py\n--- a/PIL/ImageGrab.py\n+++ b/PIL/ImageGrab.py\n@@ -17,6 +17,9 @@\n \n from PIL import Image\n \n+import sys\n+if sys.platform != \"win32\":\n+ raise ImportError(\"ImageGrab is Windows only\")\n \n try:\n # built-in driver (1.1.3 and later)\n@@ -40,7 +43,7 @@\n \n \n def grabclipboard():\n- debug = 0 # temporary interface\n+ debug = 0 # temporary interface\n data = Image.core.grabclipboard(debug)\n if isinstance(data, bytes):\n from PIL import BmpImagePlugin\n", "issue": "Cannot from PIL import ImageGrab\nDoes Pillow2.5.3 ImageGrab still not support other OS except windows?\nIf not, why we cannot do that?\n\n---\n\n/Library/Python/2.7/site-packages/Pillow-2.5.3-py2.7-macosx-10.9-intel.egg/PIL/**init**.py\n\nPython 2.7.5 (default, Mar 9 2014, 22:15:05)\n[GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.0.68)] on darwin\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n\n> > > from PIL import ImageGrab\n> > > Traceback (most recent call last):\n> > > File \"<stdin>\", line 1, in <module>\n> > > File \"build/bdist.macosx-10.9-intel/egg/PIL/ImageGrab.py\", line 26, in <module>\n> > > ImportError: No module named _grabscreen\n\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# screen grabber (windows only)\n#\n# History:\n# 2001-04-26 fl created\n# 2001-09-17 fl use builtin driver, if present\n# 2002-11-19 fl added grabclipboard support\n#\n# Copyright (c) 2001-2002 by Secret Labs AB\n# Copyright (c) 2001-2002 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom PIL import Image\n\n\ntry:\n # built-in driver (1.1.3 and later)\n grabber = Image.core.grabscreen\nexcept AttributeError:\n # stand-alone driver (pil plus)\n import _grabscreen\n grabber = _grabscreen.grab\n\n\ndef grab(bbox=None):\n size, data = grabber()\n im = Image.frombytes(\n \"RGB\", size, data,\n # RGB, 32-bit line padding, origo in lower left corner\n \"raw\", \"BGR\", (size[0]*3 + 3) & -4, -1\n )\n if bbox:\n im = im.crop(bbox)\n return im\n\n\ndef grabclipboard():\n debug = 0 # temporary interface\n data = Image.core.grabclipboard(debug)\n if isinstance(data, bytes):\n from PIL import BmpImagePlugin\n import io\n return BmpImagePlugin.DibImageFile(io.BytesIO(data))\n return data\n", "path": "PIL/ImageGrab.py"}]} | 1,220 | 157 |
gh_patches_debug_1942 | rasdani/github-patches | git_diff | ocf__ocfweb-72 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "edit this page" link on docs?
It would link to the GitHub editor page.
</issue>
<code>
[start of ocfweb/docs/doc.py]
1 from collections import namedtuple
2
3
4 class Document(namedtuple('Document', ['name', 'title', 'render'])):
5
6 @property
7 def category(self):
8 """Return full category path of the document.
9
10 For example, "/" or "/staff/backend/".
11 """
12 return self.name.rsplit('/', 1)[0] + '/'
13
14 @property
15 def category_for_sidebar(self):
16 """Return the category to show similar pages for in the sidebar.
17
18 If this page isn't at the root category, we just return this page's
19 category.
20
21 If this page is at the root category, we return the category rooted at
22 this page (which may or may not have any pages in it).
23 """
24 if self.category == '/':
25 return self.name + '/'
26 else:
27 return self.category
28
[end of ocfweb/docs/doc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ocfweb/docs/doc.py b/ocfweb/docs/doc.py
--- a/ocfweb/docs/doc.py
+++ b/ocfweb/docs/doc.py
@@ -25,3 +25,12 @@
return self.name + '/'
else:
return self.category
+
+ @property
+ def edit_url(self):
+ """Return a GitHub edit URL for this page."""
+ return (
+ 'https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs' +
+ self.name +
+ '.md'
+ )
| {"golden_diff": "diff --git a/ocfweb/docs/doc.py b/ocfweb/docs/doc.py\n--- a/ocfweb/docs/doc.py\n+++ b/ocfweb/docs/doc.py\n@@ -25,3 +25,12 @@\n return self.name + '/'\n else:\n return self.category\n+\n+ @property\n+ def edit_url(self):\n+ \"\"\"Return a GitHub edit URL for this page.\"\"\"\n+ return (\n+ 'https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs' +\n+ self.name +\n+ '.md'\n+ )\n", "issue": "Add \"edit this page\" link on docs?\nIt would link to the GitHub editor page.\n\n", "before_files": [{"content": "from collections import namedtuple\n\n\nclass Document(namedtuple('Document', ['name', 'title', 'render'])):\n\n @property\n def category(self):\n \"\"\"Return full category path of the document.\n\n For example, \"/\" or \"/staff/backend/\".\n \"\"\"\n return self.name.rsplit('/', 1)[0] + '/'\n\n @property\n def category_for_sidebar(self):\n \"\"\"Return the category to show similar pages for in the sidebar.\n\n If this page isn't at the root category, we just return this page's\n category.\n\n If this page is at the root category, we return the category rooted at\n this page (which may or may not have any pages in it).\n \"\"\"\n if self.category == '/':\n return self.name + '/'\n else:\n return self.category\n", "path": "ocfweb/docs/doc.py"}]} | 780 | 134 |
gh_patches_debug_29372 | rasdani/github-patches | git_diff | conda__conda-6752 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda is broken if your home directory is read-only
Conda currently requires the user's home directory to be writable.
If the directory conda is installed into is writable (say a tmpfs) then you can get a long way by using
```shell
./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_DIR -b -f
conda config --system --set always_yes yes
conda config --system --set changeps1 no
conda config --system --add envs_dirs $CONDA_DIR/envs
conda config --system --add pkgs_dirs $CONDA_DIR/pkgs
```
However, this is foiled by the following line -> https://github.com/conda/conda/blob/7616b87ad87b80da16b8263011c9c708be98147c/conda/core/envs_manager.py#L18
```python
USER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))
```
I'm not sure if this would even work on Windows?
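
A minimal sketch of the failure mode, assuming the home directory is mounted read-only:

```python
# sketch of the failure mode, assuming $HOME is mounted read-only
from os.path import expanduser, join

path = join(expanduser('~'), '.conda', 'environments.txt')
# conda appends to this file when registering an environment; on a
# read-only home the open/write fails with a permission error (EACCES/EROFS)
with open(path, 'a') as fh:
    fh.write('/opt/conda/envs/example\n')
```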
</issue>
<code>
[start of conda/core/envs_manager.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from logging import getLogger
5 from os import listdir
6 from os.path import dirname, isdir, isfile, join, normpath, split as path_split
7
8 from ..base.constants import ROOT_ENV_NAME
9 from ..base.context import context
10 from ..common.compat import ensure_text_type, on_win, open
11 from ..common.path import expand, paths_equal
12 from ..gateways.disk.read import yield_lines
13 from ..gateways.disk.test import is_conda_environment
14
15 log = getLogger(__name__)
16
17
18 USER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))
19
20
21 def register_env(location):
22 location = normpath(location)
23
24 if "placehold_pl" in location:
25 # Don't record envs created by conda-build.
26 return
27
28 if location in yield_lines(USER_ENVIRONMENTS_TXT_FILE):
29 # Nothing to do. Location is already recorded in a known environments.txt file.
30 return
31
32 with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
33 fh.write(ensure_text_type(location))
34 fh.write('\n')
35
36
37 def unregister_env(location):
38 if isdir(location):
39 meta_dir = join(location, 'conda-meta')
40 if isdir(meta_dir):
41 meta_dir_contents = listdir(meta_dir)
42 if len(meta_dir_contents) > 1:
43 # if there are any files left other than 'conda-meta/history'
44 # then don't unregister
45 return
46
47 _clean_environments_txt(USER_ENVIRONMENTS_TXT_FILE, location)
48
49
50 def list_all_known_prefixes():
51 all_env_paths = set()
52 if on_win:
53 home_dir_dir = dirname(expand('~'))
54 for home_dir in listdir(home_dir_dir):
55 environments_txt_file = join(home_dir_dir, home_dir, '.conda', 'environments.txt')
56 if isfile(environments_txt_file):
57 all_env_paths.update(_clean_environments_txt(environments_txt_file))
58 else:
59 from os import geteuid
60 from pwd import getpwall
61 if geteuid() == 0:
62 search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)
63 else:
64 search_dirs = (expand('~'),)
65 for home_dir in search_dirs:
66 environments_txt_file = join(home_dir, '.conda', 'environments.txt')
67 if isfile(environments_txt_file):
68 all_env_paths.update(_clean_environments_txt(environments_txt_file))
69
70 # in case environments.txt files aren't complete, also add all known conda environments in
71 # all envs_dirs
72 envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))
73 all_env_paths.update(path for path in (
74 join(envs_dir, name) for envs_dir in envs_dirs for name in listdir(envs_dir)
75 ) if path not in all_env_paths and is_conda_environment(path))
76
77 all_env_paths.add(context.root_prefix)
78 return sorted(all_env_paths)
79
80
81 def env_name(prefix):
82 if not prefix:
83 return None
84 if paths_equal(prefix, context.root_prefix):
85 return ROOT_ENV_NAME
86 maybe_envs_dir, maybe_name = path_split(prefix)
87 for envs_dir in context.envs_dirs:
88 if paths_equal(envs_dir, maybe_envs_dir):
89 return maybe_name
90 return prefix
91
92
93 def _clean_environments_txt(environments_txt_file, remove_location=None):
94 if not isfile(environments_txt_file):
95 return ()
96
97 if remove_location:
98 remove_location = normpath(remove_location)
99 environments_txt_lines = tuple(yield_lines(environments_txt_file))
100 environments_txt_lines_cleaned = tuple(
101 prefix for prefix in environments_txt_lines
102 if prefix != remove_location and is_conda_environment(prefix)
103 )
104 if environments_txt_lines_cleaned != environments_txt_lines:
105 _rewrite_environments_txt(environments_txt_file, environments_txt_lines_cleaned)
106 return environments_txt_lines_cleaned
107
108
109 def _rewrite_environments_txt(environments_txt_file, prefixes):
110 try:
111 with open(environments_txt_file, 'w') as fh:
112 fh.write('\n'.join(prefixes))
113 fh.write('\n')
114 except (IOError, OSError) as e:
115 log.info("File not cleaned: %s", environments_txt_file)
116 log.debug('%r', e, exc_info=True)
117
[end of conda/core/envs_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda/core/envs_manager.py b/conda/core/envs_manager.py
--- a/conda/core/envs_manager.py
+++ b/conda/core/envs_manager.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
+from errno import EACCES
from logging import getLogger
from os import listdir
from os.path import dirname, isdir, isfile, join, normpath, split as path_split
@@ -29,9 +30,17 @@
# Nothing to do. Location is already recorded in a known environments.txt file.
return
- with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
- fh.write(ensure_text_type(location))
- fh.write('\n')
+ try:
+ with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
+ fh.write(ensure_text_type(location))
+ fh.write('\n')
+ except EnvironmentError as e:
+ if e.errno == EACCES:
+ log.warn("Unable to register environment. Path not writable.\n"
+ " environment location: %s\n"
+ " registry file: %s", location, USER_ENVIRONMENTS_TXT_FILE)
+ else:
+ raise
def unregister_env(location):
@@ -111,6 +120,6 @@
with open(environments_txt_file, 'w') as fh:
fh.write('\n'.join(prefixes))
fh.write('\n')
- except (IOError, OSError) as e:
+ except EnvironmentError as e:
log.info("File not cleaned: %s", environments_txt_file)
log.debug('%r', e, exc_info=True)
| {"golden_diff": "diff --git a/conda/core/envs_manager.py b/conda/core/envs_manager.py\n--- a/conda/core/envs_manager.py\n+++ b/conda/core/envs_manager.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+from errno import EACCES\n from logging import getLogger\n from os import listdir\n from os.path import dirname, isdir, isfile, join, normpath, split as path_split\n@@ -29,9 +30,17 @@\n # Nothing to do. Location is already recorded in a known environments.txt file.\n return\n \n- with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n- fh.write(ensure_text_type(location))\n- fh.write('\\n')\n+ try:\n+ with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n+ fh.write(ensure_text_type(location))\n+ fh.write('\\n')\n+ except EnvironmentError as e:\n+ if e.errno == EACCES:\n+ log.warn(\"Unable to register environment. Path not writable.\\n\"\n+ \" environment location: %s\\n\"\n+ \" registry file: %s\", location, USER_ENVIRONMENTS_TXT_FILE)\n+ else:\n+ raise\n \n \n def unregister_env(location):\n@@ -111,6 +120,6 @@\n with open(environments_txt_file, 'w') as fh:\n fh.write('\\n'.join(prefixes))\n fh.write('\\n')\n- except (IOError, OSError) as e:\n+ except EnvironmentError as e:\n log.info(\"File not cleaned: %s\", environments_txt_file)\n log.debug('%r', e, exc_info=True)\n", "issue": "conda is broken if your home directory is read-only\nConda currently requires the user's home directory to be writable.\r\n\r\nIf the directory conda is installed into is writable (say a tmpfs) then you can get along way by using \r\n```shell\r\n\t\t./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_DIR -b -f\r\n\t\tconda config --system --set always_yes yes\r\n\t\tconda config --system --set changeps1 no\r\n\t\tconda config --system --add envs_dirs $CONDA_DIR/envs\r\n\t\tconda config --system --add pkgs_dirs $CONDA_DIR/pkgs\r\n```\r\n\r\nHowever, this is foiled by the following line -> https://github.com/conda/conda/blob/7616b87ad87b80da16b8263011c9c708be98147c/conda/core/envs_manager.py#L18\r\n\r\n```python\r\nUSER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))\r\n```\r\n\r\nI'm not sure if this would even work on Windows?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger\nfrom os import listdir\nfrom os.path import dirname, isdir, isfile, join, normpath, split as path_split\n\nfrom ..base.constants import ROOT_ENV_NAME\nfrom ..base.context import context\nfrom ..common.compat import ensure_text_type, on_win, open\nfrom ..common.path import expand, paths_equal\nfrom ..gateways.disk.read import yield_lines\nfrom ..gateways.disk.test import is_conda_environment\n\nlog = getLogger(__name__)\n\n\nUSER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))\n\n\ndef register_env(location):\n location = normpath(location)\n\n if \"placehold_pl\" in location:\n # Don't record envs created by conda-build.\n return\n\n if location in yield_lines(USER_ENVIRONMENTS_TXT_FILE):\n # Nothing to do. 
Location is already recorded in a known environments.txt file.\n return\n\n with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n fh.write(ensure_text_type(location))\n fh.write('\\n')\n\n\ndef unregister_env(location):\n if isdir(location):\n meta_dir = join(location, 'conda-meta')\n if isdir(meta_dir):\n meta_dir_contents = listdir(meta_dir)\n if len(meta_dir_contents) > 1:\n # if there are any files left other than 'conda-meta/history'\n # then don't unregister\n return\n\n _clean_environments_txt(USER_ENVIRONMENTS_TXT_FILE, location)\n\n\ndef list_all_known_prefixes():\n all_env_paths = set()\n if on_win:\n home_dir_dir = dirname(expand('~'))\n for home_dir in listdir(home_dir_dir):\n environments_txt_file = join(home_dir_dir, home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n else:\n from os import geteuid\n from pwd import getpwall\n if geteuid() == 0:\n search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)\n else:\n search_dirs = (expand('~'),)\n for home_dir in search_dirs:\n environments_txt_file = join(home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n\n # in case environments.txt files aren't complete, also add all known conda environments in\n # all envs_dirs\n envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))\n all_env_paths.update(path for path in (\n join(envs_dir, name) for envs_dir in envs_dirs for name in listdir(envs_dir)\n ) if path not in all_env_paths and is_conda_environment(path))\n\n all_env_paths.add(context.root_prefix)\n return sorted(all_env_paths)\n\n\ndef env_name(prefix):\n if not prefix:\n return None\n if paths_equal(prefix, context.root_prefix):\n return ROOT_ENV_NAME\n maybe_envs_dir, maybe_name = path_split(prefix)\n for envs_dir in context.envs_dirs:\n if paths_equal(envs_dir, maybe_envs_dir):\n return maybe_name\n return prefix\n\n\ndef _clean_environments_txt(environments_txt_file, remove_location=None):\n if not isfile(environments_txt_file):\n return ()\n\n if remove_location:\n remove_location = normpath(remove_location)\n environments_txt_lines = tuple(yield_lines(environments_txt_file))\n environments_txt_lines_cleaned = tuple(\n prefix for prefix in environments_txt_lines\n if prefix != remove_location and is_conda_environment(prefix)\n )\n if environments_txt_lines_cleaned != environments_txt_lines:\n _rewrite_environments_txt(environments_txt_file, environments_txt_lines_cleaned)\n return environments_txt_lines_cleaned\n\n\ndef _rewrite_environments_txt(environments_txt_file, prefixes):\n try:\n with open(environments_txt_file, 'w') as fh:\n fh.write('\\n'.join(prefixes))\n fh.write('\\n')\n except (IOError, OSError) as e:\n log.info(\"File not cleaned: %s\", environments_txt_file)\n log.debug('%r', e, exc_info=True)\n", "path": "conda/core/envs_manager.py"}]} | 1,998 | 391 |
gh_patches_debug_39414 | rasdani/github-patches | git_diff | buildbot__buildbot-4467 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SecretInVault secret provider integration tests no longer work
The test `buildbot.test.integration.test_integration_secrets_with_vault.SecretsConfig.test_secret` no longer works.
See https://travis-ci.org/buildbot/buildbot/jobs/464401540.
It looks like the default kv secrets engine shipped with `vault` is now v2, which we don't support yet.
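
For reference, the two key-value engine versions use different HTTP paths and response shapes; a sketch based on the Vault KV API (not taken from the buildbot code):

```sh
# KV v1: the secret sits directly under the mount
curl -H "X-Vault-Token: $TOKEN" "$VAULT_ADDR/v1/secret/foo"
# -> {"data": {"value": "..."}}

# KV v2: an extra "data/" path segment and a nested "data" envelope
curl -H "X-Vault-Token: $TOKEN" "$VAULT_ADDR/v1/secret/data/foo"
# -> {"data": {"data": {"value": "..."}}, ...}
```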
</issue>
<code>
[start of master/buildbot/secrets/providers/vault.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15 """
16 vault based providers
17 """
18
19 from __future__ import absolute_import
20 from __future__ import print_function
21
22 from twisted.internet import defer
23
24 from buildbot import config
25 from buildbot.secrets.providers.base import SecretProviderBase
26 from buildbot.util import httpclientservice
27
28
29 class HashiCorpVaultSecretProvider(SecretProviderBase):
30 """
31 basic provider where each secret is stored in Vault
32 """
33
34 name = 'SecretInVault'
35
36 def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):
37 if not isinstance(vaultServer, str):
38 config.error("vaultServer must be a string while it is %s" % (type(vaultServer,)))
39 if not isinstance(vaultToken, str):
40 config.error("vaultToken must be a string while it is %s" % (type(vaultToken,)))
41
42 @defer.inlineCallbacks
43 def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):
44 if secretsmount is None:
45 self.secretsmount = "secret"
46 else:
47 self.secretsmount = secretsmount
48 self.vaultServer = vaultServer
49 self.vaultToken = vaultToken
50 if vaultServer.endswith('/'):
51 vaultServer = vaultServer[:-1]
52 self._http = yield httpclientservice.HTTPClientService.getService(
53 self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken})
54
55 @defer.inlineCallbacks
56 def get(self, entry):
57 """
58 get the value from vault secret backend
59 """
60 path = self.secretsmount + '/' + entry
61 proj = yield self._http.get('/v1/{0}'.format(path))
62 code = yield proj.code
63 if code != 200:
64 raise KeyError("The key %s does not exist in Vault provider: request"
65 " return code:%d." % (entry, code))
66 json = yield proj.json()
67 defer.returnValue(json.get(u'data', {}).get('value'))
68
[end of master/buildbot/secrets/providers/vault.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/master/buildbot/secrets/providers/vault.py b/master/buildbot/secrets/providers/vault.py
--- a/master/buildbot/secrets/providers/vault.py
+++ b/master/buildbot/secrets/providers/vault.py
@@ -28,25 +28,30 @@
class HashiCorpVaultSecretProvider(SecretProviderBase):
"""
- basic provider where each secret is stored in Vault
+ basic provider where each secret is stored in Vault KV secret engine
"""
name = 'SecretInVault'
- def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):
+ def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None,
+ apiVersion=1):
if not isinstance(vaultServer, str):
config.error("vaultServer must be a string while it is %s" % (type(vaultServer,)))
if not isinstance(vaultToken, str):
config.error("vaultToken must be a string while it is %s" % (type(vaultToken,)))
+ if apiVersion not in [1, 2]:
+ config.error("apiVersion %s is not supported" % apiVersion)
@defer.inlineCallbacks
- def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):
+ def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None,
+ apiVersion=1):
if secretsmount is None:
self.secretsmount = "secret"
else:
self.secretsmount = secretsmount
self.vaultServer = vaultServer
self.vaultToken = vaultToken
+ self.apiVersion = apiVersion
if vaultServer.endswith('/'):
vaultServer = vaultServer[:-1]
self._http = yield httpclientservice.HTTPClientService.getService(
@@ -57,11 +62,23 @@
"""
get the value from vault secret backend
"""
- path = self.secretsmount + '/' + entry
+ if self.apiVersion == 1:
+ path = self.secretsmount + '/' + entry
+ else:
+ path = self.secretsmount + '/data/' + entry
+
+ # note that the HTTP path contains v1 for both versions of the key-value
+ # secret engine. Different versions of the key-value engine are
+ # effectively separate secret engines in vault, with the same base HTTP
+ # API, but with different paths within it.
proj = yield self._http.get('/v1/{0}'.format(path))
code = yield proj.code
if code != 200:
raise KeyError("The key %s does not exist in Vault provider: request"
" return code:%d." % (entry, code))
json = yield proj.json()
- defer.returnValue(json.get(u'data', {}).get('value'))
+ if self.apiVersion == 1:
+ ret = json.get(u'data', {}).get('value')
+ else:
+ ret = json.get(u'data', {}).get(u'data', {}).get('value')
+ defer.returnValue(ret)
| {"golden_diff": "diff --git a/master/buildbot/secrets/providers/vault.py b/master/buildbot/secrets/providers/vault.py\n--- a/master/buildbot/secrets/providers/vault.py\n+++ b/master/buildbot/secrets/providers/vault.py\n@@ -28,25 +28,30 @@\n \n class HashiCorpVaultSecretProvider(SecretProviderBase):\n \"\"\"\n- basic provider where each secret is stored in Vault\n+ basic provider where each secret is stored in Vault KV secret engine\n \"\"\"\n \n name = 'SecretInVault'\n \n- def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):\n+ def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None,\n+ apiVersion=1):\n if not isinstance(vaultServer, str):\n config.error(\"vaultServer must be a string while it is %s\" % (type(vaultServer,)))\n if not isinstance(vaultToken, str):\n config.error(\"vaultToken must be a string while it is %s\" % (type(vaultToken,)))\n+ if apiVersion not in [1, 2]:\n+ config.error(\"apiVersion %s is not supported\" % apiVersion)\n \n @defer.inlineCallbacks\n- def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):\n+ def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None,\n+ apiVersion=1):\n if secretsmount is None:\n self.secretsmount = \"secret\"\n else:\n self.secretsmount = secretsmount\n self.vaultServer = vaultServer\n self.vaultToken = vaultToken\n+ self.apiVersion = apiVersion\n if vaultServer.endswith('/'):\n vaultServer = vaultServer[:-1]\n self._http = yield httpclientservice.HTTPClientService.getService(\n@@ -57,11 +62,23 @@\n \"\"\"\n get the value from vault secret backend\n \"\"\"\n- path = self.secretsmount + '/' + entry\n+ if self.apiVersion == 1:\n+ path = self.secretsmount + '/' + entry\n+ else:\n+ path = self.secretsmount + '/data/' + entry\n+\n+ # note that the HTTP path contains v1 for both versions of the key-value\n+ # secret engine. Different versions of the key-value engine are\n+ # effectively separate secret engines in vault, with the same base HTTP\n+ # API, but with different paths within it.\n proj = yield self._http.get('/v1/{0}'.format(path))\n code = yield proj.code\n if code != 200:\n raise KeyError(\"The key %s does not exist in Vault provider: request\"\n \" return code:%d.\" % (entry, code))\n json = yield proj.json()\n- defer.returnValue(json.get(u'data', {}).get('value'))\n+ if self.apiVersion == 1:\n+ ret = json.get(u'data', {}).get('value')\n+ else:\n+ ret = json.get(u'data', {}).get(u'data', {}).get('value')\n+ defer.returnValue(ret)\n", "issue": "SecretInVault secret provider integration tests no longer work\nThe test `buildbot.test.integration.test_integration_secrets_with_vault.SecretsConfig.test_secret` no longer works.\r\n\r\nSee https://travis-ci.org/buildbot/buildbot/jobs/464401540.\r\n\r\nLooks like the default kv engine shipping with the `vault` engine is now v2 which we don't support yet.\r\n\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\"\"\"\nvault based providers\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom twisted.internet import defer\n\nfrom buildbot import config\nfrom buildbot.secrets.providers.base import SecretProviderBase\nfrom buildbot.util import httpclientservice\n\n\nclass HashiCorpVaultSecretProvider(SecretProviderBase):\n \"\"\"\n basic provider where each secret is stored in Vault\n \"\"\"\n\n name = 'SecretInVault'\n\n def checkConfig(self, vaultServer=None, vaultToken=None, secretsmount=None):\n if not isinstance(vaultServer, str):\n config.error(\"vaultServer must be a string while it is %s\" % (type(vaultServer,)))\n if not isinstance(vaultToken, str):\n config.error(\"vaultToken must be a string while it is %s\" % (type(vaultToken,)))\n\n @defer.inlineCallbacks\n def reconfigService(self, vaultServer=None, vaultToken=None, secretsmount=None):\n if secretsmount is None:\n self.secretsmount = \"secret\"\n else:\n self.secretsmount = secretsmount\n self.vaultServer = vaultServer\n self.vaultToken = vaultToken\n if vaultServer.endswith('/'):\n vaultServer = vaultServer[:-1]\n self._http = yield httpclientservice.HTTPClientService.getService(\n self.master, self.vaultServer, headers={'X-Vault-Token': self.vaultToken})\n\n @defer.inlineCallbacks\n def get(self, entry):\n \"\"\"\n get the value from vault secret backend\n \"\"\"\n path = self.secretsmount + '/' + entry\n proj = yield self._http.get('/v1/{0}'.format(path))\n code = yield proj.code\n if code != 200:\n raise KeyError(\"The key %s does not exist in Vault provider: request\"\n \" return code:%d.\" % (entry, code))\n json = yield proj.json()\n defer.returnValue(json.get(u'data', {}).get('value'))\n", "path": "master/buildbot/secrets/providers/vault.py"}]} | 1,353 | 685 |
gh_patches_debug_19765 | rasdani/github-patches | git_diff | bokeh__bokeh-10170 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] `TileRenderer` ignores the `visible` property
#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)
Bokeh 2.1.0rc1
#### Description of expected behavior and the observed behavior
`TileRenderer` should take the `visible` property into account, both when it is passed to its constructor and when it is changed at runtime when using the Bokeh server.
#### Complete, minimal, self-contained example code that reproduces the issue
```python
from bokeh.io import show
from bokeh.plotting import figure
from bokeh.tile_providers import CARTODBPOSITRON, get_provider
p = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),
x_axis_type="mercator", y_axis_type="mercator")
p.add_tile(get_provider(CARTODBPOSITRON), visible=False)
show(p)
```
The root cause is that `TileRenderer` just doesn't check `visible` at all. It seems like every renderer checks this property. Maybe it should be checked at a higher level?
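
One reading of "checked at a higher level" is a guard in the shared renderer view, roughly like the following hypothetical sketch (not the actual BokehJS source):

```typescript
// hypothetical sketch of a base-class guard; subclasses would then
// implement _render() instead of overriding render() directly
render(): void {
  if (!this.model.visible)
    return
  this._render()
}
```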
</issue>
<code>
[start of examples/models/file/latex_extension.py]
1 """ The LaTex example was derived from: http://matplotlib.org/users/usetex.html
2 """
3 import numpy as np
4 from scipy.special import jv
5
6 from bokeh.models import Label
7 from bokeh.palettes import Spectral4
8 from bokeh.plotting import figure, output_file, show
9 from bokeh.util.compiler import TypeScript
10
11 output_file('latex_extension.html')
12
13 class LatexLabel(Label):
14 """A subclass of `Label` with all of the same class attributes except
15 canvas mode isn't supported and DOM manipulation happens in the TypeScript
16 superclass implementation that requires setting `render_mode='css'`).
17
18 Only the render method of LabelView is overwritten to perform the
19 text -> latex (via katex) conversion
20 """
21 __javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js"]
22 __css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css"]
23 __implementation__ = TypeScript("""
24 import {Label, LabelView} from "models/annotations/label"
25
26 declare namespace katex {
27 function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void
28 }
29
30 export class LatexLabelView extends LabelView {
31 model: LatexLabel
32
33 render(): void {
34 // Here because AngleSpec does units tranform and label doesn't support specs
35 let angle: number
36 switch (this.model.angle_units) {
37 case "rad": {
38 angle = -1 * this.model.angle
39 break
40 }
41 case "deg": {
42 angle = -1 * this.model.angle * Math.PI/180.0
43 break
44 }
45 default:
46 throw new Error("unreachable")
47 }
48
49 const panel = this.panel || this.plot_view.frame
50
51 const xscale = this.plot_view.frame.xscales[this.model.x_range_name]
52 const yscale = this.plot_view.frame.yscales[this.model.y_range_name]
53
54 const {x, y} = this.model
55 let sx = this.model.x_units == "data" ? xscale.compute(x) : panel.xview.compute(x)
56 let sy = this.model.y_units == "data" ? yscale.compute(y) : panel.yview.compute(y)
57
58 sx += this.model.x_offset
59 sy -= this.model.y_offset
60
61 this._css_text(this.layer.ctx, "", sx, sy, angle)
62 katex.render(this.model.text, this.el, {displayMode: true})
63 }
64 }
65
66 export class LatexLabel extends Label {
67 static init_LatexLabel(): void {
68 this.prototype.default_view = LatexLabelView
69 }
70 }
71 """)
72
73 p = figure(title="LaTex Extension Demonstration", plot_width=800, plot_height=350,
74 background_fill_color="#fafafa")
75 p.x_range.range_padding = 0
76
77 x = np.arange(0.0, 20.0, 0.02)
78
79 for i, n in enumerate([0, 1, 4, 7]):
80 p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend_label="𝜈=%d" % n)
81
82
83 text = (r"\text{Bessel Functions of the First Kind: }" +
84 r"J_\nu = \sum_{m=0}^{\infty}\frac{(-1)^m}{m!\ \Gamma(m+\nu+1)}" +
85 r"\left(\frac{x}{2}\right)^{2m+\nu}")
86 latex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',
87 render_mode='css', text_font_size='11px',
88 background_fill_color="white", border_line_color="lightgrey")
89
90 p.add_layout(latex)
91
92 show(p)
93
[end of examples/models/file/latex_extension.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/models/file/latex_extension.py b/examples/models/file/latex_extension.py
--- a/examples/models/file/latex_extension.py
+++ b/examples/models/file/latex_extension.py
@@ -30,7 +30,7 @@
export class LatexLabelView extends LabelView {
model: LatexLabel
- render(): void {
+ protected _render(): void {
// Here because AngleSpec does units tranform and label doesn't support specs
let angle: number
switch (this.model.angle_units) {
@@ -59,13 +59,17 @@
sy -= this.model.y_offset
this._css_text(this.layer.ctx, "", sx, sy, angle)
- katex.render(this.model.text, this.el, {displayMode: true})
+ katex.render(this.model.text, this.el!, {displayMode: true})
}
}
export class LatexLabel extends Label {
static init_LatexLabel(): void {
this.prototype.default_view = LatexLabelView
+
+ this.override({
+ render_mode: "css",
+ })
}
}
""")
| {"golden_diff": "diff --git a/examples/models/file/latex_extension.py b/examples/models/file/latex_extension.py\n--- a/examples/models/file/latex_extension.py\n+++ b/examples/models/file/latex_extension.py\n@@ -30,7 +30,7 @@\n export class LatexLabelView extends LabelView {\n model: LatexLabel\n \n- render(): void {\n+ protected _render(): void {\n // Here because AngleSpec does units tranform and label doesn't support specs\n let angle: number\n switch (this.model.angle_units) {\n@@ -59,13 +59,17 @@\n sy -= this.model.y_offset\n \n this._css_text(this.layer.ctx, \"\", sx, sy, angle)\n- katex.render(this.model.text, this.el, {displayMode: true})\n+ katex.render(this.model.text, this.el!, {displayMode: true})\n }\n }\n \n export class LatexLabel extends Label {\n static init_LatexLabel(): void {\n this.prototype.default_view = LatexLabelView\n+\n+ this.override({\n+ render_mode: \"css\",\n+ })\n }\n }\n \"\"\")\n", "issue": "[BUG] `TileRenderer` ignores the `visible` property\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\nBokeh 2.1.0rc1\r\n\r\n#### Description of expected behavior and the observed behavior\r\n`TileRenderer` should take into account the `visible` property. Both if passed to its constructor and if changed in runtime if using Bokeh server.\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n```python\r\nfrom bokeh.io import show\r\nfrom bokeh.plotting import figure\r\nfrom bokeh.tile_providers import CARTODBPOSITRON, get_provider\r\n\r\np = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),\r\n x_axis_type=\"mercator\", y_axis_type=\"mercator\")\r\n\r\np.add_tile(get_provider(CARTODBPOSITRON), visible=False)\r\n\r\nshow(p)\r\n```\r\nThe root cause is that `TileRenderer` just doesn't check `visible` at all. It seems like every renderer checks this property. 
Maybe it should be checked at a higher level?\n", "before_files": [{"content": "\"\"\" The LaTex example was derived from: http://matplotlib.org/users/usetex.html\n\"\"\"\nimport numpy as np\nfrom scipy.special import jv\n\nfrom bokeh.models import Label\nfrom bokeh.palettes import Spectral4\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.util.compiler import TypeScript\n\noutput_file('latex_extension.html')\n\nclass LatexLabel(Label):\n \"\"\"A subclass of `Label` with all of the same class attributes except\n canvas mode isn't supported and DOM manipulation happens in the TypeScript\n superclass implementation that requires setting `render_mode='css'`).\n\n Only the render method of LabelView is overwritten to perform the\n text -> latex (via katex) conversion\n \"\"\"\n __javascript__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.js\"]\n __css__ = [\"https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.10.0/katex.min.css\"]\n __implementation__ = TypeScript(\"\"\"\nimport {Label, LabelView} from \"models/annotations/label\"\n\ndeclare namespace katex {\n function render(expression: string, element: HTMLElement, options: {displayMode?: boolean}): void\n}\n\nexport class LatexLabelView extends LabelView {\n model: LatexLabel\n\n render(): void {\n // Here because AngleSpec does units tranform and label doesn't support specs\n let angle: number\n switch (this.model.angle_units) {\n case \"rad\": {\n angle = -1 * this.model.angle\n break\n }\n case \"deg\": {\n angle = -1 * this.model.angle * Math.PI/180.0\n break\n }\n default:\n throw new Error(\"unreachable\")\n }\n\n const panel = this.panel || this.plot_view.frame\n\n const xscale = this.plot_view.frame.xscales[this.model.x_range_name]\n const yscale = this.plot_view.frame.yscales[this.model.y_range_name]\n\n const {x, y} = this.model\n let sx = this.model.x_units == \"data\" ? xscale.compute(x) : panel.xview.compute(x)\n let sy = this.model.y_units == \"data\" ? yscale.compute(y) : panel.yview.compute(y)\n\n sx += this.model.x_offset\n sy -= this.model.y_offset\n\n this._css_text(this.layer.ctx, \"\", sx, sy, angle)\n katex.render(this.model.text, this.el, {displayMode: true})\n }\n}\n\nexport class LatexLabel extends Label {\n static init_LatexLabel(): void {\n this.prototype.default_view = LatexLabelView\n }\n}\n\"\"\")\n\np = figure(title=\"LaTex Extension Demonstration\", plot_width=800, plot_height=350,\n background_fill_color=\"#fafafa\")\np.x_range.range_padding = 0\n\nx = np.arange(0.0, 20.0, 0.02)\n\nfor i, n in enumerate([0, 1, 4, 7]):\n p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend_label=\"\ud835\udf08=%d\" % n)\n\n\ntext = (r\"\\text{Bessel Functions of the First Kind: }\" +\n r\"J_\\nu = \\sum_{m=0}^{\\infty}\\frac{(-1)^m}{m!\\ \\Gamma(m+\\nu+1)}\" +\n r\"\\left(\\frac{x}{2}\\right)^{2m+\\nu}\")\nlatex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',\n render_mode='css', text_font_size='11px',\n background_fill_color=\"white\", border_line_color=\"lightgrey\")\n\np.add_layout(latex)\n\nshow(p)\n", "path": "examples/models/file/latex_extension.py"}]} | 1,823 | 245 |
gh_patches_debug_30984 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-1901 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Directories of exploded archives not recognized as cluster
Directories of exploded archives aren't recognized as a cluster but are erroneously identified as whatever context matches the first marker file we encounter after recursively enumerating every file in all subdirectories.
</issue>
<code>
[start of insights/core/hydration.py]
1 import logging
2 import os
3 from itertools import product
4
5 from insights.core import archives
6 from insights.core.context import (ClusterArchiveContext,
7 JDRContext,
8 HostArchiveContext,
9 SosArchiveContext,
10 SerializedArchiveContext)
11
12 log = logging.getLogger(__name__)
13
14
15 def get_all_files(path):
16 all_files = []
17 for f in archives.get_all_files(path):
18 if os.path.isfile(f) and not os.path.islink(f):
19 all_files.append(f)
20 return all_files
21
22
23 def identify(files):
24 markers = {"insights_archive.txt": SerializedArchiveContext,
25 "insights_commands": HostArchiveContext,
26 "sos_commands": SosArchiveContext,
27 "JBOSS_HOME": JDRContext}
28
29 for f, m in product(files, markers):
30 if m in f:
31 i = f.find(m)
32 common_path = os.path.dirname(f[:i])
33 ctx = markers[m]
34 return common_path, ctx
35
36 common_path = os.path.dirname(os.path.commonprefix(files))
37 if not common_path:
38 raise archives.InvalidArchive("Unable to determine common path")
39
40 if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):
41 return common_path, ClusterArchiveContext
42
43 return common_path, HostArchiveContext
44
45
46 def create_context(path, context=None):
47 all_files = get_all_files(path)
48 if not all_files:
49 raise archives.InvalidArchive("No files in archive")
50
51 common_path, ctx = identify(all_files)
52 context = context or ctx
53 return context(common_path, all_files=all_files)
54
[end of insights/core/hydration.py]
[start of insights/core/cluster.py]
1 #!/usr/bin/env python
2 import itertools
3 import pandas as pd
4 from collections import defaultdict
5
6 from ansible.parsing.dataloader import DataLoader
7 from ansible.inventory.manager import InventoryManager
8
9 from insights.core import dr, plugins
10 from insights.core.archives import extract
11 from insights.core.hydration import create_context
12 from insights.specs import Specs
13
14
15 ID_GENERATOR = itertools.count()
16
17
18 class ClusterMeta(dict):
19 def __init__(self, num_members, kwargs):
20 self.num_members = num_members
21 self.update(**kwargs)
22
23
24 @plugins.combiner(optional=[Specs.machine_id, Specs.hostname])
25 def machine_id(mid, hn):
26 ds = mid or hn
27 if ds:
28 return ds.content[0].strip()
29 return str(next(ID_GENERATOR))
30
31
32 def parse_inventory(path):
33 inventory = InventoryManager(loader=DataLoader(), sources=path)
34 return inventory.get_groups_dict()
35
36
37 def attach_machine_id(result, mid):
38 key = "machine_id"
39 if isinstance(result, list):
40 for r in result:
41 r[key] = mid
42 else:
43 result[key] = mid
44 return result
45
46
47 def process_archives(archives):
48 for archive in archives:
49 with extract(archive) as ex:
50 ctx = create_context(ex.tmp_dir)
51 broker = dr.Broker()
52 broker[ctx.__class__] = ctx
53 yield dr.run(broker=broker)
54
55
56 def extract_facts(brokers):
57 results = defaultdict(list)
58 for b in brokers:
59 mid = b[machine_id]
60 for k, v in b.get_by_type(plugins.fact).items():
61 r = attach_machine_id(v, mid)
62 if isinstance(r, list):
63 results[k].extend(r)
64 else:
65 results[k].append(r)
66 return results
67
68
69 def process_facts(facts, meta, broker):
70 broker[ClusterMeta] = meta
71 for k, v in facts.items():
72 broker[k] = pd.DataFrame(v)
73 return dr.run(dr.COMPONENTS[dr.GROUPS.cluster], broker=broker)
74
75
76 def process_cluster(archives, broker, inventory=None):
77 inventory = parse_inventory(inventory) if inventory else {}
78
79 brokers = process_archives(archives)
80 facts = extract_facts(brokers)
81 meta = ClusterMeta(len(archives), inventory)
82
83 return process_facts(facts, meta, broker)
84
[end of insights/core/cluster.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/insights/core/cluster.py b/insights/core/cluster.py
--- a/insights/core/cluster.py
+++ b/insights/core/cluster.py
@@ -1,8 +1,10 @@
#!/usr/bin/env python
import itertools
-import pandas as pd
+import os
from collections import defaultdict
+import pandas as pd
+
from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager
@@ -46,8 +48,14 @@
def process_archives(archives):
for archive in archives:
- with extract(archive) as ex:
- ctx = create_context(ex.tmp_dir)
+ if os.path.isfile(archive):
+ with extract(archive) as ex:
+ ctx = create_context(ex.tmp_dir)
+ broker = dr.Broker()
+ broker[ctx.__class__] = ctx
+ yield dr.run(broker=broker)
+ else:
+ ctx = create_context(archive)
broker = dr.Broker()
broker[ctx.__class__] = ctx
yield dr.run(broker=broker)
diff --git a/insights/core/hydration.py b/insights/core/hydration.py
--- a/insights/core/hydration.py
+++ b/insights/core/hydration.py
@@ -37,13 +37,15 @@
if not common_path:
raise archives.InvalidArchive("Unable to determine common path")
- if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):
- return common_path, ClusterArchiveContext
-
return common_path, HostArchiveContext
def create_context(path, context=None):
+ top = os.listdir(path)
+ arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)]
+ if arc:
+ return ClusterArchiveContext(path, all_files=arc)
+
all_files = get_all_files(path)
if not all_files:
raise archives.InvalidArchive("No files in archive")
| {"golden_diff": "diff --git a/insights/core/cluster.py b/insights/core/cluster.py\n--- a/insights/core/cluster.py\n+++ b/insights/core/cluster.py\n@@ -1,8 +1,10 @@\n #!/usr/bin/env python\n import itertools\n-import pandas as pd\n+import os\n from collections import defaultdict\n \n+import pandas as pd\n+\n from ansible.parsing.dataloader import DataLoader\n from ansible.inventory.manager import InventoryManager\n \n@@ -46,8 +48,14 @@\n \n def process_archives(archives):\n for archive in archives:\n- with extract(archive) as ex:\n- ctx = create_context(ex.tmp_dir)\n+ if os.path.isfile(archive):\n+ with extract(archive) as ex:\n+ ctx = create_context(ex.tmp_dir)\n+ broker = dr.Broker()\n+ broker[ctx.__class__] = ctx\n+ yield dr.run(broker=broker)\n+ else:\n+ ctx = create_context(archive)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\ndiff --git a/insights/core/hydration.py b/insights/core/hydration.py\n--- a/insights/core/hydration.py\n+++ b/insights/core/hydration.py\n@@ -37,13 +37,15 @@\n if not common_path:\n raise archives.InvalidArchive(\"Unable to determine common path\")\n \n- if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):\n- return common_path, ClusterArchiveContext\n-\n return common_path, HostArchiveContext\n \n \n def create_context(path, context=None):\n+ top = os.listdir(path)\n+ arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)]\n+ if arc:\n+ return ClusterArchiveContext(path, all_files=arc)\n+\n all_files = get_all_files(path)\n if not all_files:\n raise archives.InvalidArchive(\"No files in archive\")\n", "issue": "Directories of exploded archives not recognized as cluster\nDirectories of exploded archives aren't recognized as a cluster but are erroneously identified as whatever context matches the first marker file we encounter after recursively enumerating every file in all subdirectories.\n", "before_files": [{"content": "import logging\nimport os\nfrom itertools import product\n\nfrom insights.core import archives\nfrom insights.core.context import (ClusterArchiveContext,\n JDRContext,\n HostArchiveContext,\n SosArchiveContext,\n SerializedArchiveContext)\n\nlog = logging.getLogger(__name__)\n\n\ndef get_all_files(path):\n all_files = []\n for f in archives.get_all_files(path):\n if os.path.isfile(f) and not os.path.islink(f):\n all_files.append(f)\n return all_files\n\n\ndef identify(files):\n markers = {\"insights_archive.txt\": SerializedArchiveContext,\n \"insights_commands\": HostArchiveContext,\n \"sos_commands\": SosArchiveContext,\n \"JBOSS_HOME\": JDRContext}\n\n for f, m in product(files, markers):\n if m in f:\n i = f.find(m)\n common_path = os.path.dirname(f[:i])\n ctx = markers[m]\n return common_path, ctx\n\n common_path = os.path.dirname(os.path.commonprefix(files))\n if not common_path:\n raise archives.InvalidArchive(\"Unable to determine common path\")\n\n if any(f.endswith(archives.COMPRESSION_TYPES) for f in os.listdir(common_path)):\n return common_path, ClusterArchiveContext\n\n return common_path, HostArchiveContext\n\n\ndef create_context(path, context=None):\n all_files = get_all_files(path)\n if not all_files:\n raise archives.InvalidArchive(\"No files in archive\")\n\n common_path, ctx = identify(all_files)\n context = context or ctx\n return context(common_path, all_files=all_files)\n", "path": "insights/core/hydration.py"}, {"content": "#!/usr/bin/env python\nimport itertools\nimport pandas as pd\nfrom collections import 
defaultdict\n\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.inventory.manager import InventoryManager\n\nfrom insights.core import dr, plugins\nfrom insights.core.archives import extract\nfrom insights.core.hydration import create_context\nfrom insights.specs import Specs\n\n\nID_GENERATOR = itertools.count()\n\n\nclass ClusterMeta(dict):\n def __init__(self, num_members, kwargs):\n self.num_members = num_members\n self.update(**kwargs)\n\n\[email protected](optional=[Specs.machine_id, Specs.hostname])\ndef machine_id(mid, hn):\n ds = mid or hn\n if ds:\n return ds.content[0].strip()\n return str(next(ID_GENERATOR))\n\n\ndef parse_inventory(path):\n inventory = InventoryManager(loader=DataLoader(), sources=path)\n return inventory.get_groups_dict()\n\n\ndef attach_machine_id(result, mid):\n key = \"machine_id\"\n if isinstance(result, list):\n for r in result:\n r[key] = mid\n else:\n result[key] = mid\n return result\n\n\ndef process_archives(archives):\n for archive in archives:\n with extract(archive) as ex:\n ctx = create_context(ex.tmp_dir)\n broker = dr.Broker()\n broker[ctx.__class__] = ctx\n yield dr.run(broker=broker)\n\n\ndef extract_facts(brokers):\n results = defaultdict(list)\n for b in brokers:\n mid = b[machine_id]\n for k, v in b.get_by_type(plugins.fact).items():\n r = attach_machine_id(v, mid)\n if isinstance(r, list):\n results[k].extend(r)\n else:\n results[k].append(r)\n return results\n\n\ndef process_facts(facts, meta, broker):\n broker[ClusterMeta] = meta\n for k, v in facts.items():\n broker[k] = pd.DataFrame(v)\n return dr.run(dr.COMPONENTS[dr.GROUPS.cluster], broker=broker)\n\n\ndef process_cluster(archives, broker, inventory=None):\n inventory = parse_inventory(inventory) if inventory else {}\n\n brokers = process_archives(archives)\n facts = extract_facts(brokers)\n meta = ClusterMeta(len(archives), inventory)\n\n return process_facts(facts, meta, broker)\n", "path": "insights/core/cluster.py"}]} | 1,717 | 450 |
gh_patches_debug_20507 | rasdani/github-patches | git_diff | freedomofpress__securedrop-5199 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update Ansible due to CVE-2019-14864
## Description
[CVE-2019-14864](https://nvd.nist.gov/vuln/detail/CVE-2019-14864) is a vulnerability in Ansible's `no_log` flag for the splunk and sumologic plugins (sensitive data is incorrectly logged), neither of which we're using. Regardless, we should update Ansible to a version that does not have this vulnerability in the next release.
@emkll also pointed out to me that this is a [good time](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html) to get onto the 2.8 series since the 2.7 series will become unmaintained when 2.10 is released (it's in development).
</issue>
<code>
[start of install_files/ansible-base/callback_plugins/ansible_version_check.py]
1 # -*- encoding:utf-8 -*-
2 from __future__ import absolute_import, division, print_function, \
3 unicode_literals
4
5 import sys
6
7 import ansible
8
9 try:
10 # Version 2.0+
11 from ansible.plugins.callback import CallbackBase
12 except ImportError:
13 CallbackBase = object
14
15
16 def print_red_bold(text):
17 print('\x1b[31;1m' + text + '\x1b[0m')
18
19
20 class CallbackModule(CallbackBase):
21 def __init__(self):
22 # Can't use `on_X` because this isn't forwards compatible
23 # with Ansible 2.0+
24 required_version = '2.7.13' # Keep synchronized with requirements files
25 if not ansible.__version__.startswith(required_version):
26 print_red_bold(
27 "SecureDrop restriction: only Ansible {version}.*"
28 "is supported."
29 .format(version=required_version)
30 )
31 sys.exit(1)
32
[end of install_files/ansible-base/callback_plugins/ansible_version_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py
--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py
+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py
@@ -19,13 +19,18 @@
class CallbackModule(CallbackBase):
def __init__(self):
- # Can't use `on_X` because this isn't forwards compatible
- # with Ansible 2.0+
- required_version = '2.7.13' # Keep synchronized with requirements files
- if not ansible.__version__.startswith(required_version):
+ # The acceptable version range needs to be synchronized with
+ # requirements files.
+ viable_start = [2, 9, 7]
+ viable_end = [2, 10, 0]
+ ansible_version = [int(v) for v in ansible.__version__.split('.')]
+ if not (viable_start <= ansible_version < viable_end):
print_red_bold(
- "SecureDrop restriction: only Ansible {version}.*"
- "is supported."
- .format(version=required_version)
+ "SecureDrop restriction: Ansible version must be at least {viable_start} "
+ "and less than {viable_end}."
+ .format(
+ viable_start='.'.join(str(v) for v in viable_start),
+ viable_end='.'.join(str(v) for v in viable_end),
+ )
)
sys.exit(1)
| {"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -19,13 +19,18 @@\n \n class CallbackModule(CallbackBase):\n def __init__(self):\n- # Can't use `on_X` because this isn't forwards compatible\n- # with Ansible 2.0+\n- required_version = '2.7.13' # Keep synchronized with requirements files\n- if not ansible.__version__.startswith(required_version):\n+ # The acceptable version range needs to be synchronized with\n+ # requirements files.\n+ viable_start = [2, 9, 7]\n+ viable_end = [2, 10, 0]\n+ ansible_version = [int(v) for v in ansible.__version__.split('.')]\n+ if not (viable_start <= ansible_version < viable_end):\n print_red_bold(\n- \"SecureDrop restriction: only Ansible {version}.*\"\n- \"is supported.\"\n- .format(version=required_version)\n+ \"SecureDrop restriction: Ansible version must be at least {viable_start} \"\n+ \"and less than {viable_end}.\"\n+ .format(\n+ viable_start='.'.join(str(v) for v in viable_start),\n+ viable_end='.'.join(str(v) for v in viable_end),\n+ )\n )\n sys.exit(1)\n", "issue": "update Ansible due to CVE-2019-14864 \n## Description\r\n\r\n[CVE-2019-14864](https://nvd.nist.gov/vuln/detail/CVE-2019-14864) is a vulnerability in Ansible's `no_log` flag for the splunk and sumologic plugins (sensitive data is incorrectly logged) but neither of which we're using. Regardless, we should update Ansible to a version that does not have this vulnerability in the next release.\r\n\r\n@emkll also pointed out to me that this is a [good time](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html) to get onto the 2.8 series since the 2.7 series will become unmaintained when 2.10 is released (it's in development). \n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n required_version = '2.7.13' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n \"is supported.\"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]} | 1,000 | 348 |
gh_patches_debug_22997 | rasdani/github-patches | git_diff | liqd__a4-opin-605 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong user in project
When I get an invitation to a project via email and I am logged in with a different user and click one the link in the email, the second user is added to the project
</issue>
<code>
[start of euth/memberships/views.py]
1 from django.http import Http404
2 from django.shortcuts import redirect
3 from django.views import generic
4 from rules.compat import access_mixins as mixin
5
6 from adhocracy4.projects import models as prj_models
7 from adhocracy4.projects import views as prj_views
8
9 from . import forms, models
10
11
12 class RequestsProjectDetailView(prj_views.ProjectDetailView):
13
14 def handle_no_permission(self):
15 """
16 Check if user clould join
17 """
18 user = self.request.user
19 is_member = user.is_authenticated() and self.project.has_member(user)
20
21 if is_member:
22 return super().handle_no_permission()
23 else:
24 return self.handle_no_membership()
25
26 def handle_no_membership(self):
27 membership_impossible = (
28 not self.request.user.is_authenticated()
29 or self.project.is_draft
30 or self.project.has_member(self.request.user)
31 )
32
33 if membership_impossible:
34 return super().handle_no_permission()
35 else:
36 return redirect('memberships-request',
37 project_slug=self.project.slug)
38
39
40 class InviteView(mixin.LoginRequiredMixin, generic.UpdateView):
41 model = models.Invite
42 form_class = forms.InviteForm
43 slug_field = 'token'
44 slug_url_kwarg = 'invite_token'
45
46 def form_valid(self, form):
47 if form.is_accepted():
48 form.instance.accept(self.request.user)
49 return redirect(form.instance.project.get_absolute_url())
50 else:
51 form.instance.reject()
52 return redirect('/')
53
54
55 class RequestView(mixin.LoginRequiredMixin, generic.DetailView):
56 """
57 Displays membership request if it exists or allows to create one.
58 """
59 model = models.Request
60 slug_field = 'project__slug'
61 slug_url_kwarg = 'project_slug'
62 context_object_name = 'join_request'
63
64 def get_queryset(self):
65 return self.model.objects.filter(creator=self.request.user)
66
67 def get(self, request, *args, **kwargs):
68 if self.project.has_member(request.user):
69 return redirect(self.project.get_absolute_url())
70 else:
71 return super().get(request, *args, **kwargs)
72
73 def post(self, request, *args, **kwargs):
74 user = request.user
75 project = self.project
76 models.Request.objects.request_membership(project, user)
77 return redirect(self.request.path)
78
79 def get_object(self, queryset=None):
80 try:
81 return super().get_object(queryset)
82 except Http404:
83 return None
84
85 @property
86 def project(self):
87 project_slug = self.kwargs[self.slug_url_kwarg]
88 return prj_models.Project.objects.get(slug=project_slug)
89
[end of euth/memberships/views.py]
[start of euth/memberships/forms.py]
1 from django import forms
2 from django.core.exceptions import ValidationError
3
4 from . import models
5
6
7 class InviteForm(forms.ModelForm):
8 accept = forms.CharField(required=False)
9 reject = forms.CharField(required=False)
10
11 class Meta:
12 model = models.Invite
13 fields = ['accept', 'reject']
14
15 def clean(self):
16 data = self.data
17 if 'accept' not in data and 'reject' not in data:
18 raise ValidationError('Reject or accept')
19 return data
20
21 def is_accepted(self):
22 data = self.data
23 return 'accept' in data and 'reject' not in data
24
[end of euth/memberships/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py
--- a/euth/memberships/forms.py
+++ b/euth/memberships/forms.py
@@ -12,10 +12,17 @@
model = models.Invite
fields = ['accept', 'reject']
+ def __init__(self, user=None, **kwargs):
+ super().__init__(**kwargs)
+ self.user = user
+
def clean(self):
data = self.data
if 'accept' not in data and 'reject' not in data:
raise ValidationError('Reject or accept')
+ if 'accept' in data and not self.user.email == self.instance.email:
+ raise ValidationError('This user has another email address than '
+ 'the one that received the invitation.')
return data
def is_accepted(self):
diff --git a/euth/memberships/views.py b/euth/memberships/views.py
--- a/euth/memberships/views.py
+++ b/euth/memberships/views.py
@@ -43,6 +43,11 @@
slug_field = 'token'
slug_url_kwarg = 'invite_token'
+ def get_form_kwargs(self):
+ kwargs = super().get_form_kwargs()
+ kwargs.update({'user': self.request.user})
+ return kwargs
+
def form_valid(self, form):
if form.is_accepted():
form.instance.accept(self.request.user)
| {"golden_diff": "diff --git a/euth/memberships/forms.py b/euth/memberships/forms.py\n--- a/euth/memberships/forms.py\n+++ b/euth/memberships/forms.py\n@@ -12,10 +12,17 @@\n model = models.Invite\n fields = ['accept', 'reject']\n \n+ def __init__(self, user=None, **kwargs):\n+ super().__init__(**kwargs)\n+ self.user = user\n+\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n+ if 'accept' in data and not self.user.email == self.instance.email:\n+ raise ValidationError('This user has another email address than '\n+ 'the one that received the invitation.')\n return data\n \n def is_accepted(self):\ndiff --git a/euth/memberships/views.py b/euth/memberships/views.py\n--- a/euth/memberships/views.py\n+++ b/euth/memberships/views.py\n@@ -43,6 +43,11 @@\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n \n+ def get_form_kwargs(self):\n+ kwargs = super().get_form_kwargs()\n+ kwargs.update({'user': self.request.user})\n+ return kwargs\n+\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n", "issue": "Wrong user in project\nWhen I get an invitation to a project via email and I am logged in with a different user and click one the link in the email, the second user is added to the project\n", "before_files": [{"content": "from django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.views import generic\nfrom rules.compat import access_mixins as mixin\n\nfrom adhocracy4.projects import models as prj_models\nfrom adhocracy4.projects import views as prj_views\n\nfrom . import forms, models\n\n\nclass RequestsProjectDetailView(prj_views.ProjectDetailView):\n\n def handle_no_permission(self):\n \"\"\"\n Check if user clould join\n \"\"\"\n user = self.request.user\n is_member = user.is_authenticated() and self.project.has_member(user)\n\n if is_member:\n return super().handle_no_permission()\n else:\n return self.handle_no_membership()\n\n def handle_no_membership(self):\n membership_impossible = (\n not self.request.user.is_authenticated()\n or self.project.is_draft\n or self.project.has_member(self.request.user)\n )\n\n if membership_impossible:\n return super().handle_no_permission()\n else:\n return redirect('memberships-request',\n project_slug=self.project.slug)\n\n\nclass InviteView(mixin.LoginRequiredMixin, generic.UpdateView):\n model = models.Invite\n form_class = forms.InviteForm\n slug_field = 'token'\n slug_url_kwarg = 'invite_token'\n\n def form_valid(self, form):\n if form.is_accepted():\n form.instance.accept(self.request.user)\n return redirect(form.instance.project.get_absolute_url())\n else:\n form.instance.reject()\n return redirect('/')\n\n\nclass RequestView(mixin.LoginRequiredMixin, generic.DetailView):\n \"\"\"\n Displays membership request if it exists or allows to create one.\n \"\"\"\n model = models.Request\n slug_field = 'project__slug'\n slug_url_kwarg = 'project_slug'\n context_object_name = 'join_request'\n\n def get_queryset(self):\n return self.model.objects.filter(creator=self.request.user)\n\n def get(self, request, *args, **kwargs):\n if self.project.has_member(request.user):\n return redirect(self.project.get_absolute_url())\n else:\n return super().get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n user = request.user\n project = self.project\n models.Request.objects.request_membership(project, user)\n return redirect(self.request.path)\n\n def get_object(self, queryset=None):\n 
try:\n return super().get_object(queryset)\n except Http404:\n return None\n\n @property\n def project(self):\n project_slug = self.kwargs[self.slug_url_kwarg]\n return prj_models.Project.objects.get(slug=project_slug)\n", "path": "euth/memberships/views.py"}, {"content": "from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom . import models\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n class Meta:\n model = models.Invite\n fields = ['accept', 'reject']\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n", "path": "euth/memberships/forms.py"}]} | 1,491 | 317 |
gh_patches_debug_10304 | rasdani/github-patches | git_diff | google__openhtf-393 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Nicer failure mode for misuse of StoreInModule.
See #389.
> ...it's nonsensical to have no dots, but something one might accidentally do if you forget to do the %s/name business, maybe throw in a quick format check on the inputs of add_argument() to fail-fast rather than raise an obscure error here?
> @grybmadsci
Document util/argv.py
There are no docs on how to use this module.
</issue>
<code>
[start of openhtf/util/argv.py]
1 """Utilities for handling command line arguments.
2
3 StoreInModule:
4 Enables emulating a gflags-esque API (flag affects global value), but one
5 doesn't necessarily need to use flags to set values.
6
7 Example usage:
8 DEFAULT_VALUE = 0
9 ARG_PARSER = argv.ModuleParser()
10 ARG_PARSER.add_argument(
11 '--override-value', action=argv.StoreInModule,
12 default=DEFAULT_VALUE, target='%s.DEFAULT_VALUE' % __name__)
13
14 Then in an entry point (main() function), use that parser as a parent:
15 parser = argparse.ArgumentParser(parents=[other_module.ARG_PARSER])
16 parser.parse_args()
17 """
18
19 import argparse
20
21
22 def ModuleParser():
23 return argparse.ArgumentParser(add_help=False)
24
25
26 class StoreInModule(argparse.Action):
27
28 def __init__(self, *args, **kwargs):
29 self._tgt_mod, self._tgt_attr = kwargs.pop('target').rsplit('.', 1)
30 proxy_cls = kwargs.pop('proxy', None)
31 if proxy_cls is not None:
32 self._proxy = proxy_cls(*args, **kwargs)
33 super(StoreInModule, self).__init__(*args, **kwargs)
34
35 def __call__(self, parser, namespace, values, option_string=None):
36 if hasattr(self, '_proxy'):
37 values = self._proxy(parser, namespace, values)
38 base, mod = self._tgt_mod.rsplit('.', 1)
39 module = getattr(__import__(base, fromlist=[mod]), mod)
40 setattr(module, self._tgt_attr, values)
41
42
[end of openhtf/util/argv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openhtf/util/argv.py b/openhtf/util/argv.py
--- a/openhtf/util/argv.py
+++ b/openhtf/util/argv.py
@@ -35,7 +35,10 @@
def __call__(self, parser, namespace, values, option_string=None):
if hasattr(self, '_proxy'):
values = self._proxy(parser, namespace, values)
- base, mod = self._tgt_mod.rsplit('.', 1)
- module = getattr(__import__(base, fromlist=[mod]), mod)
+ if '.' in self._tgt_mod:
+ base, mod = self._tgt_mod.rsplit('.', 1)
+ module = getattr(__import__(base, fromlist=[mod]), mod)
+ else:
+ module = __import__(self._tgt_mod)
setattr(module, self._tgt_attr, values)
| {"golden_diff": "diff --git a/openhtf/util/argv.py b/openhtf/util/argv.py\n--- a/openhtf/util/argv.py\n+++ b/openhtf/util/argv.py\n@@ -35,7 +35,10 @@\n def __call__(self, parser, namespace, values, option_string=None):\n if hasattr(self, '_proxy'):\n values = self._proxy(parser, namespace, values)\n- base, mod = self._tgt_mod.rsplit('.', 1)\n- module = getattr(__import__(base, fromlist=[mod]), mod)\n+ if '.' in self._tgt_mod:\n+ base, mod = self._tgt_mod.rsplit('.', 1)\n+ module = getattr(__import__(base, fromlist=[mod]), mod)\n+ else:\n+ module = __import__(self._tgt_mod)\n setattr(module, self._tgt_attr, values)\n", "issue": "Nicer failure mode for misuse of StoreInModule.\nSee #389.\n\n> ...it's nonsensical to have no dots, but something one might accidentally do if you forget to do the %s/name business, maybe throw in a quick format check on the inputs of add_argument() to fail-fast rather than raise an obscure error here?\n> @grybmadsci\n\nDocument util/argv.py\nThere are no docs on how to use this module.\n\n", "before_files": [{"content": "\"\"\"Utilities for handling command line arguments.\n\nStoreInModule:\n Enables emulating a gflags-esque API (flag affects global value), but one\n doesn't necessarily need to use flags to set values.\n \n Example usage:\n DEFAULT_VALUE = 0\n ARG_PARSER = argv.ModuleParser()\n ARG_PARSER.add_argument(\n '--override-value', action=argv.StoreInModule,\n default=DEFAULT_VALUE, target='%s.DEFAULT_VALUE' % __name__)\n\n Then in an entry point (main() function), use that parser as a parent:\n parser = argparse.ArgumentParser(parents=[other_module.ARG_PARSER])\n parser.parse_args()\n\"\"\"\n\nimport argparse\n\n\ndef ModuleParser():\n return argparse.ArgumentParser(add_help=False)\n\n\nclass StoreInModule(argparse.Action):\n\n def __init__(self, *args, **kwargs):\n self._tgt_mod, self._tgt_attr = kwargs.pop('target').rsplit('.', 1)\n proxy_cls = kwargs.pop('proxy', None)\n if proxy_cls is not None:\n self._proxy = proxy_cls(*args, **kwargs)\n super(StoreInModule, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n if hasattr(self, '_proxy'):\n values = self._proxy(parser, namespace, values)\n base, mod = self._tgt_mod.rsplit('.', 1)\n module = getattr(__import__(base, fromlist=[mod]), mod)\n setattr(module, self._tgt_attr, values)\n\n", "path": "openhtf/util/argv.py"}]} | 1,040 | 194 |
gh_patches_debug_39557 | rasdani/github-patches | git_diff | geopandas__geopandas-1093 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a show_versions function
Similarly to `pandas.show_versions()` and `sklearn.show_versions()`, it would be nice to have a function like that for GeoPandas. We can probably base the code on those examples.
It could list the versions of the direct python dependencies, but also try to show the versions of the underlying GEOS / GDAL libraries.
</issue>
<code>
[start of geopandas/tools/_show_versions.py]
1 import platform
2 import sys
3 import importlib
4
5
6 def _get_sys_info():
7 """System information
8
9 Returns
10 -------
11 sys_info : dict
12 system and Python version information
13 """
14 python = sys.version.replace('\n', ' ')
15
16 blob = [
17 ("python", python),
18 ('executable', sys.executable),
19 ("machine", platform.platform()),
20 ]
21
22 return dict(blob)
23
24
25 def _get_deps_info():
26 """Overview of the installed version of main dependencies
27
28 Returns
29 -------
30 deps_info: dict
31 version information on relevant Python libraries
32 """
33 deps = [
34 "geopandas",
35 "pandas",
36 "fiona",
37 "osgeo.gdal",
38 "numpy",
39 "shapely",
40 "rtree",
41 "pyproj",
42 "matplotlib",
43 "mapclassify",
44 "pysal",
45 "geopy",
46 "psycopg2",
47 "descartes"
48 ]
49
50 def get_version(module):
51 return module.__version__
52
53 deps_info = {}
54
55 for modname in deps:
56 try:
57 if modname in sys.modules:
58 mod = sys.modules[modname]
59 else:
60 mod = importlib.import_module(modname)
61 ver = get_version(mod)
62 deps_info[modname] = ver
63 except ImportError:
64 deps_info[modname] = None
65 except AttributeError:
66 deps_info[modname] = None
67
68 return deps_info
69
70
71 def show_versions():
72 """
73 Print system information and installed module versions.
74
75 Example
76 -------
77 > python -c "import geopandas; geopandas.show_versions()"
78 """
79 sys_info = _get_sys_info()
80 deps_info = _get_deps_info()
81
82 maxlen = max(len(x) for x in deps_info)
83 tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen)
84 print("\nSYSTEM INFO")
85 print("-----------")
86 for k, stat in sys_info.items():
87 print(tpl.format(k=k, stat=stat))
88 print("\nPYTHON DEPENDENCIES")
89 print("-------------------")
90 for k, stat in deps_info.items():
91 print(tpl.format(k=k, stat=stat))
92
[end of geopandas/tools/_show_versions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geopandas/tools/_show_versions.py b/geopandas/tools/_show_versions.py
--- a/geopandas/tools/_show_versions.py
+++ b/geopandas/tools/_show_versions.py
@@ -22,6 +22,53 @@
return dict(blob)
+def _get_C_info():
+ """Information on system PROJ, GDAL, GEOS
+ Returns
+ -------
+ c_info: dict
+ system PROJ information
+ """
+ try:
+ import pyproj
+ from pyproj.exceptions import DataDirError
+ proj = pyproj.proj_version_str
+ try:
+ proj_dir = pyproj.datadir.get_data_dir()
+ except DataDirError:
+ proj_dir = None
+ except Exception:
+ proj = None
+ proj_dir = None
+
+ try:
+ import shapely._buildcfg
+ geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)
+ geos_dir = shapely._buildcfg.geos_library_path
+ except Exception:
+ geos = None
+ geos_dir = None
+
+ try:
+ import fiona
+ gdal = fiona.env.get_gdal_release_name()
+ gdal_dir = fiona.env.GDALDataFinder().search()
+ except Exception:
+ gdal = None
+ gdal_dir = None
+
+ blob = [
+ ("GEOS", geos),
+ ("GEOS lib", geos_dir),
+ ("GDAL", gdal),
+ ("GDAL dir", gdal_dir),
+ ("PROJ", proj),
+ ("PROJ data dir", proj_dir)
+ ]
+
+ return dict(blob)
+
+
def _get_deps_info():
"""Overview of the installed version of main dependencies
@@ -34,7 +81,6 @@
"geopandas",
"pandas",
"fiona",
- "osgeo.gdal",
"numpy",
"shapely",
"rtree",
@@ -43,8 +89,7 @@
"mapclassify",
"pysal",
"geopy",
- "psycopg2",
- "descartes"
+ "psycopg2"
]
def get_version(module):
@@ -60,9 +105,7 @@
mod = importlib.import_module(modname)
ver = get_version(mod)
deps_info[modname] = ver
- except ImportError:
- deps_info[modname] = None
- except AttributeError:
+ except Exception:
deps_info[modname] = None
return deps_info
@@ -78,6 +121,7 @@
"""
sys_info = _get_sys_info()
deps_info = _get_deps_info()
+ proj_info = _get_C_info()
maxlen = max(len(x) for x in deps_info)
tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen)
@@ -85,6 +129,10 @@
print("-----------")
for k, stat in sys_info.items():
print(tpl.format(k=k, stat=stat))
+ print("\nGEOS, GDAL, PROJ INFO")
+ print("---------------------")
+ for k, stat in proj_info.items():
+ print(tpl.format(k=k, stat=stat))
print("\nPYTHON DEPENDENCIES")
print("-------------------")
for k, stat in deps_info.items():
| {"golden_diff": "diff --git a/geopandas/tools/_show_versions.py b/geopandas/tools/_show_versions.py\n--- a/geopandas/tools/_show_versions.py\n+++ b/geopandas/tools/_show_versions.py\n@@ -22,6 +22,53 @@\n return dict(blob)\n \n \n+def _get_C_info():\n+ \"\"\"Information on system PROJ, GDAL, GEOS\n+ Returns\n+ -------\n+ c_info: dict\n+ system PROJ information\n+ \"\"\"\n+ try:\n+ import pyproj\n+ from pyproj.exceptions import DataDirError\n+ proj = pyproj.proj_version_str\n+ try:\n+ proj_dir = pyproj.datadir.get_data_dir()\n+ except DataDirError:\n+ proj_dir = None\n+ except Exception:\n+ proj = None\n+ proj_dir = None\n+\n+ try:\n+ import shapely._buildcfg\n+ geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)\n+ geos_dir = shapely._buildcfg.geos_library_path\n+ except Exception:\n+ geos = None\n+ geos_dir = None\n+\n+ try:\n+ import fiona\n+ gdal = fiona.env.get_gdal_release_name()\n+ gdal_dir = fiona.env.GDALDataFinder().search()\n+ except Exception:\n+ gdal = None\n+ gdal_dir = None\n+\n+ blob = [\n+ (\"GEOS\", geos),\n+ (\"GEOS lib\", geos_dir),\n+ (\"GDAL\", gdal),\n+ (\"GDAL dir\", gdal_dir),\n+ (\"PROJ\", proj),\n+ (\"PROJ data dir\", proj_dir)\n+ ]\n+\n+ return dict(blob)\n+\n+\n def _get_deps_info():\n \"\"\"Overview of the installed version of main dependencies\n \n@@ -34,7 +81,6 @@\n \"geopandas\",\n \"pandas\",\n \"fiona\",\n- \"osgeo.gdal\",\n \"numpy\",\n \"shapely\",\n \"rtree\",\n@@ -43,8 +89,7 @@\n \"mapclassify\",\n \"pysal\",\n \"geopy\",\n- \"psycopg2\",\n- \"descartes\"\n+ \"psycopg2\"\n ]\n \n def get_version(module):\n@@ -60,9 +105,7 @@\n mod = importlib.import_module(modname)\n ver = get_version(mod)\n deps_info[modname] = ver\n- except ImportError:\n- deps_info[modname] = None\n- except AttributeError:\n+ except Exception:\n deps_info[modname] = None\n \n return deps_info\n@@ -78,6 +121,7 @@\n \"\"\"\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n+ proj_info = _get_C_info()\n \n maxlen = max(len(x) for x in deps_info)\n tpl = \"{{k:<{maxlen}}}: {{stat}}\".format(maxlen=maxlen)\n@@ -85,6 +129,10 @@\n print(\"-----------\")\n for k, stat in sys_info.items():\n print(tpl.format(k=k, stat=stat))\n+ print(\"\\nGEOS, GDAL, PROJ INFO\")\n+ print(\"---------------------\")\n+ for k, stat in proj_info.items():\n+ print(tpl.format(k=k, stat=stat))\n print(\"\\nPYTHON DEPENDENCIES\")\n print(\"-------------------\")\n for k, stat in deps_info.items():\n", "issue": "Add a show_versions function\nSimilarly to `pandas.show_versions()` and `sklearn.show_versions()`, it would be nice to have a function like that for GeoPandas. We can probably base the code on those examples. 
\r\n\r\nIt could list the versions of the direct python dependencies, but also try to show the versions of the underlying GEOS / GDAL libraries.\n", "before_files": [{"content": "import platform\nimport sys\nimport importlib\n\n\ndef _get_sys_info():\n \"\"\"System information\n\n Returns\n -------\n sys_info : dict\n system and Python version information\n \"\"\"\n python = sys.version.replace('\\n', ' ')\n\n blob = [\n (\"python\", python),\n ('executable', sys.executable),\n (\"machine\", platform.platform()),\n ]\n\n return dict(blob)\n\n\ndef _get_deps_info():\n \"\"\"Overview of the installed version of main dependencies\n\n Returns\n -------\n deps_info: dict\n version information on relevant Python libraries\n \"\"\"\n deps = [\n \"geopandas\",\n \"pandas\",\n \"fiona\",\n \"osgeo.gdal\",\n \"numpy\",\n \"shapely\",\n \"rtree\",\n \"pyproj\",\n \"matplotlib\",\n \"mapclassify\",\n \"pysal\",\n \"geopy\",\n \"psycopg2\",\n \"descartes\"\n ]\n\n def get_version(module):\n return module.__version__\n\n deps_info = {}\n\n for modname in deps:\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n ver = get_version(mod)\n deps_info[modname] = ver\n except ImportError:\n deps_info[modname] = None\n except AttributeError:\n deps_info[modname] = None\n\n return deps_info\n\n\ndef show_versions():\n \"\"\"\n Print system information and installed module versions.\n\n Example\n -------\n > python -c \"import geopandas; geopandas.show_versions()\"\n \"\"\"\n sys_info = _get_sys_info()\n deps_info = _get_deps_info()\n\n maxlen = max(len(x) for x in deps_info)\n tpl = \"{{k:<{maxlen}}}: {{stat}}\".format(maxlen=maxlen)\n print(\"\\nSYSTEM INFO\")\n print(\"-----------\")\n for k, stat in sys_info.items():\n print(tpl.format(k=k, stat=stat))\n print(\"\\nPYTHON DEPENDENCIES\")\n print(\"-------------------\")\n for k, stat in deps_info.items():\n print(tpl.format(k=k, stat=stat))\n", "path": "geopandas/tools/_show_versions.py"}]} | 1,280 | 796 |
gh_patches_debug_26059 | rasdani/github-patches | git_diff | DDMAL__CantusDB-192 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
make a test case to test the permissions
We have implemented a lot of different restrictions on views. Create a unit test to automate the testing process.
</issue>
<code>
[start of django/cantusdb_project/main_app/views/sequence.py]
1 from django.views.generic import DetailView, ListView, UpdateView
2 from main_app.models import Sequence
3 from django.db.models import Q
4 from main_app.forms import SequenceEditForm
5 from django.contrib.auth.mixins import LoginRequiredMixin
6 from django.contrib import messages
7 from django.contrib.auth.mixins import UserPassesTestMixin
8 from django.core.exceptions import PermissionDenied
9 from django.http import Http404
10
11
12
13 class SequenceDetailView(DetailView):
14 """
15 Displays a single Sequence object. Accessed with ``sequences/<int:pk>``
16 """
17
18 model = Sequence
19 context_object_name = "sequence"
20 template_name = "sequence_detail.html"
21
22 def get_context_data(self, **kwargs):
23
24 # if the sequence's source isn't public, only logged-in users should be able to view the sequence's detail page
25 sequence = self.get_object()
26 source = sequence.source
27 if (source.public is False) and (not self.request.user.is_authenticated):
28 raise PermissionDenied()
29
30 context = super().get_context_data(**kwargs)
31 context["concordances"] = Sequence.objects.filter(
32 cantus_id=self.get_object().cantus_id
33 ).order_by("siglum")
34 return context
35
36
37 class SequenceListView(ListView):
38 """
39 Displays a list of Sequence objects. Accessed with ``sequences/``
40 """
41
42 model = Sequence
43 paginate_by = 100
44 context_object_name = "sequences"
45 template_name = "sequence_list.html"
46
47 def get_queryset(self):
48 queryset = super().get_queryset()
49 q_obj_filter = Q(source__visible=True)
50 q_obj_filter &= Q(source__public=True)
51
52 if self.request.GET.get("incipit"):
53 incipit = self.request.GET.get("incipit")
54 q_obj_filter &= Q(incipit__icontains=incipit)
55 if self.request.GET.get("siglum"):
56 siglum = self.request.GET.get("siglum")
57 q_obj_filter &= Q(siglum__icontains=siglum)
58 if self.request.GET.get("cantus_id"):
59 cantus_id = self.request.GET.get("cantus_id")
60 q_obj_filter &= Q(cantus_id__icontains=cantus_id)
61
62 return queryset.filter(q_obj_filter).order_by("siglum", "sequence")
63
64 class SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
65 template_name = "sequence_edit.html"
66 model = Sequence
67 form_class = SequenceEditForm
68 pk_url_kwarg = "sequence_id"
69
70 def form_valid(self, form):
71 messages.success(
72 self.request,
73 "Sequence updated successfully!",
74 )
75 return super().form_valid(form)
76
77 def test_func(self):
78 user = self.request.user
79 sequence_id = self.kwargs.get(self.pk_url_kwarg)
80 try:
81 sequence = Sequence.objects.get(id=sequence_id)
82 except:
83 raise Http404("This sequence does not exist")
84 # find the source of this sequence
85 source = sequence.source
86 # checks if the user is an editor or a proofreader,
87 # and if the user is given privilege to edit this source and thus, it's sequences
88 is_editor_proofreader = user.groups.filter(Q(name="editor")|Q(name="proofreader")).exists()
89 can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)
90 # checks if the user is a project manager (they should have the privilege to edit any sequence)
91 is_project_manager = user.groups.filter(name="project manager").exists()
92 # checks if the user is a contributor,
93 # and if the user is the creator of this source
94 # (they should only have the privilege to edit sequences in a source they have created)
95 is_contributor = user.groups.filter(name="contributor").exists()
96
97 if ((is_editor_proofreader and can_edit_sequences_in_source)
98 or (is_project_manager)
99 or (is_contributor and source.created_by == user)):
100 return True
101 else:
102 return False
103
[end of django/cantusdb_project/main_app/views/sequence.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py
--- a/django/cantusdb_project/main_app/views/sequence.py
+++ b/django/cantusdb_project/main_app/views/sequence.py
@@ -76,27 +76,10 @@
def test_func(self):
user = self.request.user
- sequence_id = self.kwargs.get(self.pk_url_kwarg)
- try:
- sequence = Sequence.objects.get(id=sequence_id)
- except:
- raise Http404("This sequence does not exist")
- # find the source of this sequence
- source = sequence.source
- # checks if the user is an editor or a proofreader,
- # and if the user is given privilege to edit this source and thus, it's sequences
- is_editor_proofreader = user.groups.filter(Q(name="editor")|Q(name="proofreader")).exists()
- can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)
# checks if the user is a project manager (they should have the privilege to edit any sequence)
is_project_manager = user.groups.filter(name="project manager").exists()
- # checks if the user is a contributor,
- # and if the user is the creator of this source
- # (they should only have the privilege to edit sequences in a source they have created)
- is_contributor = user.groups.filter(name="contributor").exists()
-
- if ((is_editor_proofreader and can_edit_sequences_in_source)
- or (is_project_manager)
- or (is_contributor and source.created_by == user)):
+
+ if is_project_manager:
return True
else:
return False
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py\n--- a/django/cantusdb_project/main_app/views/sequence.py\n+++ b/django/cantusdb_project/main_app/views/sequence.py\n@@ -76,27 +76,10 @@\n \n def test_func(self):\n user = self.request.user\n- sequence_id = self.kwargs.get(self.pk_url_kwarg)\n- try:\n- sequence = Sequence.objects.get(id=sequence_id)\n- except:\n- raise Http404(\"This sequence does not exist\")\n- # find the source of this sequence\n- source = sequence.source\n- # checks if the user is an editor or a proofreader,\n- # and if the user is given privilege to edit this source and thus, it's sequences\n- is_editor_proofreader = user.groups.filter(Q(name=\"editor\")|Q(name=\"proofreader\")).exists()\n- can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n- # checks if the user is a contributor,\n- # and if the user is the creator of this source \n- # (they should only have the privilege to edit sequences in a source they have created)\n- is_contributor = user.groups.filter(name=\"contributor\").exists()\n-\n- if ((is_editor_proofreader and can_edit_sequences_in_source) \n- or (is_project_manager) \n- or (is_contributor and source.created_by == user)):\n+\n+ if is_project_manager:\n return True\n else:\n return False\n", "issue": "make a test case to test the permissions\nwe have implemented a lot of different restrictions to views. create a unit test to automate the testing process.\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView, UpdateView\nfrom main_app.models import Sequence\nfrom django.db.models import Q\nfrom main_app.forms import SequenceEditForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\n\n\n\nclass SequenceDetailView(DetailView):\n \"\"\"\n Displays a single Sequence object. Accessed with ``sequences/<int:pk>``\n \"\"\"\n\n model = Sequence\n context_object_name = \"sequence\"\n template_name = \"sequence_detail.html\"\n\n def get_context_data(self, **kwargs):\n\n # if the sequence's source isn't public, only logged-in users should be able to view the sequence's detail page\n sequence = self.get_object()\n source = sequence.source\n if (source.public is False) and (not self.request.user.is_authenticated):\n raise PermissionDenied()\n \n context = super().get_context_data(**kwargs)\n context[\"concordances\"] = Sequence.objects.filter(\n cantus_id=self.get_object().cantus_id\n ).order_by(\"siglum\")\n return context\n\n\nclass SequenceListView(ListView):\n \"\"\"\n Displays a list of Sequence objects. 
Accessed with ``sequences/``\n \"\"\"\n\n model = Sequence\n paginate_by = 100\n context_object_name = \"sequences\"\n template_name = \"sequence_list.html\"\n\n def get_queryset(self):\n queryset = super().get_queryset()\n q_obj_filter = Q(source__visible=True)\n q_obj_filter &= Q(source__public=True)\n\n if self.request.GET.get(\"incipit\"):\n incipit = self.request.GET.get(\"incipit\")\n q_obj_filter &= Q(incipit__icontains=incipit)\n if self.request.GET.get(\"siglum\"):\n siglum = self.request.GET.get(\"siglum\")\n q_obj_filter &= Q(siglum__icontains=siglum)\n if self.request.GET.get(\"cantus_id\"):\n cantus_id = self.request.GET.get(\"cantus_id\")\n q_obj_filter &= Q(cantus_id__icontains=cantus_id)\n\n return queryset.filter(q_obj_filter).order_by(\"siglum\", \"sequence\")\n\nclass SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"sequence_edit.html\"\n model = Sequence\n form_class = SequenceEditForm\n pk_url_kwarg = \"sequence_id\"\n\n def form_valid(self, form):\n messages.success(\n self.request,\n \"Sequence updated successfully!\",\n )\n return super().form_valid(form)\n\n def test_func(self):\n user = self.request.user\n sequence_id = self.kwargs.get(self.pk_url_kwarg)\n try:\n sequence = Sequence.objects.get(id=sequence_id)\n except:\n raise Http404(\"This sequence does not exist\")\n # find the source of this sequence\n source = sequence.source\n # checks if the user is an editor or a proofreader,\n # and if the user is given privilege to edit this source and thus, it's sequences\n is_editor_proofreader = user.groups.filter(Q(name=\"editor\")|Q(name=\"proofreader\")).exists()\n can_edit_sequences_in_source = user.sources_user_can_edit.filter(id=source.id)\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n # checks if the user is a contributor,\n # and if the user is the creator of this source \n # (they should only have the privilege to edit sequences in a source they have created)\n is_contributor = user.groups.filter(name=\"contributor\").exists()\n\n if ((is_editor_proofreader and can_edit_sequences_in_source) \n or (is_project_manager) \n or (is_contributor and source.created_by == user)):\n return True\n else:\n return False\n", "path": "django/cantusdb_project/main_app/views/sequence.py"}]} | 1,644 | 393 |
gh_patches_debug_12143 | rasdani/github-patches | git_diff | google__turbinia-294 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Programmatically setting config file
I am trying to write tests for the dftimewolf turbinia module. Loading the configuration on a system that doesn't have one will fail, so I am trying to manually feed in test config data to see if the module behaves as expected.
I tried setting the `TURBINIA_CONFIG_PATH` environment variable, but this just *adds* the path to the list of possible config paths. This would work in a pristine test environment, but it will break in my dev setup where I already have a production turbinia config file set up.
What do you think of giving the `TURBINIA_CONFIG_PATH` environment variable precedence over the other potential config locations?
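For illustration, the behaviour I have in mind would look roughly like this — an untested sketch only, reusing the `TURBINIA_CONFIG_PATH` name and the module's idea of a list of default config directories:
```python
import os


def candidate_config_dirs(default_dirs, env_var='TURBINIA_CONFIG_PATH'):
    """Return the directories to search for a Turbinia config file.

    If the environment variable is set, use *only* its paths instead of
    appending them to the defaults, so a test can fully control the config.
    """
    if env_var in os.environ:
        return os.environ[env_var].split(':')
    return list(default_dirs)
```
That way a test harness can point Turbinia at a throwaway config without being affected by whatever is in the home directory or `/etc/turbinia`.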
</issue>
<code>
[start of turbinia/config/__init__.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2016 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Basic Turbinia config."""
16
17 from __future__ import unicode_literals
18
19 import imp
20 import itertools
21 import logging
22 import os
23 import sys
24
25 log = logging.getLogger('turbinia')
26
27 # Look for config files with these names
28 CONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config.py']
29 # Look in homedir first, then /etc/turbinia, and finally in the source
30 # config dir for config files
31 CONFIGPATH = [
32 os.path.expanduser('~'),
33 '/etc/turbinia',
34 os.path.dirname(os.path.abspath(__file__))]
35 # Config vars that we expect to exist in the configuration
36 CONFIGVARS = [
37 # Turbinia Config
38 'TASK_MANAGER',
39 'LOG_FILE',
40 'LOCK_FILE',
41 'OUTPUT_DIR',
42 'SLEEP_TIME',
43 'SINGLE_RUN',
44 'MOUNT_DIR_PREFIX',
45 'SHARED_FILESYSTEM',
46 # TODO(aarontp): Move this to the recipe config when it's available.
47 'DEBUG_TASKS',
48 # GCE CONFIG
49 'PROJECT',
50 'ZONE',
51 'TURBINIA_REGION',
52 'BUCKET_NAME',
53 'PSQ_TOPIC',
54 'PUBSUB_TOPIC',
55 'GCS_OUTPUT_PATH',
56 'STATE_MANAGER',
57 'INSTANCE_ID',
58 # REDIS CONFIG
59 'REDIS_HOST',
60 'REDIS_PORT',
61 'REDIS_DB',
62 # Celery config
63 'CELERY_BROKER',
64 'CELERY_BACKEND',
65 'KOMBU_BROKER',
66 'KOMBU_CHANNEL',
67 'KOMBU_DURABLE',]
68 # Environment variable to look for path data in
69 ENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'
70
71 CONFIG = None
72
73
74 class TurbiniaConfigException(Exception):
75 """Exception for Turbinia configuration."""
76 pass
77
78
79 def LoadConfig():
80 """Finds Turbinia config file and loads it."""
81 # TODO(aarontp): Find way to not require global var here. Maybe a singleton
82 # pattern on the config class.
83 # pylint: disable=global-statement
84 global CONFIG
85 if CONFIG:
86 return CONFIG
87
88 if ENVCONFIGVAR in os.environ:
89 CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))
90
91 config_file = None
92 # Load first file found
93 for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):
94 if os.path.exists(os.path.join(_dir, _file)):
95 config_file = os.path.join(_dir, _file)
96 break
97
98 if config_file is None:
99 raise TurbiniaConfigException('No config files found')
100
101 log.info('Loading config from {0:s}'.format(config_file))
102 _config = imp.load_source('config', config_file)
103 _config.configSource = config_file
104 ValidateAndSetConfig(_config)
105 CONFIG = _config
106 return _config
107
108
109 def ValidateAndSetConfig(_config):
110 """Makes sure that the config has the vars loaded and set in the module."""
111 # TODO(aarontp): Allow for non-mandatory config options
112 for var in CONFIGVARS:
113 if not hasattr(_config, var):
114 raise TurbiniaConfigException(
115 'No config attribute {0:s}:{1:s}'.format(_config.configSource, var))
116 if getattr(_config, var) is None:
117 raise TurbiniaConfigException(
118 'Config attribute {0:s}:{1:s} is not set'.format(
119 _config.configSource, var))
120
121 # Set the attribute in the current module
122 setattr(sys.modules[__name__], var, getattr(_config, var))
123
[end of turbinia/config/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py
--- a/turbinia/config/__init__.py
+++ b/turbinia/config/__init__.py
@@ -85,12 +85,16 @@
if CONFIG:
return CONFIG
+ # If the environment variable is set, take precedence over the pre-defined
+ # CONFIGPATHs.
+ configpath = CONFIGPATH
if ENVCONFIGVAR in os.environ:
- CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))
+ configpath = os.environ[ENVCONFIGVAR].split(':')
+
config_file = None
# Load first file found
- for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):
+ for _dir, _file in itertools.product(configpath, CONFIGFILES):
if os.path.exists(os.path.join(_dir, _file)):
config_file = os.path.join(_dir, _file)
break
| {"golden_diff": "diff --git a/turbinia/config/__init__.py b/turbinia/config/__init__.py\n--- a/turbinia/config/__init__.py\n+++ b/turbinia/config/__init__.py\n@@ -85,12 +85,16 @@\n if CONFIG:\n return CONFIG\n \n+ # If the environment variable is set, take precedence over the pre-defined\n+ # CONFIGPATHs.\n+ configpath = CONFIGPATH\n if ENVCONFIGVAR in os.environ:\n- CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))\n+ configpath = os.environ[ENVCONFIGVAR].split(':')\n+\n \n config_file = None\n # Load first file found\n- for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):\n+ for _dir, _file in itertools.product(configpath, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n", "issue": "Programatically setting config file\nI am trying to write tests for the dftimewolf turbinia module. Loading the configuration on a system that doesn't have one will fail, so I am trying to manually feed in a test config data to see if the module behaves as expected.\r\n\r\nI tried setting the `TURBINIA_CONFIG_PATH` environment variable, but this just *adds* the path the list of possible config paths. This would work in a pristine test environment, but it will break in my dev setup where I already have a production turbinia config file set up.\r\n\r\nWhat do you think of giving `TURBINIA_CONFIG_PATH` environment variable precedence over the other potential config locations?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic Turbinia config.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport imp\nimport itertools\nimport logging\nimport os\nimport sys\n\nlog = logging.getLogger('turbinia')\n\n# Look for config files with these names\nCONFIGFILES = ['.turbiniarc', 'turbinia.conf', 'turbinia_config.py']\n# Look in homedir first, then /etc/turbinia, and finally in the source\n# config dir for config files\nCONFIGPATH = [\n os.path.expanduser('~'),\n '/etc/turbinia',\n os.path.dirname(os.path.abspath(__file__))]\n# Config vars that we expect to exist in the configuration\nCONFIGVARS = [\n # Turbinia Config\n 'TASK_MANAGER',\n 'LOG_FILE',\n 'LOCK_FILE',\n 'OUTPUT_DIR',\n 'SLEEP_TIME',\n 'SINGLE_RUN',\n 'MOUNT_DIR_PREFIX',\n 'SHARED_FILESYSTEM',\n # TODO(aarontp): Move this to the recipe config when it's available.\n 'DEBUG_TASKS',\n # GCE CONFIG\n 'PROJECT',\n 'ZONE',\n 'TURBINIA_REGION',\n 'BUCKET_NAME',\n 'PSQ_TOPIC',\n 'PUBSUB_TOPIC',\n 'GCS_OUTPUT_PATH',\n 'STATE_MANAGER',\n 'INSTANCE_ID',\n # REDIS CONFIG\n 'REDIS_HOST',\n 'REDIS_PORT',\n 'REDIS_DB',\n # Celery config\n 'CELERY_BROKER',\n 'CELERY_BACKEND',\n 'KOMBU_BROKER',\n 'KOMBU_CHANNEL',\n 'KOMBU_DURABLE',]\n# Environment variable to look for path data in\nENVCONFIGVAR = 'TURBINIA_CONFIG_PATH'\n\nCONFIG = None\n\n\nclass TurbiniaConfigException(Exception):\n \"\"\"Exception for Turbinia configuration.\"\"\"\n pass\n\n\ndef LoadConfig():\n \"\"\"Finds Turbinia config file 
and loads it.\"\"\"\n # TODO(aarontp): Find way to not require global var here. Maybe a singleton\n # pattern on the config class.\n # pylint: disable=global-statement\n global CONFIG\n if CONFIG:\n return CONFIG\n\n if ENVCONFIGVAR in os.environ:\n CONFIGPATH.extend(os.environ[ENVCONFIGVAR].split(':'))\n\n config_file = None\n # Load first file found\n for _dir, _file in itertools.product(CONFIGPATH, CONFIGFILES):\n if os.path.exists(os.path.join(_dir, _file)):\n config_file = os.path.join(_dir, _file)\n break\n\n if config_file is None:\n raise TurbiniaConfigException('No config files found')\n\n log.info('Loading config from {0:s}'.format(config_file))\n _config = imp.load_source('config', config_file)\n _config.configSource = config_file\n ValidateAndSetConfig(_config)\n CONFIG = _config\n return _config\n\n\ndef ValidateAndSetConfig(_config):\n \"\"\"Makes sure that the config has the vars loaded and set in the module.\"\"\"\n # TODO(aarontp): Allow for non-mandatory config options\n for var in CONFIGVARS:\n if not hasattr(_config, var):\n raise TurbiniaConfigException(\n 'No config attribute {0:s}:{1:s}'.format(_config.configSource, var))\n if getattr(_config, var) is None:\n raise TurbiniaConfigException(\n 'Config attribute {0:s}:{1:s} is not set'.format(\n _config.configSource, var))\n\n # Set the attribute in the current module\n setattr(sys.modules[__name__], var, getattr(_config, var))\n", "path": "turbinia/config/__init__.py"}]} | 1,878 | 221 |
gh_patches_debug_5678 | rasdani/github-patches | git_diff | stephenmcd__mezzanine-1954 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Small typo in mezzanine/core/auth_backends.py
Should read verification rather than verficiation.
</issue>
<code>
[start of mezzanine/core/auth_backends.py]
1 from __future__ import unicode_literals
2
3 from django.contrib.auth import get_user_model
4 from django.contrib.auth.backends import ModelBackend
5 from django.contrib.auth.tokens import default_token_generator
6 from django.db.models import Q
7 from django.utils.http import base36_to_int
8
9
10 User = get_user_model()
11
12
13 class MezzanineBackend(ModelBackend):
14 """
15 Extends Django's ``ModelBackend`` to allow login via username,
16 email, or verification token.
17
18 Args are either ``username`` and ``password``, or ``uidb36``
19 and ``token``. In either case, ``is_active`` can also be given.
20
21 For login, is_active is not given, so that the login form can
22 raise a specific error for inactive users.
23 For password reset, True is given for is_active.
24 For signup verficiation, False is given for is_active.
25 """
26
27 def authenticate(self, *args, **kwargs):
28 if kwargs:
29 username = kwargs.pop("username", None)
30 if username:
31 username_or_email = Q(username=username) | Q(email=username)
32 password = kwargs.pop("password", None)
33 try:
34 user = User.objects.get(username_or_email, **kwargs)
35 except User.DoesNotExist:
36 pass
37 else:
38 if user.check_password(password):
39 return user
40 else:
41 if 'uidb36' not in kwargs:
42 return
43 kwargs["id"] = base36_to_int(kwargs.pop("uidb36"))
44 token = kwargs.pop("token")
45 try:
46 user = User.objects.get(**kwargs)
47 except User.DoesNotExist:
48 pass
49 else:
50 if default_token_generator.check_token(user, token):
51 return user
52
[end of mezzanine/core/auth_backends.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mezzanine/core/auth_backends.py b/mezzanine/core/auth_backends.py
--- a/mezzanine/core/auth_backends.py
+++ b/mezzanine/core/auth_backends.py
@@ -21,7 +21,7 @@
For login, is_active is not given, so that the login form can
raise a specific error for inactive users.
For password reset, True is given for is_active.
- For signup verficiation, False is given for is_active.
+ For signup verification, False is given for is_active.
"""
def authenticate(self, *args, **kwargs):
| {"golden_diff": "diff --git a/mezzanine/core/auth_backends.py b/mezzanine/core/auth_backends.py\n--- a/mezzanine/core/auth_backends.py\n+++ b/mezzanine/core/auth_backends.py\n@@ -21,7 +21,7 @@\n For login, is_active is not given, so that the login form can\n raise a specific error for inactive users.\n For password reset, True is given for is_active.\n- For signup verficiation, False is given for is_active.\n+ For signup verification, False is given for is_active.\n \"\"\"\n \n def authenticate(self, *args, **kwargs):\n", "issue": "Small typo in mezzanine/core/auth_backends.py\nShould read verification rather than verficiation.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.db.models import Q\nfrom django.utils.http import base36_to_int\n\n\nUser = get_user_model()\n\n\nclass MezzanineBackend(ModelBackend):\n \"\"\"\n Extends Django's ``ModelBackend`` to allow login via username,\n email, or verification token.\n\n Args are either ``username`` and ``password``, or ``uidb36``\n and ``token``. In either case, ``is_active`` can also be given.\n\n For login, is_active is not given, so that the login form can\n raise a specific error for inactive users.\n For password reset, True is given for is_active.\n For signup verficiation, False is given for is_active.\n \"\"\"\n\n def authenticate(self, *args, **kwargs):\n if kwargs:\n username = kwargs.pop(\"username\", None)\n if username:\n username_or_email = Q(username=username) | Q(email=username)\n password = kwargs.pop(\"password\", None)\n try:\n user = User.objects.get(username_or_email, **kwargs)\n except User.DoesNotExist:\n pass\n else:\n if user.check_password(password):\n return user\n else:\n if 'uidb36' not in kwargs:\n return\n kwargs[\"id\"] = base36_to_int(kwargs.pop(\"uidb36\"))\n token = kwargs.pop(\"token\")\n try:\n user = User.objects.get(**kwargs)\n except User.DoesNotExist:\n pass\n else:\n if default_token_generator.check_token(user, token):\n return user\n", "path": "mezzanine/core/auth_backends.py"}]} | 1,025 | 138 |
gh_patches_debug_794 | rasdani/github-patches | git_diff | scikit-image__scikit-image-3650 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tifffile: try to use the one in the user's install first
Should we try importing tifffile before using the one we versioned?
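Something along these lines is what I have in mind — just a sketch, using the same names the plugin currently imports from the bundled copy:
```python
try:
    # Prefer a tifffile the user has installed themselves, if any.
    from tifffile import TiffFile, imsave, parse_kwargs
except ImportError:
    # Fall back to the copy we version inside skimage.external.
    from skimage.external.tifffile import TiffFile, imsave, parse_kwargs
```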
</issue>
<code>
[start of skimage/io/_plugins/tifffile_plugin.py]
1 from ...external.tifffile import TiffFile, imsave, parse_kwargs
2
3
4 def imread(fname, dtype=None, **kwargs):
5 """Load a tiff image from file.
6
7 Parameters
8 ----------
9 fname : str or file
10 File name or file-like-object.
11 dtype : numpy dtype object or string specifier
12 Specifies data type of array elements (Not currently used).
13 kwargs : keyword pairs, optional
14 Additional keyword arguments to pass through (see ``tifffile``'s
15 ``imread`` function).
16
17 Notes
18 -----
19 Provided by Christophe Golhke's tifffile.py [1]_, and supports many
20 advanced image types including multi-page and floating point.
21
22 References
23 ----------
24 .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py
25
26 """
27
28 if 'img_num' in kwargs:
29 kwargs['key'] = kwargs.pop('img_num')
30
31 # parse_kwargs will extract keyword arguments intended for the TiffFile
32 # class and remove them from the kwargs dictionary in-place
33 tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome']
34 kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)
35
36 # read and return tiff as numpy array
37 with TiffFile(fname, **kwargs_tiff) as tif:
38 return tif.asarray(**kwargs)
39
[end of skimage/io/_plugins/tifffile_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py
--- a/skimage/io/_plugins/tifffile_plugin.py
+++ b/skimage/io/_plugins/tifffile_plugin.py
@@ -1,4 +1,7 @@
-from ...external.tifffile import TiffFile, imsave, parse_kwargs
+try:
+ from tifffile import TiffFile, imsave, parse_kwargs
+except ImportError:
+ from ...external.tifffile import TiffFile, imsave, parse_kwargs
def imread(fname, dtype=None, **kwargs):
| {"golden_diff": "diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py\n--- a/skimage/io/_plugins/tifffile_plugin.py\n+++ b/skimage/io/_plugins/tifffile_plugin.py\n@@ -1,4 +1,7 @@\n-from ...external.tifffile import TiffFile, imsave, parse_kwargs\n+try:\n+ from tifffile import TiffFile, imsave, parse_kwargs\n+except ImportError:\n+ from ...external.tifffile import TiffFile, imsave, parse_kwargs\n \n \n def imread(fname, dtype=None, **kwargs):\n", "issue": "tifffile: try to use the one in the user's install first\nShould we try importing tifffile before using the one we versionned it?\n", "before_files": [{"content": "from ...external.tifffile import TiffFile, imsave, parse_kwargs\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements (Not currently used).\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by Christophe Golhke's tifffile.py [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n\n \"\"\"\n\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n # parse_kwargs will extract keyword arguments intended for the TiffFile \n # class and remove them from the kwargs dictionary in-place\n tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome']\n kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)\n\n # read and return tiff as numpy array\n with TiffFile(fname, **kwargs_tiff) as tif:\n return tif.asarray(**kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}]} | 963 | 143 |
gh_patches_debug_1461 | rasdani/github-patches | git_diff | kartoza__prj.app-346 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Display thumbnails in a modal window when we click on fullscreen
We can see a lot of GIFs in the QGIS changelog. These thumbnails are too small to see, so I have to click on the button to view them fullscreen. For now, it redirects to the GIF URL, like http://changelog.qgis.org/media/images/entries/53f72a9cf1bf32d73eb5174c37e54c60002b9707.gif
The user needs to use the "previous" button in the web browser to come back to the changelog.
It would be better to implement a javascript modal window to show the GIF and to stay on the URL http://changelog.qgis.org/en/qgis/version/2.16.0/
</issue>
<code>
[start of django_project/core/settings/project.py]
1 # coding=utf-8
2
3 """Project level settings.
4
5 Adjust these values as needed but don't commit passwords etc. to any public
6 repository!
7 """
8
9 import os # noqa
10 from django.utils.translation import ugettext_lazy as _
11 from .utils import absolute_path
12 from .contrib import * # noqa
13
14 # Project apps
15 INSTALLED_APPS += (
16 'base',
17 'changes',
18 'github_issue',
19 'vota',
20 )
21
22 # Due to profile page does not available,
23 # this will redirect to home page after login
24 LOGIN_REDIRECT_URL = '/'
25
26 # How many versions to list in each project box
27 PROJECT_VERSION_LIST_SIZE = 10
28
29 # Set debug to false for production
30 DEBUG = TEMPLATE_DEBUG = False
31
32 SOUTH_TESTS_MIGRATE = False
33
34
35 # Set languages which want to be translated
36 LANGUAGES = (
37 ('en', _('English')),
38 ('af', _('Afrikaans')),
39 ('id', _('Indonesian')),
40 ('ko', _('Korean')),
41 )
42
43 # Set storage path for the translation files
44 LOCALE_PATHS = (absolute_path('locale'),)
45
46
47 MIDDLEWARE_CLASSES = (
48 # For nav bar generation
49 'core.custom_middleware.NavContextMiddleware',
50 ) + MIDDLEWARE_CLASSES
51
52 # Project specific javascript files to be pipelined
53 # For third party libs like jquery should go in contrib.py
54 PIPELINE_JS['project'] = {
55 'source_filenames': (
56 'js/csrf-ajax.js',
57 'js/changelog.js',
58 'js/github-issue.js'
59 ),
60 'output_filename': 'js/project.js',
61 }
62
63 # Project specific css files to be pipelined
64 # For third party libs like bootstrap should go in contrib.py
65 PIPELINE_CSS['project'] = {
66 'source_filenames': (
67 'css/changelog.css',
68 'css/form.css',
69 'css/fonts.css'
70 ),
71 'output_filename': 'css/project.css',
72 'extra_context': {
73 'media': 'screen, projection',
74 },
75 }
76
[end of django_project/core/settings/project.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/core/settings/project.py b/django_project/core/settings/project.py
--- a/django_project/core/settings/project.py
+++ b/django_project/core/settings/project.py
@@ -55,7 +55,8 @@
'source_filenames': (
'js/csrf-ajax.js',
'js/changelog.js',
- 'js/github-issue.js'
+ 'js/github-issue.js',
+ 'js/entry.js',
),
'output_filename': 'js/project.js',
}
| {"golden_diff": "diff --git a/django_project/core/settings/project.py b/django_project/core/settings/project.py\n--- a/django_project/core/settings/project.py\n+++ b/django_project/core/settings/project.py\n@@ -55,7 +55,8 @@\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n- 'js/github-issue.js'\n+ 'js/github-issue.js',\n+ 'js/entry.js',\n ),\n 'output_filename': 'js/project.js',\n }\n", "issue": "Display thumbnails in a modal window when we click on fullscreen\nWe can see a lot of GIF in the QGIS changelog. These thumbnails are too small to see so I have to click on the button to see it fullscreen. For now, it redirects to the GIF url like http://changelog.qgis.org/media/images/entries/53f72a9cf1bf32d73eb5174c37e54c60002b9707.gif\nThe user needs to use the \"previous\" button in the web browser to come back to the changelog.\n\nIt would be better to implement a javascript modal window to show the GIF and to stay on the URL http://changelog.qgis.org/en/qgis/version/2.16.0/\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Project level settings.\n\nAdjust these values as needed but don't commit passwords etc. to any public\nrepository!\n\"\"\"\n\nimport os # noqa\nfrom django.utils.translation import ugettext_lazy as _\nfrom .utils import absolute_path\nfrom .contrib import * # noqa\n\n# Project apps\nINSTALLED_APPS += (\n 'base',\n 'changes',\n 'github_issue',\n 'vota',\n)\n\n# Due to profile page does not available,\n# this will redirect to home page after login\nLOGIN_REDIRECT_URL = '/'\n\n# How many versions to list in each project box\nPROJECT_VERSION_LIST_SIZE = 10\n\n# Set debug to false for production\nDEBUG = TEMPLATE_DEBUG = False\n\nSOUTH_TESTS_MIGRATE = False\n\n\n# Set languages which want to be translated\nLANGUAGES = (\n ('en', _('English')),\n ('af', _('Afrikaans')),\n ('id', _('Indonesian')),\n ('ko', _('Korean')),\n)\n\n# Set storage path for the translation files\nLOCALE_PATHS = (absolute_path('locale'),)\n\n\nMIDDLEWARE_CLASSES = (\n # For nav bar generation\n 'core.custom_middleware.NavContextMiddleware',\n) + MIDDLEWARE_CLASSES\n\n# Project specific javascript files to be pipelined\n# For third party libs like jquery should go in contrib.py\nPIPELINE_JS['project'] = {\n 'source_filenames': (\n 'js/csrf-ajax.js',\n 'js/changelog.js',\n 'js/github-issue.js'\n ),\n 'output_filename': 'js/project.js',\n}\n\n# Project specific css files to be pipelined\n# For third party libs like bootstrap should go in contrib.py\nPIPELINE_CSS['project'] = {\n 'source_filenames': (\n 'css/changelog.css',\n 'css/form.css',\n 'css/fonts.css'\n ),\n 'output_filename': 'css/project.css',\n 'extra_context': {\n 'media': 'screen, projection',\n },\n}\n", "path": "django_project/core/settings/project.py"}]} | 1,280 | 111 |
gh_patches_debug_13638 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-2503 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Current `tox` configuration ends up testing old code
For example:
``` bash
$ rm -r .tox/
$ tox -e system-tests --notest
GLOB sdist-make: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/setup.py
system-tests create: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/system-tests
system-tests inst: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/dist/google-cloud-0.20.0.zip
system-tests installed: -f file:///home/tseaver/.pip/wheels,enum34==1.1.6,future==0.15.2,futures==3.0.5,gapic-google-logging-v2==0.10.1,gapic-google-pubsub-v1==0.10.1,google-cloud==0.20.0,google-cloud-bigquery==0.20.0,google-cloud-bigtable==0.20.0,google-cloud-core==0.20.0,google-cloud-datastore==0.20.0,google-cloud-dns==0.20.0,google-cloud-error-reporting==0.20.0,google-cloud-happybase==0.20.0,google-cloud-language==0.20.0,google-cloud-logging==0.20.0,google-cloud-monitoring==0.20.0,google-cloud-pubsub==0.20.0,google-cloud-resource-manager==0.20.0,google-cloud-storage==0.20.0,google-cloud-translate==0.20.0,google-cloud-vision==0.20.0,google-gax==0.14.1,googleapis-common-protos==1.3.5,grpc-google-iam-v1==0.10.1,grpc-google-logging-v2==0.10.1,grpc-google-pubsub-v1==0.10.1,grpcio==1.0.0,httplib2==0.9.2,oauth2client==3.0.0,ply==3.8,protobuf==3.1.0.post1,pyasn1==0.1.9,pyasn1-modules==0.0.8,rsa==3.4.2,six==1.10.0
___________________________________ summary ____________________________________
system-tests: skipped tests
congratulations :)
$ diff -ru .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/ storage/google/cloud/storage/ --exclude="*.pyc"
diff -ru '--exclude=*.pyc' .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py storage/google/cloud/storage/blob.py
--- .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py 2016-10-05 18:15:48.724796000 -0400
+++ storage/google/cloud/storage/blob.py 2016-10-05 18:02:55.872830411 -0400
@@ -655,6 +655,32 @@
self.acl.all().grant_read()
self.acl.save(client=client)
+ def compose(self, sources, client=None):
+ """Concatenate source blobs into this one.
+
+ :type sources: list of :class:`Blob`
+ :param sources: blobs whose contents will be composed into this blob.
+
+ :type client: :class:`~google.cloud.storage.client.Client` or
+ ``NoneType``
+ :param client: Optional. The client to use. If not passed, falls back
+ to the ``client`` stored on the blob's bucket.
+
+ :raises: :exc:`ValueError` if this blob does not have its
+ :attr:`content_type` set.
+ """
+ if self.content_type is None:
+ raise ValueError("Destination 'content_type' not set.")
+ client = self._require_client(client)
+ request = {
+ 'sourceObjects': [{'name': source.name} for source in sources],
+ 'destination': self._properties.copy(),
+ }
+ api_response = client.connection.api_request(
+ method='POST', path=self.path + '/compose', data=request,
+ _target_object=self)
+ self._set_properties(api_response)
+
cache_control = _scalar_property('cacheControl')
"""HTTP 'Cache-Control' header for this object.
```
Somehow, the tarball / wheel is being cached.
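As a stopgap, forcing tox to rebuild the env and sdist instead of reusing the cached ones may sidestep this — an untested sketch, with paths as in the output above:

```python
import shutil
import subprocess

# Drop the cached virtualenv and sdist, then rebuild them from scratch.
shutil.rmtree('.tox/system-tests', ignore_errors=True)
shutil.rmtree('.tox/dist', ignore_errors=True)
subprocess.check_call(['tox', '-e', 'system-tests', '--recreate', '--notest'])
```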
</issue>
<code>
[start of scripts/pycodestyle_on_repo.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Custom script to run pycodestyle on google-cloud codebase.
16
17 This runs pycodestyle as a script via subprocess but only runs it on the
18 .py files that are checked in to the repository.
19 """
20
21
22 import os
23 import subprocess
24 import sys
25
26 from script_utils import get_affected_files
27
28
29 def main():
30 """Run pycodestyle on all Python files in the repository."""
31 git_root = subprocess.check_output(
32 ['git', 'rev-parse', '--show-toplevel']).strip()
33 os.chdir(git_root)
34 candidates, _ = get_affected_files()
35 python_files = [
36 candidate for candidate in candidates if candidate.endswith('.py')]
37
38 pycodestyle_command = ['pycodestyle'] + python_files
39 status_code = subprocess.call(pycodestyle_command)
40 sys.exit(status_code)
41
42
43 if __name__ == '__main__':
44 main()
45
[end of scripts/pycodestyle_on_repo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/pycodestyle_on_repo.py b/scripts/pycodestyle_on_repo.py
--- a/scripts/pycodestyle_on_repo.py
+++ b/scripts/pycodestyle_on_repo.py
@@ -19,6 +19,8 @@
"""
+from __future__ import print_function
+
import os
import subprocess
import sys
@@ -35,9 +37,12 @@
python_files = [
candidate for candidate in candidates if candidate.endswith('.py')]
- pycodestyle_command = ['pycodestyle'] + python_files
- status_code = subprocess.call(pycodestyle_command)
- sys.exit(status_code)
+ if not python_files:
+ print('No Python files to lint, exiting.')
+ else:
+ pycodestyle_command = ['pycodestyle'] + python_files
+ status_code = subprocess.call(pycodestyle_command)
+ sys.exit(status_code)
if __name__ == '__main__':
| {"golden_diff": "diff --git a/scripts/pycodestyle_on_repo.py b/scripts/pycodestyle_on_repo.py\n--- a/scripts/pycodestyle_on_repo.py\n+++ b/scripts/pycodestyle_on_repo.py\n@@ -19,6 +19,8 @@\n \"\"\"\n \n \n+from __future__ import print_function\n+\n import os\n import subprocess\n import sys\n@@ -35,9 +37,12 @@\n python_files = [\n candidate for candidate in candidates if candidate.endswith('.py')]\n \n- pycodestyle_command = ['pycodestyle'] + python_files\n- status_code = subprocess.call(pycodestyle_command)\n- sys.exit(status_code)\n+ if not python_files:\n+ print('No Python files to lint, exiting.')\n+ else:\n+ pycodestyle_command = ['pycodestyle'] + python_files\n+ status_code = subprocess.call(pycodestyle_command)\n+ sys.exit(status_code)\n \n \n if __name__ == '__main__':\n", "issue": "Current `tox` configuration ends up testing old code\nFor example:\n\n``` bash\n$ rm -r .tox/\n$ tox -e system-tests --notest\nGLOB sdist-make: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/setup.py\nsystem-tests create: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/system-tests\nsystem-tests inst: /home/tseaver/projects/agendaless/Google/src/google-cloud-python/.tox/dist/google-cloud-0.20.0.zip\nsystem-tests installed: -f file:///home/tseaver/.pip/wheels,enum34==1.1.6,future==0.15.2,futures==3.0.5,gapic-google-logging-v2==0.10.1,gapic-google-pubsub-v1==0.10.1,google-cloud==0.20.0,google-cloud-bigquery==0.20.0,google-cloud-bigtable==0.20.0,google-cloud-core==0.20.0,google-cloud-datastore==0.20.0,google-cloud-dns==0.20.0,google-cloud-error-reporting==0.20.0,google-cloud-happybase==0.20.0,google-cloud-language==0.20.0,google-cloud-logging==0.20.0,google-cloud-monitoring==0.20.0,google-cloud-pubsub==0.20.0,google-cloud-resource-manager==0.20.0,google-cloud-storage==0.20.0,google-cloud-translate==0.20.0,google-cloud-vision==0.20.0,google-gax==0.14.1,googleapis-common-protos==1.3.5,grpc-google-iam-v1==0.10.1,grpc-google-logging-v2==0.10.1,grpc-google-pubsub-v1==0.10.1,grpcio==1.0.0,httplib2==0.9.2,oauth2client==3.0.0,ply==3.8,protobuf==3.1.0.post1,pyasn1==0.1.9,pyasn1-modules==0.0.8,rsa==3.4.2,six==1.10.0\n___________________________________ summary ____________________________________\n system-tests: skipped tests\n congratulations :)\n$ diff -ru .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/ storage/google/cloud/storage/ --exclude=\"*.pyc\"\ndiff -ru '--exclude=*.pyc' .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py storage/google/cloud/storage/blob.py\n--- .tox/system-tests/lib/python2.7/site-packages/google/cloud/storage/blob.py 2016-10-05 18:15:48.724796000 -0400\n+++ storage/google/cloud/storage/blob.py 2016-10-05 18:02:55.872830411 -0400\n@@ -655,6 +655,32 @@\n self.acl.all().grant_read()\n self.acl.save(client=client)\n\n+ def compose(self, sources, client=None):\n+ \"\"\"Concatenate source blobs into this one.\n+\n+ :type sources: list of :class:`Blob`\n+ :param sources: blobs whose contents will be composed into this blob.\n+\n+ :type client: :class:`~google.cloud.storage.client.Client` or\n+ ``NoneType``\n+ :param client: Optional. The client to use. 
If not passed, falls back\n+ to the ``client`` stored on the blob's bucket.\n+\n+ :raises: :exc:`ValueError` if this blob does not have its\n+ :attr:`content_type` set.\n+ \"\"\"\n+ if self.content_type is None:\n+ raise ValueError(\"Destination 'content_type' not set.\")\n+ client = self._require_client(client)\n+ request = {\n+ 'sourceObjects': [{'name': source.name} for source in sources],\n+ 'destination': self._properties.copy(),\n+ }\n+ api_response = client.connection.api_request(\n+ method='POST', path=self.path + '/compose', data=request,\n+ _target_object=self)\n+ self._set_properties(api_response)\n+\n cache_control = _scalar_property('cacheControl')\n \"\"\"HTTP 'Cache-Control' header for this object.\n```\n\nSomehow, the tarball / wheel is being cached.\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Custom script to run pycodestyle on google-cloud codebase.\n\nThis runs pycodestyle as a script via subprocess but only runs it on the\n.py files that are checked in to the repository.\n\"\"\"\n\n\nimport os\nimport subprocess\nimport sys\n\nfrom script_utils import get_affected_files\n\n\ndef main():\n \"\"\"Run pycodestyle on all Python files in the repository.\"\"\"\n git_root = subprocess.check_output(\n ['git', 'rev-parse', '--show-toplevel']).strip()\n os.chdir(git_root)\n candidates, _ = get_affected_files()\n python_files = [\n candidate for candidate in candidates if candidate.endswith('.py')]\n\n pycodestyle_command = ['pycodestyle'] + python_files\n status_code = subprocess.call(pycodestyle_command)\n sys.exit(status_code)\n\n\nif __name__ == '__main__':\n main()\n", "path": "scripts/pycodestyle_on_repo.py"}]} | 1,981 | 211 |
gh_patches_debug_8101 | rasdani/github-patches | git_diff | scrapy__scrapy-1983 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
empty WARNING message in scrapy.core.downloader.tls (1.1.0rc4/master)
Sometimes I'm getting empty warnings now, on 1.1.0rc4 and master branch.
(at least on rc3 as well)
```
2016-05-07 00:33:46 [scrapy.core.downloader.tls] WARNING:
2016-05-07 00:33:47 [scrapy.core.downloader.tls] WARNING:
2016-05-07 00:33:48 [scrapy.core.downloader.tls] WARNING:
```
It happens in a broad linkcheck crawl, so I couldn't pinpoint which URLs might be responsible at this time. The only other observation so far is that it doesn't happen on a cache-replayed run (which might be obvious, as there is no TLS there).
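My guess (untested) is that these blank lines are just `logger.warning(e)` being handed a `VerificationError` whose `str()` is empty. A tiny standalone demo of that effect, with a stand-in exception class:

```python
import logging

logging.basicConfig(format='%(name)s %(levelname)s: %(message)s')
log = logging.getLogger('scrapy.core.downloader.tls')


class FakeVerificationError(Exception):
    """Stand-in for twisted's VerificationError, raised here with no arguments."""


err = FakeVerificationError()
log.warning(err)  # message is str(err) == '' -> the log line looks empty
log.warning('certificate verification failed: %r', err)  # context makes it visible
```

If that is the cause, including some context (for example the hostname being verified) in the warning would at least make these messages actionable.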
</issue>
<code>
[start of scrapy/core/downloader/tls.py]
1 import logging
2 from OpenSSL import SSL
3
4
5 logger = logging.getLogger(__name__)
6
7 METHOD_SSLv3 = 'SSLv3'
8 METHOD_TLS = 'TLS'
9 METHOD_TLSv10 = 'TLSv1.0'
10 METHOD_TLSv11 = 'TLSv1.1'
11 METHOD_TLSv12 = 'TLSv1.2'
12
13 openssl_methods = {
14 METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)
15 METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)
16 METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only
17 METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only
18 METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only
19 }
20
21 # ClientTLSOptions requires a recent-enough version of Twisted
22 try:
23
24 # taken from twisted/twisted/internet/_sslverify.py
25 try:
26 from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START
27 except ImportError:
28 SSL_CB_HANDSHAKE_START = 0x10
29 SSL_CB_HANDSHAKE_DONE = 0x20
30
31 from twisted.internet._sslverify import (ClientTLSOptions,
32 _maybeSetHostNameIndication,
33 verifyHostname,
34 VerificationError)
35
36 class ScrapyClientTLSOptions(ClientTLSOptions):
37 # same as Twisted's ClientTLSOptions,
38 # except that VerificationError is caught
39 # and doesn't close the connection
40 def _identityVerifyingInfoCallback(self, connection, where, ret):
41 if where & SSL_CB_HANDSHAKE_START:
42 _maybeSetHostNameIndication(connection, self._hostnameBytes)
43 elif where & SSL_CB_HANDSHAKE_DONE:
44 try:
45 verifyHostname(connection, self._hostnameASCII)
46 except VerificationError as e:
47 logger.warning(e)
48
49 except ImportError:
50 # ImportError should not matter for older Twisted versions
51 # as the above is not used in the fallback ScrapyClientContextFactory
52 pass
53
[end of scrapy/core/downloader/tls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py
--- a/scrapy/core/downloader/tls.py
+++ b/scrapy/core/downloader/tls.py
@@ -44,7 +44,9 @@
try:
verifyHostname(connection, self._hostnameASCII)
except VerificationError as e:
- logger.warning(e)
+ logger.warning(
+ 'Remote certificate is not valid for hostname "{}"; {}'.format(
+ self._hostnameASCII, e))
except ImportError:
# ImportError should not matter for older Twisted versions
| {"golden_diff": "diff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py\n--- a/scrapy/core/downloader/tls.py\n+++ b/scrapy/core/downloader/tls.py\n@@ -44,7 +44,9 @@\n try:\n verifyHostname(connection, self._hostnameASCII)\n except VerificationError as e:\n- logger.warning(e)\n+ logger.warning(\n+ 'Remote certificate is not valid for hostname \"{}\"; {}'.format(\n+ self._hostnameASCII, e))\n \n except ImportError:\n # ImportError should not matter for older Twisted versions\n", "issue": "empty WARNING message in scrapy.core.downloader.tls (1.1.0rc4/master)\nSometimes I'm getting empty warnings now, on 1.1.0rc4 and master branch.\n(at least on rc3 as well)\n\n```\n2016-05-07 00:33:46 [scrapy.core.downloader.tls] WARNING: \n2016-05-07 00:33:47 [scrapy.core.downloader.tls] WARNING: \n2016-05-07 00:33:48 [scrapy.core.downloader.tls] WARNING: \n```\n\nIt happens in a broad linkcheck crawl; so I couldn't pinpoint what URLs might be responsible for that, at this time. The only other observation so far is, that it doesn't happen on a cache-replayed run (which might be obvious, as there is no TLS there).\n\n", "before_files": [{"content": "import logging\nfrom OpenSSL import SSL\n\n\nlogger = logging.getLogger(__name__)\n\nMETHOD_SSLv3 = 'SSLv3'\nMETHOD_TLS = 'TLS'\nMETHOD_TLSv10 = 'TLSv1.0'\nMETHOD_TLSv11 = 'TLSv1.1'\nMETHOD_TLSv12 = 'TLSv1.2'\n\nopenssl_methods = {\n METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)\n METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n}\n\n# ClientTLSOptions requires a recent-enough version of Twisted\ntry:\n\n # taken from twisted/twisted/internet/_sslverify.py\n try:\n from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START\n except ImportError:\n SSL_CB_HANDSHAKE_START = 0x10\n SSL_CB_HANDSHAKE_DONE = 0x20\n\n from twisted.internet._sslverify import (ClientTLSOptions,\n _maybeSetHostNameIndication,\n verifyHostname,\n VerificationError)\n\n class ScrapyClientTLSOptions(ClientTLSOptions):\n # same as Twisted's ClientTLSOptions,\n # except that VerificationError is caught\n # and doesn't close the connection\n def _identityVerifyingInfoCallback(self, connection, where, ret):\n if where & SSL_CB_HANDSHAKE_START:\n _maybeSetHostNameIndication(connection, self._hostnameBytes)\n elif where & SSL_CB_HANDSHAKE_DONE:\n try:\n verifyHostname(connection, self._hostnameASCII)\n except VerificationError as e:\n logger.warning(e)\n\nexcept ImportError:\n # ImportError should not matter for older Twisted versions\n # as the above is not used in the fallback ScrapyClientContextFactory\n pass\n", "path": "scrapy/core/downloader/tls.py"}]} | 1,320 | 128 |
gh_patches_debug_7730 | rasdani/github-patches | git_diff | freedomofpress__securedrop-3737 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SecureDrop backups from previous versions don't work if database migration has occurred
## Description
The backup restore script (https://github.com/freedomofpress/securedrop/blob/develop/install_files/ansible-base/roles/restore/files/restore.py) does not apply database migrations, and as such breaks the application upon restore. A manual workaround that appears to have no side effects is to run `sudo dpkg-reconfigure securedrop-app-code` on the app server.
## Steps to Reproduce
1. Install SecureDrop 0.8.0
2. `securedrop-admin backup`
3. Upgrade to 0.9.0 (or higher)
4. `securedrop-admin restore`
5. Observe source and journalist interface return error 500
## Expected Behavior
The application should be operational.
## Actual Behavior
The source and journalist interfaces return error 500s.
## Comments
Running `sudo dpkg-reconfigure securedrop-app-code` calls the postinst script which will apply migration. Based on my testing, this seems to work reliably.
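
A minimal sketch of wiring that workaround into the restore flow itself — the helper name is mine, and it assumes the restore script keeps running as root with `dpkg-reconfigure` on the PATH (both already true for `restore.py`):

```python
import subprocess


def apply_database_migrations():
    # Re-run the securedrop-app-code postinst script, which applies any
    # pending database migrations to the freshly restored data.
    subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])
```

Calling this after the services are restarted mirrors the manual workaround described above.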
</issue>
<code>
[start of install_files/ansible-base/roles/restore/files/restore.py]
1 #!/usr/bin/python2.7
2 """
3 This script and backup archive should be copied to the App server and run by
4 the Ansible playbook. When run (as root), it restores the contents of the 0.3
5 backup file to the machine it's run on.
6
7 python restore.py sd-backup-TIMESTAMP.tar.gz
8 """
9
10 import os
11 import subprocess
12 import sys
13 import tarfile
14
15
16 def verify_args():
17 usage = """
18 Usage: restore.py <backup file>
19
20 <backup file> Path to a SecureDrop 0.3 backup created by backup.py"
21 """
22 if len(sys.argv) != 2:
23 print(usage)
24 sys.exit(1)
25
26 if not os.path.exists(sys.argv[1]):
27 print("<backup file> '{}' not found".format(sys.argv[1]))
28 sys.exit(1)
29
30 if os.geteuid() != 0:
31 print("This program must be run as root!")
32 sys.exit(1)
33
34
35 def main():
36 verify_args()
37
38 with tarfile.open(sys.argv[1], 'r:*') as backup:
39 # This assumes that both the old installation (source of the backup)
40 # and the new installation (destination of the restore) used the
41 # default paths for various locations.
42 backup.extractall(path='/')
43
44 # Reload Tor and the web server so they pick up the new configuration
45 # If the process exits with a non-zero return code, raises an exception.
46 subprocess.check_call(['service', 'apache2', 'restart'])
47 subprocess.check_call(['service', 'tor', 'reload'])
48
49
50 if __name__ == "__main__":
51 main()
52
[end of install_files/ansible-base/roles/restore/files/restore.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/install_files/ansible-base/roles/restore/files/restore.py b/install_files/ansible-base/roles/restore/files/restore.py
--- a/install_files/ansible-base/roles/restore/files/restore.py
+++ b/install_files/ansible-base/roles/restore/files/restore.py
@@ -45,6 +45,8 @@
# If the process exits with a non-zero return code, raises an exception.
subprocess.check_call(['service', 'apache2', 'restart'])
subprocess.check_call(['service', 'tor', 'reload'])
+ # Apply database migrations (if backed-up version < version to restore)
+ subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])
if __name__ == "__main__":
| {"golden_diff": "diff --git a/install_files/ansible-base/roles/restore/files/restore.py b/install_files/ansible-base/roles/restore/files/restore.py\n--- a/install_files/ansible-base/roles/restore/files/restore.py\n+++ b/install_files/ansible-base/roles/restore/files/restore.py\n@@ -45,6 +45,8 @@\n # If the process exits with a non-zero return code, raises an exception.\n subprocess.check_call(['service', 'apache2', 'restart'])\n subprocess.check_call(['service', 'tor', 'reload'])\n+ # Apply database migrations (if backed-up version < version to restore)\n+ subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])\n \n \n if __name__ == \"__main__\":\n", "issue": "SecureDrop backups from previous versions don't work if database migration has occurred\n## Description\r\n\r\nBackup restore (https://github.com/freedomofpress/securedrop/blob/develop/install_files/ansible-base/roles/restore/files/restore.py) script does not apply database migrations, and as such breaks the application upon backup restore. Manual workaround that appears to have no side-effects is to run `sudo dpkg-reconfigure securedrop-app-code` on the app server.\r\n\r\n## Steps to Reproduce\r\n\r\n1. Install SecureDrop 0.8.0\r\n2. `securedrop-admin backup`\r\n3. Upgrade to 0.9.0 (or higher)\r\n4. `securedrop-admin restore`\r\n5. Observe source and journalist interface return error 500\r\n\r\n## Expected Behavior\r\n\r\nThe application should be operational.\r\n\r\n## Actual Behavior\r\n\r\nThe source and journalist interfaces return error 500s.\r\n\r\n## Comments\r\n\r\nRunning `sudo dpkg-reconfigure securedrop-app-code` calls the postinst script which will apply migration. Based on my testing, this seems to work reliably.\n", "before_files": [{"content": "#!/usr/bin/python2.7\n\"\"\"\nThis script and backup archive should be copied to the App server and run by\nthe Ansible playbook. When run (as root), it restores the contents of the 0.3\nbackup file to the machine it's run on.\n\npython restore.py sd-backup-TIMESTAMP.tar.gz\n\"\"\"\n\nimport os\nimport subprocess\nimport sys\nimport tarfile\n\n\ndef verify_args():\n usage = \"\"\"\nUsage: restore.py <backup file>\n\n <backup file> Path to a SecureDrop 0.3 backup created by backup.py\"\n \"\"\"\n if len(sys.argv) != 2:\n print(usage)\n sys.exit(1)\n\n if not os.path.exists(sys.argv[1]):\n print(\"<backup file> '{}' not found\".format(sys.argv[1]))\n sys.exit(1)\n\n if os.geteuid() != 0:\n print(\"This program must be run as root!\")\n sys.exit(1)\n\n\ndef main():\n verify_args()\n\n with tarfile.open(sys.argv[1], 'r:*') as backup:\n # This assumes that both the old installation (source of the backup)\n # and the new installation (destination of the restore) used the\n # default paths for various locations.\n backup.extractall(path='/')\n\n # Reload Tor and the web server so they pick up the new configuration\n # If the process exits with a non-zero return code, raises an exception.\n subprocess.check_call(['service', 'apache2', 'restart'])\n subprocess.check_call(['service', 'tor', 'reload'])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install_files/ansible-base/roles/restore/files/restore.py"}]} | 1,226 | 163 |
gh_patches_debug_5171 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2581 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing type annotation in `strawberry.fastapi.BaseContext` causes mypy to trip
## Describe the Bug
I built a custom context based on [the guide in the docs](https://strawberry.rocks/docs/guides/authentication):
```python
from strawberry.fastapi import BaseContext
class CustomContext(BaseContext):
@property
def user(self) -> User:
...
async def get_context() -> CustomContext:
return CustomContext()
```
With that I receive the following mypy error:
```shell
error: Call to untyped function "CustomContext" in typed context [no-untyped-call]
```
For now, I added the following workaround to my code:
```python
class CustomContext(BaseContext):
if typing.TYPE_CHECKING:
def __init__(self) -> None:
pass
...
```
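
For reference, the change that makes this workaround unnecessary is simply giving the base class constructor a return annotation, so mypy treats calls to subclasses as typed. This is a sketch restating the library code with that one change (it matches the diff shown further down):

```python
from typing import Any, Optional, Union

from starlette.background import BackgroundTasks
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket


class BaseContext:
    connection_params: Optional[Any] = None

    def __init__(self) -> None:  # the added "-> None" silences no-untyped-call
        self.request: Optional[Union[Request, WebSocket]] = None
        self.background_tasks: Optional[BackgroundTasks] = None
        self.response: Optional[Response] = None
```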
## System Information
- Operating system: macOS Monterey
- Strawberry version (if applicable): `0.158.1`
- FastAPI version (if applicable): `0.92.0`
- mypy version (if applicable): `0.991` (also tested with `1.0.1`)
## Additional Context
I'm happy to provide a PR to address the issue myself.
</issue>
<code>
[start of strawberry/fastapi/context.py]
1 from typing import Any, Dict, Optional, Union
2
3 from starlette.background import BackgroundTasks
4 from starlette.requests import Request
5 from starlette.responses import Response
6 from starlette.websockets import WebSocket
7
8 CustomContext = Union["BaseContext", Dict[str, Any]]
9 MergedContext = Union[
10 "BaseContext", Dict[str, Union[Any, BackgroundTasks, Request, Response, WebSocket]]
11 ]
12
13
14 class BaseContext:
15 connection_params: Optional[Any] = None
16
17 def __init__(self):
18 self.request: Optional[Union[Request, WebSocket]] = None
19 self.background_tasks: Optional[BackgroundTasks] = None
20 self.response: Optional[Response] = None
21
[end of strawberry/fastapi/context.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/fastapi/context.py b/strawberry/fastapi/context.py
--- a/strawberry/fastapi/context.py
+++ b/strawberry/fastapi/context.py
@@ -14,7 +14,7 @@
class BaseContext:
connection_params: Optional[Any] = None
- def __init__(self):
+ def __init__(self) -> None:
self.request: Optional[Union[Request, WebSocket]] = None
self.background_tasks: Optional[BackgroundTasks] = None
self.response: Optional[Response] = None
| {"golden_diff": "diff --git a/strawberry/fastapi/context.py b/strawberry/fastapi/context.py\n--- a/strawberry/fastapi/context.py\n+++ b/strawberry/fastapi/context.py\n@@ -14,7 +14,7 @@\n class BaseContext:\n connection_params: Optional[Any] = None\n \n- def __init__(self):\n+ def __init__(self) -> None:\n self.request: Optional[Union[Request, WebSocket]] = None\n self.background_tasks: Optional[BackgroundTasks] = None\n self.response: Optional[Response] = None\n", "issue": "Missing type annotation in `strawberry.fastapi.BaseContext` causes mypy to trip\n## Describe the Bug\r\n\r\nI built a custom context based on [the guide in the docs](https://strawberry.rocks/docs/guides/authentication):\r\n\r\n```python\r\nfrom strawberry.fastapi import BaseContext\r\n\r\nclass CustomContext(BaseContext):\r\n @property\r\n def user(self) -> User:\r\n ...\r\n\r\nasync def get_context() -> CustomContext:\r\n return CustomContext()\r\n```\r\n\r\nWith that I receive the following mypy error:\r\n```shell\r\n error: Call to untyped function \"CustomContext\" in typed context [no-untyped-call]\r\n```\r\n\r\nFor now, I added the following workaround to my code:\r\n```python\r\nclass CustomContext(BaseContext):\r\n if typing.TYPE_CHECKING:\r\n def __init__(self) -> None:\r\n pass\r\n \r\n ...\r\n```\r\n\r\n## System Information\r\n\r\n - Operating system: macOS Monterey\r\n - Strawberry version (if applicable): `0.158.1`\r\n - FastAPI version (if applicable): `0.92.0`\r\n - mypy version (if applicable): `0.991` (also tested with `1.0.1`)\r\n\r\n## Additional Context\r\n\r\nI'm happy to provide a PR to address the issue myself.\r\n\n", "before_files": [{"content": "from typing import Any, Dict, Optional, Union\n\nfrom starlette.background import BackgroundTasks\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.websockets import WebSocket\n\nCustomContext = Union[\"BaseContext\", Dict[str, Any]]\nMergedContext = Union[\n \"BaseContext\", Dict[str, Union[Any, BackgroundTasks, Request, Response, WebSocket]]\n]\n\n\nclass BaseContext:\n connection_params: Optional[Any] = None\n\n def __init__(self):\n self.request: Optional[Union[Request, WebSocket]] = None\n self.background_tasks: Optional[BackgroundTasks] = None\n self.response: Optional[Response] = None\n", "path": "strawberry/fastapi/context.py"}]} | 982 | 129 |
gh_patches_debug_19558 | rasdani/github-patches | git_diff | open-mmlab__mmcv-97 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mmcv error
My environment is macOS Mojave 10.14.4, Anaconda 4.4.0, Python 3.6.1.
I directly use "pip install mmcv" and got:
"Running setup.py clean for mmcv
Failed to build mmcv
Installing collected packages: mmcv
Running setup.py install for mmcv ... error" and :
"In file included from ./mmcv/video/optflow_warp/flow_warp.cpp:1:
./mmcv/video/optflow_warp/flow_warp.hpp:3:10: fatal error: 'iostream' file not found
#include <iostream>"
Anybody help? Thank you very much.
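
The missing `<iostream>` header on macOS usually means clang is compiling the C++ extension without being told which standard library to link. A hedged sketch of the corresponding `setup.py` adjustment — the platform check and flag values mirror the fix in the diff below; only the standalone framing of the snippet is mine:

```python
import platform

import numpy
from setuptools import Extension

# On macOS, clang needs an explicit C++ standard library selection.
if platform.system() == 'Darwin':
    extra_compile_args = ['-stdlib=libc++']
    extra_link_args = ['-stdlib=libc++']
else:
    extra_compile_args = []
    extra_link_args = []

# Drop-in replacement for the Extension entry in EXT_MODULES.
ext_module = Extension(
    name='mmcv._ext',
    sources=[
        './mmcv/video/optflow_warp/flow_warp.cpp',
        './mmcv/video/optflow_warp/flow_warp_module.pyx',
    ],
    include_dirs=[numpy.get_include()],
    language='c++',
    extra_compile_args=extra_compile_args,
    extra_link_args=extra_link_args,
)
```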
</issue>
<code>
[start of setup.py]
1 import sys
2 from io import open # for Python 2 (identical to builtin in Python 3)
3
4 from setuptools import Extension, find_packages, setup
5
6 import numpy
7 from Cython.Distutils import build_ext
8
9 install_requires = [
10 'numpy>=1.11.1', 'pyyaml', 'six', 'addict', 'requests', 'opencv-python',
11 'Cython'
12 ]
13 if sys.version_info < (3, 3):
14 install_requires.append('backports.shutil_get_terminal_size')
15 if sys.version_info < (3, 4):
16 install_requires.extend(['enum34', 'pathlib'])
17
18
19 def readme():
20 with open('README.rst', encoding='utf-8') as f:
21 content = f.read()
22 return content
23
24
25 def get_version():
26 version_file = 'mmcv/version.py'
27 with open(version_file, 'r', encoding='utf-8') as f:
28 exec(compile(f.read(), version_file, 'exec'))
29 return locals()['__version__']
30
31
32 EXT_MODULES = [
33 Extension(
34 name='mmcv._ext',
35 sources=[
36 './mmcv/video/optflow_warp/flow_warp.cpp',
37 './mmcv/video/optflow_warp/flow_warp_module.pyx'
38 ],
39 include_dirs=[numpy.get_include()],
40 language="c++",
41 ),
42 ]
43
44 setup(
45 name='mmcv',
46 version=get_version(),
47 description='Open MMLab Computer Vision Foundation',
48 long_description=readme(),
49 keywords='computer vision',
50 packages=find_packages(),
51 classifiers=[
52 'Development Status :: 4 - Beta',
53 'License :: OSI Approved :: Apache Software License',
54 'Operating System :: OS Independent',
55 'Programming Language :: Python :: 2',
56 'Programming Language :: Python :: 2.7',
57 'Programming Language :: Python :: 3',
58 'Programming Language :: Python :: 3.4',
59 'Programming Language :: Python :: 3.5',
60 'Programming Language :: Python :: 3.6',
61 'Programming Language :: Python :: 3.7',
62 'Topic :: Utilities',
63 ],
64 url='https://github.com/open-mmlab/mmcv',
65 author='Kai Chen',
66 author_email='[email protected]',
67 setup_requires=['pytest-runner'],
68 tests_require=['pytest'],
69 install_requires=install_requires,
70 ext_modules=EXT_MODULES,
71 cmdclass={'build_ext': build_ext},
72 zip_safe=False)
73
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,6 @@
+import platform
import sys
from io import open # for Python 2 (identical to builtin in Python 3)
-
from setuptools import Extension, find_packages, setup
import numpy
@@ -29,6 +29,13 @@
return locals()['__version__']
+if platform.system() == 'Darwin':
+ extra_compile_args = ['-stdlib=libc++']
+ extra_link_args = ['-stdlib=libc++']
+else:
+ extra_compile_args = []
+ extra_link_args = []
+
EXT_MODULES = [
Extension(
name='mmcv._ext',
@@ -37,7 +44,9 @@
'./mmcv/video/optflow_warp/flow_warp_module.pyx'
],
include_dirs=[numpy.get_include()],
- language="c++",
+ language='c++',
+ extra_compile_args=extra_compile_args,
+ extra_link_args=extra_link_args,
),
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,6 @@\n+import platform\n import sys\n from io import open # for Python 2 (identical to builtin in Python 3)\n-\n from setuptools import Extension, find_packages, setup\n \n import numpy\n@@ -29,6 +29,13 @@\n return locals()['__version__']\n \n \n+if platform.system() == 'Darwin':\n+ extra_compile_args = ['-stdlib=libc++']\n+ extra_link_args = ['-stdlib=libc++']\n+else:\n+ extra_compile_args = []\n+ extra_link_args = []\n+\n EXT_MODULES = [\n Extension(\n name='mmcv._ext',\n@@ -37,7 +44,9 @@\n './mmcv/video/optflow_warp/flow_warp_module.pyx'\n ],\n include_dirs=[numpy.get_include()],\n- language=\"c++\",\n+ language='c++',\n+ extra_compile_args=extra_compile_args,\n+ extra_link_args=extra_link_args,\n ),\n ]\n", "issue": "mmcv error\nMy environment is macOS Mojave 10.14.4, Anaconda 4.4.0,Python 3.6.1.\r\n I directly use \"pip install mmcv and got:\r\n\"Running setup.py clean for mmcv\r\nFailed to build mmcv\r\nInstalling collected packages: mmcv\r\nRunning setup.py install for mmcv ... error\" and :\r\n\"In file included from ./mmcv/video/optflow_warp/flow_warp.cpp:1:\r\n./mmcv/video/optflow_warp/flow_warp.hpp:3:10: fatal error: 'iostream' file not found\r\n#include <iostream>\"\r\nAnybody help? Thank you very much.\n", "before_files": [{"content": "import sys\nfrom io import open # for Python 2 (identical to builtin in Python 3)\n\nfrom setuptools import Extension, find_packages, setup\n\nimport numpy\nfrom Cython.Distutils import build_ext\n\ninstall_requires = [\n 'numpy>=1.11.1', 'pyyaml', 'six', 'addict', 'requests', 'opencv-python',\n 'Cython'\n]\nif sys.version_info < (3, 3):\n install_requires.append('backports.shutil_get_terminal_size')\nif sys.version_info < (3, 4):\n install_requires.extend(['enum34', 'pathlib'])\n\n\ndef readme():\n with open('README.rst', encoding='utf-8') as f:\n content = f.read()\n return content\n\n\ndef get_version():\n version_file = 'mmcv/version.py'\n with open(version_file, 'r', encoding='utf-8') as f:\n exec(compile(f.read(), version_file, 'exec'))\n return locals()['__version__']\n\n\nEXT_MODULES = [\n Extension(\n name='mmcv._ext',\n sources=[\n './mmcv/video/optflow_warp/flow_warp.cpp',\n './mmcv/video/optflow_warp/flow_warp_module.pyx'\n ],\n include_dirs=[numpy.get_include()],\n language=\"c++\",\n ),\n]\n\nsetup(\n name='mmcv',\n version=get_version(),\n description='Open MMLab Computer Vision Foundation',\n long_description=readme(),\n keywords='computer vision',\n packages=find_packages(),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Utilities',\n ],\n url='https://github.com/open-mmlab/mmcv',\n author='Kai Chen',\n author_email='[email protected]',\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=install_requires,\n ext_modules=EXT_MODULES,\n cmdclass={'build_ext': build_ext},\n zip_safe=False)\n", "path": "setup.py"}]} | 1,341 | 237 |
gh_patches_debug_19740 | rasdani/github-patches | git_diff | tough-dev-school__education-backend-180 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom tags in Mailchimp
So that tags can be specified on a lead magnet and passed through to the Mailchimp audience.
</issue>
<code>
[start of src/magnets/creator.py]
1 from magnets.models import EmailLeadMagnetCampaign, LeadCampaignLogEntry
2 from users.creator import UserCreator
3 from users.models import User
4
5
6 class LeadCreator:
7 def __init__(self, campaign: EmailLeadMagnetCampaign, email: str, name: str = None):
8 self.data = {
9 'name': name,
10 'email': email,
11 }
12
13 self.campaign = campaign
14
15 def __call__(self):
16 self.user = self._create_user()
17 self._create_log_entry()
18
19 self.campaign.execute(self.user)
20
21 def _create_user(self) -> User:
22 return UserCreator(
23 name=self.data['name'],
24 email=self.data['email'],
25 subscribe=True,
26 )()
27
28 def _create_log_entry(self):
29 LeadCampaignLogEntry.objects.create(
30 user=self.user,
31 campaign=self.campaign,
32 )
33
[end of src/magnets/creator.py]
[start of src/shipping/shipments/course.py]
1 from typing import Optional
2
3 from app.tasks import invite_to_clickmeeting, invite_to_zoomus, send_mail, subscribe_to_mailchimp
4 from products.models import Course
5 from shipping import factory
6 from shipping.shipments.base import BaseShipment
7
8
9 @factory.register(Course)
10 class CourseShipment(BaseShipment):
11 @property
12 def course(self):
13 return self.stuff_to_ship
14
15 def ship(self):
16 self.invite_to_clickmeeting()
17 self.invite_to_zoomus()
18 self.subscribe_to_mailchimp()
19
20 self.send_welcome_letter()
21
22 def subscribe_to_mailchimp(self):
23 if self.course.mailchimp_list_id is not None:
24 subscribe_to_mailchimp.delay(
25 list_id=self.course.mailchimp_list_id,
26 user_id=self.user.pk,
27 tags=[self.course.slug],
28 )
29
30 def invite_to_clickmeeting(self):
31 if self.course.clickmeeting_room_url is not None:
32 invite_to_clickmeeting.delay(
33 room_url=self.course.clickmeeting_room_url,
34 email=self.user.email,
35 )
36
37 def invite_to_zoomus(self):
38 if self.course.zoomus_webinar_id is not None and len(self.course.zoomus_webinar_id):
39 invite_to_zoomus.delay(
40 webinar_id=self.course.zoomus_webinar_id,
41 user_id=self.user.id,
42 )
43
44 def send_welcome_letter(self):
45 if self.welcome_letter_template_id is not None:
46 send_mail.delay(
47 to=self.user.email,
48 template_id=self.welcome_letter_template_id,
49 ctx=self.get_template_context(),
50 disable_antispam=True,
51 )
52
53 def get_template_context(self) -> dict:
54 return {
55 'name': self.course.name,
56 'slug': self.course.slug,
57 'name_genitive': self.course.name_genitive,
58 **self.get_gift_template_context(),
59 }
60
61 @property
62 def welcome_letter_template_id(self) -> Optional[str]:
63 """Get special gift template letter id if order is a gift and it is present"""
64 template_id = self.course.welcome_letter_template_id
65
66 if self.order is not None and self.order.giver is not None: # this is a gift
67 template_id = self.course.gift_welcome_letter_template_id or self.course.welcome_letter_template_id
68
69 if template_id is None or not len(template_id): # fuck this null=True in CharFields
70 return None
71
72 return template_id
73
[end of src/shipping/shipments/course.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/magnets/creator.py b/src/magnets/creator.py
--- a/src/magnets/creator.py
+++ b/src/magnets/creator.py
@@ -23,6 +23,7 @@
name=self.data['name'],
email=self.data['email'],
subscribe=True,
+ tags=self.tags,
)()
def _create_log_entry(self):
@@ -30,3 +31,7 @@
user=self.user,
campaign=self.campaign,
)
+
+ @property
+ def tags(self):
+ return [f'{self.campaign.slug}-lead-magnet']
diff --git a/src/shipping/shipments/course.py b/src/shipping/shipments/course.py
--- a/src/shipping/shipments/course.py
+++ b/src/shipping/shipments/course.py
@@ -24,7 +24,7 @@
subscribe_to_mailchimp.delay(
list_id=self.course.mailchimp_list_id,
user_id=self.user.pk,
- tags=[self.course.slug],
+ tags=[self.course.slug, f'{self.course.slug}-purchased'],
)
def invite_to_clickmeeting(self):
| {"golden_diff": "diff --git a/src/magnets/creator.py b/src/magnets/creator.py\n--- a/src/magnets/creator.py\n+++ b/src/magnets/creator.py\n@@ -23,6 +23,7 @@\n name=self.data['name'],\n email=self.data['email'],\n subscribe=True,\n+ tags=self.tags,\n )()\n \n def _create_log_entry(self):\n@@ -30,3 +31,7 @@\n user=self.user,\n campaign=self.campaign,\n )\n+\n+ @property\n+ def tags(self):\n+ return [f'{self.campaign.slug}-lead-magnet']\ndiff --git a/src/shipping/shipments/course.py b/src/shipping/shipments/course.py\n--- a/src/shipping/shipments/course.py\n+++ b/src/shipping/shipments/course.py\n@@ -24,7 +24,7 @@\n subscribe_to_mailchimp.delay(\n list_id=self.course.mailchimp_list_id,\n user_id=self.user.pk,\n- tags=[self.course.slug],\n+ tags=[self.course.slug, f'{self.course.slug}-purchased'],\n )\n \n def invite_to_clickmeeting(self):\n", "issue": "\u041a\u0430\u0441\u0442\u043e\u043c\u043d\u044b\u0435 \u0442\u0435\u0433\u0438 \u0432 \u043c\u0435\u0439\u043b\u0447\u0438\u043c\u043f\u0435\n\u0427\u0442\u043e\u0431\u044b \u043c\u043e\u0436\u043d\u043e \u0431\u044b\u043b\u043e \u0432 \u043b\u0438\u0434-\u043c\u0430\u0433\u043d\u0438\u0442\u0435 \u0443\u043a\u0430\u0437\u0430\u0442\u044c \u0442\u0435\u0433\u0438, \u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u043f\u0440\u043e\u0431\u0440\u0430\u0441\u044b\u0432\u0430\u044e\u0442\u0441\u044f \u0432 \u0430\u0443\u0434\u0438\u0442\u043e\u0440\u0438\u044e \u043c\u0435\u0439\u043b\u0447\u0438\u043c\u043f\u0430\n", "before_files": [{"content": "from magnets.models import EmailLeadMagnetCampaign, LeadCampaignLogEntry\nfrom users.creator import UserCreator\nfrom users.models import User\n\n\nclass LeadCreator:\n def __init__(self, campaign: EmailLeadMagnetCampaign, email: str, name: str = None):\n self.data = {\n 'name': name,\n 'email': email,\n }\n\n self.campaign = campaign\n\n def __call__(self):\n self.user = self._create_user()\n self._create_log_entry()\n\n self.campaign.execute(self.user)\n\n def _create_user(self) -> User:\n return UserCreator(\n name=self.data['name'],\n email=self.data['email'],\n subscribe=True,\n )()\n\n def _create_log_entry(self):\n LeadCampaignLogEntry.objects.create(\n user=self.user,\n campaign=self.campaign,\n )\n", "path": "src/magnets/creator.py"}, {"content": "from typing import Optional\n\nfrom app.tasks import invite_to_clickmeeting, invite_to_zoomus, send_mail, subscribe_to_mailchimp\nfrom products.models import Course\nfrom shipping import factory\nfrom shipping.shipments.base import BaseShipment\n\n\[email protected](Course)\nclass CourseShipment(BaseShipment):\n @property\n def course(self):\n return self.stuff_to_ship\n\n def ship(self):\n self.invite_to_clickmeeting()\n self.invite_to_zoomus()\n self.subscribe_to_mailchimp()\n\n self.send_welcome_letter()\n\n def subscribe_to_mailchimp(self):\n if self.course.mailchimp_list_id is not None:\n subscribe_to_mailchimp.delay(\n list_id=self.course.mailchimp_list_id,\n user_id=self.user.pk,\n tags=[self.course.slug],\n )\n\n def invite_to_clickmeeting(self):\n if self.course.clickmeeting_room_url is not None:\n invite_to_clickmeeting.delay(\n room_url=self.course.clickmeeting_room_url,\n email=self.user.email,\n )\n\n def invite_to_zoomus(self):\n if self.course.zoomus_webinar_id is not None and len(self.course.zoomus_webinar_id):\n invite_to_zoomus.delay(\n webinar_id=self.course.zoomus_webinar_id,\n user_id=self.user.id,\n )\n\n def send_welcome_letter(self):\n if self.welcome_letter_template_id is not None:\n send_mail.delay(\n to=self.user.email,\n 
template_id=self.welcome_letter_template_id,\n ctx=self.get_template_context(),\n disable_antispam=True,\n )\n\n def get_template_context(self) -> dict:\n return {\n 'name': self.course.name,\n 'slug': self.course.slug,\n 'name_genitive': self.course.name_genitive,\n **self.get_gift_template_context(),\n }\n\n @property\n def welcome_letter_template_id(self) -> Optional[str]:\n \"\"\"Get special gift template letter id if order is a gift and it is present\"\"\"\n template_id = self.course.welcome_letter_template_id\n\n if self.order is not None and self.order.giver is not None: # this is a gift\n template_id = self.course.gift_welcome_letter_template_id or self.course.welcome_letter_template_id\n\n if template_id is None or not len(template_id): # fuck this null=True in CharFields\n return None\n\n return template_id\n", "path": "src/shipping/shipments/course.py"}]} | 1,506 | 257 |
gh_patches_debug_4029 | rasdani/github-patches | git_diff | saleor__saleor-723 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add checkout steps navigation
Now there's no explicit navigation. Using the browser back button can be dangerous in some cases, and it's not common behavior in ecommerce.
</issue>
<code>
[start of saleor/order/views.py]
1 import logging
2
3 from django.conf import settings
4 from django.contrib import messages, auth
5 from django.db import transaction
6 from django.http import Http404, HttpResponseForbidden
7 from django.shortcuts import get_object_or_404, redirect
8 from django.utils.translation import ugettext as _
9 from django.template.response import TemplateResponse
10 from payments import RedirectNeeded
11
12 from .forms import PaymentDeleteForm, PaymentMethodsForm, PasswordForm
13 from .models import Order, Payment
14 from ..core.utils import get_client_ip
15 from .utils import check_order_status
16
17 logger = logging.getLogger(__name__)
18
19
20 def details(request, token):
21 orders = Order.objects.prefetch_related('groups__items')
22 order = get_object_or_404(orders, token=token)
23 groups = order.groups.all()
24 return TemplateResponse(request, 'order/details.html',
25 {'order': order, 'groups': groups})
26
27
28 def payment(request, token):
29 orders = Order.objects.prefetch_related('groups__items')
30 order = get_object_or_404(orders, token=token)
31 groups = order.groups.all()
32 payments = order.payments.all()
33 form_data = request.POST or None
34 try:
35 waiting_payment = order.payments.get(status='waiting')
36 except Payment.DoesNotExist:
37 waiting_payment = None
38 waiting_payment_form = None
39 else:
40 form_data = None
41 waiting_payment_form = PaymentDeleteForm(
42 None, order=order, initial={'payment_id': waiting_payment.id})
43 if order.is_fully_paid():
44 form_data = None
45 payment_form = None
46 if not order.is_pre_authorized():
47 payment_form = PaymentMethodsForm(form_data)
48 # FIXME: redirect if there is only one payment method
49 if payment_form.is_valid():
50 payment_method = payment_form.cleaned_data['method']
51 return redirect('order:payment', token=order.token,
52 variant=payment_method)
53 return TemplateResponse(request, 'order/payment.html',
54 {'order': order, 'groups': groups,
55 'payment_form': payment_form,
56 'waiting_payment': waiting_payment,
57 'waiting_payment_form': waiting_payment_form,
58 'payments': payments})
59
60
61 @check_order_status
62 def start_payment(request, order, variant):
63 waiting_payments = order.payments.filter(status='waiting').exists()
64 if waiting_payments:
65 return redirect('order:payment', token=order.token)
66 billing = order.billing_address
67 total = order.get_total()
68 defaults = {'total': total.gross,
69 'tax': total.tax, 'currency': total.currency,
70 'delivery': order.get_delivery_total().gross,
71 'billing_first_name': billing.first_name,
72 'billing_last_name': billing.last_name,
73 'billing_address_1': billing.street_address_1,
74 'billing_address_2': billing.street_address_2,
75 'billing_city': billing.city,
76 'billing_postcode': billing.postal_code,
77 'billing_country_code': billing.country,
78 'billing_email': order.user_email,
79 'description': _('Order %(order_number)s') % {
80 'order_number': order},
81 'billing_country_area': billing.country_area,
82 'customer_ip_address': get_client_ip(request)}
83 variant_choices = settings.CHECKOUT_PAYMENT_CHOICES
84 if variant not in [code for code, dummy_name in variant_choices]:
85 raise Http404('%r is not a valid payment variant' % (variant,))
86 with transaction.atomic():
87 order.change_status('payment-pending')
88 payment, dummy_created = Payment.objects.get_or_create(
89 variant=variant, status='waiting', order=order, defaults=defaults)
90 try:
91 form = payment.get_form(data=request.POST or None)
92 except RedirectNeeded as redirect_to:
93 return redirect(str(redirect_to))
94 except Exception:
95 logger.exception('Error communicating with the payment gateway')
96 messages.error(
97 request,
98 _('Oops, it looks like we were unable to contact the selected'
99 ' payment service'))
100 payment.change_status('error')
101 return redirect('order:payment', token=order.token)
102 template = 'order/payment/%s.html' % variant
103 return TemplateResponse(request, [template, 'order/payment/default.html'],
104 {'form': form, 'payment': payment})
105
106
107 @check_order_status
108 def cancel_payment(request, order):
109 form = PaymentDeleteForm(request.POST or None, order=order)
110 if form.is_valid():
111 with transaction.atomic():
112 form.save()
113 return redirect('order:payment', token=order.token)
114 return HttpResponseForbidden()
115
116
117 def create_password(request, token):
118 if request.user.is_authenticated():
119 return redirect('order:details', token=token)
120 order = get_object_or_404(Order, token=token)
121 email = order.user_email
122 form_data = request.POST.copy()
123 if form_data:
124 form_data.update({'email': email})
125 form = PasswordForm(form_data or None)
126
127 if form.is_valid():
128 user = form.save(request)
129 order.user = user
130 order.save(update_fields=['user'])
131 password = form_data.get('password1')
132 auth_user = auth.authenticate(email=email, password=password)
133 if auth_user is not None:
134 auth.login(request, auth_user)
135 return redirect('order:details', token=token)
136 ctx = {'form': form, 'email': email}
137 return TemplateResponse(request, 'order/create_password.html', ctx)
138
[end of saleor/order/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/order/views.py b/saleor/order/views.py
--- a/saleor/order/views.py
+++ b/saleor/order/views.py
@@ -133,5 +133,5 @@
if auth_user is not None:
auth.login(request, auth_user)
return redirect('order:details', token=token)
- ctx = {'form': form, 'email': email}
+ ctx = {'form': form, 'email': email, 'order': order}
return TemplateResponse(request, 'order/create_password.html', ctx)
| {"golden_diff": "diff --git a/saleor/order/views.py b/saleor/order/views.py\n--- a/saleor/order/views.py\n+++ b/saleor/order/views.py\n@@ -133,5 +133,5 @@\n if auth_user is not None:\n auth.login(request, auth_user)\n return redirect('order:details', token=token)\n- ctx = {'form': form, 'email': email}\n+ ctx = {'form': form, 'email': email, 'order': order}\n return TemplateResponse(request, 'order/create_password.html', ctx)\n", "issue": "Add checkout steps navigation\nNow there's no explicit navigation. Using browser back button can be dangerous in some cases and it's not a common behavior in ecommerce\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages, auth\nfrom django.db import transaction\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.translation import ugettext as _\nfrom django.template.response import TemplateResponse\nfrom payments import RedirectNeeded\n\nfrom .forms import PaymentDeleteForm, PaymentMethodsForm, PasswordForm\nfrom .models import Order, Payment\nfrom ..core.utils import get_client_ip\nfrom .utils import check_order_status\n\nlogger = logging.getLogger(__name__)\n\n\ndef details(request, token):\n orders = Order.objects.prefetch_related('groups__items')\n order = get_object_or_404(orders, token=token)\n groups = order.groups.all()\n return TemplateResponse(request, 'order/details.html',\n {'order': order, 'groups': groups})\n\n\ndef payment(request, token):\n orders = Order.objects.prefetch_related('groups__items')\n order = get_object_or_404(orders, token=token)\n groups = order.groups.all()\n payments = order.payments.all()\n form_data = request.POST or None\n try:\n waiting_payment = order.payments.get(status='waiting')\n except Payment.DoesNotExist:\n waiting_payment = None\n waiting_payment_form = None\n else:\n form_data = None\n waiting_payment_form = PaymentDeleteForm(\n None, order=order, initial={'payment_id': waiting_payment.id})\n if order.is_fully_paid():\n form_data = None\n payment_form = None\n if not order.is_pre_authorized():\n payment_form = PaymentMethodsForm(form_data)\n # FIXME: redirect if there is only one payment method\n if payment_form.is_valid():\n payment_method = payment_form.cleaned_data['method']\n return redirect('order:payment', token=order.token,\n variant=payment_method)\n return TemplateResponse(request, 'order/payment.html',\n {'order': order, 'groups': groups,\n 'payment_form': payment_form,\n 'waiting_payment': waiting_payment,\n 'waiting_payment_form': waiting_payment_form,\n 'payments': payments})\n\n\n@check_order_status\ndef start_payment(request, order, variant):\n waiting_payments = order.payments.filter(status='waiting').exists()\n if waiting_payments:\n return redirect('order:payment', token=order.token)\n billing = order.billing_address\n total = order.get_total()\n defaults = {'total': total.gross,\n 'tax': total.tax, 'currency': total.currency,\n 'delivery': order.get_delivery_total().gross,\n 'billing_first_name': billing.first_name,\n 'billing_last_name': billing.last_name,\n 'billing_address_1': billing.street_address_1,\n 'billing_address_2': billing.street_address_2,\n 'billing_city': billing.city,\n 'billing_postcode': billing.postal_code,\n 'billing_country_code': billing.country,\n 'billing_email': order.user_email,\n 'description': _('Order %(order_number)s') % {\n 'order_number': order},\n 'billing_country_area': billing.country_area,\n 
'customer_ip_address': get_client_ip(request)}\n variant_choices = settings.CHECKOUT_PAYMENT_CHOICES\n if variant not in [code for code, dummy_name in variant_choices]:\n raise Http404('%r is not a valid payment variant' % (variant,))\n with transaction.atomic():\n order.change_status('payment-pending')\n payment, dummy_created = Payment.objects.get_or_create(\n variant=variant, status='waiting', order=order, defaults=defaults)\n try:\n form = payment.get_form(data=request.POST or None)\n except RedirectNeeded as redirect_to:\n return redirect(str(redirect_to))\n except Exception:\n logger.exception('Error communicating with the payment gateway')\n messages.error(\n request,\n _('Oops, it looks like we were unable to contact the selected'\n ' payment service'))\n payment.change_status('error')\n return redirect('order:payment', token=order.token)\n template = 'order/payment/%s.html' % variant\n return TemplateResponse(request, [template, 'order/payment/default.html'],\n {'form': form, 'payment': payment})\n\n\n@check_order_status\ndef cancel_payment(request, order):\n form = PaymentDeleteForm(request.POST or None, order=order)\n if form.is_valid():\n with transaction.atomic():\n form.save()\n return redirect('order:payment', token=order.token)\n return HttpResponseForbidden()\n\n\ndef create_password(request, token):\n if request.user.is_authenticated():\n return redirect('order:details', token=token)\n order = get_object_or_404(Order, token=token)\n email = order.user_email\n form_data = request.POST.copy()\n if form_data:\n form_data.update({'email': email})\n form = PasswordForm(form_data or None)\n\n if form.is_valid():\n user = form.save(request)\n order.user = user\n order.save(update_fields=['user'])\n password = form_data.get('password1')\n auth_user = auth.authenticate(email=email, password=password)\n if auth_user is not None:\n auth.login(request, auth_user)\n return redirect('order:details', token=token)\n ctx = {'form': form, 'email': email}\n return TemplateResponse(request, 'order/create_password.html', ctx)\n", "path": "saleor/order/views.py"}]} | 2,027 | 126 |
gh_patches_debug_15822 | rasdani/github-patches | git_diff | goauthentik__authentik-7315 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Login fails at GET /api/v3/flows/executor/default-authentication-flow/
**Describe the bug**
Authentication fails. I've tried my own auth flow, which includes a passwordless option. This log is from switching back to the default auth flow.
**To Reproduce**
Steps to reproduce the behavior:
1. Enter Username
2. Enter Password
3. Press Enter
4. See error
**Expected behavior**
This stage would normally be to select the authenticator to use. (Key or TOTP, in my case)
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Logs**
<details>
<summary>Stacktrace from authentik</summary>
```
Traceback (most recent call last):
File "/authentik/flows/views/executor.py", line 287, in get
stage_response = self.current_stage_view.dispatch(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/views/generic/base.py", line 143, in dispatch
return handler(request, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/stages/authenticator_validate/stage.py", line 222, in get
challenges = self.get_device_challenges()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/stages/authenticator_validate/stage.py", line 157, in get_device_challenges
user_devices = list(devices_for_user(self.get_pending_user()))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/stages/authenticator/__init__.py", line 93, in devices_for_user
yield from device_set
File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py", line 398, in __iter__
self._fetch_all()
File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py", line 1881, in _fetch_all
self._result_cache = list(self._iterable_class(self))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py", line 91, in __iter__
results = compiler.execute_sql(
^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/db/models/sql/compiler.py", line 1562, in execute_sql
cursor.execute(sql, params)
File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 67, in execute
return self._execute_with_wrappers(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 80, in _execute_with_wrappers
return executor(sql, params, many, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 84, in _execute
with self.db.wrap_database_errors:
File "/ak-root/venv/lib/python3.11/site-packages/django/db/utils.py", line 91, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py", line 89, in _execute
return self.cursor.execute(sql, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/django_prometheus/db/common.py", line 69, in execute
return super().execute(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/ak-root/venv/lib/python3.11/site-packages/psycopg/cursor.py", line 737, in execute
raise ex.with_traceback(None)
django.db.utils.ProgrammingError: relation "authentik_stages_authenticator_static_staticdevice" does not exist
LINE 1: ...tic_staticdevice"."throttling_failure_count" FROM "authentik...
^
```
</details>
**Version and Deployment (please complete the following information):**
- authentik version: gh-next as of 10/21/2023
- Deployment: docker-compose
**Additional context**
Add any other context about the problem here.
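
The `relation ... does not exist` error indicates the table-rename migration applied only partially. The fix in the diff below wraps the statements in one transaction so the renames land atomically; a minimal sketch of that idea (the `run_merge` wrapper and cursor handling are illustrative, and the SQL is the migration's own — sequence renames omitted for brevity — with the added BEGIN/COMMIT):

```python
SQL_STATEMENT = """
BEGIN TRANSACTION;
DELETE FROM django_migrations WHERE app = 'otp_static';
DELETE FROM django_migrations WHERE app = 'otp_totp';
ALTER TABLE otp_static_staticdevice RENAME TO authentik_stages_authenticator_static_staticdevice;
ALTER TABLE otp_static_statictoken RENAME TO authentik_stages_authenticator_static_statictoken;
ALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;
COMMIT;
"""


def run_merge(cur):
    # Either every rename applies or none do, so a login can never hit a
    # half-renamed schema like the one in the traceback above.
    cur.execute(SQL_STATEMENT)
```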
</issue>
<code>
[start of lifecycle/system_migrations/otp_merge.py]
1 # flake8: noqa
2 from lifecycle.migrate import BaseMigration
3
4 SQL_STATEMENT = """
5 DELETE FROM django_migrations WHERE app = 'otp_static';
6 DELETE FROM django_migrations WHERE app = 'otp_totp';
7 -- Rename tables (static)
8 ALTER TABLE otp_static_staticdevice RENAME TO authentik_stages_authenticator_static_staticdevice;
9 ALTER TABLE otp_static_statictoken RENAME TO authentik_stages_authenticator_static_statictoken;
10 ALTER SEQUENCE otp_static_statictoken_id_seq RENAME TO authentik_stages_authenticator_static_statictoken_id_seq;
11 ALTER SEQUENCE otp_static_staticdevice_id_seq RENAME TO authentik_stages_authenticator_static_staticdevice_id_seq;
12 -- Rename tables (totp)
13 ALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;
14 ALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;
15 """
16
17
18 class Migration(BaseMigration):
19 def needs_migration(self) -> bool:
20 self.cur.execute(
21 "select * from information_schema.tables WHERE table_name='otp_static_staticdevice'"
22 )
23 return bool(self.cur.rowcount)
24
25 def run(self):
26 self.cur.execute(SQL_STATEMENT)
27 self.fake_migration(
28 (
29 "authentik_stages_authenticator_static",
30 "0008_initial",
31 ),
32 (
33 "authentik_stages_authenticator_static",
34 "0009_throttling",
35 ),
36 (
37 "authentik_stages_authenticator_totp",
38 "0008_initial",
39 ),
40 (
41 "authentik_stages_authenticator_totp",
42 "0009_auto_20190420_0723",
43 ),
44 )
45
[end of lifecycle/system_migrations/otp_merge.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lifecycle/system_migrations/otp_merge.py b/lifecycle/system_migrations/otp_merge.py
--- a/lifecycle/system_migrations/otp_merge.py
+++ b/lifecycle/system_migrations/otp_merge.py
@@ -2,6 +2,7 @@
from lifecycle.migrate import BaseMigration
SQL_STATEMENT = """
+BEGIN TRANSACTION;
DELETE FROM django_migrations WHERE app = 'otp_static';
DELETE FROM django_migrations WHERE app = 'otp_totp';
-- Rename tables (static)
@@ -12,6 +13,7 @@
-- Rename tables (totp)
ALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;
ALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;
+COMMIT;
"""
| {"golden_diff": "diff --git a/lifecycle/system_migrations/otp_merge.py b/lifecycle/system_migrations/otp_merge.py\n--- a/lifecycle/system_migrations/otp_merge.py\n+++ b/lifecycle/system_migrations/otp_merge.py\n@@ -2,6 +2,7 @@\n from lifecycle.migrate import BaseMigration\n \n SQL_STATEMENT = \"\"\"\n+BEGIN TRANSACTION;\n DELETE FROM django_migrations WHERE app = 'otp_static';\n DELETE FROM django_migrations WHERE app = 'otp_totp';\n -- Rename tables (static)\n@@ -12,6 +13,7 @@\n -- Rename tables (totp)\n ALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;\n ALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;\n+COMMIT;\n \"\"\"\n", "issue": "Login fails at GET /api/v3/flows/executor/default-authentication-flow/ \n**Describe the bug**\r\nAuthentication fails. I've tried my own auth flow, which includes a passwordless option. This log is from switching back to the default auth flow.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Enter Username\r\n2. Enter Password\r\n3. Press Enter\r\n4. See error\r\n\r\n**Expected behavior**\r\nThis stage would normally be to select the authenticator to use. (Key or TOTP, in my case)\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Logs**\r\n<details>\r\n <summary>Stacktrace from authentik</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/authentik/flows/views/executor.py\", line 287, in get\r\n stage_response = self.current_stage_view.dispatch(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/views/generic/base.py\", line 143, in dispatch\r\n return handler(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator_validate/stage.py\", line 222, in get\r\n challenges = self.get_device_challenges()\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator_validate/stage.py\", line 157, in get_device_challenges\r\n user_devices = list(devices_for_user(self.get_pending_user()))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator/__init__.py\", line 93, in devices_for_user\r\n yield from device_set\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py\", line 398, in __iter__\r\n self._fetch_all()\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py\", line 1881, in _fetch_all\r\n self._result_cache = list(self._iterable_class(self))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/query.py\", line 91, in __iter__\r\n results = compiler.execute_sql(\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/models/sql/compiler.py\", line 1562, in execute_sql\r\n cursor.execute(sql, params)\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 67, in execute\r\n return self._execute_with_wrappers(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 80, in _execute_with_wrappers\r\n return executor(sql, params, many, context)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 84, in _execute\r\n with self.db.wrap_database_errors:\r\n File 
\"/ak-root/venv/lib/python3.11/site-packages/django/db/utils.py\", line 91, in __exit__\r\n raise dj_exc_value.with_traceback(traceback) from exc_value\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django/db/backends/utils.py\", line 89, in _execute\r\n return self.cursor.execute(sql, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/django_prometheus/db/common.py\", line 69, in execute\r\n return super().execute(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/ak-root/venv/lib/python3.11/site-packages/psycopg/cursor.py\", line 737, in execute\r\n raise ex.with_traceback(None)\r\ndjango.db.utils.ProgrammingError: relation \"authentik_stages_authenticator_static_staticdevice\" does not exist\r\nLINE 1: ...tic_staticdevice\".\"throttling_failure_count\" FROM \"authentik...\r\n ^\r\n```\r\n</details>\r\n\r\n\r\n**Version and Deployment (please complete the following information):**\r\n- authentik version: gh-next as of 10/21/2023\r\n- Deployment: docker-compose\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n \n", "before_files": [{"content": "# flake8: noqa\nfrom lifecycle.migrate import BaseMigration\n\nSQL_STATEMENT = \"\"\"\nDELETE FROM django_migrations WHERE app = 'otp_static';\nDELETE FROM django_migrations WHERE app = 'otp_totp';\n-- Rename tables (static)\nALTER TABLE otp_static_staticdevice RENAME TO authentik_stages_authenticator_static_staticdevice;\nALTER TABLE otp_static_statictoken RENAME TO authentik_stages_authenticator_static_statictoken;\nALTER SEQUENCE otp_static_statictoken_id_seq RENAME TO authentik_stages_authenticator_static_statictoken_id_seq;\nALTER SEQUENCE otp_static_staticdevice_id_seq RENAME TO authentik_stages_authenticator_static_staticdevice_id_seq;\n-- Rename tables (totp)\nALTER TABLE otp_totp_totpdevice RENAME TO authentik_stages_authenticator_totp_totpdevice;\nALTER SEQUENCE otp_totp_totpdevice_id_seq RENAME TO authentik_stages_authenticator_totp_totpdevice_id_seq;\n\"\"\"\n\n\nclass Migration(BaseMigration):\n def needs_migration(self) -> bool:\n self.cur.execute(\n \"select * from information_schema.tables WHERE table_name='otp_static_staticdevice'\"\n )\n return bool(self.cur.rowcount)\n\n def run(self):\n self.cur.execute(SQL_STATEMENT)\n self.fake_migration(\n (\n \"authentik_stages_authenticator_static\",\n \"0008_initial\",\n ),\n (\n \"authentik_stages_authenticator_static\",\n \"0009_throttling\",\n ),\n (\n \"authentik_stages_authenticator_totp\",\n \"0008_initial\",\n ),\n (\n \"authentik_stages_authenticator_totp\",\n \"0009_auto_20190420_0723\",\n ),\n )\n", "path": "lifecycle/system_migrations/otp_merge.py"}]} | 2,039 | 184 |
gh_patches_debug_35030 | rasdani/github-patches | git_diff | pfnet__pytorch-pfn-extras-582 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Align terminology (`option` vs. `config`)
</issue>
<code>
[start of pytorch_pfn_extras/runtime/_to.py]
1 from typing import Any, Dict, Optional, Type, TypeVar
2
3 import torch
4
5 import pytorch_pfn_extras as ppe
6 from pytorch_pfn_extras.runtime._runtime import DeviceLike, BaseRuntime
7
8
9 ModuleOrTensor = TypeVar('ModuleOrTensor', torch.nn.Module, torch.Tensor)
10
11
12 def to(
13 module_or_tensor: ModuleOrTensor,
14 device: DeviceLike,
15 *,
16 config: Optional[Dict[str, Any]] = None,
17 runtime_class: Optional[Type[BaseRuntime]] = None,
18 ) -> ModuleOrTensor:
19 """A function to transfer the given object to the given device.
20
21 If PyTorch's device type is given as the ``device`` argument,
22 the behavior of this function is equivalent to
23 ``module_or_tensor.to(module_or_tensor, device)``.
24
25 Otherwise, this function uses the **Runtime** mechanism.
26 This function looks for the Runtime for the device from the RuntimeRegistry
27 and delegates the actual transfer operation to it.
28
29 See also the documentation of ``ppe.runtime.BaseRuntime`` for details.
30
31 Args:
32 module_or_tensor (torch.nn.Module or torch.Tensor):
33 An object to be transferred.
34 device (torch.device or str):
35 The device that the input object is transferred to.
36 config (dict, optional):
37 A config of dictionary type that is passed to
38 ``runtime_class.__init__`` as an argument.
39 runtime_class:
40 A runtime class inherited from `BaseRuntime` class.
41 If ``None``, a runtime class is automatically selected
42 based on the ``device`` argument from the runtime registry.
43
44 Returns:
45 A `torch.Tensor` with the specified device.
46 """
47 if config is None:
48 config = {}
49 if runtime_class is None:
50 registry = ppe.runtime.runtime_registry
51 runtime_class = registry.get_runtime_class_for_device_spec(device)
52 runtime = runtime_class(device, config)
53 obj = module_or_tensor
54 if isinstance(obj, torch.nn.Module):
55 ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)
56 return runtime.move_module(obj)
57 elif isinstance(obj, torch.Tensor):
58 return runtime.move_tensor(obj)
59 else:
60 raise ValueError('Unsupported type for module_or_tensor')
61
[end of pytorch_pfn_extras/runtime/_to.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_pfn_extras/runtime/_to.py b/pytorch_pfn_extras/runtime/_to.py
--- a/pytorch_pfn_extras/runtime/_to.py
+++ b/pytorch_pfn_extras/runtime/_to.py
@@ -13,8 +13,9 @@
module_or_tensor: ModuleOrTensor,
device: DeviceLike,
*,
- config: Optional[Dict[str, Any]] = None,
+ options: Optional[Dict[str, Any]] = None,
runtime_class: Optional[Type[BaseRuntime]] = None,
+ config: Optional[Dict[str, Any]] = None,
) -> ModuleOrTensor:
"""A function to transfer the given object to the given device.
@@ -33,23 +34,30 @@
An object to be transferred.
device (torch.device or str):
The device that the input object is transferred to.
- config (dict, optional):
- A config of dictionary type that is passed to
+ options (dict, optional):
+ An options of dictionary type that is passed to
``runtime_class.__init__`` as an argument.
runtime_class:
A runtime class inherited from `BaseRuntime` class.
If ``None``, a runtime class is automatically selected
based on the ``device`` argument from the runtime registry.
+ config (dict, optional):
+ DEPRECATED. Use `options`.
Returns:
A `torch.Tensor` with the specified device.
"""
- if config is None:
- config = {}
+ if options is None:
+ options = {}
+ if config is not None:
+ options = config
+ elif config is not None:
+ raise ValueError('options and config cannot be specified together')
+
if runtime_class is None:
registry = ppe.runtime.runtime_registry
runtime_class = registry.get_runtime_class_for_device_spec(device)
- runtime = runtime_class(device, config)
+ runtime = runtime_class(device, options)
obj = module_or_tensor
if isinstance(obj, torch.nn.Module):
ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)
| {"golden_diff": "diff --git a/pytorch_pfn_extras/runtime/_to.py b/pytorch_pfn_extras/runtime/_to.py\n--- a/pytorch_pfn_extras/runtime/_to.py\n+++ b/pytorch_pfn_extras/runtime/_to.py\n@@ -13,8 +13,9 @@\n module_or_tensor: ModuleOrTensor,\n device: DeviceLike,\n *,\n- config: Optional[Dict[str, Any]] = None,\n+ options: Optional[Dict[str, Any]] = None,\n runtime_class: Optional[Type[BaseRuntime]] = None,\n+ config: Optional[Dict[str, Any]] = None,\n ) -> ModuleOrTensor:\n \"\"\"A function to transfer the given object to the given device.\n \n@@ -33,23 +34,30 @@\n An object to be transferred.\n device (torch.device or str):\n The device that the input object is transferred to.\n- config (dict, optional):\n- A config of dictionary type that is passed to\n+ options (dict, optional):\n+ An options of dictionary type that is passed to\n ``runtime_class.__init__`` as an argument.\n runtime_class:\n A runtime class inherited from `BaseRuntime` class.\n If ``None``, a runtime class is automatically selected\n based on the ``device`` argument from the runtime registry.\n+ config (dict, optional):\n+ DEPRECATED. Use `options`.\n \n Returns:\n A `torch.Tensor` with the specified device.\n \"\"\"\n- if config is None:\n- config = {}\n+ if options is None:\n+ options = {}\n+ if config is not None:\n+ options = config\n+ elif config is not None:\n+ raise ValueError('options and config cannot be specified together')\n+\n if runtime_class is None:\n registry = ppe.runtime.runtime_registry\n runtime_class = registry.get_runtime_class_for_device_spec(device)\n- runtime = runtime_class(device, config)\n+ runtime = runtime_class(device, options)\n obj = module_or_tensor\n if isinstance(obj, torch.nn.Module):\n ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)\n", "issue": "Align terminology (`option` v.s. 
`config`)\n\n", "before_files": [{"content": "from typing import Any, Dict, Optional, Type, TypeVar\n\nimport torch\n\nimport pytorch_pfn_extras as ppe\nfrom pytorch_pfn_extras.runtime._runtime import DeviceLike, BaseRuntime\n\n\nModuleOrTensor = TypeVar('ModuleOrTensor', torch.nn.Module, torch.Tensor)\n\n\ndef to(\n module_or_tensor: ModuleOrTensor,\n device: DeviceLike,\n *,\n config: Optional[Dict[str, Any]] = None,\n runtime_class: Optional[Type[BaseRuntime]] = None,\n) -> ModuleOrTensor:\n \"\"\"A function to transfer the given object to the given device.\n\n If PyTorch's device type is given as the ``device`` argument,\n the behavior of this function is equivalent to\n ``module_or_tensor.to(module_or_tensor, device)``.\n\n Otherwise, this function uses the **Runtime** mechanism.\n This function looks for the Runtime for the device from the RuntimeRegistry\n and delegates the actual transfer operation to it.\n\n See also the documentation of ``ppe.runtime.BaseRuntime`` for details.\n\n Args:\n module_or_tensor (torch.nn.Module or torch.Tensor):\n An object to be transferred.\n device (torch.device or str):\n The device that the input object is transferred to.\n config (dict, optional):\n A config of dictionary type that is passed to\n ``runtime_class.__init__`` as an argument.\n runtime_class:\n A runtime class inherited from `BaseRuntime` class.\n If ``None``, a runtime class is automatically selected\n based on the ``device`` argument from the runtime registry.\n\n Returns:\n A `torch.Tensor` with the specified device.\n \"\"\"\n if config is None:\n config = {}\n if runtime_class is None:\n registry = ppe.runtime.runtime_registry\n runtime_class = registry.get_runtime_class_for_device_spec(device)\n runtime = runtime_class(device, config)\n obj = module_or_tensor\n if isinstance(obj, torch.nn.Module):\n ppe.runtime._runtime._set_module_runtime_tag(obj, runtime)\n return runtime.move_module(obj)\n elif isinstance(obj, torch.Tensor):\n return runtime.move_tensor(obj)\n else:\n raise ValueError('Unsupported type for module_or_tensor')\n", "path": "pytorch_pfn_extras/runtime/_to.py"}]} | 1,149 | 466 |
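For reference, a caller-side sketch of the renamed keyword once the diff above is applied. It assumes `pytorch_pfn_extras.to` is the public re-export of the patched `runtime._to.to`; the model, tensor and device are placeholders:

```python
# Sketch, not part of the repository: exercising the new `options` keyword.
import torch
import pytorch_pfn_extras as ppe

model = torch.nn.Linear(4, 2)

# Preferred spelling after the patch: `options` is forwarded to the runtime's __init__.
model = ppe.to(model, "cpu", options={})

# The old `config` keyword is still accepted for backward compatibility,
# but the docstring now marks it as deprecated.
tensor = ppe.to(torch.zeros(4), "cpu", config={})
```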
gh_patches_debug_353 | rasdani/github-patches | git_diff | sopel-irc__sopel-1044 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[announce] Send confirmation to caller after all channels announced
When Sopel is in many channels, announces are likely to be rate-limited. This makes it hard to know, for example, when it's safe to shut down the bot if announce is being used to broadcast an upgrade notice.
It's an easy fix, and I'll open a PR for it tomorrow if there are no objections.
I am as-yet undecided whether it's best to use `bot.reply()` or `bot.notice()` for this (or even `bot.msg()` via PM to the caller), but I'll think about it between now and when I open the PR, and it can always be changed before merging.
</issue>
<code>
[start of sopel/modules/announce.py]
1 # coding=utf-8
2 """
3 announce.py - Send a message to all channels
4 Copyright © 2013, Elad Alfassa, <[email protected]>
5 Licensed under the Eiffel Forum License 2.
6
7 """
8 from __future__ import unicode_literals, absolute_import, print_function, division
9
10 from sopel.module import commands, example
11
12
13 @commands('announce')
14 @example('.announce Some important message here')
15 def announce(bot, trigger):
16 """
17 Send an announcement to all channels the bot is in
18 """
19 if not trigger.admin:
20 bot.reply('Sorry, I can\'t let you do that')
21 return
22 for channel in bot.channels:
23 bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))
24
[end of sopel/modules/announce.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py
--- a/sopel/modules/announce.py
+++ b/sopel/modules/announce.py
@@ -21,3 +21,4 @@
return
for channel in bot.channels:
bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))
+ bot.reply('Announce complete.')
| {"golden_diff": "diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py\n--- a/sopel/modules/announce.py\n+++ b/sopel/modules/announce.py\n@@ -21,3 +21,4 @@\n return\n for channel in bot.channels:\n bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))\n+ bot.reply('Announce complete.')\n", "issue": "[announce] Send confirmation to caller after all channels announced\nWhen Sopel is in many channels, announces are likely to be rate-limited. This makes it hard to know, for example, when it's safe to shut down the bot if announce is being used to broadcast an upgrade notice.\n\nIt's an easy fix, and I'll open a PR for it tomorrow if there are no objections.\n\nI am as-yet undecided whether it's best to use `bot.reply()` or `bot.notice()` for this (or even `bot.msg()` via PM to the caller), but I'll think about it between now and when I open the PR, and it can always be changed before merging.\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nannounce.py - Send a message to all channels\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, example\n\n\n@commands('announce')\n@example('.announce Some important message here')\ndef announce(bot, trigger):\n \"\"\"\n Send an announcement to all channels the bot is in\n \"\"\"\n if not trigger.admin:\n bot.reply('Sorry, I can\\'t let you do that')\n return\n for channel in bot.channels:\n bot.msg(channel, '[ANNOUNCEMENT] %s' % trigger.group(2))\n", "path": "sopel/modules/announce.py"}]} | 885 | 91 |
gh_patches_debug_60583 | rasdani/github-patches | git_diff | fonttools__fonttools-1715 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ascender and ascent
The [opentype spec ](https://docs.microsoft.com/en-gb/typography/opentype/spec/hhea) calls the first two substantive entries in the `hhea` table "`ascender`" and "`descender`". fonttools calls them "`ascent`" and "`descent`".
This was surprising! Maybe it's too late to change them, but can we at least have an alias?
</issue>
<code>
[start of Lib/fontTools/ttLib/tables/_h_h_e_a.py]
1 from fontTools.misc.py23 import *
2 from fontTools.misc import sstruct
3 from fontTools.misc.textTools import safeEval
4 from fontTools.misc.fixedTools import (
5 ensureVersionIsLong as fi2ve, versionToFixed as ve2fi)
6 from . import DefaultTable
7 import math
8
9
10 hheaFormat = """
11 > # big endian
12 tableVersion: L
13 ascent: h
14 descent: h
15 lineGap: h
16 advanceWidthMax: H
17 minLeftSideBearing: h
18 minRightSideBearing: h
19 xMaxExtent: h
20 caretSlopeRise: h
21 caretSlopeRun: h
22 caretOffset: h
23 reserved0: h
24 reserved1: h
25 reserved2: h
26 reserved3: h
27 metricDataFormat: h
28 numberOfHMetrics: H
29 """
30
31
32 class table__h_h_e_a(DefaultTable.DefaultTable):
33
34 # Note: Keep in sync with table__v_h_e_a
35
36 dependencies = ['hmtx', 'glyf', 'CFF ']
37
38 def decompile(self, data, ttFont):
39 sstruct.unpack(hheaFormat, data, self)
40
41 def compile(self, ttFont):
42 if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')):
43 self.recalc(ttFont)
44 self.tableVersion = fi2ve(self.tableVersion)
45 return sstruct.pack(hheaFormat, self)
46
47 def recalc(self, ttFont):
48 if 'hmtx' in ttFont:
49 hmtxTable = ttFont['hmtx']
50 self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())
51
52 boundsWidthDict = {}
53 if 'glyf' in ttFont:
54 glyfTable = ttFont['glyf']
55 for name in ttFont.getGlyphOrder():
56 g = glyfTable[name]
57 if g.numberOfContours == 0:
58 continue
59 if g.numberOfContours < 0 and not hasattr(g, "xMax"):
60 # Composite glyph without extents set.
61 # Calculate those.
62 g.recalcBounds(glyfTable)
63 boundsWidthDict[name] = g.xMax - g.xMin
64 elif 'CFF ' in ttFont:
65 topDict = ttFont['CFF '].cff.topDictIndex[0]
66 charStrings = topDict.CharStrings
67 for name in ttFont.getGlyphOrder():
68 cs = charStrings[name]
69 bounds = cs.calcBounds(charStrings)
70 if bounds is not None:
71 boundsWidthDict[name] = int(
72 math.ceil(bounds[2]) - math.floor(bounds[0]))
73
74 if boundsWidthDict:
75 minLeftSideBearing = float('inf')
76 minRightSideBearing = float('inf')
77 xMaxExtent = -float('inf')
78 for name, boundsWidth in boundsWidthDict.items():
79 advanceWidth, lsb = hmtxTable[name]
80 rsb = advanceWidth - lsb - boundsWidth
81 extent = lsb + boundsWidth
82 minLeftSideBearing = min(minLeftSideBearing, lsb)
83 minRightSideBearing = min(minRightSideBearing, rsb)
84 xMaxExtent = max(xMaxExtent, extent)
85 self.minLeftSideBearing = minLeftSideBearing
86 self.minRightSideBearing = minRightSideBearing
87 self.xMaxExtent = xMaxExtent
88
89 else: # No glyph has outlines.
90 self.minLeftSideBearing = 0
91 self.minRightSideBearing = 0
92 self.xMaxExtent = 0
93
94 def toXML(self, writer, ttFont):
95 formatstring, names, fixes = sstruct.getformat(hheaFormat)
96 for name in names:
97 value = getattr(self, name)
98 if name == "tableVersion":
99 value = fi2ve(value)
100 value = "0x%08x" % value
101 writer.simpletag(name, value=value)
102 writer.newline()
103
104 def fromXML(self, name, attrs, content, ttFont):
105 if name == "tableVersion":
106 setattr(self, name, ve2fi(attrs["value"]))
107 return
108 setattr(self, name, safeEval(attrs["value"]))
109
[end of Lib/fontTools/ttLib/tables/_h_h_e_a.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Lib/fontTools/ttLib/tables/_h_h_e_a.py b/Lib/fontTools/ttLib/tables/_h_h_e_a.py
--- a/Lib/fontTools/ttLib/tables/_h_h_e_a.py
+++ b/Lib/fontTools/ttLib/tables/_h_h_e_a.py
@@ -35,6 +35,19 @@
dependencies = ['hmtx', 'glyf', 'CFF ']
+ # OpenType spec renamed these, add aliases for compatibility
+ @property
+ def ascender(self): return self.ascent
+
+ @ascender.setter
+ def ascender(self,value): self.ascent = value
+
+ @property
+ def descender(self): return self.descent
+
+ @descender.setter
+ def descender(self,value): self.descent = value
+
def decompile(self, data, ttFont):
sstruct.unpack(hheaFormat, data, self)
| {"golden_diff": "diff --git a/Lib/fontTools/ttLib/tables/_h_h_e_a.py b/Lib/fontTools/ttLib/tables/_h_h_e_a.py\n--- a/Lib/fontTools/ttLib/tables/_h_h_e_a.py\n+++ b/Lib/fontTools/ttLib/tables/_h_h_e_a.py\n@@ -35,6 +35,19 @@\n \n \tdependencies = ['hmtx', 'glyf', 'CFF ']\n \n+\t# OpenType spec renamed these, add aliases for compatibility\n+\t@property\n+\tdef ascender(self): return self.ascent\n+\n+\[email protected]\n+\tdef ascender(self,value): self.ascent = value\n+\n+\t@property\n+\tdef descender(self): return self.descent\n+\n+\[email protected]\n+\tdef descender(self,value): self.descent = value\n+\n \tdef decompile(self, data, ttFont):\n \t\tsstruct.unpack(hheaFormat, data, self)\n", "issue": "ascender and ascent\nThe [opentype spec ](https://docs.microsoft.com/en-gb/typography/opentype/spec/hhea) calls the first two substantive entries in the `hhea` table \"`ascender`\" and \"`descender`\". fonttools calls them \"`ascent`\" and \"`descent`\".\r\n\r\nThis was surprising! Maybe it's too late to change then but can we at least have an alias?\n", "before_files": [{"content": "from fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom fontTools.misc.fixedTools import (\n\tensureVersionIsLong as fi2ve, versionToFixed as ve2fi)\nfrom . import DefaultTable\nimport math\n\n\nhheaFormat = \"\"\"\n\t\t> # big endian\n\t\ttableVersion: L\n\t\tascent: h\n\t\tdescent: h\n\t\tlineGap: h\n\t\tadvanceWidthMax: H\n\t\tminLeftSideBearing: h\n\t\tminRightSideBearing: h\n\t\txMaxExtent: h\n\t\tcaretSlopeRise: h\n\t\tcaretSlopeRun: h\n\t\tcaretOffset: h\n\t\treserved0: h\n\t\treserved1: h\n\t\treserved2: h\n\t\treserved3: h\n\t\tmetricDataFormat: h\n\t\tnumberOfHMetrics: H\n\"\"\"\n\n\nclass table__h_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__v_h_e_a\n\n\tdependencies = ['hmtx', 'glyf', 'CFF ']\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(hheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tif ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')):\n\t\t\tself.recalc(ttFont)\n\t\tself.tableVersion = fi2ve(self.tableVersion)\n\t\treturn sstruct.pack(hheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tif 'hmtx' in ttFont:\n\t\t\thmtxTable = ttFont['hmtx']\n\t\t\tself.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())\n\n\t\tboundsWidthDict = {}\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"xMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tboundsWidthDict[name] = g.xMax - g.xMin\n\t\telif 'CFF ' in ttFont:\n\t\t\ttopDict = ttFont['CFF '].cff.topDictIndex[0]\n\t\t\tcharStrings = topDict.CharStrings\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\tcs = charStrings[name]\n\t\t\t\tbounds = cs.calcBounds(charStrings)\n\t\t\t\tif bounds is not None:\n\t\t\t\t\tboundsWidthDict[name] = int(\n\t\t\t\t\t\tmath.ceil(bounds[2]) - math.floor(bounds[0]))\n\n\t\tif boundsWidthDict:\n\t\t\tminLeftSideBearing = float('inf')\n\t\t\tminRightSideBearing = float('inf')\n\t\t\txMaxExtent = -float('inf')\n\t\t\tfor name, boundsWidth in boundsWidthDict.items():\n\t\t\t\tadvanceWidth, lsb = hmtxTable[name]\n\t\t\t\trsb = advanceWidth - lsb - boundsWidth\n\t\t\t\textent = lsb + 
boundsWidth\n\t\t\t\tminLeftSideBearing = min(minLeftSideBearing, lsb)\n\t\t\t\tminRightSideBearing = min(minRightSideBearing, rsb)\n\t\t\t\txMaxExtent = max(xMaxExtent, extent)\n\t\t\tself.minLeftSideBearing = minLeftSideBearing\n\t\t\tself.minRightSideBearing = minRightSideBearing\n\t\t\tself.xMaxExtent = xMaxExtent\n\n\t\telse: # No glyph has outlines.\n\t\t\tself.minLeftSideBearing = 0\n\t\t\tself.minRightSideBearing = 0\n\t\t\tself.xMaxExtent = 0\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(hheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\tif name == \"tableVersion\":\n\t\t\t\tvalue = fi2ve(value)\n\t\t\t\tvalue = \"0x%08x\" % value\n\t\t\twriter.simpletag(name, value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tif name == \"tableVersion\":\n\t\t\tsetattr(self, name, ve2fi(attrs[\"value\"]))\n\t\t\treturn\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_h_h_e_a.py"}]} | 1,846 | 216 |
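With the property aliases from the diff above, the spec's terminology and fontTools' historical attribute names read and write the same underlying fields. A short sketch, with a placeholder font path:

```python
from fontTools.ttLib import TTFont

font = TTFont("SomeFont.ttf")  # placeholder path
hhea = font["hhea"]

# Spec names and legacy fontTools names now refer to the same values.
assert hhea.ascender == hhea.ascent
hhea.descender = -300            # writes through to hhea.descent
assert hhea.descent == -300
```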
gh_patches_debug_8925 | rasdani/github-patches | git_diff | freqtrade__freqtrade-3200 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docker image making logfiles in user_data
The docker image places error logfiles in user_data by default. (Apparently it should not be doing that.)
Maybe we could have it put them in a log dir?
docker-compose.yml
command: >
trade
--logfile /freqtrade/user_data/freqtrade.log
can maybe be changed to
--logfile /freqtrade/user_data/log/freqtrade.log
</issue>
<code>
[start of freqtrade/configuration/directory_operations.py]
1 import logging
2 import shutil
3 from pathlib import Path
4 from typing import Any, Dict, Optional
5
6 from freqtrade.exceptions import OperationalException
7 from freqtrade.constants import USER_DATA_FILES
8
9 logger = logging.getLogger(__name__)
10
11
12 def create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path:
13
14 folder = Path(datadir) if datadir else Path(f"{config['user_data_dir']}/data")
15 if not datadir:
16 # set datadir
17 exchange_name = config.get('exchange', {}).get('name').lower()
18 folder = folder.joinpath(exchange_name)
19
20 if not folder.is_dir():
21 folder.mkdir(parents=True)
22 logger.info(f'Created data directory: {datadir}')
23 return folder
24
25
26 def create_userdata_dir(directory: str, create_dir: bool = False) -> Path:
27 """
28 Create userdata directory structure.
29 if create_dir is True, then the parent-directory will be created if it does not exist.
30 Sub-directories will always be created if the parent directory exists.
31 Raises OperationalException if given a non-existing directory.
32 :param directory: Directory to check
33 :param create_dir: Create directory if it does not exist.
34 :return: Path object containing the directory
35 """
36 sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "notebooks",
37 "plot", "strategies", ]
38 folder = Path(directory)
39 if not folder.is_dir():
40 if create_dir:
41 folder.mkdir(parents=True)
42 logger.info(f'Created user-data directory: {folder}')
43 else:
44 raise OperationalException(
45 f"Directory `{folder}` does not exist. "
46 "Please use `freqtrade create-userdir` to create a user directory")
47
48 # Create required subdirectories
49 for f in sub_dirs:
50 subfolder = folder / f
51 if not subfolder.is_dir():
52 subfolder.mkdir(parents=False)
53 return folder
54
55
56 def copy_sample_files(directory: Path, overwrite: bool = False) -> None:
57 """
58 Copy files from templates to User data directory.
59 :param directory: Directory to copy data to
60 :param overwrite: Overwrite existing sample files
61 """
62 if not directory.is_dir():
63 raise OperationalException(f"Directory `{directory}` does not exist.")
64 sourcedir = Path(__file__).parents[1] / "templates"
65 for source, target in USER_DATA_FILES.items():
66 targetdir = directory / target
67 if not targetdir.is_dir():
68 raise OperationalException(f"Directory `{targetdir}` does not exist.")
69 targetfile = targetdir / source
70 if targetfile.exists():
71 if not overwrite:
72 logger.warning(f"File `{targetfile}` exists already, not deploying sample file.")
73 continue
74 else:
75 logger.warning(f"File `{targetfile}` exists already, overwriting.")
76 shutil.copy(str(sourcedir / source), str(targetfile))
77
[end of freqtrade/configuration/directory_operations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/freqtrade/configuration/directory_operations.py b/freqtrade/configuration/directory_operations.py
--- a/freqtrade/configuration/directory_operations.py
+++ b/freqtrade/configuration/directory_operations.py
@@ -33,8 +33,8 @@
:param create_dir: Create directory if it does not exist.
:return: Path object containing the directory
"""
- sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "notebooks",
- "plot", "strategies", ]
+ sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "logs",
+ "notebooks", "plot", "strategies", ]
folder = Path(directory)
if not folder.is_dir():
if create_dir:
| {"golden_diff": "diff --git a/freqtrade/configuration/directory_operations.py b/freqtrade/configuration/directory_operations.py\n--- a/freqtrade/configuration/directory_operations.py\n+++ b/freqtrade/configuration/directory_operations.py\n@@ -33,8 +33,8 @@\n :param create_dir: Create directory if it does not exist.\n :return: Path object containing the directory\n \"\"\"\n- sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"notebooks\",\n- \"plot\", \"strategies\", ]\n+ sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"logs\",\n+ \"notebooks\", \"plot\", \"strategies\", ]\n folder = Path(directory)\n if not folder.is_dir():\n if create_dir:\n", "issue": " Docker image making logfiles in user_data\ndocker image places error logfiles in user_data by default. (apparently it should not be doing that)\r\nMaybe cud have it put them in a log dir?\r\n\r\n\r\ndocker-compose.yml\r\n\r\ncommand: >\r\n trade\r\n --logfile /freqtrade/user_data/freqtrade.log\r\n\r\ncan maybe be changed to \r\n --logfile /freqtrade/user_data/log/freqtrade.log\r\n\n", "before_files": [{"content": "import logging\nimport shutil\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.constants import USER_DATA_FILES\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path:\n\n folder = Path(datadir) if datadir else Path(f\"{config['user_data_dir']}/data\")\n if not datadir:\n # set datadir\n exchange_name = config.get('exchange', {}).get('name').lower()\n folder = folder.joinpath(exchange_name)\n\n if not folder.is_dir():\n folder.mkdir(parents=True)\n logger.info(f'Created data directory: {datadir}')\n return folder\n\n\ndef create_userdata_dir(directory: str, create_dir: bool = False) -> Path:\n \"\"\"\n Create userdata directory structure.\n if create_dir is True, then the parent-directory will be created if it does not exist.\n Sub-directories will always be created if the parent directory exists.\n Raises OperationalException if given a non-existing directory.\n :param directory: Directory to check\n :param create_dir: Create directory if it does not exist.\n :return: Path object containing the directory\n \"\"\"\n sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"notebooks\",\n \"plot\", \"strategies\", ]\n folder = Path(directory)\n if not folder.is_dir():\n if create_dir:\n folder.mkdir(parents=True)\n logger.info(f'Created user-data directory: {folder}')\n else:\n raise OperationalException(\n f\"Directory `{folder}` does not exist. 
\"\n \"Please use `freqtrade create-userdir` to create a user directory\")\n\n # Create required subdirectories\n for f in sub_dirs:\n subfolder = folder / f\n if not subfolder.is_dir():\n subfolder.mkdir(parents=False)\n return folder\n\n\ndef copy_sample_files(directory: Path, overwrite: bool = False) -> None:\n \"\"\"\n Copy files from templates to User data directory.\n :param directory: Directory to copy data to\n :param overwrite: Overwrite existing sample files\n \"\"\"\n if not directory.is_dir():\n raise OperationalException(f\"Directory `{directory}` does not exist.\")\n sourcedir = Path(__file__).parents[1] / \"templates\"\n for source, target in USER_DATA_FILES.items():\n targetdir = directory / target\n if not targetdir.is_dir():\n raise OperationalException(f\"Directory `{targetdir}` does not exist.\")\n targetfile = targetdir / source\n if targetfile.exists():\n if not overwrite:\n logger.warning(f\"File `{targetfile}` exists already, not deploying sample file.\")\n continue\n else:\n logger.warning(f\"File `{targetfile}` exists already, overwriting.\")\n shutil.copy(str(sourcedir / source), str(targetfile))\n", "path": "freqtrade/configuration/directory_operations.py"}]} | 1,405 | 176 |
gh_patches_debug_34955 | rasdani/github-patches | git_diff | elastic__apm-agent-python-881 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'FragmentDefinition' object has no attribute 'operation'
**Describe the bug**:
I'm using elastic APM with Django 3.1.2 and graphql.
On every GraphQL query, I'm now seeing this error: `AttributeError: 'FragmentDefinition' object has no attribute 'operation'`
The relevant file is: `elasticapm/instrumentation/packages/graphql.py in get_graphql_tx_name at line 99`
**To Reproduce**
I'm not sure yet why the error is occurring, and I'm just getting started with the service. If you can guide me in the right direction, I can create a reproducible example.
**Environment (please complete the following information)**
- OS: Linux docker Container
- Python version:
- Framework and version : Django 3.1.2
- APM Server version:
- Agent version: 5.9.0
**Additional context**
Add any other context about the problem here.
- Agent config options <!-- be careful not to post sensitive information -->
<details>
<summary>Click to expand</summary>
```
replace this line with your agent config options
remember to mask any sensitive fields like tokens
```
</details>
- `requirements.txt`:
<details>
<summary>Click to expand</summary>
```
replace this line with your `requirements.txt`
```
</details>
</issue>
<code>
[start of elasticapm/instrumentation/packages/graphql.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from elasticapm import set_transaction_name
32 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
33 from elasticapm.traces import capture_span
34
35
36 class GraphQLExecutorInstrumentation(AbstractInstrumentedModule):
37 name = "graphql"
38
39 instrument_list = [
40 ("graphql.execution.executors.sync", "SyncExecutor.execute"),
41 ("graphql.execution.executors.gevent", "GeventExecutor.execute"),
42 ("graphql.execution.executors.asyncio", "AsyncioExecutor.execute"),
43 ("graphql.execution.executors.process", "ProcessExecutor.execute"),
44 ("graphql.execution.executors.thread", "ThreadExecutor.execute_in_thread"),
45 ("graphql.execution.executors.thread", "ThreadExecutor.execute_in_pool"),
46 ]
47
48 def call(self, module, method, wrapped, instance, args, kwargs):
49 name = "GraphQL"
50
51 info = ""
52 query = args[2]
53
54 if "ResolveInfo" == type(query).__name__:
55 if str(query.return_type) in [
56 'Boolean',
57 'Context',
58 'Date',
59 'DateTime',
60 'Decimal',
61 'Dynamic',
62 'Float',
63 'ID',
64 'Int',
65 'String',
66 'Time',
67 'UUID',
68 'Boolean',
69 'String'
70 ]:
71 return wrapped(*args, **kwargs)
72
73 op = query.operation.operation
74 field = query.field_name
75 info = "%s %s" % (op, field)
76 elif "RequestParams" == type(query).__name__:
77 info = "%s %s" % ("request", query.query)
78 else:
79 info = str(query)
80
81 with capture_span(
82 "%s.%s" % (name, info),
83 span_type="external",
84 span_subtype="graphql",
85 span_action="query"
86 ):
87 return wrapped(*args, **kwargs)
88
89
90 class GraphQLBackendInstrumentation(AbstractInstrumentedModule):
91 name = "graphql"
92
93 instrument_list = [
94 ("graphql.backend.core", "GraphQLCoreBackend.document_from_string"),
95 ("graphql.backend.cache", "GraphQLCachedBackend.document_from_string"),
96 ]
97
98 def get_graphql_tx_name(self, graphql_doc):
99 op = graphql_doc.definitions[0].operation
100 fields = graphql_doc.definitions[0].selection_set.selections
101 return "GraphQL %s %s" % (op.upper(), "+".join([f.name.value for f in fields]))
102
103 def call(self, module, method, wrapped, instance, args, kwargs):
104 graphql_document = wrapped(*args, **kwargs)
105 transaction_name = self.get_graphql_tx_name(graphql_document.document_ast)
106 set_transaction_name(transaction_name)
107 return graphql_document
108
[end of elasticapm/instrumentation/packages/graphql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/graphql.py b/elasticapm/instrumentation/packages/graphql.py
--- a/elasticapm/instrumentation/packages/graphql.py
+++ b/elasticapm/instrumentation/packages/graphql.py
@@ -53,20 +53,20 @@
if "ResolveInfo" == type(query).__name__:
if str(query.return_type) in [
- 'Boolean',
- 'Context',
- 'Date',
- 'DateTime',
- 'Decimal',
- 'Dynamic',
- 'Float',
- 'ID',
- 'Int',
- 'String',
- 'Time',
- 'UUID',
- 'Boolean',
- 'String'
+ "Boolean",
+ "Context",
+ "Date",
+ "DateTime",
+ "Decimal",
+ "Dynamic",
+ "Float",
+ "ID",
+ "Int",
+ "String",
+ "Time",
+ "UUID",
+ "Boolean",
+ "String",
]:
return wrapped(*args, **kwargs)
@@ -78,12 +78,7 @@
else:
info = str(query)
- with capture_span(
- "%s.%s" % (name, info),
- span_type="external",
- span_subtype="graphql",
- span_action="query"
- ):
+ with capture_span("%s.%s" % (name, info), span_type="external", span_subtype="graphql", span_action="query"):
return wrapped(*args, **kwargs)
@@ -96,9 +91,15 @@
]
def get_graphql_tx_name(self, graphql_doc):
- op = graphql_doc.definitions[0].operation
- fields = graphql_doc.definitions[0].selection_set.selections
- return "GraphQL %s %s" % (op.upper(), "+".join([f.name.value for f in fields]))
+ try:
+ op_def = [i for i in graphql_doc.definitions if type(i).__name__ == "OperationDefinition"][0]
+ except KeyError:
+ return "GraphQL unknown operation"
+
+ op = op_def.operation
+ name = op_def.name
+ fields = op_def.selection_set.selections
+ return "GraphQL %s %s" % (op.upper(), name if name else "+".join([f.name.value for f in fields]))
def call(self, module, method, wrapped, instance, args, kwargs):
graphql_document = wrapped(*args, **kwargs)
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/graphql.py b/elasticapm/instrumentation/packages/graphql.py\n--- a/elasticapm/instrumentation/packages/graphql.py\n+++ b/elasticapm/instrumentation/packages/graphql.py\n@@ -53,20 +53,20 @@\n \n if \"ResolveInfo\" == type(query).__name__:\n if str(query.return_type) in [\n- 'Boolean',\n- 'Context',\n- 'Date',\n- 'DateTime',\n- 'Decimal',\n- 'Dynamic',\n- 'Float',\n- 'ID',\n- 'Int',\n- 'String',\n- 'Time',\n- 'UUID',\n- 'Boolean',\n- 'String'\n+ \"Boolean\",\n+ \"Context\",\n+ \"Date\",\n+ \"DateTime\",\n+ \"Decimal\",\n+ \"Dynamic\",\n+ \"Float\",\n+ \"ID\",\n+ \"Int\",\n+ \"String\",\n+ \"Time\",\n+ \"UUID\",\n+ \"Boolean\",\n+ \"String\",\n ]:\n return wrapped(*args, **kwargs)\n \n@@ -78,12 +78,7 @@\n else:\n info = str(query)\n \n- with capture_span(\n- \"%s.%s\" % (name, info),\n- span_type=\"external\",\n- span_subtype=\"graphql\",\n- span_action=\"query\"\n- ):\n+ with capture_span(\"%s.%s\" % (name, info), span_type=\"external\", span_subtype=\"graphql\", span_action=\"query\"):\n return wrapped(*args, **kwargs)\n \n \n@@ -96,9 +91,15 @@\n ]\n \n def get_graphql_tx_name(self, graphql_doc):\n- op = graphql_doc.definitions[0].operation\n- fields = graphql_doc.definitions[0].selection_set.selections\n- return \"GraphQL %s %s\" % (op.upper(), \"+\".join([f.name.value for f in fields]))\n+ try:\n+ op_def = [i for i in graphql_doc.definitions if type(i).__name__ == \"OperationDefinition\"][0]\n+ except KeyError:\n+ return \"GraphQL unknown operation\"\n+\n+ op = op_def.operation\n+ name = op_def.name\n+ fields = op_def.selection_set.selections\n+ return \"GraphQL %s %s\" % (op.upper(), name if name else \"+\".join([f.name.value for f in fields]))\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n graphql_document = wrapped(*args, **kwargs)\n", "issue": "'FragmentDefinition' object has no attribute 'operation'\"\n**Describe the bug**: \r\nI'm using elastic APM with Django 3.1.2 and graphql.\r\nOn every GraphQL Query, I'm seeing now this error: `AttributeError: 'FragmentDefinition' object has no attribute 'operation'`\r\n\r\nThe relevant file is: `elasticapm/instrumentation/packages/graphql.py in get_graphql_tx_name at line 99`\r\n\r\n**To Reproduce**\r\nI'm not sure yet, why the error is occurring and I'm just getting started with the service. 
If you can guide me to the right direction, I can create a reproducible example.\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux docker Container\r\n- Python version:\r\n- Framework and version : Django 3.1.2\r\n- APM Server version: \r\n- Agent version: 5.9.0\r\n\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n\r\n- Agent config options <!-- be careful not to post sensitive information -->\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n replace this line with your agent config options\r\n remember to mask any sensitive fields like tokens\r\n ```\r\n </details>\r\n- `requirements.txt`:\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n replace this line with your `requirements.txt`\r\n ```\r\n </details>\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm import set_transaction_name\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\n\n\nclass GraphQLExecutorInstrumentation(AbstractInstrumentedModule):\n name = \"graphql\"\n\n instrument_list = [\n (\"graphql.execution.executors.sync\", \"SyncExecutor.execute\"),\n (\"graphql.execution.executors.gevent\", \"GeventExecutor.execute\"),\n (\"graphql.execution.executors.asyncio\", \"AsyncioExecutor.execute\"),\n (\"graphql.execution.executors.process\", \"ProcessExecutor.execute\"),\n (\"graphql.execution.executors.thread\", \"ThreadExecutor.execute_in_thread\"),\n (\"graphql.execution.executors.thread\", \"ThreadExecutor.execute_in_pool\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n name = \"GraphQL\"\n\n info = \"\"\n query = args[2]\n\n if \"ResolveInfo\" == type(query).__name__:\n if str(query.return_type) in [\n 'Boolean',\n 'Context',\n 'Date',\n 'DateTime',\n 'Decimal',\n 'Dynamic',\n 'Float',\n 'ID',\n 'Int',\n 'String',\n 'Time',\n 'UUID',\n 'Boolean',\n 'String'\n ]:\n return wrapped(*args, **kwargs)\n\n op = query.operation.operation\n field = query.field_name\n info = \"%s %s\" % (op, field)\n elif \"RequestParams\" == type(query).__name__:\n info = \"%s %s\" % (\"request\", query.query)\n else:\n info = str(query)\n\n with capture_span(\n \"%s.%s\" % (name, info),\n span_type=\"external\",\n span_subtype=\"graphql\",\n span_action=\"query\"\n ):\n return wrapped(*args, **kwargs)\n\n\nclass GraphQLBackendInstrumentation(AbstractInstrumentedModule):\n name = \"graphql\"\n\n instrument_list = [\n (\"graphql.backend.core\", \"GraphQLCoreBackend.document_from_string\"),\n (\"graphql.backend.cache\", \"GraphQLCachedBackend.document_from_string\"),\n ]\n\n def get_graphql_tx_name(self, graphql_doc):\n op = graphql_doc.definitions[0].operation\n fields = graphql_doc.definitions[0].selection_set.selections\n return \"GraphQL %s %s\" % (op.upper(), \"+\".join([f.name.value for f in fields]))\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n graphql_document = wrapped(*args, **kwargs)\n transaction_name = self.get_graphql_tx_name(graphql_document.document_ast)\n set_transaction_name(transaction_name)\n return graphql_document\n", "path": "elasticapm/instrumentation/packages/graphql.py"}]} | 1,962 | 566 |
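The crash reported above happens because a query that uses fragments can put a `FragmentDefinition` at `definitions[0]`, and only `OperationDefinition` nodes carry `.operation`. An illustration, assuming graphql-core 2.x (the series that still ships `graphql.backend.core`):

```python
# Illustration only: why indexing definitions[0] is unsafe with fragments.
from graphql import parse

doc = parse("""
    fragment userFields on User { id name }
    query GetUser { user { ...userFields } }
""")

print([type(d).__name__ for d in doc.definitions])
# -> ['FragmentDefinition', 'OperationDefinition']

# doc.definitions[0].operation raises AttributeError here; the fix picks the
# OperationDefinition explicitly before reading .operation.
op_def = next(d for d in doc.definitions if type(d).__name__ == "OperationDefinition")
print(op_def.operation)  # 'query'
```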
gh_patches_debug_37682 | rasdani/github-patches | git_diff | apluslms__a-plus-1005 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A+ Security logs, CEF format
After the security audit in spring 2021, some new security-related log events were added using the SecurityLog class. The log output format should be converted to the CEF format so that it can be exported to Aalto ITS logging systems. Also, the current log events should be reviewed: do they contain sufficient information, and should some additional events be added? Note that the security log should contain only relevant events that can be justified from a security point of view.
</issue>
<code>
[start of lib/logging.py]
1 from django.http import UnreadablePostError
2 import logging
3 from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed
4 from django.dispatch import receiver
5 from django.http.request import HttpRequest
6
7 def skip_unreadable_post(record):
8 """Skips log records of unfinished post requests."""
9 return not record.exc_info or not issubclass(record.exc_info[0], UnreadablePostError)
10
11 class SecurityLog:
12 """
13 Static singleton class used for A+ - wide security logging.
14 Django signals are used to track login/logout events.
15 """
16
17 seclogger = logging.getLogger('aplus.security')
18
19 @staticmethod
20 def logger() -> logging.Logger:
21 return SecurityLog.seclogger
22
23 @staticmethod
24 def logevent(request: HttpRequest, type: str, message: str) -> None:
25 # Unit tests do not have user defined in request object.
26 if request and hasattr(request, 'user'):
27 user=request.user
28 ip=request.META.get('REMOTE_ADDR')
29 else:
30 user='?'
31 ip='?'
32 SecurityLog.logger().info("({}/{}): {}: {}".format(
33 user,
34 ip,
35 type,
36 message
37 ))
38
39
40 # This example was used as an inspiration for the following functions:
41 # https://stackoverflow.com/questions/37618473/how-can-i-log-both-successful-and-failed-login-and-logout-attempts-in-django
42 @receiver(user_logged_in)
43 def user_logged_in_callback(sender, request, user, **kwargs):
44 SecurityLog.logevent(request, "login", "")
45
46 @receiver(user_logged_out)
47 def user_logged_out_callback(sender, request, user, **kwargs):
48 SecurityLog.logevent(request, "logout", "")
49
50 # Unfortunately the request object is not passed with this signal,
51 # so we cannot get the IP.
52 @receiver(user_login_failed)
53 def user_login_failed_callback(sender, credentials, **kwargs):
54 SecurityLog.logevent(None, "login-failed","{credentials}".format(
55 credentials=credentials)
56 )
57
[end of lib/logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/logging.py b/lib/logging.py
--- a/lib/logging.py
+++ b/lib/logging.py
@@ -3,6 +3,12 @@
from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed
from django.dispatch import receiver
from django.http.request import HttpRequest
+from format_cef import format_cef
+from aplus import VERSION
+
+
+CEF_VENDOR = 'Aalto'
+CEF_PRODUCT = 'aplus'
def skip_unreadable_post(record):
"""Skips log records of unfinished post requests."""
@@ -10,7 +16,8 @@
class SecurityLog:
"""
- Static singleton class used for A+ - wide security logging.
+ Static singleton class used for A+ - wide security logging,
+ to produce ArcSight Common Event Format (CEF) log.
Django signals are used to track login/logout events.
"""
@@ -21,20 +28,32 @@
return SecurityLog.seclogger
@staticmethod
- def logevent(request: HttpRequest, type: str, message: str) -> None:
- # Unit tests do not have user defined in request object.
- if request and hasattr(request, 'user'):
- user=request.user
- ip=request.META.get('REMOTE_ADDR')
- else:
- user='?'
- ip='?'
- SecurityLog.logger().info("({}/{}): {}: {}".format(
- user,
- ip,
- type,
- message
- ))
+ def logevent(
+ request: HttpRequest,
+ type: str,
+ message: str,
+ severity: int = 5,
+ ) -> None:
+ extensions = {}
+ # Unit tests may not have user or source address defined.
+ if request:
+ if hasattr(request, 'user'):
+ extensions['sourceUserName'] = str(request.user)
+ extensions['sourceUserId'] = str(request.user.id)
+ if (addr := request.META.get('REMOTE_ADDR')):
+ extensions['sourceAddress'] = addr
+
+ SecurityLog.logger().info(
+ format_cef(
+ CEF_VENDOR,
+ CEF_PRODUCT,
+ VERSION,
+ type,
+ message,
+ severity,
+ extensions,
+ ).decode("utf-8")
+ )
# This example was used as an inspiration for the following functions:
@@ -51,6 +70,8 @@
# so we cannot get the IP.
@receiver(user_login_failed)
def user_login_failed_callback(sender, credentials, **kwargs):
- SecurityLog.logevent(None, "login-failed","{credentials}".format(
- credentials=credentials)
- )
+ try:
+ SecurityLog.logevent(None, "login-failed", f"username: {credentials['username']}")
+ except KeyError:
+ # Unit tests do not have 'username' in credentials, let's not fail them for that
+ pass
| {"golden_diff": "diff --git a/lib/logging.py b/lib/logging.py\n--- a/lib/logging.py\n+++ b/lib/logging.py\n@@ -3,6 +3,12 @@\n from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed\n from django.dispatch import receiver\n from django.http.request import HttpRequest\n+from format_cef import format_cef\n+from aplus import VERSION\n+\n+\n+CEF_VENDOR = 'Aalto'\n+CEF_PRODUCT = 'aplus'\n \n def skip_unreadable_post(record):\n \"\"\"Skips log records of unfinished post requests.\"\"\"\n@@ -10,7 +16,8 @@\n \n class SecurityLog:\n \"\"\"\n- Static singleton class used for A+ - wide security logging.\n+ Static singleton class used for A+ - wide security logging,\n+ to produce ArcSight Common Event Format (CEF) log.\n Django signals are used to track login/logout events.\n \"\"\"\n \n@@ -21,20 +28,32 @@\n return SecurityLog.seclogger\n \n @staticmethod\n- def logevent(request: HttpRequest, type: str, message: str) -> None:\n- # Unit tests do not have user defined in request object.\n- if request and hasattr(request, 'user'):\n- user=request.user\n- ip=request.META.get('REMOTE_ADDR')\n- else:\n- user='?'\n- ip='?'\n- SecurityLog.logger().info(\"({}/{}): {}: {}\".format(\n- user,\n- ip,\n- type,\n- message\n- ))\n+ def logevent(\n+ request: HttpRequest,\n+ type: str,\n+ message: str,\n+ severity: int = 5,\n+ ) -> None:\n+ extensions = {}\n+ # Unit tests may not have user or source address defined.\n+ if request:\n+ if hasattr(request, 'user'):\n+ extensions['sourceUserName'] = str(request.user)\n+ extensions['sourceUserId'] = str(request.user.id)\n+ if (addr := request.META.get('REMOTE_ADDR')):\n+ extensions['sourceAddress'] = addr\n+\n+ SecurityLog.logger().info(\n+ format_cef(\n+ CEF_VENDOR,\n+ CEF_PRODUCT,\n+ VERSION,\n+ type,\n+ message,\n+ severity,\n+ extensions,\n+ ).decode(\"utf-8\")\n+ )\n \n \n # This example was used as an inspiration for the following functions:\n@@ -51,6 +70,8 @@\n # so we cannot get the IP.\n @receiver(user_login_failed)\n def user_login_failed_callback(sender, credentials, **kwargs):\n- SecurityLog.logevent(None, \"login-failed\",\"{credentials}\".format(\n- credentials=credentials)\n- )\n+ try:\n+ SecurityLog.logevent(None, \"login-failed\", f\"username: {credentials['username']}\")\n+ except KeyError:\n+ # Unit tests do not have 'username' in credentials, let's not fail them for that\n+ pass\n", "issue": "A+ Security logs, CEF format\nAfter security audit in spring 2021, some new security-related log events were added, using SecurityLog class. The log output format should be converted to CEF format that can be exported to Aalto ITS logging systems. Also, the current log events should be reviewed: do they contain sufficient information, and should some additional events be added. 
Note that security log should contain only relevant events, that can be justified from security point of view.\n", "before_files": [{"content": "from django.http import UnreadablePostError\nimport logging\nfrom django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed\nfrom django.dispatch import receiver\nfrom django.http.request import HttpRequest\n\ndef skip_unreadable_post(record):\n \"\"\"Skips log records of unfinished post requests.\"\"\"\n return not record.exc_info or not issubclass(record.exc_info[0], UnreadablePostError)\n\nclass SecurityLog:\n \"\"\"\n Static singleton class used for A+ - wide security logging.\n Django signals are used to track login/logout events.\n \"\"\"\n\n seclogger = logging.getLogger('aplus.security')\n\n @staticmethod\n def logger() -> logging.Logger:\n return SecurityLog.seclogger\n\n @staticmethod\n def logevent(request: HttpRequest, type: str, message: str) -> None:\n # Unit tests do not have user defined in request object.\n if request and hasattr(request, 'user'):\n user=request.user\n ip=request.META.get('REMOTE_ADDR')\n else:\n user='?'\n ip='?'\n SecurityLog.logger().info(\"({}/{}): {}: {}\".format(\n user,\n ip,\n type,\n message\n ))\n\n\n# This example was used as an inspiration for the following functions:\n# https://stackoverflow.com/questions/37618473/how-can-i-log-both-successful-and-failed-login-and-logout-attempts-in-django\n@receiver(user_logged_in)\ndef user_logged_in_callback(sender, request, user, **kwargs):\n SecurityLog.logevent(request, \"login\", \"\")\n\n@receiver(user_logged_out)\ndef user_logged_out_callback(sender, request, user, **kwargs):\n SecurityLog.logevent(request, \"logout\", \"\")\n\n# Unfortunately the request object is not passed with this signal,\n# so we cannot get the IP.\n@receiver(user_login_failed)\ndef user_login_failed_callback(sender, credentials, **kwargs):\n SecurityLog.logevent(None, \"login-failed\",\"{credentials}\".format(\n credentials=credentials)\n )\n", "path": "lib/logging.py"}]} | 1,172 | 655 |
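The patch above funnels every security event through `format_cef` with a fixed vendor/product pair and a small dict of CEF extension fields. A rough, standalone sketch of that call pattern follows; the argument order, the extension keys and the `severity=5` default are copied from the diff, while the `VERSION` string and the sample event are invented for illustration.

```python
# Sketch of the CEF call used by the patched SecurityLog.logevent.
# Only the format_cef call shape and extension keys come from the diff;
# VERSION and the sample values are made up.
from format_cef import format_cef

CEF_VENDOR = 'Aalto'
CEF_PRODUCT = 'aplus'
VERSION = '0.0'  # the real code imports this from the aplus package


def cef_line(event_type, message, severity=5, user=None, user_id=None, addr=None):
    """Build one CEF log line the way the patched logevent does."""
    extensions = {}
    if user is not None:
        extensions['sourceUserName'] = str(user)
        extensions['sourceUserId'] = str(user_id)
    if addr:
        extensions['sourceAddress'] = addr
    return format_cef(
        CEF_VENDOR,
        CEF_PRODUCT,
        VERSION,
        event_type,
        message,
        severity,
        extensions,
    ).decode('utf-8')


print(cef_line('login-failed', 'username: alice', addr='192.0.2.10'))
```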
gh_patches_debug_22756 | rasdani/github-patches | git_diff | streamlit__streamlit-929 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make it easy to screencast a Streamlit app
See spec here: https://docs.google.com/presentation/d/18bNul9a6rjScGhxRmGbZbCcU3uYp_b3ckxA9DVFJlKM/edit
And see a crappy demo I wrote with some code you can steal:
https://gist.github.com/tvst/c114620cf36b77732d5d67f411c55f12
Questions:
* What browsers support this?
* Can we record as mp4 / h264?
* Can we record in a format that works in both Windows and Mac without extra installs? (Linux is not a problem -- users know how to open video in different formats)
</issue>
<code>
[start of e2e/scripts/st_latex.py]
1 import streamlit as st
2
3 st.latex(r"\LaTeX")
4
5 try:
6 import sympy
7
8 a, b = sympy.symbols("a b")
9 out = a + b
10 except:
11 out = "a + b"
12
13 st.latex(out)
14
[end of e2e/scripts/st_latex.py]
[start of e2e/scripts/st_chart_utc_time.py]
1 from datetime import date
2
3 import pandas as pd
4 import streamlit as st
5
6 df = pd.DataFrame(
7 {
8 "index": [
9 date(2019, 8, 9),
10 date(2019, 8, 10),
11 date(2019, 8, 11),
12 date(2019, 8, 12),
13 ],
14 "numbers": [10, 50, 30, 40],
15 }
16 )
17
18 df.set_index("index", inplace=True)
19
20 # st.area/bar/line_chart all use Altair/Vega-Lite under the hood.
21 # By default, Vega-Lite displays time values in the browser's local
22 # time zone. In `altair.generate_chart`, we explicitly set the time
23 # display to UTC, so that our results are consistent. This test verifies
24 # that change!
25 st.area_chart(df)
26 st.bar_chart(df)
27 st.line_chart(df)
28
[end of e2e/scripts/st_chart_utc_time.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/e2e/scripts/st_chart_utc_time.py b/e2e/scripts/st_chart_utc_time.py
--- a/e2e/scripts/st_chart_utc_time.py
+++ b/e2e/scripts/st_chart_utc_time.py
@@ -1,3 +1,18 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018-2020 Streamlit Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from datetime import date
import pandas as pd
diff --git a/e2e/scripts/st_latex.py b/e2e/scripts/st_latex.py
--- a/e2e/scripts/st_latex.py
+++ b/e2e/scripts/st_latex.py
@@ -1,3 +1,18 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018-2020 Streamlit Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import streamlit as st
st.latex(r"\LaTeX")
| {"golden_diff": "diff --git a/e2e/scripts/st_chart_utc_time.py b/e2e/scripts/st_chart_utc_time.py\n--- a/e2e/scripts/st_chart_utc_time.py\n+++ b/e2e/scripts/st_chart_utc_time.py\n@@ -1,3 +1,18 @@\n+# -*- coding: utf-8 -*-\n+# Copyright 2018-2020 Streamlit Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n from datetime import date\n \n import pandas as pd\ndiff --git a/e2e/scripts/st_latex.py b/e2e/scripts/st_latex.py\n--- a/e2e/scripts/st_latex.py\n+++ b/e2e/scripts/st_latex.py\n@@ -1,3 +1,18 @@\n+# -*- coding: utf-8 -*-\n+# Copyright 2018-2020 Streamlit Inc.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n import streamlit as st\n \n st.latex(r\"\\LaTeX\")\n", "issue": "Make it easy to screencast a Streamlit app\nSee spec here: https://docs.google.com/presentation/d/18bNul9a6rjScGhxRmGbZbCcU3uYp_b3ckxA9DVFJlKM/edit\r\n\r\nAnd see a crappy demo I wrote with some code you can steal:\r\nhttps://gist.github.com/tvst/c114620cf36b77732d5d67f411c55f12\r\n\r\nQuestions:\r\n* What browsers support this?\r\n* Can we record as mp4 / h264?\r\n* Can we record in a format that works in both Windows and Mac without extra installs? (Linux is not a problem -- users know how to open video in different formats)\n", "before_files": [{"content": "import streamlit as st\n\nst.latex(r\"\\LaTeX\")\n\ntry:\n import sympy\n\n a, b = sympy.symbols(\"a b\")\n out = a + b\nexcept:\n out = \"a + b\"\n\nst.latex(out)\n", "path": "e2e/scripts/st_latex.py"}, {"content": "from datetime import date\n\nimport pandas as pd\nimport streamlit as st\n\ndf = pd.DataFrame(\n {\n \"index\": [\n date(2019, 8, 9),\n date(2019, 8, 10),\n date(2019, 8, 11),\n date(2019, 8, 12),\n ],\n \"numbers\": [10, 50, 30, 40],\n }\n)\n\ndf.set_index(\"index\", inplace=True)\n\n# st.area/bar/line_chart all use Altair/Vega-Lite under the hood.\n# By default, Vega-Lite displays time values in the browser's local\n# time zone. In `altair.generate_chart`, we explicitly set the time\n# display to UTC, so that our results are consistent. This test verifies\n# that change!\nst.area_chart(df)\nst.bar_chart(df)\nst.line_chart(df)\n", "path": "e2e/scripts/st_chart_utc_time.py"}]} | 1,076 | 435 |
gh_patches_debug_8419 | rasdani/github-patches | git_diff | searxng__searxng-2830 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: internetarchivescholar engine
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
Repository: https://github.com/searxng/searxng
Branch: master
Version: 2023.9.19+3ac7c40b6
<!-- Check if these values are correct -->
**How did you install SearXNG?**
<!-- Did you install SearXNG using the official wiki or using searxng-docker
or manually by executing the searx/webapp.py file? -->
**What happened?**
<!-- A clear and concise description of what the bug is. -->
**How To Reproduce**
<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Screenshots & Logs**
<!-- If applicable, add screenshots, logs to help explain your problem. -->
**Additional context**
<!-- Add any other context about the problem here. -->
**Technical report**
Error
* Error: KeyError
* Percentage: 25
* Parameters: `()`
* File name: `searx/engines/internet_archive_scholar.py:59`
* Function: `response`
* Code: `'title': result['biblio']['title'],`
</issue>
<code>
[start of searx/engines/internet_archive_scholar.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Internet Archive scholar(science)
4 """
5
6 from datetime import datetime
7 from urllib.parse import urlencode
8 from searx.utils import html_to_text
9
10 about = {
11 "website": "https://scholar.archive.org/",
12 "wikidata_id": "Q115667709",
13 "official_api_documentation": "https://scholar.archive.org/api/redoc",
14 "use_official_api": True,
15 "require_api_key": False,
16 "results": "JSON",
17 }
18 categories = ['science', 'scientific publications']
19 paging = True
20
21 base_url = "https://scholar.archive.org"
22 results_per_page = 15
23
24
25 def request(query, params):
26 args = {
27 "q": query,
28 "limit": results_per_page,
29 "offset": (params["pageno"] - 1) * results_per_page,
30 }
31 params["url"] = f"{base_url}/search?{urlencode(args)}"
32 params["headers"]["Accept"] = "application/json"
33 return params
34
35
36 def response(resp):
37 results = []
38
39 json = resp.json()
40
41 for result in json["results"]:
42 publishedDate, content, doi = None, '', None
43
44 if result['biblio'].get('release_date'):
45 publishedDate = datetime.strptime(result['biblio']['release_date'], "%Y-%m-%d")
46
47 if len(result['abstracts']) > 0:
48 content = result['abstracts'][0].get('body')
49 elif len(result['_highlights']) > 0:
50 content = result['_highlights'][0]
51
52 if len(result['releases']) > 0:
53 doi = result['releases'][0].get('doi')
54
55 results.append(
56 {
57 'template': 'paper.html',
58 'url': result['fulltext']['access_url'],
59 'title': result['biblio']['title'],
60 'content': html_to_text(content),
61 'publisher': result['biblio'].get('publisher'),
62 'doi': doi,
63 'journal': result['biblio'].get('container_name'),
64 'authors': result['biblio'].get('contrib_names'),
65 'tags': result['tags'],
66 'publishedDate': publishedDate,
67 'issns': result['biblio'].get('issns'),
68 'pdf_url': result['fulltext'].get('access_url'),
69 }
70 )
71
72 return results
73
[end of searx/engines/internet_archive_scholar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/internet_archive_scholar.py b/searx/engines/internet_archive_scholar.py
--- a/searx/engines/internet_archive_scholar.py
+++ b/searx/engines/internet_archive_scholar.py
@@ -56,7 +56,7 @@
{
'template': 'paper.html',
'url': result['fulltext']['access_url'],
- 'title': result['biblio']['title'],
+ 'title': result['biblio'].get('title') or result['biblio'].get('container_name'),
'content': html_to_text(content),
'publisher': result['biblio'].get('publisher'),
'doi': doi,
| {"golden_diff": "diff --git a/searx/engines/internet_archive_scholar.py b/searx/engines/internet_archive_scholar.py\n--- a/searx/engines/internet_archive_scholar.py\n+++ b/searx/engines/internet_archive_scholar.py\n@@ -56,7 +56,7 @@\n {\n 'template': 'paper.html',\n 'url': result['fulltext']['access_url'],\n- 'title': result['biblio']['title'],\n+ 'title': result['biblio'].get('title') or result['biblio'].get('container_name'),\n 'content': html_to_text(content),\n 'publisher': result['biblio'].get('publisher'),\n 'doi': doi,\n", "issue": "Bug: internetarchivescholar engine\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nRepository: https://github.com/searxng/searxng\r\nBranch: master\r\nVersion: 2023.9.19+3ac7c40b6\r\n<!-- Check if these values are correct -->\r\n\r\n**How did you install SearXNG?**\r\n<!-- Did you install SearXNG using the official wiki or using searxng-docker\r\nor manually by executing the searx/webapp.py file? -->\r\n**What happened?**\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n**How To Reproduce**\r\n<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\n\r\n**Technical report**\r\n\r\nError\r\n * Error: KeyError\r\n * Percentage: 25\r\n * Parameters: `()`\r\n * File name: `searx/engines/internet_archive_scholar.py:59`\r\n * Function: `response`\r\n * Code: `'title': result['biblio']['title'],`\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Internet Archive scholar(science)\n\"\"\"\n\nfrom datetime import datetime\nfrom urllib.parse import urlencode\nfrom searx.utils import html_to_text\n\nabout = {\n \"website\": \"https://scholar.archive.org/\",\n \"wikidata_id\": \"Q115667709\",\n \"official_api_documentation\": \"https://scholar.archive.org/api/redoc\",\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\ncategories = ['science', 'scientific publications']\npaging = True\n\nbase_url = \"https://scholar.archive.org\"\nresults_per_page = 15\n\n\ndef request(query, params):\n args = {\n \"q\": query,\n \"limit\": results_per_page,\n \"offset\": (params[\"pageno\"] - 1) * results_per_page,\n }\n params[\"url\"] = f\"{base_url}/search?{urlencode(args)}\"\n params[\"headers\"][\"Accept\"] = \"application/json\"\n return params\n\n\ndef response(resp):\n results = []\n\n json = resp.json()\n\n for result in json[\"results\"]:\n publishedDate, content, doi = None, '', None\n\n if result['biblio'].get('release_date'):\n publishedDate = datetime.strptime(result['biblio']['release_date'], \"%Y-%m-%d\")\n\n if len(result['abstracts']) > 0:\n content = result['abstracts'][0].get('body')\n elif len(result['_highlights']) > 0:\n content = result['_highlights'][0]\n\n if len(result['releases']) > 0:\n doi = result['releases'][0].get('doi')\n\n results.append(\n {\n 'template': 'paper.html',\n 'url': result['fulltext']['access_url'],\n 'title': result['biblio']['title'],\n 'content': html_to_text(content),\n 'publisher': result['biblio'].get('publisher'),\n 'doi': doi,\n 'journal': result['biblio'].get('container_name'),\n 'authors': 
result['biblio'].get('contrib_names'),\n 'tags': result['tags'],\n 'publishedDate': publishedDate,\n 'issns': result['biblio'].get('issns'),\n 'pdf_url': result['fulltext'].get('access_url'),\n }\n )\n\n return results\n", "path": "searx/engines/internet_archive_scholar.py"}]} | 1,531 | 164 |
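The accepted one-line change works because `dict.get` returns `None` instead of raising when a key is missing, and the `or` falls back to the container name. A runnable toy version of the same pattern, with made-up records rather than real API output:

```python
# Two fake scholar.archive.org records; the second lacks 'title', which is
# exactly the case that raised KeyError in the original code.
records = [
    {'biblio': {'title': 'Some paper', 'container_name': 'Some Journal'}},
    {'biblio': {'container_name': 'Untitled proceedings'}},
]

for record in records:
    # record['biblio']['title'] would raise KeyError on the second record;
    # .get() plus a fallback keeps the result usable.
    title = record['biblio'].get('title') or record['biblio'].get('container_name')
    print(title)
```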
gh_patches_debug_21665 | rasdani/github-patches | git_diff | qtile__qtile-1241 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fake screens
[This part of the documentation](https://github.com/qtile/qtile/blob/ed7198a5fb5438110f81a8c6ecc0e6289676c057/libqtile/config.py#L231-L232) mentions "fake screens", and the term is also found [in the code](https://github.com/qtile/qtile/blob/7c2a88fba68bdcf6f25dfb5494a74afc475d674e/libqtile/manager.py#L357-L373).
What are they? How to use them?
We need to document answers to those questions, and then make sure they work correctly.
See #1192 for this last point.
</issue>
<code>
[start of libqtile/confreader.py]
1 # coding: utf-8
2 #
3 # Copyright (c) 2008, Aldo Cortesi <[email protected]>
4 # Copyright (c) 2011, Andrew Grigorev <[email protected]>
5 #
6 # All rights reserved.
7 #
8 # Permission is hereby granted, free of charge, to any person obtaining a copy
9 # of this software and associated documentation files (the "Software"), to deal
10 # in the Software without restriction, including without limitation the rights
11 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 # copies of the Software, and to permit persons to whom the Software is
13 # furnished to do so, subject to the following conditions:
14 #
15 # The above copyright notice and this permission notice shall be included in
16 # all copies or substantial portions of the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 # SOFTWARE.
25 import os
26 import sys
27
28
29 class ConfigError(Exception):
30 pass
31
32
33 class Config(object):
34 settings_keys = [
35 "keys",
36 "mouse",
37 "groups",
38 "dgroups_key_binder",
39 "dgroups_app_rules",
40 "follow_mouse_focus",
41 "focus_on_window_activation",
42 "cursor_warp",
43 "layouts",
44 "floating_layout",
45 "screens",
46 "main",
47 "auto_fullscreen",
48 "widget_defaults",
49 "extension_defaults",
50 "bring_front_click",
51 "wmname",
52 ]
53
54 def __init__(self, **settings):
55 """Create a Config() object from settings
56
57 Only attributes found in Config.settings_keys will be added to object.
58 config attribute precedence is 1.) **settings 2.) self 3.) default_config
59 """
60 from .resources import default_config
61 default = vars(default_config)
62 for key in self.settings_keys:
63 try:
64 value = settings[key]
65 except KeyError:
66 value = getattr(self, key, default[key])
67 setattr(self, key, value)
68 self._init_deprecated(**settings)
69
70 def _init_deprecated(self, extensions=None, **settings):
71 "Initialize deprecated settings."
72 if extensions: # Deprecated in v0.10.7
73 import warnings
74 warnings.warn("'extentions' is deprecated, use "
75 "'extension_defaults'", DeprecationWarning)
76 self.extension_defaults.update(extensions.get('dmenu', {}))
77
78 @classmethod
79 def from_file(cls, path):
80 "Create a Config() object from the python file located at path."
81 try:
82 sys.path.insert(0, os.path.dirname(path))
83 config = __import__(os.path.basename(path)[:-3])
84 except Exception:
85 import traceback
86 from .log_utils import logger
87 logger.exception('Could not import config file %r', path)
88 tb = traceback.format_exc()
89 raise ConfigError(tb)
90 return cls(**vars(config))
91
[end of libqtile/confreader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/confreader.py b/libqtile/confreader.py
--- a/libqtile/confreader.py
+++ b/libqtile/confreader.py
@@ -66,6 +66,7 @@
value = getattr(self, key, default[key])
setattr(self, key, value)
self._init_deprecated(**settings)
+ self._init_fake_screens(**settings)
def _init_deprecated(self, extensions=None, **settings):
"Initialize deprecated settings."
@@ -75,6 +76,14 @@
"'extension_defaults'", DeprecationWarning)
self.extension_defaults.update(extensions.get('dmenu', {}))
+ def _init_fake_screens(self, **settings):
+ " Initiaize fake_screens if they are set."
+ try:
+ value = settings['fake_screens']
+ setattr(self, 'fake_screens', value)
+ except KeyError:
+ pass
+
@classmethod
def from_file(cls, path):
"Create a Config() object from the python file located at path."
| {"golden_diff": "diff --git a/libqtile/confreader.py b/libqtile/confreader.py\n--- a/libqtile/confreader.py\n+++ b/libqtile/confreader.py\n@@ -66,6 +66,7 @@\n value = getattr(self, key, default[key])\n setattr(self, key, value)\n self._init_deprecated(**settings)\n+ self._init_fake_screens(**settings)\n \n def _init_deprecated(self, extensions=None, **settings):\n \"Initialize deprecated settings.\"\n@@ -75,6 +76,14 @@\n \"'extension_defaults'\", DeprecationWarning)\n self.extension_defaults.update(extensions.get('dmenu', {}))\n \n+ def _init_fake_screens(self, **settings):\n+ \" Initiaize fake_screens if they are set.\"\n+ try:\n+ value = settings['fake_screens']\n+ setattr(self, 'fake_screens', value)\n+ except KeyError:\n+ pass\n+\n @classmethod\n def from_file(cls, path):\n \"Create a Config() object from the python file located at path.\"\n", "issue": "Fake screens\n[This part of the documentation](https://github.com/qtile/qtile/blob/ed7198a5fb5438110f81a8c6ecc0e6289676c057/libqtile/config.py#L231-L232) mentions \"fake screens\", and the term is also found [in the code](https://github.com/qtile/qtile/blob/7c2a88fba68bdcf6f25dfb5494a74afc475d674e/libqtile/manager.py#L357-L373).\r\n\r\nWhat are they? How to use them?\r\n\r\nWe need to document answers to those questions, and then make sure they work correctly.\r\n\r\nSee #1192 for this last point.\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright (c) 2008, Aldo Cortesi <[email protected]>\n# Copyright (c) 2011, Andrew Grigorev <[email protected]>\n#\n# All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\nimport sys\n\n\nclass ConfigError(Exception):\n pass\n\n\nclass Config(object):\n settings_keys = [\n \"keys\",\n \"mouse\",\n \"groups\",\n \"dgroups_key_binder\",\n \"dgroups_app_rules\",\n \"follow_mouse_focus\",\n \"focus_on_window_activation\",\n \"cursor_warp\",\n \"layouts\",\n \"floating_layout\",\n \"screens\",\n \"main\",\n \"auto_fullscreen\",\n \"widget_defaults\",\n \"extension_defaults\",\n \"bring_front_click\",\n \"wmname\",\n ]\n\n def __init__(self, **settings):\n \"\"\"Create a Config() object from settings\n\n Only attributes found in Config.settings_keys will be added to object.\n config attribute precedence is 1.) **settings 2.) self 3.) 
default_config\n \"\"\"\n from .resources import default_config\n default = vars(default_config)\n for key in self.settings_keys:\n try:\n value = settings[key]\n except KeyError:\n value = getattr(self, key, default[key])\n setattr(self, key, value)\n self._init_deprecated(**settings)\n\n def _init_deprecated(self, extensions=None, **settings):\n \"Initialize deprecated settings.\"\n if extensions: # Deprecated in v0.10.7\n import warnings\n warnings.warn(\"'extentions' is deprecated, use \"\n \"'extension_defaults'\", DeprecationWarning)\n self.extension_defaults.update(extensions.get('dmenu', {}))\n\n @classmethod\n def from_file(cls, path):\n \"Create a Config() object from the python file located at path.\"\n try:\n sys.path.insert(0, os.path.dirname(path))\n config = __import__(os.path.basename(path)[:-3])\n except Exception:\n import traceback\n from .log_utils import logger\n logger.exception('Could not import config file %r', path)\n tb = traceback.format_exc()\n raise ConfigError(tb)\n return cls(**vars(config))\n", "path": "libqtile/confreader.py"}]} | 1,603 | 237 |
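The patch only copies a top-level `fake_screens` value from the user's config module onto the `Config` object, the same way `screens` is handled. What a user would actually put in `config.py` is a list of `Screen` objects carving one physical display into several virtual ones. A sketch is below; the `Screen(x=..., y=..., width=..., height=...)` geometry arguments follow qtile's fake-screens documentation example and should be checked against the qtile version in use.

```python
# Hypothetical ~/.config/qtile/config.py fragment: with the patch applied,
# Config.from_file picks up this module-level name just like `screens`.
from libqtile import bar, widget
from libqtile.config import Screen

fake_screens = [
    Screen(
        top=bar.Bar([widget.GroupBox(), widget.Clock()], 24),
        x=0, y=0, width=960, height=540,
    ),
    Screen(
        top=bar.Bar([widget.Clock()], 24),
        x=960, y=0, width=960, height=540,
    ),
]
```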
gh_patches_debug_795 | rasdani/github-patches | git_diff | Parsl__parsl-140 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not import `parsl` before requirements are setup
```
[annawoodard@midway001 parsl]$ python setup.py install
Traceback (most recent call last):
File "setup.py", line 2, in <module>
from parsl.version import VERSION
File "/home/annawoodard/parsl/parsl/__init__.py", line 35, in <module>
from parsl.executors.ipp import IPyParallelExecutor
File "/home/annawoodard/parsl/parsl/executors/ipp.py", line 4, in <module>
from ipyparallel import Client
ModuleNotFoundError: No module named 'ipyparallel'
```
Setuptools is supposed to take care of dependencies for us, but importing parsl in `setup.py` breaks that (because we require the dependencies by importing the parsl version from `version.py` before they can be installed). We should avoid this.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from parsl.version import VERSION
3
4 with open('requirements.txt') as f:
5 install_requires = f.readlines()
6
7 # tests_require = parse_requirements('test-requirements.txt')
8
9 setup(
10 name='parsl',
11 version=VERSION,
12 description='Simple data dependent workflows in Python',
13 long_description='Simple and easy parallel workflows system for Python',
14 url='https://github.com/Parsl/parsl',
15 author='Yadu Nand Babuji',
16 author_email='[email protected]',
17 license='Apache 2.0',
18 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),
19 package_data={'': ['LICENSE']},
20 packages=find_packages(),
21 install_requires=install_requires,
22 classifiers=[
23 # Maturity
24 'Development Status :: 3 - Alpha',
25 # Intended audience
26 'Intended Audience :: Developers',
27 # Licence, must match with licence above
28 'License :: OSI Approved :: Apache Software License',
29 # Python versions supported
30 'Programming Language :: Python :: 3.5',
31 'Programming Language :: Python :: 3.6',
32 ],
33 keywords=['Workflows', 'Scientific computing'],
34 )
35
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,7 @@
from setuptools import setup, find_packages
-from parsl.version import VERSION
+
+with open('parsl/version.py') as f:
+ exec(f.read())
with open('requirements.txt') as f:
install_requires = f.readlines()
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,5 +1,7 @@\n from setuptools import setup, find_packages\n-from parsl.version import VERSION\n+\n+with open('parsl/version.py') as f:\n+ exec(f.read())\n \n with open('requirements.txt') as f:\n install_requires = f.readlines()\n", "issue": "Do not import `parsl` before requirements are setup\n```\r\n[annawoodard@midway001 parsl]$ python setup.py install\r\nTraceback (most recent call last):\r\n File \"setup.py\", line 2, in <module>\r\n from parsl.version import VERSION\r\n File \"/home/annawoodard/parsl/parsl/__init__.py\", line 35, in <module>\r\n from parsl.executors.ipp import IPyParallelExecutor\r\n File \"/home/annawoodard/parsl/parsl/executors/ipp.py\", line 4, in <module>\r\n from ipyparallel import Client\r\nModuleNotFoundError: No module named 'ipyparallel'\r\n```\r\n\r\nSetuptools is supposed to take care of dependencies for us, but importing parsl in `setup.py` breaks that (because we require the dependencies by importing the parsl version from `version.py` before they can be installed). We should avoid this.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom parsl.version import VERSION\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\n# tests_require = parse_requirements('test-requirements.txt')\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple and easy parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='Yadu Nand Babuji',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n package_data={'': ['LICENSE']},\n packages=find_packages(),\n install_requires=install_requires,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n)\n", "path": "setup.py"}]} | 1,070 | 82 |
gh_patches_debug_29007 | rasdani/github-patches | git_diff | vega__altair-2642 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dots aren't showing up in ranged dot plot

</issue>
<code>
[start of altair/examples/ranged_dot_plot.py]
1 """
2 Ranged Dot Plot
3 -----------------
4 This example shows a ranged dot plot that uses 'layer' to convey changing life expectancy for the five most populous countries (between 1955 and 2000).
5 """
6 # category: other charts
7 import altair as alt
8 from vega_datasets import data
9
10 source = data.countries.url
11
12 chart = alt.layer(
13 data=source
14 ).transform_filter(
15 filter={"field": 'country',
16 "oneOf": ["China", "India", "United States", "Indonesia", "Brazil"]}
17 ).transform_filter(
18 filter={'field': 'year',
19 "oneOf": [1955, 2000]}
20 )
21
22 chart += alt.Chart().mark_line(color='#db646f').encode(
23 x='life_expect:Q',
24 y='country:N',
25 detail='country:N'
26 )
27 # Add points for life expectancy in 1955 & 2000
28 chart += alt.Chart().mark_point(
29 size=100,
30 opacity=1,
31 filled=True
32 ).encode(
33 x='life_expect:Q',
34 y='country:N',
35 color=alt.Color('year:O',
36 scale=alt.Scale(
37 domain=['1955', '2000'],
38 range=['#e6959c', '#911a24']
39 )
40 )
41 ).interactive()
42
43 chart
44
[end of altair/examples/ranged_dot_plot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/altair/examples/ranged_dot_plot.py b/altair/examples/ranged_dot_plot.py
--- a/altair/examples/ranged_dot_plot.py
+++ b/altair/examples/ranged_dot_plot.py
@@ -1,7 +1,7 @@
"""
Ranged Dot Plot
------------------
-This example shows a ranged dot plot that uses 'layer' to convey changing life expectancy for the five most populous countries (between 1955 and 2000).
+---------------
+This example shows a ranged dot plot to convey changing life expectancy for the five most populous countries (between 1955 and 2000).
"""
# category: other charts
import altair as alt
@@ -9,7 +9,7 @@
source = data.countries.url
-chart = alt.layer(
+chart = alt.Chart(
data=source
).transform_filter(
filter={"field": 'country',
@@ -19,13 +19,13 @@
"oneOf": [1955, 2000]}
)
-chart += alt.Chart().mark_line(color='#db646f').encode(
+line = chart.mark_line(color='#db646f').encode(
x='life_expect:Q',
y='country:N',
detail='country:N'
)
# Add points for life expectancy in 1955 & 2000
-chart += alt.Chart().mark_point(
+points = chart.mark_point(
size=100,
opacity=1,
filled=True
@@ -34,10 +34,10 @@
y='country:N',
color=alt.Color('year:O',
scale=alt.Scale(
- domain=['1955', '2000'],
+ domain=[1955, 2000],
range=['#e6959c', '#911a24']
)
)
).interactive()
-chart
+(line + points)
| {"golden_diff": "diff --git a/altair/examples/ranged_dot_plot.py b/altair/examples/ranged_dot_plot.py\n--- a/altair/examples/ranged_dot_plot.py\n+++ b/altair/examples/ranged_dot_plot.py\n@@ -1,7 +1,7 @@\n \"\"\"\n Ranged Dot Plot\n------------------\n-This example shows a ranged dot plot that uses 'layer' to convey changing life expectancy for the five most populous countries (between 1955 and 2000).\n+---------------\n+This example shows a ranged dot plot to convey changing life expectancy for the five most populous countries (between 1955 and 2000).\n \"\"\"\n # category: other charts\n import altair as alt\n@@ -9,7 +9,7 @@\n \n source = data.countries.url\n \n-chart = alt.layer(\n+chart = alt.Chart(\n data=source\n ).transform_filter(\n filter={\"field\": 'country',\n@@ -19,13 +19,13 @@\n \"oneOf\": [1955, 2000]}\n )\n \n-chart += alt.Chart().mark_line(color='#db646f').encode(\n+line = chart.mark_line(color='#db646f').encode(\n x='life_expect:Q',\n y='country:N',\n detail='country:N'\n )\n # Add points for life expectancy in 1955 & 2000\n-chart += alt.Chart().mark_point(\n+points = chart.mark_point(\n size=100,\n opacity=1,\n filled=True\n@@ -34,10 +34,10 @@\n y='country:N',\n color=alt.Color('year:O',\n scale=alt.Scale(\n- domain=['1955', '2000'],\n+ domain=[1955, 2000],\n range=['#e6959c', '#911a24']\n )\n )\n ).interactive()\n \n-chart\n+(line + points)\n", "issue": "Dots aren't showing up in ranged dot plot\n\r\n\n", "before_files": [{"content": "\"\"\"\nRanged Dot Plot\n-----------------\nThis example shows a ranged dot plot that uses 'layer' to convey changing life expectancy for the five most populous countries (between 1955 and 2000).\n\"\"\"\n# category: other charts\nimport altair as alt\nfrom vega_datasets import data\n\nsource = data.countries.url\n\nchart = alt.layer(\n data=source\n).transform_filter(\n filter={\"field\": 'country',\n \"oneOf\": [\"China\", \"India\", \"United States\", \"Indonesia\", \"Brazil\"]}\n).transform_filter(\n filter={'field': 'year',\n \"oneOf\": [1955, 2000]}\n)\n\nchart += alt.Chart().mark_line(color='#db646f').encode(\n x='life_expect:Q',\n y='country:N',\n detail='country:N'\n)\n# Add points for life expectancy in 1955 & 2000\nchart += alt.Chart().mark_point(\n size=100,\n opacity=1,\n filled=True\n).encode(\n x='life_expect:Q',\n y='country:N',\n color=alt.Color('year:O',\n scale=alt.Scale(\n domain=['1955', '2000'],\n range=['#e6959c', '#911a24']\n )\n )\n).interactive()\n\nchart\n", "path": "altair/examples/ranged_dot_plot.py"}]} | 1,020 | 436 |
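The patch makes two changes: it derives both layers from a single base `alt.Chart` instead of adding empty `alt.Chart()` objects to `alt.layer`, and it turns the colour-scale domain into numbers so it matches the numeric `year` values (with a string domain of `['1955', '2000']` the ordinal colour scale never matches the data, which is consistent with the missing dots). A reduced sketch of the points layer with the numeric domain, using invented numbers:

```python
import altair as alt
import pandas as pd

# Made-up stand-in for the countries dataset.
source = pd.DataFrame({
    'country': ['China', 'China', 'India', 'India'],
    'year': [1955, 2000, 1955, 2000],
    'life_expect': [44.0, 71.0, 37.0, 62.0],
})

points = alt.Chart(source).mark_point(size=100, opacity=1, filled=True).encode(
    x='life_expect:Q',
    y='country:N',
    color=alt.Color(
        'year:O',
        scale=alt.Scale(domain=[1955, 2000],  # numeric, matching the data
                        range=['#e6959c', '#911a24']),
    ),
)
points
```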
gh_patches_debug_19361 | rasdani/github-patches | git_diff | docker__docker-py-1248 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
create a sans-io docker client class and implement Client with blocking requests
use https://github.com/mikeal/deferred to create a sans-io version of Client (SansIOClient) that requires something like:
``` python
class SimpleStream(object):
def next(self) -> Deferred:
...
class IOAdapter(object):
def request(self, **kwargs) -> Deferred:
...
def stream(self, **kwargs) -> SimpleStream:
...
def unwrap_deferred(self, deferred: Deferred) -> Any:
...
```
and then implement it with something like:
``` python
class BlockingSimpleStream(SimpleStream):
def __init__(self, stream):
self.generator = _stream_helper(stream):
def next(self) -> Deferred:
return deferred.succeeded(next(self.generator))
class BlockingIOAdapter(IOAdapter):
def __init__(session: requests.Session):
self.session = session
def request(self, **kwargs) -> Deferred:
return deferred.execute(self.session.request, **kwargs)
def stream(self, **kwargs) -> BlockingSimpleStream:
return BlockingSimpleStream(self.session.request(**kwargs, stream=True))
def unwrap_deferred(self, d: Deferred):
return deferred.waitForDeferred(d).getResult()
```
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 from setuptools import setup
6
7
8 ROOT_DIR = os.path.dirname(__file__)
9 SOURCE_DIR = os.path.join(ROOT_DIR)
10
11 requirements = [
12 'requests >= 2.5.2, < 2.11',
13 'six >= 1.4.0',
14 'websocket-client >= 0.32.0',
15 'docker-pycreds >= 0.2.1'
16 ]
17
18 if sys.platform == 'win32':
19 requirements.append('pypiwin32 >= 219')
20
21 extras_require = {
22 ':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
23 # While not imported explicitly, the ipaddress module is required for
24 # ssl_match_hostname to verify hosts match with certificates via
25 # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
26 ':python_version < "3.3"': 'ipaddress >= 1.0.16',
27 }
28
29 version = None
30 exec(open('docker/version.py').read())
31
32 with open('./test-requirements.txt') as test_reqs_txt:
33 test_requirements = [line for line in test_reqs_txt]
34
35
36 setup(
37 name="docker-py",
38 version=version,
39 description="Python client for Docker.",
40 url='https://github.com/docker/docker-py/',
41 packages=[
42 'docker', 'docker.api', 'docker.auth', 'docker.transport',
43 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',
44 'docker.types',
45 ],
46 install_requires=requirements,
47 tests_require=test_requirements,
48 extras_require=extras_require,
49 zip_safe=False,
50 test_suite='tests',
51 classifiers=[
52 'Development Status :: 4 - Beta',
53 'Environment :: Other Environment',
54 'Intended Audience :: Developers',
55 'Operating System :: OS Independent',
56 'Programming Language :: Python',
57 'Programming Language :: Python :: 2',
58 'Programming Language :: Python :: 2.6',
59 'Programming Language :: Python :: 2.7',
60 'Programming Language :: Python :: 3',
61 'Programming Language :: Python :: 3.3',
62 'Programming Language :: Python :: 3.4',
63 'Programming Language :: Python :: 3.5',
64 'Topic :: Utilities',
65 'License :: OSI Approved :: Apache Software License',
66 ],
67 )
68
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,10 +33,20 @@
test_requirements = [line for line in test_reqs_txt]
+long_description = ''
+try:
+ with open('./README.rst') as readme_rst:
+ long_description = readme_rst.read()
+except IOError:
+ # README.rst is only generated on release. Its absence should not prevent
+ # setup.py from working properly.
+ pass
+
setup(
name="docker-py",
version=version,
description="Python client for Docker.",
+ long_description=long_description,
url='https://github.com/docker/docker-py/',
packages=[
'docker', 'docker.api', 'docker.auth', 'docker.transport',
@@ -64,4 +74,6 @@
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
+ maintainer='Joffrey F',
+ maintainer_email='[email protected]',
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,10 +33,20 @@\n test_requirements = [line for line in test_reqs_txt]\n \n \n+long_description = ''\n+try:\n+ with open('./README.rst') as readme_rst:\n+ long_description = readme_rst.read()\n+except IOError:\n+ # README.rst is only generated on release. Its absence should not prevent\n+ # setup.py from working properly.\n+ pass\n+\n setup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n+ long_description=long_description,\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n@@ -64,4 +74,6 @@\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n+ maintainer='Joffrey F',\n+ maintainer_email='[email protected]',\n )\n", "issue": "create a sans-io docker client class and impliment Client with blocking requests\nuse https://github.com/mikeal/deferred to create a sans-io version of Client (SansIOClient) that requires something like:\n\n``` python\nclass SimpleStream(object):\n def next(self) -> Deferred:\n ...\n\nclass IOAdapter(object):\n def request(self, **kwargs) -> Deferred:\n ...\n\n def stream(self, **kwargs) -> SimpleStream:\n ...\n\n def unwrap_deferred(self, deferred: Deferred) -> Any:\n ...\n```\n\nand then implement it with something like:\n\n``` python\nclass BlockingSimpleStream(SimpleStream):\n def __init__(self, stream):\n self.generator = _stream_helper(stream):\n def next(self) -> Deferred:\n return deferred.succeeded(next(self.generator))\n\nclass BlockingIOAdapter(IOAdapter):\n def __init__(session: requests.Session):\n self.session = session\n\n def request(self, **kwargs) -> Deferred:\n return deferred.execute(self.session.request, **kwargs)\n\n def stream(self, **kwargs) -> BlockingSimpleStream:\n return BlockingSimpleStream(self.session.request(**kwargs, stream=True))\n\n def unwrap_deferred(self, d: Deferred):\n return deferred.waitForDeferred(d).getResult()\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2, < 2.11',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n 'docker-pycreds >= 0.2.1'\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n # While not imported explicitly, the ipaddress module is required for\n # ssl_match_hostname to verify hosts match with certificates via\n # ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',\n 'docker.types',\n ],\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 
'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n", "path": "setup.py"}]} | 1,438 | 234 |
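The golden diff for this row only touches packaging metadata; the sans-io design sketched in the issue itself is the more interesting part. The snippet below restates that separation with nothing but the standard library: the client core hands back a `Future` and only the adapter decides how (or whether) to block. It is an analogy to the Deferred-based sketch above, not the API proposed there, and all names in it are invented.

```python
from concurrent.futures import Future


class BlockingAdapter:
    """Resolves requests immediately; an async adapter could instead
    complete the Future from an event loop."""

    def request(self, method, url):
        future = Future()
        # A real adapter would perform HTTP here; this one fakes a response.
        future.set_result({'method': method, 'url': url, 'status': 200})
        return future

    def unwrap(self, future):
        return future.result()  # the blocking strategy lives only in the adapter


class Client:
    """Sans-io-style core: builds requests and interprets responses,
    but never blocks on I/O itself."""

    def __init__(self, adapter):
        self.adapter = adapter

    def version(self):
        pending = self.adapter.request('GET', '/version')
        return self.adapter.unwrap(pending)


print(Client(BlockingAdapter()).version())
```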
gh_patches_debug_32168 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-4544 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
azure - event hub resources
Add event hub resource & implement firewall filter
</issue>
<code>
[start of tools/c7n_azure/c7n_azure/resources/event_hub.py]
1 # Copyright 2019 Microsoft Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from c7n_azure.provider import resources
16 from c7n_azure.resources.arm import ArmResourceManager
17
18
19 @resources.register('eventhub')
20 class EventHub(ArmResourceManager):
21 """Event Hub Resource
22
23 :example:
24
25 Finds all Event Hub resources in the subscription.
26
27 .. code-block:: yaml
28
29 policies:
30 - name: find-all-eventhubs
31 resource: azure.eventhub
32
33 """
34
35 class resource_type(ArmResourceManager.resource_type):
36 doc_groups = ['Events']
37
38 service = 'azure.mgmt.eventhub'
39 client = 'EventHubManagementClient'
40 enum_spec = ('namespaces', 'list', None)
41 default_report_fields = (
42 'name',
43 'location',
44 'resourceGroup',
45 'sku.name',
46 'properties.isAutoInflateEnabled'
47 )
48 resource_type = 'Microsoft.EventHub/namespaces'
49
[end of tools/c7n_azure/c7n_azure/resources/event_hub.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/c7n_azure/c7n_azure/resources/event_hub.py b/tools/c7n_azure/c7n_azure/resources/event_hub.py
--- a/tools/c7n_azure/c7n_azure/resources/event_hub.py
+++ b/tools/c7n_azure/c7n_azure/resources/event_hub.py
@@ -12,8 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
+
+from c7n_azure.filters import FirewallRulesFilter
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
+from netaddr import IPSet
@resources.register('eventhub')
@@ -22,13 +26,17 @@
:example:
- Finds all Event Hub resources in the subscription.
+ This policy will find all Event Hubs allowing traffic from 1.2.2.128/25 CIDR.
.. code-block:: yaml
policies:
- - name: find-all-eventhubs
- resource: azure.eventhub
+ - name: find-event-hub-allowing-subnet
+ resource: azure.eventhub
+ filters:
+ - type: firewall-rules
+ include:
+ - '1.2.2.128/25'
"""
@@ -46,3 +54,29 @@
'properties.isAutoInflateEnabled'
)
resource_type = 'Microsoft.EventHub/namespaces'
+
+
[email protected]_registry.register('firewall-rules')
+class EventHubFirewallRulesFilter(FirewallRulesFilter):
+
+ def __init__(self, data, manager=None):
+ super(EventHubFirewallRulesFilter, self).__init__(data, manager)
+ self._log = logging.getLogger('custodian.azure.eventhub')
+ self.client = None
+
+ @property
+ def log(self):
+ return self._log
+
+ def process(self, resources, event=None):
+ self.client = self.manager.get_client()
+ return super(EventHubFirewallRulesFilter, self).process(resources, event)
+
+ def _query_rules(self, resource):
+ query = self.client.namespaces.get_network_rule_set(
+ resource['resourceGroup'],
+ resource['name'])
+
+ resource_rules = IPSet([r.ip_mask for r in query.ip_rules])
+
+ return resource_rules
| {"golden_diff": "diff --git a/tools/c7n_azure/c7n_azure/resources/event_hub.py b/tools/c7n_azure/c7n_azure/resources/event_hub.py\n--- a/tools/c7n_azure/c7n_azure/resources/event_hub.py\n+++ b/tools/c7n_azure/c7n_azure/resources/event_hub.py\n@@ -12,8 +12,12 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import logging\n+\n+from c7n_azure.filters import FirewallRulesFilter\n from c7n_azure.provider import resources\n from c7n_azure.resources.arm import ArmResourceManager\n+from netaddr import IPSet\n \n \n @resources.register('eventhub')\n@@ -22,13 +26,17 @@\n \n :example:\n \n- Finds all Event Hub resources in the subscription.\n+ This policy will find all Event Hubs allowing traffic from 1.2.2.128/25 CIDR.\n \n .. code-block:: yaml\n \n policies:\n- - name: find-all-eventhubs\n- resource: azure.eventhub\n+ - name: find-event-hub-allowing-subnet\n+ resource: azure.eventhub\n+ filters:\n+ - type: firewall-rules\n+ include:\n+ - '1.2.2.128/25'\n \n \"\"\"\n \n@@ -46,3 +54,29 @@\n 'properties.isAutoInflateEnabled'\n )\n resource_type = 'Microsoft.EventHub/namespaces'\n+\n+\[email protected]_registry.register('firewall-rules')\n+class EventHubFirewallRulesFilter(FirewallRulesFilter):\n+\n+ def __init__(self, data, manager=None):\n+ super(EventHubFirewallRulesFilter, self).__init__(data, manager)\n+ self._log = logging.getLogger('custodian.azure.eventhub')\n+ self.client = None\n+\n+ @property\n+ def log(self):\n+ return self._log\n+\n+ def process(self, resources, event=None):\n+ self.client = self.manager.get_client()\n+ return super(EventHubFirewallRulesFilter, self).process(resources, event)\n+\n+ def _query_rules(self, resource):\n+ query = self.client.namespaces.get_network_rule_set(\n+ resource['resourceGroup'],\n+ resource['name'])\n+\n+ resource_rules = IPSet([r.ip_mask for r in query.ip_rules])\n+\n+ return resource_rules\n", "issue": "azure - event hub resources\nAdd event hub resource & implement firewall filter\n", "before_files": [{"content": "# Copyright 2019 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom c7n_azure.provider import resources\nfrom c7n_azure.resources.arm import ArmResourceManager\n\n\[email protected]('eventhub')\nclass EventHub(ArmResourceManager):\n \"\"\"Event Hub Resource\n\n :example:\n\n Finds all Event Hub resources in the subscription.\n\n .. code-block:: yaml\n\n policies:\n - name: find-all-eventhubs\n resource: azure.eventhub\n\n \"\"\"\n\n class resource_type(ArmResourceManager.resource_type):\n doc_groups = ['Events']\n\n service = 'azure.mgmt.eventhub'\n client = 'EventHubManagementClient'\n enum_spec = ('namespaces', 'list', None)\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup',\n 'sku.name',\n 'properties.isAutoInflateEnabled'\n )\n resource_type = 'Microsoft.EventHub/namespaces'\n", "path": "tools/c7n_azure/c7n_azure/resources/event_hub.py"}]} | 980 | 553 |
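The new filter reduces each Event Hub namespace to an `IPSet` built from its `ip_rules`; the include/equal comparison itself happens in the shared `FirewallRulesFilter` base class, which is not shown in this row. A standalone sketch of the kind of set arithmetic that comparison relies on, with invented rule values:

```python
from netaddr import IPSet

resource_rules = IPSet(['1.2.2.128/25', '10.0.0.0/24'])  # what the namespace allows
policy_include = IPSet(['1.2.2.128/25'])                  # what the policy asks about

# "include"-style check: is everything the policy lists actually open?
print(policy_include.issubset(resource_rules))  # True

# Intersection shows which of the requested ranges the resource permits.
print(resource_rules & policy_include)
```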
gh_patches_debug_20098 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3495 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Project list widget fails due to a date vs. datetime formatting error
</issue>
<code>
[start of akvo/rsr/templatetags/rsr_filters.py]
1 # -*- coding: utf-8 -*-
2 """
3 Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the Akvo RSR module.
6 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9 import datetime
10 import time
11
12 from django import template
13 from django.conf import settings
14 from decimal import Decimal, ROUND_HALF_UP
15
16 register = template.Library()
17
18 DECIMAL_PLACES = getattr(settings, 'DECIMALS_DECIMAL_PLACES', 2)
19
20
21 @register.filter
22 def get_item(dictionary, key):
23 """Enable lookup in dicts."""
24 return dictionary.get(key)
25
26
27 @register.filter
28 def string_to_date(value):
29 try:
30 time_format = "%Y-%m-%d %H:%M:%S"
31 fmt_time = time.strptime(value, time_format)
32 return datetime.datetime(*fmt_time[:6])
33 except:
34 return value
35
36 # http://stackoverflow.com/questions/250357/smart-truncate-in-python
37
38
39 @register.filter("smart_truncate")
40 def smart_truncate(content, length=100, suffix='...'):
41 if len(content) <= length:
42 return content
43 else:
44 return content[:length].rsplit(' ', 1)[0] + suffix
45
46
47 @register.filter
48 def round(value, decimal_places=DECIMAL_PLACES):
49 try:
50 value = Decimal(str(value))
51 except:
52 return u''
53 if settings.DECIMALS_DEBUG:
54 decimal_result = value.quantize(Decimal(10) ** -decimal_places)
55 return decimal_result
56 else:
57 decimal_result = value.quantize(Decimal(10), ROUND_HALF_UP)
58 return 0 if decimal_result <= 0 else decimal_result
59 round.is_safe = True
60
61
62 @register.filter
63 def countries_list(obj):
64 """ return a list of the countries of all locations of an object.
65 currently works for Project and Organisation """
66 return obj.locations.values_list('country__name', flat=True)
67
68
69 @register.filter
70 def continents_list(obj):
71 """return a list of the continents of all locations of an object"
72 currently works for Project and Organisation """
73 return obj.locations.values_list('country__continent', flat=True)
74
75
76 @register.filter
77 def rsr_sorted_set(iterable):
78 """ create a set of the iterable to eliminate duplicates
79 then make a list of the set and sort it
80 used with countries_list and continents_list
81 """
82 set_list = list(frozenset(iterable))
83 set_list.sort()
84 return set_list
85
86
87 @register.filter
88 def load_partnerships_and_orgs(project):
89 return project.partnerships.prefetch_related('organisation').all()
90
[end of akvo/rsr/templatetags/rsr_filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/templatetags/rsr_filters.py b/akvo/rsr/templatetags/rsr_filters.py
--- a/akvo/rsr/templatetags/rsr_filters.py
+++ b/akvo/rsr/templatetags/rsr_filters.py
@@ -6,9 +6,6 @@
For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
"""
-import datetime
-import time
-
from django import template
from django.conf import settings
from decimal import Decimal, ROUND_HALF_UP
@@ -24,18 +21,7 @@
return dictionary.get(key)
[email protected]
-def string_to_date(value):
- try:
- time_format = "%Y-%m-%d %H:%M:%S"
- fmt_time = time.strptime(value, time_format)
- return datetime.datetime(*fmt_time[:6])
- except:
- return value
-
# http://stackoverflow.com/questions/250357/smart-truncate-in-python
-
-
@register.filter("smart_truncate")
def smart_truncate(content, length=100, suffix='...'):
if len(content) <= length:
| {"golden_diff": "diff --git a/akvo/rsr/templatetags/rsr_filters.py b/akvo/rsr/templatetags/rsr_filters.py\n--- a/akvo/rsr/templatetags/rsr_filters.py\n+++ b/akvo/rsr/templatetags/rsr_filters.py\n@@ -6,9 +6,6 @@\n For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \"\"\"\n \n-import datetime\n-import time\n-\n from django import template\n from django.conf import settings\n from decimal import Decimal, ROUND_HALF_UP\n@@ -24,18 +21,7 @@\n return dictionary.get(key)\n \n \[email protected]\n-def string_to_date(value):\n- try:\n- time_format = \"%Y-%m-%d %H:%M:%S\"\n- fmt_time = time.strptime(value, time_format)\n- return datetime.datetime(*fmt_time[:6])\n- except:\n- return value\n-\n # http://stackoverflow.com/questions/250357/smart-truncate-in-python\n-\n-\n @register.filter(\"smart_truncate\")\n def smart_truncate(content, length=100, suffix='...'):\n if len(content) <= length:\n", "issue": "Project list widget fails due to a date vs. datetime formatting error\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport datetime\nimport time\n\nfrom django import template\nfrom django.conf import settings\nfrom decimal import Decimal, ROUND_HALF_UP\n\nregister = template.Library()\n\nDECIMAL_PLACES = getattr(settings, 'DECIMALS_DECIMAL_PLACES', 2)\n\n\[email protected]\ndef get_item(dictionary, key):\n \"\"\"Enable lookup in dicts.\"\"\"\n return dictionary.get(key)\n\n\[email protected]\ndef string_to_date(value):\n try:\n time_format = \"%Y-%m-%d %H:%M:%S\"\n fmt_time = time.strptime(value, time_format)\n return datetime.datetime(*fmt_time[:6])\n except:\n return value\n\n# http://stackoverflow.com/questions/250357/smart-truncate-in-python\n\n\[email protected](\"smart_truncate\")\ndef smart_truncate(content, length=100, suffix='...'):\n if len(content) <= length:\n return content\n else:\n return content[:length].rsplit(' ', 1)[0] + suffix\n\n\[email protected]\ndef round(value, decimal_places=DECIMAL_PLACES):\n try:\n value = Decimal(str(value))\n except:\n return u''\n if settings.DECIMALS_DEBUG:\n decimal_result = value.quantize(Decimal(10) ** -decimal_places)\n return decimal_result\n else:\n decimal_result = value.quantize(Decimal(10), ROUND_HALF_UP)\n return 0 if decimal_result <= 0 else decimal_result\nround.is_safe = True\n\n\[email protected]\ndef countries_list(obj):\n \"\"\" return a list of the countries of all locations of an object.\n currently works for Project and Organisation \"\"\"\n return obj.locations.values_list('country__name', flat=True)\n\n\[email protected]\ndef continents_list(obj):\n \"\"\"return a list of the continents of all locations of an object\"\n currently works for Project and Organisation \"\"\"\n return obj.locations.values_list('country__continent', flat=True)\n\n\[email protected]\ndef rsr_sorted_set(iterable):\n \"\"\" create a set of the iterable to eliminate duplicates\n then make a list of the set and sort it\n used with countries_list and continents_list\n \"\"\"\n set_list = list(frozenset(iterable))\n set_list.sort()\n return set_list\n\n\[email protected]\ndef load_partnerships_and_orgs(project):\n return project.partnerships.prefetch_related('organisation').all()\n", "path": 
"akvo/rsr/templatetags/rsr_filters.py"}]} | 1,329 | 269 |
gh_patches_debug_21555 | rasdani/github-patches | git_diff | getpelican__pelican-845 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Conflicts rendering Category pages when category is not defined in consistent case
I was testing a jinja macro that dealt with creating links for categories.
I noted that if you define a category in one article as `Category: Something` and in another article as `Category: something` that these are treated as separate categories, however, when your category page is rendered, there is only the lowecase url, e.g. `category/something.html`. This will only associate with the articles with meta data defined as `Category: something` and not anywhere where it is defined with uppercase since there is no `category/Something.html`.
I am not sure if making this case insensitive would break code. Certainly, it would be unclear when printing the category name which case to use. From an intelligent template process, you would set you case using CSS style attribute to be sure it was the way you want, and it could always render categories in lower case.
Otherwise, it might just be sufficient to put this into the documentation. I always tend to capitalize by categories, but some people might not notice and wonder why some articles are missing. I have not yet tested this, but I would imagine the same issue exists for tags.
</issue>
<code>
[start of pelican/urlwrappers.py]
1 import os
2 import functools
3 import logging
4
5 import six
6
7 from pelican.utils import (slugify, python_2_unicode_compatible)
8
9 logger = logging.getLogger(__name__)
10
11
12 @python_2_unicode_compatible
13 @functools.total_ordering
14 class URLWrapper(object):
15 def __init__(self, name, settings):
16 self.name = name
17 self.slug = slugify(self.name)
18 self.settings = settings
19
20 def as_dict(self):
21 return self.__dict__
22
23 def __hash__(self):
24 return hash(self.name)
25
26 def _key(self):
27 return self.name
28
29 def _normalize_key(self, key):
30 return six.text_type(key)
31
32 def __eq__(self, other):
33 return self._key() == self._normalize_key(other)
34
35 def __ne__(self, other):
36 return self._key() != self._normalize_key(other)
37
38 def __lt__(self, other):
39 return self._key() < self._normalize_key(other)
40
41 def __str__(self):
42 return self.name
43
44 def __repr__(self):
45 return '<{} {}>'.format(type(self).__name__, str(self))
46
47 def _from_settings(self, key, get_page_name=False):
48 """Returns URL information as defined in settings.
49
50 When get_page_name=True returns URL without anything after {slug} e.g.
51 if in settings: CATEGORY_URL="cat/{slug}.html" this returns
52 "cat/{slug}" Useful for pagination.
53
54 """
55 setting = "%s_%s" % (self.__class__.__name__.upper(), key)
56 value = self.settings[setting]
57 if not isinstance(value, six.string_types):
58 logger.warning('%s is set to %s' % (setting, value))
59 return value
60 else:
61 if get_page_name:
62 return os.path.splitext(value)[0].format(**self.as_dict())
63 else:
64 return value.format(**self.as_dict())
65
66 page_name = property(functools.partial(_from_settings, key='URL',
67 get_page_name=True))
68 url = property(functools.partial(_from_settings, key='URL'))
69 save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
70
71
72 class Category(URLWrapper):
73 pass
74
75
76 class Tag(URLWrapper):
77 def __init__(self, name, *args, **kwargs):
78 super(Tag, self).__init__(name.strip(), *args, **kwargs)
79
80
81 class Author(URLWrapper):
82 pass
83
[end of pelican/urlwrappers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py
--- a/pelican/urlwrappers.py
+++ b/pelican/urlwrappers.py
@@ -13,21 +13,35 @@
@functools.total_ordering
class URLWrapper(object):
def __init__(self, name, settings):
+ # next 2 lines are redundant with the setter of the name property
+ # but are here for clarity
+ self._name = name
+ self.slug = slugify(name)
self.name = name
- self.slug = slugify(self.name)
self.settings = settings
+ @property
+ def name(self):
+ return self._name
+
+ @name.setter
+ def name(self, name):
+ self._name = name
+ self.slug = slugify(name)
+
def as_dict(self):
- return self.__dict__
+ d = self.__dict__
+ d['name'] = self.name
+ return d
def __hash__(self):
- return hash(self.name)
+ return hash(self.slug)
def _key(self):
- return self.name
+ return self.slug
def _normalize_key(self, key):
- return six.text_type(key)
+ return six.text_type(slugify(key))
def __eq__(self, other):
return self._key() == self._normalize_key(other)
| {"golden_diff": "diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py\n--- a/pelican/urlwrappers.py\n+++ b/pelican/urlwrappers.py\n@@ -13,21 +13,35 @@\n @functools.total_ordering\n class URLWrapper(object):\n def __init__(self, name, settings):\n+ # next 2 lines are redundant with the setter of the name property\n+ # but are here for clarity\n+ self._name = name\n+ self.slug = slugify(name)\n self.name = name\n- self.slug = slugify(self.name)\n self.settings = settings\n \n+ @property\n+ def name(self):\n+ return self._name\n+\n+ @name.setter\n+ def name(self, name):\n+ self._name = name\n+ self.slug = slugify(name)\n+\n def as_dict(self):\n- return self.__dict__\n+ d = self.__dict__\n+ d['name'] = self.name\n+ return d\n \n def __hash__(self):\n- return hash(self.name)\n+ return hash(self.slug)\n \n def _key(self):\n- return self.name\n+ return self.slug\n \n def _normalize_key(self, key):\n- return six.text_type(key)\n+ return six.text_type(slugify(key))\n \n def __eq__(self, other):\n return self._key() == self._normalize_key(other)\n", "issue": "Conflicts rendering Category pages when category is not defined in consistent case\nI was testing a jinja macro that dealt with creating links for categories.\n\nI noted that if you define a category in one article as `Category: Something` and in another article as `Category: something` that these are treated as separate categories, however, when your category page is rendered, there is only the lowecase url, e.g. `category/something.html`. This will only associate with the articles with meta data defined as `Category: something` and not anywhere where it is defined with uppercase since there is no `category/Something.html`.\n\nI am not sure if making this case insensitive would break code. Certainly, it would be unclear when printing the category name which case to use. From an intelligent template process, you would set you case using CSS style attribute to be sure it was the way you want, and it could always render categories in lower case.\n\nOtherwise, it might just be sufficient to put this into the documentation. I always tend to capitalize by categories, but some people might not notice and wonder why some articles are missing. 
I have not yet tested this, but I would imagine the same issue exists for tags.\n\n", "before_files": [{"content": "import os\nimport functools\nimport logging\n\nimport six\n\nfrom pelican.utils import (slugify, python_2_unicode_compatible)\n\nlogger = logging.getLogger(__name__)\n\n\n@python_2_unicode_compatible\[email protected]_ordering\nclass URLWrapper(object):\n def __init__(self, name, settings):\n self.name = name\n self.slug = slugify(self.name)\n self.settings = settings\n\n def as_dict(self):\n return self.__dict__\n\n def __hash__(self):\n return hash(self.name)\n\n def _key(self):\n return self.name\n\n def _normalize_key(self, key):\n return six.text_type(key)\n\n def __eq__(self, other):\n return self._key() == self._normalize_key(other)\n\n def __ne__(self, other):\n return self._key() != self._normalize_key(other)\n\n def __lt__(self, other):\n return self._key() < self._normalize_key(other)\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return '<{} {}>'.format(type(self).__name__, str(self))\n\n def _from_settings(self, key, get_page_name=False):\n \"\"\"Returns URL information as defined in settings.\n\n When get_page_name=True returns URL without anything after {slug} e.g.\n if in settings: CATEGORY_URL=\"cat/{slug}.html\" this returns\n \"cat/{slug}\" Useful for pagination.\n\n \"\"\"\n setting = \"%s_%s\" % (self.__class__.__name__.upper(), key)\n value = self.settings[setting]\n if not isinstance(value, six.string_types):\n logger.warning('%s is set to %s' % (setting, value))\n return value\n else:\n if get_page_name:\n return os.path.splitext(value)[0].format(**self.as_dict())\n else:\n return value.format(**self.as_dict())\n\n page_name = property(functools.partial(_from_settings, key='URL',\n get_page_name=True))\n url = property(functools.partial(_from_settings, key='URL'))\n save_as = property(functools.partial(_from_settings, key='SAVE_AS'))\n\n\nclass Category(URLWrapper):\n pass\n\n\nclass Tag(URLWrapper):\n def __init__(self, name, *args, **kwargs):\n super(Tag, self).__init__(name.strip(), *args, **kwargs)\n\n\nclass Author(URLWrapper):\n pass\n", "path": "pelican/urlwrappers.py"}]} | 1,479 | 321 |
gh_patches_debug_38335 | rasdani/github-patches | git_diff | ethereum__consensus-specs-863 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename `Transactions` back to `Operations`
A few of us implementers have been talking about the naming of `Transactions` and believe it is best renamed back to `Operations` to lower confusion and potentially mistaking `Transactions` with transactions in the classical sense. The only thing that should be known as a `Transaction` is a `Transfer`.
If not, it would be great to know what the reason behind the rename was.
</issue>
<code>
[start of utils/phase0/state_transition.py]
1 from . import spec
2
3
4 from typing import ( # noqa: F401
5 Any,
6 Callable,
7 List,
8 NewType,
9 Tuple,
10 )
11
12 from .spec import (
13 BeaconState,
14 BeaconBlock,
15 )
16
17
18 def expected_deposit_count(state: BeaconState) -> int:
19 return min(
20 spec.MAX_DEPOSITS,
21 state.latest_eth1_data.deposit_count - state.deposit_index
22 )
23
24
25 def process_transaction_type(state: BeaconState,
26 transactions: List[Any],
27 max_transactions: int,
28 tx_fn: Callable[[BeaconState, Any], None]) -> None:
29 assert len(transactions) <= max_transactions
30 for transaction in transactions:
31 tx_fn(state, transaction)
32
33
34 def process_transactions(state: BeaconState, block: BeaconBlock) -> None:
35 process_transaction_type(
36 state,
37 block.body.proposer_slashings,
38 spec.MAX_PROPOSER_SLASHINGS,
39 spec.process_proposer_slashing,
40 )
41
42 process_transaction_type(
43 state,
44 block.body.attester_slashings,
45 spec.MAX_ATTESTER_SLASHINGS,
46 spec.process_attester_slashing,
47 )
48
49 process_transaction_type(
50 state,
51 block.body.attestations,
52 spec.MAX_ATTESTATIONS,
53 spec.process_attestation,
54 )
55
56 assert len(block.body.deposits) == expected_deposit_count(state)
57 process_transaction_type(
58 state,
59 block.body.deposits,
60 spec.MAX_DEPOSITS,
61 spec.process_deposit,
62 )
63
64 process_transaction_type(
65 state,
66 block.body.voluntary_exits,
67 spec.MAX_VOLUNTARY_EXITS,
68 spec.process_voluntary_exit,
69 )
70
71 assert len(block.body.transfers) == len(set(block.body.transfers))
72 process_transaction_type(
73 state,
74 block.body.transfers,
75 spec.MAX_TRANSFERS,
76 spec.process_transfer,
77 )
78
79
80 def process_block(state: BeaconState,
81 block: BeaconBlock,
82 verify_state_root: bool=False) -> None:
83 spec.process_block_header(state, block)
84 spec.process_randao(state, block)
85 spec.process_eth1_data(state, block)
86
87 process_transactions(state, block)
88 if verify_state_root:
89 spec.verify_block_state_root(state, block)
90
91
92 def process_epoch_transition(state: BeaconState) -> None:
93 spec.update_justification_and_finalization(state)
94 spec.process_crosslinks(state)
95 spec.maybe_reset_eth1_period(state)
96 spec.apply_rewards(state)
97 spec.process_ejections(state)
98 spec.update_registry(state)
99 spec.process_slashings(state)
100 spec.process_exit_queue(state)
101 spec.finish_epoch_update(state)
102
103
104 def state_transition(state: BeaconState,
105 block: BeaconBlock,
106 verify_state_root: bool=False) -> BeaconState:
107 while state.slot < block.slot:
108 spec.cache_state(state)
109 if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:
110 process_epoch_transition(state)
111 spec.advance_slot(state)
112 if block.slot == state.slot:
113 process_block(state, block, verify_state_root)
114
[end of utils/phase0/state_transition.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py
--- a/utils/phase0/state_transition.py
+++ b/utils/phase0/state_transition.py
@@ -22,31 +22,31 @@
)
-def process_transaction_type(state: BeaconState,
- transactions: List[Any],
- max_transactions: int,
- tx_fn: Callable[[BeaconState, Any], None]) -> None:
- assert len(transactions) <= max_transactions
- for transaction in transactions:
- tx_fn(state, transaction)
+def process_operation_type(state: BeaconState,
+ operations: List[Any],
+ max_operations: int,
+ tx_fn: Callable[[BeaconState, Any], None]) -> None:
+ assert len(operations) <= max_operations
+ for operation in operations:
+ tx_fn(state, operation)
-def process_transactions(state: BeaconState, block: BeaconBlock) -> None:
- process_transaction_type(
+def process_operations(state: BeaconState, block: BeaconBlock) -> None:
+ process_operation_type(
state,
block.body.proposer_slashings,
spec.MAX_PROPOSER_SLASHINGS,
spec.process_proposer_slashing,
)
- process_transaction_type(
+ process_operation_type(
state,
block.body.attester_slashings,
spec.MAX_ATTESTER_SLASHINGS,
spec.process_attester_slashing,
)
- process_transaction_type(
+ process_operation_type(
state,
block.body.attestations,
spec.MAX_ATTESTATIONS,
@@ -54,14 +54,14 @@
)
assert len(block.body.deposits) == expected_deposit_count(state)
- process_transaction_type(
+ process_operation_type(
state,
block.body.deposits,
spec.MAX_DEPOSITS,
spec.process_deposit,
)
- process_transaction_type(
+ process_operation_type(
state,
block.body.voluntary_exits,
spec.MAX_VOLUNTARY_EXITS,
@@ -69,7 +69,7 @@
)
assert len(block.body.transfers) == len(set(block.body.transfers))
- process_transaction_type(
+ process_operation_type(
state,
block.body.transfers,
spec.MAX_TRANSFERS,
@@ -84,7 +84,7 @@
spec.process_randao(state, block)
spec.process_eth1_data(state, block)
- process_transactions(state, block)
+ process_operations(state, block)
if verify_state_root:
spec.verify_block_state_root(state, block)
| {"golden_diff": "diff --git a/utils/phase0/state_transition.py b/utils/phase0/state_transition.py\n--- a/utils/phase0/state_transition.py\n+++ b/utils/phase0/state_transition.py\n@@ -22,31 +22,31 @@\n )\n \n \n-def process_transaction_type(state: BeaconState,\n- transactions: List[Any],\n- max_transactions: int,\n- tx_fn: Callable[[BeaconState, Any], None]) -> None:\n- assert len(transactions) <= max_transactions\n- for transaction in transactions:\n- tx_fn(state, transaction)\n+def process_operation_type(state: BeaconState,\n+ operations: List[Any],\n+ max_operations: int,\n+ tx_fn: Callable[[BeaconState, Any], None]) -> None:\n+ assert len(operations) <= max_operations\n+ for operation in operations:\n+ tx_fn(state, operation)\n \n \n-def process_transactions(state: BeaconState, block: BeaconBlock) -> None:\n- process_transaction_type(\n+def process_operations(state: BeaconState, block: BeaconBlock) -> None:\n+ process_operation_type(\n state,\n block.body.proposer_slashings,\n spec.MAX_PROPOSER_SLASHINGS,\n spec.process_proposer_slashing,\n )\n \n- process_transaction_type(\n+ process_operation_type(\n state,\n block.body.attester_slashings,\n spec.MAX_ATTESTER_SLASHINGS,\n spec.process_attester_slashing,\n )\n \n- process_transaction_type(\n+ process_operation_type(\n state,\n block.body.attestations,\n spec.MAX_ATTESTATIONS,\n@@ -54,14 +54,14 @@\n )\n \n assert len(block.body.deposits) == expected_deposit_count(state)\n- process_transaction_type(\n+ process_operation_type(\n state,\n block.body.deposits,\n spec.MAX_DEPOSITS,\n spec.process_deposit,\n )\n \n- process_transaction_type(\n+ process_operation_type(\n state,\n block.body.voluntary_exits,\n spec.MAX_VOLUNTARY_EXITS,\n@@ -69,7 +69,7 @@\n )\n \n assert len(block.body.transfers) == len(set(block.body.transfers))\n- process_transaction_type(\n+ process_operation_type(\n state,\n block.body.transfers,\n spec.MAX_TRANSFERS,\n@@ -84,7 +84,7 @@\n spec.process_randao(state, block)\n spec.process_eth1_data(state, block)\n \n- process_transactions(state, block)\n+ process_operations(state, block)\n if verify_state_root:\n spec.verify_block_state_root(state, block)\n", "issue": "Rename `Transactions` back to `Operations`\nA few of us implementers have been talking about the naming of `Transactions` and believe it is best renamed back to `Operations` to lower confusion and potentially mistaking `Transactions` with transactions in the classical sense. The only thing that should be known as a `Transaction` is a `Transfer`.\r\n\r\nIf not, it would be great to know what the reason behind the rename was.\r\n\n", "before_files": [{"content": "from . 
import spec\n\n\nfrom typing import ( # noqa: F401\n Any,\n Callable,\n List,\n NewType,\n Tuple,\n)\n\nfrom .spec import (\n BeaconState,\n BeaconBlock,\n)\n\n\ndef expected_deposit_count(state: BeaconState) -> int:\n return min(\n spec.MAX_DEPOSITS,\n state.latest_eth1_data.deposit_count - state.deposit_index\n )\n\n\ndef process_transaction_type(state: BeaconState,\n transactions: List[Any],\n max_transactions: int,\n tx_fn: Callable[[BeaconState, Any], None]) -> None:\n assert len(transactions) <= max_transactions\n for transaction in transactions:\n tx_fn(state, transaction)\n\n\ndef process_transactions(state: BeaconState, block: BeaconBlock) -> None:\n process_transaction_type(\n state,\n block.body.proposer_slashings,\n spec.MAX_PROPOSER_SLASHINGS,\n spec.process_proposer_slashing,\n )\n\n process_transaction_type(\n state,\n block.body.attester_slashings,\n spec.MAX_ATTESTER_SLASHINGS,\n spec.process_attester_slashing,\n )\n\n process_transaction_type(\n state,\n block.body.attestations,\n spec.MAX_ATTESTATIONS,\n spec.process_attestation,\n )\n\n assert len(block.body.deposits) == expected_deposit_count(state)\n process_transaction_type(\n state,\n block.body.deposits,\n spec.MAX_DEPOSITS,\n spec.process_deposit,\n )\n\n process_transaction_type(\n state,\n block.body.voluntary_exits,\n spec.MAX_VOLUNTARY_EXITS,\n spec.process_voluntary_exit,\n )\n\n assert len(block.body.transfers) == len(set(block.body.transfers))\n process_transaction_type(\n state,\n block.body.transfers,\n spec.MAX_TRANSFERS,\n spec.process_transfer,\n )\n\n\ndef process_block(state: BeaconState,\n block: BeaconBlock,\n verify_state_root: bool=False) -> None:\n spec.process_block_header(state, block)\n spec.process_randao(state, block)\n spec.process_eth1_data(state, block)\n\n process_transactions(state, block)\n if verify_state_root:\n spec.verify_block_state_root(state, block)\n\n\ndef process_epoch_transition(state: BeaconState) -> None:\n spec.update_justification_and_finalization(state)\n spec.process_crosslinks(state)\n spec.maybe_reset_eth1_period(state)\n spec.apply_rewards(state)\n spec.process_ejections(state)\n spec.update_registry(state)\n spec.process_slashings(state)\n spec.process_exit_queue(state)\n spec.finish_epoch_update(state)\n\n\ndef state_transition(state: BeaconState,\n block: BeaconBlock,\n verify_state_root: bool=False) -> BeaconState:\n while state.slot < block.slot:\n spec.cache_state(state)\n if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:\n process_epoch_transition(state)\n spec.advance_slot(state)\n if block.slot == state.slot:\n process_block(state, block, verify_state_root)\n", "path": "utils/phase0/state_transition.py"}]} | 1,525 | 570 |
gh_patches_debug_30729 | rasdani/github-patches | git_diff | wearepal__EthicML-337 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SVM Kernel name
Clearly [this](https://github.com/predictive-analytics-lab/EthicML/blob/f7fcf435b5807ef9931f3ff3b259fc7cc4b38da8/ethicml/algorithms/inprocess/svm.py#L20) is not right
</issue>
<code>
[start of ethicml/algorithms/inprocess/svm.py]
1 """Wrapper for SKLearn implementation of SVM."""
2 from typing import Optional, Union
3
4 import pandas as pd
5 from sklearn.svm import SVC, LinearSVC
6
7 from ethicml.common import implements
8 from ethicml.utility import DataTuple, Prediction, TestTuple
9
10 from .in_algorithm import InAlgorithm
11
12 __all__ = ["SVM"]
13
14
15 class SVM(InAlgorithm):
16 """Support Vector Machine."""
17
18 def __init__(self, C: Optional[float] = None, kernel: Optional[str] = None):
19 """Init SVM."""
20 kernel_name = f" (kernel)" if kernel is not None else ""
21 super().__init__(name="SVM" + kernel_name, is_fairness_algo=False)
22 self.C = SVC().C if C is None else C
23 self.kernel = SVC().kernel if kernel is None else kernel
24
25 @implements(InAlgorithm)
26 def run(self, train: DataTuple, test: Union[DataTuple, TestTuple]) -> Prediction:
27 clf = select_svm(self.C, self.kernel)
28 clf.fit(train.x, train.y.to_numpy().ravel())
29 return Prediction(hard=pd.Series(clf.predict(test.x)))
30
31
32 def select_svm(C: float, kernel: str) -> SVC:
33 """Select the appropriate SVM model for the given parameters."""
34 if kernel == "linear":
35 return LinearSVC(C=C, dual=False, tol=1e-12, random_state=888)
36 return SVC(C=C, kernel=kernel, gamma="auto", random_state=888)
37
[end of ethicml/algorithms/inprocess/svm.py]
[start of ethicml/algorithms/inprocess/logistic_regression.py]
1 """Wrapper around Sci-Kit Learn Logistic Regression."""
2 from typing import Optional
3
4 import pandas as pd
5 from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
6 from sklearn.model_selection import KFold
7
8 from ethicml.common import implements
9 from ethicml.utility import DataTuple, Prediction, SoftPrediction, TestTuple
10
11 from .in_algorithm import InAlgorithm
12
13 __all__ = ["LR", "LRCV", "LRProb"]
14
15
16 class LR(InAlgorithm):
17 """Logistic regression with hard predictions."""
18
19 def __init__(self, C: Optional[float] = None):
20 """Init LR."""
21 self.C = LogisticRegression().C if C is None else C
22 super().__init__(name=f"Logistic Regression, C={self.C}", is_fairness_algo=False)
23
24 @implements(InAlgorithm)
25 def run(self, train: DataTuple, test: TestTuple) -> Prediction:
26 clf = LogisticRegression(solver="liblinear", random_state=888, C=self.C, multi_class="auto")
27 clf.fit(train.x, train.y.to_numpy().ravel())
28 return Prediction(hard=pd.Series(clf.predict(test.x)))
29
30
31 class LRProb(InAlgorithm):
32 """Logistic regression with soft output."""
33
34 def __init__(self, C: Optional[int] = None):
35 """Init LRProb."""
36 self.C = LogisticRegression().C if C is None else C
37 super().__init__(name=f"Logistic Regression Prob, C={self.C}", is_fairness_algo=False)
38
39 @implements(InAlgorithm)
40 def run(self, train: DataTuple, test: TestTuple) -> SoftPrediction:
41 clf = LogisticRegression(solver="liblinear", random_state=888, C=self.C, multi_class="auto")
42 clf.fit(train.x, train.y.to_numpy().ravel())
43 return SoftPrediction(soft=pd.Series(clf.predict_proba(test.x)[:, 1]))
44
45
46 class LRCV(InAlgorithm):
47 """Kind of a cheap hack for now, but gives a proper cross-valudeted LR."""
48
49 def __init__(self) -> None:
50 """Init LRCV."""
51 super().__init__(name="LRCV", is_fairness_algo=False)
52
53 @implements(InAlgorithm)
54 def run(self, train: DataTuple, test: TestTuple) -> Prediction:
55 folder = KFold(n_splits=3, shuffle=False)
56 clf = LogisticRegressionCV(
57 cv=folder, n_jobs=-1, random_state=888, solver="liblinear", multi_class="auto"
58 )
59 clf.fit(train.x, train.y.to_numpy().ravel())
60 return Prediction(hard=pd.Series(clf.predict(test.x)), info=dict(C=clf.C_[0]))
61
[end of ethicml/algorithms/inprocess/logistic_regression.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ethicml/algorithms/inprocess/logistic_regression.py b/ethicml/algorithms/inprocess/logistic_regression.py
--- a/ethicml/algorithms/inprocess/logistic_regression.py
+++ b/ethicml/algorithms/inprocess/logistic_regression.py
@@ -19,7 +19,7 @@
def __init__(self, C: Optional[float] = None):
"""Init LR."""
self.C = LogisticRegression().C if C is None else C
- super().__init__(name=f"Logistic Regression, C={self.C}", is_fairness_algo=False)
+ super().__init__(name=f"Logistic Regression (C={self.C})", is_fairness_algo=False)
@implements(InAlgorithm)
def run(self, train: DataTuple, test: TestTuple) -> Prediction:
@@ -34,7 +34,7 @@
def __init__(self, C: Optional[int] = None):
"""Init LRProb."""
self.C = LogisticRegression().C if C is None else C
- super().__init__(name=f"Logistic Regression Prob, C={self.C}", is_fairness_algo=False)
+ super().__init__(name=f"Logistic Regression Prob (C={self.C})", is_fairness_algo=False)
@implements(InAlgorithm)
def run(self, train: DataTuple, test: TestTuple) -> SoftPrediction:
diff --git a/ethicml/algorithms/inprocess/svm.py b/ethicml/algorithms/inprocess/svm.py
--- a/ethicml/algorithms/inprocess/svm.py
+++ b/ethicml/algorithms/inprocess/svm.py
@@ -17,7 +17,7 @@
def __init__(self, C: Optional[float] = None, kernel: Optional[str] = None):
"""Init SVM."""
- kernel_name = f" (kernel)" if kernel is not None else ""
+ kernel_name = f" ({kernel})" if kernel is not None else ""
super().__init__(name="SVM" + kernel_name, is_fairness_algo=False)
self.C = SVC().C if C is None else C
self.kernel = SVC().kernel if kernel is None else kernel
| {"golden_diff": "diff --git a/ethicml/algorithms/inprocess/logistic_regression.py b/ethicml/algorithms/inprocess/logistic_regression.py\n--- a/ethicml/algorithms/inprocess/logistic_regression.py\n+++ b/ethicml/algorithms/inprocess/logistic_regression.py\n@@ -19,7 +19,7 @@\n def __init__(self, C: Optional[float] = None):\n \"\"\"Init LR.\"\"\"\n self.C = LogisticRegression().C if C is None else C\n- super().__init__(name=f\"Logistic Regression, C={self.C}\", is_fairness_algo=False)\n+ super().__init__(name=f\"Logistic Regression (C={self.C})\", is_fairness_algo=False)\n \n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> Prediction:\n@@ -34,7 +34,7 @@\n def __init__(self, C: Optional[int] = None):\n \"\"\"Init LRProb.\"\"\"\n self.C = LogisticRegression().C if C is None else C\n- super().__init__(name=f\"Logistic Regression Prob, C={self.C}\", is_fairness_algo=False)\n+ super().__init__(name=f\"Logistic Regression Prob (C={self.C})\", is_fairness_algo=False)\n \n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> SoftPrediction:\ndiff --git a/ethicml/algorithms/inprocess/svm.py b/ethicml/algorithms/inprocess/svm.py\n--- a/ethicml/algorithms/inprocess/svm.py\n+++ b/ethicml/algorithms/inprocess/svm.py\n@@ -17,7 +17,7 @@\n \n def __init__(self, C: Optional[float] = None, kernel: Optional[str] = None):\n \"\"\"Init SVM.\"\"\"\n- kernel_name = f\" (kernel)\" if kernel is not None else \"\"\n+ kernel_name = f\" ({kernel})\" if kernel is not None else \"\"\n super().__init__(name=\"SVM\" + kernel_name, is_fairness_algo=False)\n self.C = SVC().C if C is None else C\n self.kernel = SVC().kernel if kernel is None else kernel\n", "issue": "SVM Kernel name\nClearly [this](https://github.com/predictive-analytics-lab/EthicML/blob/f7fcf435b5807ef9931f3ff3b259fc7cc4b38da8/ethicml/algorithms/inprocess/svm.py#L20) is not right \n", "before_files": [{"content": "\"\"\"Wrapper for SKLearn implementation of SVM.\"\"\"\nfrom typing import Optional, Union\n\nimport pandas as pd\nfrom sklearn.svm import SVC, LinearSVC\n\nfrom ethicml.common import implements\nfrom ethicml.utility import DataTuple, Prediction, TestTuple\n\nfrom .in_algorithm import InAlgorithm\n\n__all__ = [\"SVM\"]\n\n\nclass SVM(InAlgorithm):\n \"\"\"Support Vector Machine.\"\"\"\n\n def __init__(self, C: Optional[float] = None, kernel: Optional[str] = None):\n \"\"\"Init SVM.\"\"\"\n kernel_name = f\" (kernel)\" if kernel is not None else \"\"\n super().__init__(name=\"SVM\" + kernel_name, is_fairness_algo=False)\n self.C = SVC().C if C is None else C\n self.kernel = SVC().kernel if kernel is None else kernel\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: Union[DataTuple, TestTuple]) -> Prediction:\n clf = select_svm(self.C, self.kernel)\n clf.fit(train.x, train.y.to_numpy().ravel())\n return Prediction(hard=pd.Series(clf.predict(test.x)))\n\n\ndef select_svm(C: float, kernel: str) -> SVC:\n \"\"\"Select the appropriate SVM model for the given parameters.\"\"\"\n if kernel == \"linear\":\n return LinearSVC(C=C, dual=False, tol=1e-12, random_state=888)\n return SVC(C=C, kernel=kernel, gamma=\"auto\", random_state=888)\n", "path": "ethicml/algorithms/inprocess/svm.py"}, {"content": "\"\"\"Wrapper around Sci-Kit Learn Logistic Regression.\"\"\"\nfrom typing import Optional\n\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.model_selection import KFold\n\nfrom ethicml.common import implements\nfrom 
ethicml.utility import DataTuple, Prediction, SoftPrediction, TestTuple\n\nfrom .in_algorithm import InAlgorithm\n\n__all__ = [\"LR\", \"LRCV\", \"LRProb\"]\n\n\nclass LR(InAlgorithm):\n \"\"\"Logistic regression with hard predictions.\"\"\"\n\n def __init__(self, C: Optional[float] = None):\n \"\"\"Init LR.\"\"\"\n self.C = LogisticRegression().C if C is None else C\n super().__init__(name=f\"Logistic Regression, C={self.C}\", is_fairness_algo=False)\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> Prediction:\n clf = LogisticRegression(solver=\"liblinear\", random_state=888, C=self.C, multi_class=\"auto\")\n clf.fit(train.x, train.y.to_numpy().ravel())\n return Prediction(hard=pd.Series(clf.predict(test.x)))\n\n\nclass LRProb(InAlgorithm):\n \"\"\"Logistic regression with soft output.\"\"\"\n\n def __init__(self, C: Optional[int] = None):\n \"\"\"Init LRProb.\"\"\"\n self.C = LogisticRegression().C if C is None else C\n super().__init__(name=f\"Logistic Regression Prob, C={self.C}\", is_fairness_algo=False)\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> SoftPrediction:\n clf = LogisticRegression(solver=\"liblinear\", random_state=888, C=self.C, multi_class=\"auto\")\n clf.fit(train.x, train.y.to_numpy().ravel())\n return SoftPrediction(soft=pd.Series(clf.predict_proba(test.x)[:, 1]))\n\n\nclass LRCV(InAlgorithm):\n \"\"\"Kind of a cheap hack for now, but gives a proper cross-valudeted LR.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Init LRCV.\"\"\"\n super().__init__(name=\"LRCV\", is_fairness_algo=False)\n\n @implements(InAlgorithm)\n def run(self, train: DataTuple, test: TestTuple) -> Prediction:\n folder = KFold(n_splits=3, shuffle=False)\n clf = LogisticRegressionCV(\n cv=folder, n_jobs=-1, random_state=888, solver=\"liblinear\", multi_class=\"auto\"\n )\n clf.fit(train.x, train.y.to_numpy().ravel())\n return Prediction(hard=pd.Series(clf.predict(test.x)), info=dict(C=clf.C_[0]))\n", "path": "ethicml/algorithms/inprocess/logistic_regression.py"}]} | 1,743 | 490 |
gh_patches_debug_35133 | rasdani/github-patches | git_diff | cowrie__cowrie-1472 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MalShare uploader not working
**Describe the bug**
In my config I have
```
[output_malshare]
enabled = true
```
and in my logs I have
```
[stdout#info] Sending file to MalShare
[stdout#info] Submited to MalShare
```
but when I check on MalShare I can't find any the binaries that have been caught in my honeypot.
**To Reproduce**
Steps to reproduce the behavior:
1. Enable MalShare submission in your config
2. Wait for a bot to drop a binary in your honeypot
3. Try to find the binary on malshare (search by md5)
4. Observe that the binary is not there
**Expected behavior**
The binary should be uploaded successfully to MalShare
**Server (please complete the following information):**
- OS: [e.g. RedHat Linux 7.1, output of uname -a] Ubuntu 20.04, Linux 5.4.0
- Python: 3.8.5
**Additional context**
Based on [MalShare's API docs](https://malshare.com/doc.php) it seems that uploading files now requires an API key and a slightly different POST path than the one [defined in cowrie](https://github.com/cowrie/cowrie/blob/b848ec261554ee9128640601eb9a6734b2bffefe/src/cowrie/output/malshare.py#L90). Probably adding an API key option to the config and updating the uploader with the new path and to use the API key will solve this.
</issue>
<code>
[start of src/cowrie/output/malshare.py]
1 # Copyright (c) 2015 Michel Oosterhof <[email protected]>
2 # All rights reserved.
3 #
4 # Redistribution and use in source and binary forms, with or without
5 # modification, are permitted provided that the following conditions
6 # are met:
7 #
8 # 1. Redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer.
10 # 2. Redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution.
13 # 3. The names of the author(s) may not be used to endorse or promote
14 # products derived from this software without specific prior written
15 # permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS`` AND ANY EXPRESS OR
18 # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21 # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24 # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 # SUCH DAMAGE.
28
29 """
30 Send files to https://malshare.com/
31 More info https://malshare.com/doc.php
32 """
33
34 from __future__ import absolute_import, division
35
36 import os
37
38 try:
39 from urllib.parse import urlparse
40 except ImportError:
41 from urlparse import urlparse
42 import requests
43
44 import cowrie.core.output
45
46
47 class Output(cowrie.core.output.Output):
48 """
49 malshare output
50
51 TODO: use `treq`
52 """
53 def start(self):
54 """
55 Start output plugin
56 """
57 pass
58
59 def stop(self):
60 """
61 Stop output plugin
62 """
63 pass
64
65 def write(self, entry):
66 if entry["eventid"] == "cowrie.session.file_download":
67 print("Sending file to MalShare")
68 p = urlparse(entry["url"]).path
69 if p == "":
70 fileName = entry["shasum"]
71 else:
72 b = os.path.basename(p)
73 if b == "":
74 fileName = entry["shasum"]
75 else:
76 fileName = b
77
78 self.postfile(entry["outfile"], fileName)
79
80 elif entry["eventid"] == "cowrie.session.file_upload":
81 print("Sending file to MalShare")
82 self.postfile(entry["outfile"], entry["filename"])
83
84 def postfile(self, artifact, fileName):
85 """
86 Send a file to MalShare
87 """
88 try:
89 res = requests.post(
90 "https://malshare.com/api.php?mode=cli",
91 files={fileName: open(artifact, "rb")}
92 )
93 if res and res.ok:
94 print("Submited to MalShare")
95 else:
96 print("MalShare Request failed: {}".format(res.status_code))
97 except Exception as e:
98 print("MalShare Request failed: {}".format(e))
99
[end of src/cowrie/output/malshare.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cowrie/output/malshare.py b/src/cowrie/output/malshare.py
--- a/src/cowrie/output/malshare.py
+++ b/src/cowrie/output/malshare.py
@@ -41,7 +41,10 @@
from urlparse import urlparse
import requests
+from twisted.python import log
+
import cowrie.core.output
+from cowrie.core.config import CowrieConfig
class Output(cowrie.core.output.Output):
@@ -54,7 +57,7 @@
"""
Start output plugin
"""
- pass
+ self.apiKey = CowrieConfig().get('output_malshare', 'api_key')
def stop(self):
"""
@@ -64,7 +67,6 @@
def write(self, entry):
if entry["eventid"] == "cowrie.session.file_download":
- print("Sending file to MalShare")
p = urlparse(entry["url"]).path
if p == "":
fileName = entry["shasum"]
@@ -78,7 +80,6 @@
self.postfile(entry["outfile"], fileName)
elif entry["eventid"] == "cowrie.session.file_upload":
- print("Sending file to MalShare")
self.postfile(entry["outfile"], entry["filename"])
def postfile(self, artifact, fileName):
@@ -87,12 +88,12 @@
"""
try:
res = requests.post(
- "https://malshare.com/api.php?mode=cli",
- files={fileName: open(artifact, "rb")}
+ "https://malshare.com/api.php?api_key="+self.apiKey+"&action=upload",
+ files={"upload": open(artifact, "rb")}
)
if res and res.ok:
- print("Submited to MalShare")
+ log.msg("Submitted to MalShare")
else:
- print("MalShare Request failed: {}".format(res.status_code))
+ log.msg("MalShare Request failed: {}".format(res.status_code))
except Exception as e:
- print("MalShare Request failed: {}".format(e))
+ log.msg("MalShare Request failed: {}".format(e))
| {"golden_diff": "diff --git a/src/cowrie/output/malshare.py b/src/cowrie/output/malshare.py\n--- a/src/cowrie/output/malshare.py\n+++ b/src/cowrie/output/malshare.py\n@@ -41,7 +41,10 @@\n from urlparse import urlparse\n import requests\n \n+from twisted.python import log\n+\n import cowrie.core.output\n+from cowrie.core.config import CowrieConfig\n \n \n class Output(cowrie.core.output.Output):\n@@ -54,7 +57,7 @@\n \"\"\"\n Start output plugin\n \"\"\"\n- pass\n+ self.apiKey = CowrieConfig().get('output_malshare', 'api_key')\n \n def stop(self):\n \"\"\"\n@@ -64,7 +67,6 @@\n \n def write(self, entry):\n if entry[\"eventid\"] == \"cowrie.session.file_download\":\n- print(\"Sending file to MalShare\")\n p = urlparse(entry[\"url\"]).path\n if p == \"\":\n fileName = entry[\"shasum\"]\n@@ -78,7 +80,6 @@\n self.postfile(entry[\"outfile\"], fileName)\n \n elif entry[\"eventid\"] == \"cowrie.session.file_upload\":\n- print(\"Sending file to MalShare\")\n self.postfile(entry[\"outfile\"], entry[\"filename\"])\n \n def postfile(self, artifact, fileName):\n@@ -87,12 +88,12 @@\n \"\"\"\n try:\n res = requests.post(\n- \"https://malshare.com/api.php?mode=cli\",\n- files={fileName: open(artifact, \"rb\")}\n+ \"https://malshare.com/api.php?api_key=\"+self.apiKey+\"&action=upload\",\n+ files={\"upload\": open(artifact, \"rb\")}\n )\n if res and res.ok:\n- print(\"Submited to MalShare\")\n+ log.msg(\"Submitted to MalShare\")\n else:\n- print(\"MalShare Request failed: {}\".format(res.status_code))\n+ log.msg(\"MalShare Request failed: {}\".format(res.status_code))\n except Exception as e:\n- print(\"MalShare Request failed: {}\".format(e))\n+ log.msg(\"MalShare Request failed: {}\".format(e))\n", "issue": "MalShare uploader not working\n**Describe the bug**\r\nIn my config I have\r\n```\r\n[output_malshare]\r\nenabled = true\r\n```\r\n\r\nand in my logs I have\r\n```\r\n[stdout#info] Sending file to MalShare\r\n[stdout#info] Submited to MalShare\r\n```\r\n\r\nbut when I check on MalShare I can't find any the binaries that have been caught in my honeypot.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Enable MalShare submission in your config\r\n2. Wait for a bot to drop a binary in your honeypot\r\n3. Try to find the binary on malshare (search by md5)\r\n4. Observe that the binary is not there\r\n\r\n**Expected behavior**\r\nThe binary should be uploaded successfully to MalShare\r\n\r\n**Server (please complete the following information):**\r\n - OS: [e.g. RedHat Linux 7.1, output of uname -a] Ubuntu 20.04, Linux 5.4.0\r\n - Python: 3.8.5\r\n\r\n**Additional context**\r\nBased on [MalShare's API docs](https://malshare.com/doc.php) it seems that uploading files now requires an API key and a slightly different POST path than the one [defined in cowrie](https://github.com/cowrie/cowrie/blob/b848ec261554ee9128640601eb9a6734b2bffefe/src/cowrie/output/malshare.py#L90). Probably adding an API key option to the config and updating the uploader with the new path and to use the API key will solve this.\r\n\n", "before_files": [{"content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. 
Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS`` AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\n\"\"\"\nSend files to https://malshare.com/\nMore info https://malshare.com/doc.php\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport os\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\nimport requests\n\nimport cowrie.core.output\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n malshare output\n\n TODO: use `treq`\n \"\"\"\n def start(self):\n \"\"\"\n Start output plugin\n \"\"\"\n pass\n\n def stop(self):\n \"\"\"\n Stop output plugin\n \"\"\"\n pass\n\n def write(self, entry):\n if entry[\"eventid\"] == \"cowrie.session.file_download\":\n print(\"Sending file to MalShare\")\n p = urlparse(entry[\"url\"]).path\n if p == \"\":\n fileName = entry[\"shasum\"]\n else:\n b = os.path.basename(p)\n if b == \"\":\n fileName = entry[\"shasum\"]\n else:\n fileName = b\n\n self.postfile(entry[\"outfile\"], fileName)\n\n elif entry[\"eventid\"] == \"cowrie.session.file_upload\":\n print(\"Sending file to MalShare\")\n self.postfile(entry[\"outfile\"], entry[\"filename\"])\n\n def postfile(self, artifact, fileName):\n \"\"\"\n Send a file to MalShare\n \"\"\"\n try:\n res = requests.post(\n \"https://malshare.com/api.php?mode=cli\",\n files={fileName: open(artifact, \"rb\")}\n )\n if res and res.ok:\n print(\"Submited to MalShare\")\n else:\n print(\"MalShare Request failed: {}\".format(res.status_code))\n except Exception as e:\n print(\"MalShare Request failed: {}\".format(e))\n", "path": "src/cowrie/output/malshare.py"}]} | 1,785 | 485 |
gh_patches_debug_5923 | rasdani/github-patches | git_diff | dotkom__onlineweb4-488 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Featured careeropprotunities are not featured
The featured opportunities are not prioritized over other opportunities.
</issue>
<code>
[start of apps/careeropportunity/views.py]
1 #-*- coding: utf-8 -*-
2 from django.utils import timezone
3
4 from datetime import datetime
5
6 from django.conf import settings
7 from django.shortcuts import render_to_response
8 from django.shortcuts import get_object_or_404
9 from django.template import RequestContext
10
11 from apps.careeropportunity.models import CareerOpportunity
12
13
14 def index(request):
15 opportunities = CareerOpportunity.objects.filter(
16 start__lte=timezone.now(), end__gte=timezone.now()).order_by('featured', '-start')
17
18 return render_to_response('careeropportunity/index.html', \
19 {'opportunities': opportunities}, \
20 context_instance=RequestContext(request))
21
22
23 def details(request, opportunity_id):
24 opportunity = get_object_or_404(CareerOpportunity, pk=opportunity_id)
25
26 return render_to_response('careeropportunity/details.html', \
27 {'opportunity': opportunity}, \
28 context_instance=RequestContext(request))
29
[end of apps/careeropportunity/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/careeropportunity/views.py b/apps/careeropportunity/views.py
--- a/apps/careeropportunity/views.py
+++ b/apps/careeropportunity/views.py
@@ -13,7 +13,7 @@
def index(request):
opportunities = CareerOpportunity.objects.filter(
- start__lte=timezone.now(), end__gte=timezone.now()).order_by('featured', '-start')
+ start__lte=timezone.now(), end__gte=timezone.now()).order_by('-featured', '-start')
return render_to_response('careeropportunity/index.html', \
{'opportunities': opportunities}, \
| {"golden_diff": "diff --git a/apps/careeropportunity/views.py b/apps/careeropportunity/views.py\n--- a/apps/careeropportunity/views.py\n+++ b/apps/careeropportunity/views.py\n@@ -13,7 +13,7 @@\n \n def index(request):\n opportunities = CareerOpportunity.objects.filter(\n- \tstart__lte=timezone.now(), end__gte=timezone.now()).order_by('featured', '-start')\n+ \tstart__lte=timezone.now(), end__gte=timezone.now()).order_by('-featured', '-start')\n \n return render_to_response('careeropportunity/index.html', \\\n {'opportunities': opportunities}, \\\n", "issue": "Featured careeropprotunities are not featured\nThe featured opportunities are not prioritized over other opportunities. \n\n", "before_files": [{"content": "#-*- coding: utf-8 -*-\nfrom django.utils import timezone\n\nfrom datetime import datetime\n\nfrom django.conf import settings\nfrom django.shortcuts import render_to_response\nfrom django.shortcuts import get_object_or_404\nfrom django.template import RequestContext\n\nfrom apps.careeropportunity.models import CareerOpportunity\n\n\ndef index(request):\n opportunities = CareerOpportunity.objects.filter(\n \tstart__lte=timezone.now(), end__gte=timezone.now()).order_by('featured', '-start')\n \n return render_to_response('careeropportunity/index.html', \\\n {'opportunities': opportunities}, \\\n context_instance=RequestContext(request))\n\n\ndef details(request, opportunity_id):\n opportunity = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n\n return render_to_response('careeropportunity/details.html', \\\n {'opportunity': opportunity}, \\\n context_instance=RequestContext(request))\n", "path": "apps/careeropportunity/views.py"}]} | 808 | 141 |
gh_patches_debug_17300 | rasdani/github-patches | git_diff | techmatters__terraso-backend-889 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RuntimeWarning: DateTimeField Log.client_timestamp received a naive datetime
## Description
When running `make test`, many warnings of this form are observed:
```
/home/terraso/.local/lib/python3.11/site-packages/django/db/models/fields/__init__.py:1595: RuntimeWarning: DateTimeField Log.client_timestamp received a naive datetime (2023-07-11 22:39:48.700825) while time zone support is active.
warnings.warn(
```
</issue>
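For background: with `USE_TZ = True`, Django expects timezone-aware datetimes on `DateTimeField`s, while `datetime.now()` returns a naive value (no `tzinfo`) — which is exactly what the warning reports. A minimal standalone sketch of the difference; the Django helper calls at the end are only indicated in comments because they need a configured settings module:

```python
from datetime import datetime, timezone

# Naive: no tzinfo attached -- assigning this to a DateTimeField while
# time zone support is active triggers the RuntimeWarning above.
naive = datetime.now()
print(naive.tzinfo)                # None

# Aware: carries an explicit UTC offset.
aware = datetime.now(timezone.utc)
print(aware.tzinfo)                # UTC

# Inside a Django project the usual helpers are (require configured settings):
#   from django.utils import timezone as dj_tz
#   dj_tz.now()              # aware datetime when USE_TZ = True
#   dj_tz.make_aware(naive)  # attach the current time zone to a naive value
```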
<code>
[start of terraso_backend/apps/audit_logs/services.py]
1 import typing
2 from datetime import datetime
3 from enum import Enum
4
5 from django.contrib.contenttypes.models import ContentType
6 from django.core.paginator import Paginator
7 from django.db import transaction
8 from django.db.models.query import QuerySet
9
10 from apps.core.models import User
11
12 from . import api, models
13
14 TEMPLATE = "{client_time} - {user} {action} {resource}"
15
16
17 class _AuditLogService:
18 """
19 AuditLogService implements the AuditLog protocol
20 """
21
22 def log(
23 self,
24 user: User,
25 action: api.ACTIONS,
26 resource: object,
27 metadata: typing.Optional[dict[str, any]] = None,
28 client_time: typing.Optional[datetime] = None,
29 ) -> None:
30 """
31 log logs an action performed by a user on a resource
32 example:
33 log(user, "create", resource, client_time=1234567890)
34 :param client_time:
35 :param metadata:
36 :param action:
37 :param user:
38 :type resource: object
39
40 """
41 if not hasattr(user, "id"):
42 raise ValueError("Invalid user")
43
44 get_user_readable = getattr(user, "human_readable", None)
45 user_readable = get_user_readable() if callable(get_user_readable) else user.full_name()
46
47 if not isinstance(action, Enum) or not hasattr(models.Events, action.value):
48 raise ValueError("Invalid action")
49
50 resource_id = resource.id if hasattr(resource, "id") else None
51 if resource_id is None:
52 raise ValueError("Invalid resource")
53
54 get_resource_human_readable = getattr(resource, "human_readable", None)
55 if callable(get_resource_human_readable):
56 resource_human_readable = get_resource_human_readable()
57 else:
58 resource_human_readable = resource_id
59
60 content_type = ContentType.objects.get_for_model(resource)
61 resource_obj = resource
62
63 resource_repr = resource.__dict__.__str__()
64
65 if metadata is None:
66 metadata = {}
67
68 with transaction.atomic():
69 log = models.Log(
70 user=user,
71 event=action.value,
72 resource_id=resource_id,
73 resource_content_type=content_type,
74 resource_object=resource_obj,
75 resource_json_repr=resource_repr,
76 resource_human_readable=str(resource_human_readable),
77 user_human_readable=str(user_readable),
78 )
79
80 if client_time is None:
81 client_time = datetime.now()
82 log.client_timestamp = client_time
83
84 log.metadata = metadata
85 log.save()
86
87
88 class LogData:
89 """
90 LazyPaginator implements the Paginator protocol
91 """
92
93 def __init__(self, data: QuerySet):
94 self.data = data
95
96 def get_paginator(self, page_size: int = 10):
97 return Paginator(self.data, page_size)
98
99 def __len__(self):
100 return len(self.data)
101
102 def __iter__(self):
103 return iter(self.data)
104
105
106 def new_audit_logger() -> api.AuditLog:
107 """
108 new_audit_logger creates a new audit log
109 """
110 return _AuditLogService()
111
[end of terraso_backend/apps/audit_logs/services.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/terraso_backend/apps/audit_logs/services.py b/terraso_backend/apps/audit_logs/services.py
--- a/terraso_backend/apps/audit_logs/services.py
+++ b/terraso_backend/apps/audit_logs/services.py
@@ -2,6 +2,7 @@
from datetime import datetime
from enum import Enum
+from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator
from django.db import transaction
@@ -79,7 +80,12 @@
if client_time is None:
client_time = datetime.now()
- log.client_timestamp = client_time
+ if settings.USE_TZ:
+ from django.utils.timezone import make_aware
+
+ log.client_timestamp = make_aware(client_time)
+ else:
+ log.client_timestamp = client_time
log.metadata = metadata
log.save()
| {"golden_diff": "diff --git a/terraso_backend/apps/audit_logs/services.py b/terraso_backend/apps/audit_logs/services.py\n--- a/terraso_backend/apps/audit_logs/services.py\n+++ b/terraso_backend/apps/audit_logs/services.py\n@@ -2,6 +2,7 @@\n from datetime import datetime\n from enum import Enum\n \n+from django.conf import settings\n from django.contrib.contenttypes.models import ContentType\n from django.core.paginator import Paginator\n from django.db import transaction\n@@ -79,7 +80,12 @@\n \n if client_time is None:\n client_time = datetime.now()\n- log.client_timestamp = client_time\n+ if settings.USE_TZ:\n+ from django.utils.timezone import make_aware\n+\n+ log.client_timestamp = make_aware(client_time)\n+ else:\n+ log.client_timestamp = client_time\n \n log.metadata = metadata\n log.save()\n", "issue": "RuntimeWarning: DateTimeField Log.client_timestamp received a naive datetime\n## Description\r\nWhen running `make test`, many warnings of this form are observed:\r\n```\r\n /home/terraso/.local/lib/python3.11/site-packages/django/db/models/fields/__init__.py:1595: RuntimeWarning: DateTimeField Log.client_timestamp received a naive datetime (2023-07-11 22:39:48.700825) while time zone support is active.\r\n warnings.warn(\r\n```\n", "before_files": [{"content": "import typing\nfrom datetime import datetime\nfrom enum import Enum\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.paginator import Paginator\nfrom django.db import transaction\nfrom django.db.models.query import QuerySet\n\nfrom apps.core.models import User\n\nfrom . import api, models\n\nTEMPLATE = \"{client_time} - {user} {action} {resource}\"\n\n\nclass _AuditLogService:\n \"\"\"\n AuditLogService implements the AuditLog protocol\n \"\"\"\n\n def log(\n self,\n user: User,\n action: api.ACTIONS,\n resource: object,\n metadata: typing.Optional[dict[str, any]] = None,\n client_time: typing.Optional[datetime] = None,\n ) -> None:\n \"\"\"\n log logs an action performed by a user on a resource\n example:\n log(user, \"create\", resource, client_time=1234567890)\n :param client_time:\n :param metadata:\n :param action:\n :param user:\n :type resource: object\n\n \"\"\"\n if not hasattr(user, \"id\"):\n raise ValueError(\"Invalid user\")\n\n get_user_readable = getattr(user, \"human_readable\", None)\n user_readable = get_user_readable() if callable(get_user_readable) else user.full_name()\n\n if not isinstance(action, Enum) or not hasattr(models.Events, action.value):\n raise ValueError(\"Invalid action\")\n\n resource_id = resource.id if hasattr(resource, \"id\") else None\n if resource_id is None:\n raise ValueError(\"Invalid resource\")\n\n get_resource_human_readable = getattr(resource, \"human_readable\", None)\n if callable(get_resource_human_readable):\n resource_human_readable = get_resource_human_readable()\n else:\n resource_human_readable = resource_id\n\n content_type = ContentType.objects.get_for_model(resource)\n resource_obj = resource\n\n resource_repr = resource.__dict__.__str__()\n\n if metadata is None:\n metadata = {}\n\n with transaction.atomic():\n log = models.Log(\n user=user,\n event=action.value,\n resource_id=resource_id,\n resource_content_type=content_type,\n resource_object=resource_obj,\n resource_json_repr=resource_repr,\n resource_human_readable=str(resource_human_readable),\n user_human_readable=str(user_readable),\n )\n\n if client_time is None:\n client_time = datetime.now()\n log.client_timestamp = client_time\n\n log.metadata = metadata\n 
log.save()\n\n\nclass LogData:\n \"\"\"\n LazyPaginator implements the Paginator protocol\n \"\"\"\n\n def __init__(self, data: QuerySet):\n self.data = data\n\n def get_paginator(self, page_size: int = 10):\n return Paginator(self.data, page_size)\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n return iter(self.data)\n\n\ndef new_audit_logger() -> api.AuditLog:\n \"\"\"\n new_audit_logger creates a new audit log\n \"\"\"\n return _AuditLogService()\n", "path": "terraso_backend/apps/audit_logs/services.py"}]} | 1,536 | 199 |
gh_patches_debug_10615 | rasdani/github-patches | git_diff | pandas-dev__pandas-14007 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DEPR: deprecate SparseList
</issue>
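The usual way to deprecate a public class like this is to emit a `FutureWarning` from its constructor so existing code keeps working while users are warned off. A generic sketch of that pattern — `SparseThing` is a made-up stand-in, not pandas' actual code:

```python
import warnings


class SparseThing:
    """Made-up stand-in for a class slated for removal."""

    def __init__(self, data=None):
        # stacklevel=2 points the warning at the caller, not at __init__ itself.
        warnings.warn(
            "SparseThing is deprecated and will be removed in a future version",
            FutureWarning,
            stacklevel=2,
        )
        self.data = data


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    SparseThing([1, 2, 3])

print(caught[0].category.__name__)  # FutureWarning
```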
<code>
[start of pandas/sparse/list.py]
1 import numpy as np
2 from pandas.core.base import PandasObject
3 from pandas.formats.printing import pprint_thing
4
5 from pandas.types.common import is_scalar
6 from pandas.sparse.array import SparseArray
7 import pandas._sparse as splib
8
9
10 class SparseList(PandasObject):
11
12 """
13 Data structure for accumulating data to be converted into a
14 SparseArray. Has similar API to the standard Python list
15
16 Parameters
17 ----------
18 data : scalar or array-like
19 fill_value : scalar, default NaN
20 """
21
22 def __init__(self, data=None, fill_value=np.nan):
23 self.fill_value = fill_value
24 self._chunks = []
25
26 if data is not None:
27 self.append(data)
28
29 def __unicode__(self):
30 contents = '\n'.join(repr(c) for c in self._chunks)
31 return '%s\n%s' % (object.__repr__(self), pprint_thing(contents))
32
33 def __len__(self):
34 return sum(len(c) for c in self._chunks)
35
36 def __getitem__(self, i):
37 if i < 0:
38 if i + len(self) < 0: # pragma: no cover
39 raise ValueError('%d out of range' % i)
40 i += len(self)
41
42 passed = 0
43 j = 0
44 while i >= passed + len(self._chunks[j]):
45 passed += len(self._chunks[j])
46 j += 1
47 return self._chunks[j][i - passed]
48
49 def __setitem__(self, i, value):
50 raise NotImplementedError
51
52 @property
53 def nchunks(self):
54 return len(self._chunks)
55
56 @property
57 def is_consolidated(self):
58 return self.nchunks == 1
59
60 def consolidate(self, inplace=True):
61 """
62 Internally consolidate chunks of data
63
64 Parameters
65 ----------
66 inplace : boolean, default True
67 Modify the calling object instead of constructing a new one
68
69 Returns
70 -------
71 splist : SparseList
72 If inplace=False, new object, otherwise reference to existing
73 object
74 """
75 if not inplace:
76 result = self.copy()
77 else:
78 result = self
79
80 if result.is_consolidated:
81 return result
82
83 result._consolidate_inplace()
84 return result
85
86 def _consolidate_inplace(self):
87 new_values = np.concatenate([c.sp_values for c in self._chunks])
88 new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])
89 new_arr = SparseArray(new_values, sparse_index=new_index,
90 fill_value=self.fill_value)
91 self._chunks = [new_arr]
92
93 def copy(self):
94 """
95 Return copy of the list
96
97 Returns
98 -------
99 new_list : SparseList
100 """
101 new_splist = SparseList(fill_value=self.fill_value)
102 new_splist._chunks = list(self._chunks)
103 return new_splist
104
105 def to_array(self):
106 """
107 Return SparseArray from data stored in the SparseList
108
109 Returns
110 -------
111 sparr : SparseArray
112 """
113 self.consolidate(inplace=True)
114 return self._chunks[0]
115
116 def append(self, value):
117 """
118 Append element or array-like chunk of data to the SparseList
119
120 Parameters
121 ----------
122 value: scalar or array-like
123 """
124 if is_scalar(value):
125 value = [value]
126
127 sparr = SparseArray(value, fill_value=self.fill_value)
128 self._chunks.append(sparr)
129 self._consolidated = False
130
131
132 def _concat_sparse_indexes(indexes):
133 all_indices = []
134 total_length = 0
135
136 for index in indexes:
137 # increment by offset
138 inds = index.to_int_index().indices + total_length
139
140 all_indices.append(inds)
141 total_length += index.length
142
143 return splib.IntIndex(total_length, np.concatenate(all_indices))
144
[end of pandas/sparse/list.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pandas/sparse/list.py b/pandas/sparse/list.py
--- a/pandas/sparse/list.py
+++ b/pandas/sparse/list.py
@@ -1,3 +1,4 @@
+import warnings
import numpy as np
from pandas.core.base import PandasObject
from pandas.formats.printing import pprint_thing
@@ -20,6 +21,11 @@
"""
def __init__(self, data=None, fill_value=np.nan):
+
+ # see gh-13784
+ warnings.warn("SparseList is deprecated and will be removed "
+ "in a future version", FutureWarning, stacklevel=2)
+
self.fill_value = fill_value
self._chunks = []
| {"golden_diff": "diff --git a/pandas/sparse/list.py b/pandas/sparse/list.py\n--- a/pandas/sparse/list.py\n+++ b/pandas/sparse/list.py\n@@ -1,3 +1,4 @@\n+import warnings\n import numpy as np\n from pandas.core.base import PandasObject\n from pandas.formats.printing import pprint_thing\n@@ -20,6 +21,11 @@\n \"\"\"\n \n def __init__(self, data=None, fill_value=np.nan):\n+\n+ # see gh-13784\n+ warnings.warn(\"SparseList is deprecated and will be removed \"\n+ \"in a future version\", FutureWarning, stacklevel=2)\n+\n self.fill_value = fill_value\n self._chunks = []\n", "issue": "DEPR: deprecate SparseList\n\n", "before_files": [{"content": "import numpy as np\nfrom pandas.core.base import PandasObject\nfrom pandas.formats.printing import pprint_thing\n\nfrom pandas.types.common import is_scalar\nfrom pandas.sparse.array import SparseArray\nimport pandas._sparse as splib\n\n\nclass SparseList(PandasObject):\n\n \"\"\"\n Data structure for accumulating data to be converted into a\n SparseArray. Has similar API to the standard Python list\n\n Parameters\n ----------\n data : scalar or array-like\n fill_value : scalar, default NaN\n \"\"\"\n\n def __init__(self, data=None, fill_value=np.nan):\n self.fill_value = fill_value\n self._chunks = []\n\n if data is not None:\n self.append(data)\n\n def __unicode__(self):\n contents = '\\n'.join(repr(c) for c in self._chunks)\n return '%s\\n%s' % (object.__repr__(self), pprint_thing(contents))\n\n def __len__(self):\n return sum(len(c) for c in self._chunks)\n\n def __getitem__(self, i):\n if i < 0:\n if i + len(self) < 0: # pragma: no cover\n raise ValueError('%d out of range' % i)\n i += len(self)\n\n passed = 0\n j = 0\n while i >= passed + len(self._chunks[j]):\n passed += len(self._chunks[j])\n j += 1\n return self._chunks[j][i - passed]\n\n def __setitem__(self, i, value):\n raise NotImplementedError\n\n @property\n def nchunks(self):\n return len(self._chunks)\n\n @property\n def is_consolidated(self):\n return self.nchunks == 1\n\n def consolidate(self, inplace=True):\n \"\"\"\n Internally consolidate chunks of data\n\n Parameters\n ----------\n inplace : boolean, default True\n Modify the calling object instead of constructing a new one\n\n Returns\n -------\n splist : SparseList\n If inplace=False, new object, otherwise reference to existing\n object\n \"\"\"\n if not inplace:\n result = self.copy()\n else:\n result = self\n\n if result.is_consolidated:\n return result\n\n result._consolidate_inplace()\n return result\n\n def _consolidate_inplace(self):\n new_values = np.concatenate([c.sp_values for c in self._chunks])\n new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])\n new_arr = SparseArray(new_values, sparse_index=new_index,\n fill_value=self.fill_value)\n self._chunks = [new_arr]\n\n def copy(self):\n \"\"\"\n Return copy of the list\n\n Returns\n -------\n new_list : SparseList\n \"\"\"\n new_splist = SparseList(fill_value=self.fill_value)\n new_splist._chunks = list(self._chunks)\n return new_splist\n\n def to_array(self):\n \"\"\"\n Return SparseArray from data stored in the SparseList\n\n Returns\n -------\n sparr : SparseArray\n \"\"\"\n self.consolidate(inplace=True)\n return self._chunks[0]\n\n def append(self, value):\n \"\"\"\n Append element or array-like chunk of data to the SparseList\n\n Parameters\n ----------\n value: scalar or array-like\n \"\"\"\n if is_scalar(value):\n value = [value]\n\n sparr = SparseArray(value, fill_value=self.fill_value)\n self._chunks.append(sparr)\n self._consolidated = 
False\n\n\ndef _concat_sparse_indexes(indexes):\n all_indices = []\n total_length = 0\n\n for index in indexes:\n # increment by offset\n inds = index.to_int_index().indices + total_length\n\n all_indices.append(inds)\n total_length += index.length\n\n return splib.IntIndex(total_length, np.concatenate(all_indices))\n", "path": "pandas/sparse/list.py"}]} | 1,726 | 165 |
gh_patches_debug_17785 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1717 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up test noise (that includes EPP and migration scripts)
### Issue description
Right now if you run the test suite locally or see the output from GitHub, there are a lot of added prints and logs that make it hard to troubleshoot where your particular error is coming from. This ticket is to clean up test noise in general, including EPP and migration scripts.
### Acceptance criteria
- [ ] unnecessary prints/logs on tests are removed
### Additional context
_No response_
### Links to other issues
_No response_
</issue>
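Much of this kind of noise comes from bare `print()` calls inside application code; routing messages through the `logging` module instead lets the test runner raise the log level or capture records rather than spilling everything to stdout. A generic sketch of the swap — the function names and messages are invented for illustration:

```python
import logging

logger = logging.getLogger(__name__)


def connect_noisy(host):
    # Bare prints always hit stdout, including during test runs.
    print(f"connecting to {host}")


def connect_quiet(host):
    # Logger calls respect the configured level/handlers, so tests can
    # silence INFO-level chatter or assert on captured records instead.
    logger.info("connecting to %s", host)


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    connect_quiet("registry.example")   # suppressed (below WARNING)
    connect_noisy("registry.example")   # still prints
```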
<code>
[start of src/epplibwrapper/utility/pool.py]
1 import logging
2 from typing import List
3 import gevent
4 from geventconnpool import ConnectionPool
5 from epplibwrapper.socket import Socket
6 from epplibwrapper.utility.pool_error import PoolError, PoolErrorCodes
7
8 try:
9 from epplib.commands import Hello
10 from epplib.exceptions import TransportError
11 except ImportError:
12 pass
13
14 from gevent.lock import BoundedSemaphore
15 from collections import deque
16
17 logger = logging.getLogger(__name__)
18
19
20 class EPPConnectionPool(ConnectionPool):
21 """A connection pool for EPPLib.
22
23 Args:
24 client (Client): The client
25 login (commands.Login): Login creds
26 options (dict): Options for the ConnectionPool
27 base class
28 """
29
30 def __init__(self, client, login, options: dict):
31 # For storing shared credentials
32 self._client = client
33 self._login = login
34
35 # Keep track of each greenlet
36 self.greenlets: List[gevent.Greenlet] = []
37
38 # Define optional pool settings.
39 # Kept in a dict so that the parent class,
40 # client.py, can maintain seperation/expandability
41 self.size = 1
42 if "size" in options:
43 self.size = options["size"]
44
45 self.exc_classes = tuple((TransportError,))
46 if "exc_classes" in options:
47 self.exc_classes = options["exc_classes"]
48
49 self.keepalive = None
50 if "keepalive" in options:
51 self.keepalive = options["keepalive"]
52
53 # Determines the period in which new
54 # gevent threads are spun up.
55 # This time period is in seconds. So for instance, .1 would be .1 seconds.
56 self.spawn_frequency = 0.1
57 if "spawn_frequency" in options:
58 self.spawn_frequency = options["spawn_frequency"]
59
60 self.conn: deque = deque()
61 self.lock = BoundedSemaphore(self.size)
62
63 self.populate_all_connections()
64
65 def _new_connection(self):
66 socket = self._create_socket(self._client, self._login)
67 try:
68 connection = socket.connect()
69 return connection
70 except Exception as err:
71 message = f"Failed to execute due to a registry error: {err}"
72 logger.error(message, exc_info=True)
73 # We want to raise a pool error rather than a LoginError here
74 # because if this occurs internally, we should handle this
75 # differently than we otherwise would for LoginError.
76 raise PoolError(code=PoolErrorCodes.NEW_CONNECTION_FAILED) from err
77
78 def _keepalive(self, c):
79 """Sends a command to the server to keep the connection alive."""
80 try:
81 # Sends a ping to the registry via EPPLib
82 c.send(Hello())
83 except Exception as err:
84 message = "Failed to keep the connection alive."
85 logger.error(message, exc_info=True)
86 raise PoolError(code=PoolErrorCodes.KEEP_ALIVE_FAILED) from err
87
88 def _create_socket(self, client, login) -> Socket:
89 """Creates and returns a socket instance"""
90 socket = Socket(client, login)
91 return socket
92
93 def get_connections(self):
94 """Returns the connection queue"""
95 return self.conn
96
97 def kill_all_connections(self):
98 """Kills all active connections in the pool."""
99 try:
100 if len(self.conn) > 0 or len(self.greenlets) > 0:
101 logger.info("Attempting to kill connections")
102 gevent.killall(self.greenlets)
103
104 self.greenlets.clear()
105 for connection in self.conn:
106 connection.disconnect()
107 self.conn.clear()
108
109 # Clear the semaphore
110 self.lock = BoundedSemaphore(self.size)
111 logger.info("Finished killing connections")
112 else:
113 logger.info("No connections to kill.")
114 except Exception as err:
115 logger.error("Could not kill all connections.")
116 raise PoolError(code=PoolErrorCodes.KILL_ALL_FAILED) from err
117
118 def populate_all_connections(self):
119 """Generates the connection pool.
120 If any connections exist, kill them first.
121 Based off of the __init__ definition for geventconnpool.
122 """
123 if len(self.conn) > 0 or len(self.greenlets) > 0:
124 self.kill_all_connections()
125
126 # Setup the lock
127 for i in range(self.size):
128 self.lock.acquire()
129
130 # Open multiple connections
131 for i in range(self.size):
132 self.greenlets.append(gevent.spawn_later(self.spawn_frequency * i, self._addOne))
133
134 # Open a "keepalive" thread if we want to ping open connections
135 if self.keepalive:
136 self.greenlets.append(gevent.spawn(self._keepalive_periodic))
137
[end of src/epplibwrapper/utility/pool.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/epplibwrapper/utility/pool.py b/src/epplibwrapper/utility/pool.py
--- a/src/epplibwrapper/utility/pool.py
+++ b/src/epplibwrapper/utility/pool.py
@@ -85,6 +85,21 @@
logger.error(message, exc_info=True)
raise PoolError(code=PoolErrorCodes.KEEP_ALIVE_FAILED) from err
+ def _keepalive_periodic(self):
+ """Overriding _keepalive_periodic from geventconnpool so that PoolErrors
+ are properly handled, as opposed to printing to stdout"""
+ delay = float(self.keepalive) / self.size
+ while 1:
+ try:
+ with self.get() as c:
+ self._keepalive(c)
+ except PoolError as err:
+ logger.error(err.message, exc_info=True)
+ except self.exc_classes:
+ # Nothing to do, the pool will generate a new connection later
+ pass
+ gevent.sleep(delay)
+
def _create_socket(self, client, login) -> Socket:
"""Creates and returns a socket instance"""
socket = Socket(client, login)
| {"golden_diff": "diff --git a/src/epplibwrapper/utility/pool.py b/src/epplibwrapper/utility/pool.py\n--- a/src/epplibwrapper/utility/pool.py\n+++ b/src/epplibwrapper/utility/pool.py\n@@ -85,6 +85,21 @@\n logger.error(message, exc_info=True)\n raise PoolError(code=PoolErrorCodes.KEEP_ALIVE_FAILED) from err\n \n+ def _keepalive_periodic(self):\n+ \"\"\"Overriding _keepalive_periodic from geventconnpool so that PoolErrors\n+ are properly handled, as opposed to printing to stdout\"\"\"\n+ delay = float(self.keepalive) / self.size\n+ while 1:\n+ try:\n+ with self.get() as c:\n+ self._keepalive(c)\n+ except PoolError as err:\n+ logger.error(err.message, exc_info=True)\n+ except self.exc_classes:\n+ # Nothing to do, the pool will generate a new connection later\n+ pass\n+ gevent.sleep(delay)\n+\n def _create_socket(self, client, login) -> Socket:\n \"\"\"Creates and returns a socket instance\"\"\"\n socket = Socket(client, login)\n", "issue": "Clean up test noise (that includes EPP and migration scripts)\n### Issue description\r\n\r\nRight now if you run the test suite locally or see the output from github, there is a lot of added prints and logs that make it hard to troubleshoot where your particular error is coming from. This ticket is clean up test noise in general including EPP and migration scripts. \r\n\r\n\r\n\r\n### Acceptance criteria\r\n\r\n- [ ] unnecessary prints/logs on tests are removed\r\n\r\n### Additional context\r\n\r\n_No response_\r\n\r\n### Links to other issues\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nfrom typing import List\nimport gevent\nfrom geventconnpool import ConnectionPool\nfrom epplibwrapper.socket import Socket\nfrom epplibwrapper.utility.pool_error import PoolError, PoolErrorCodes\n\ntry:\n from epplib.commands import Hello\n from epplib.exceptions import TransportError\nexcept ImportError:\n pass\n\nfrom gevent.lock import BoundedSemaphore\nfrom collections import deque\n\nlogger = logging.getLogger(__name__)\n\n\nclass EPPConnectionPool(ConnectionPool):\n \"\"\"A connection pool for EPPLib.\n\n Args:\n client (Client): The client\n login (commands.Login): Login creds\n options (dict): Options for the ConnectionPool\n base class\n \"\"\"\n\n def __init__(self, client, login, options: dict):\n # For storing shared credentials\n self._client = client\n self._login = login\n\n # Keep track of each greenlet\n self.greenlets: List[gevent.Greenlet] = []\n\n # Define optional pool settings.\n # Kept in a dict so that the parent class,\n # client.py, can maintain seperation/expandability\n self.size = 1\n if \"size\" in options:\n self.size = options[\"size\"]\n\n self.exc_classes = tuple((TransportError,))\n if \"exc_classes\" in options:\n self.exc_classes = options[\"exc_classes\"]\n\n self.keepalive = None\n if \"keepalive\" in options:\n self.keepalive = options[\"keepalive\"]\n\n # Determines the period in which new\n # gevent threads are spun up.\n # This time period is in seconds. 
So for instance, .1 would be .1 seconds.\n self.spawn_frequency = 0.1\n if \"spawn_frequency\" in options:\n self.spawn_frequency = options[\"spawn_frequency\"]\n\n self.conn: deque = deque()\n self.lock = BoundedSemaphore(self.size)\n\n self.populate_all_connections()\n\n def _new_connection(self):\n socket = self._create_socket(self._client, self._login)\n try:\n connection = socket.connect()\n return connection\n except Exception as err:\n message = f\"Failed to execute due to a registry error: {err}\"\n logger.error(message, exc_info=True)\n # We want to raise a pool error rather than a LoginError here\n # because if this occurs internally, we should handle this\n # differently than we otherwise would for LoginError.\n raise PoolError(code=PoolErrorCodes.NEW_CONNECTION_FAILED) from err\n\n def _keepalive(self, c):\n \"\"\"Sends a command to the server to keep the connection alive.\"\"\"\n try:\n # Sends a ping to the registry via EPPLib\n c.send(Hello())\n except Exception as err:\n message = \"Failed to keep the connection alive.\"\n logger.error(message, exc_info=True)\n raise PoolError(code=PoolErrorCodes.KEEP_ALIVE_FAILED) from err\n\n def _create_socket(self, client, login) -> Socket:\n \"\"\"Creates and returns a socket instance\"\"\"\n socket = Socket(client, login)\n return socket\n\n def get_connections(self):\n \"\"\"Returns the connection queue\"\"\"\n return self.conn\n\n def kill_all_connections(self):\n \"\"\"Kills all active connections in the pool.\"\"\"\n try:\n if len(self.conn) > 0 or len(self.greenlets) > 0:\n logger.info(\"Attempting to kill connections\")\n gevent.killall(self.greenlets)\n\n self.greenlets.clear()\n for connection in self.conn:\n connection.disconnect()\n self.conn.clear()\n\n # Clear the semaphore\n self.lock = BoundedSemaphore(self.size)\n logger.info(\"Finished killing connections\")\n else:\n logger.info(\"No connections to kill.\")\n except Exception as err:\n logger.error(\"Could not kill all connections.\")\n raise PoolError(code=PoolErrorCodes.KILL_ALL_FAILED) from err\n\n def populate_all_connections(self):\n \"\"\"Generates the connection pool.\n If any connections exist, kill them first.\n Based off of the __init__ definition for geventconnpool.\n \"\"\"\n if len(self.conn) > 0 or len(self.greenlets) > 0:\n self.kill_all_connections()\n\n # Setup the lock\n for i in range(self.size):\n self.lock.acquire()\n\n # Open multiple connections\n for i in range(self.size):\n self.greenlets.append(gevent.spawn_later(self.spawn_frequency * i, self._addOne))\n\n # Open a \"keepalive\" thread if we want to ping open connections\n if self.keepalive:\n self.greenlets.append(gevent.spawn(self._keepalive_periodic))\n", "path": "src/epplibwrapper/utility/pool.py"}]} | 1,962 | 257 |
gh_patches_debug_10114 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-982 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mockserver server.py does not work in Python 3
```mockserver_1 | Traceback (most recent call last):
mockserver_1 | File "../server.py", line 5, in <module>
mockserver_1 | from SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer
mockserver_1 | ModuleNotFoundError: No module named 'SimpleHTTPServer'
```
Looks like some modules have been reorganized in Python 3. Hopefully this is just a matter of updating the imports.
</issue>
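For reference, Python 3 folded `SimpleHTTPServer` and `BaseHTTPServer` into the single `http.server` module, so the fix is mostly an import change. A minimal sketch of an equivalent CORS-permissive handler on the Python 3 layout — the port and header set here are illustrative, not necessarily what the project ends up using:

```python
#!/usr/bin/env python3
"""Minimal CORS-friendly static file server using the Python 3 stdlib."""
from http.server import HTTPServer, SimpleHTTPRequestHandler


class CORSRequestHandler(SimpleHTTPRequestHandler):
    def do_OPTIONS(self):
        self.send_response(200, "OK")
        self.end_headers()

    def end_headers(self):
        # Allow any origin; extend with extra headers as needed.
        self.send_header("Access-Control-Allow-Origin", "*")
        super().end_headers()


if __name__ == "__main__":
    # Serve the current directory on port 8000.
    HTTPServer(("", 8000), CORSRequestHandler).serve_forever()
```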
<code>
[start of mockserver/server.py]
1 #! /usr/bin/env python
2
3 # Usage: python __file__.py <port>
4
5 from SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer
6
7 class CORSRequestHandler(SimpleHTTPRequestHandler):
8 def do_OPTIONS(self):
9 self.send_response(200, 'OK')
10 self.end_headers()
11
12 def end_headers(self):
13 self.send_header('Access-Control-Allow-Origin', '*')
14 self.send_header('Access-Control-Allow-Headers', 'x-request-timestamp, x-signature, electricitymap-token')
15 SimpleHTTPRequestHandler.end_headers(self)
16
17 if __name__ == '__main__':
18 BaseHTTPServer.test(CORSRequestHandler, BaseHTTPServer.HTTPServer)
19
[end of mockserver/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mockserver/server.py b/mockserver/server.py
--- a/mockserver/server.py
+++ b/mockserver/server.py
@@ -2,7 +2,7 @@
# Usage: python __file__.py <port>
-from SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer
+from http.server import SimpleHTTPRequestHandler, HTTPServer, test
class CORSRequestHandler(SimpleHTTPRequestHandler):
def do_OPTIONS(self):
@@ -15,4 +15,4 @@
SimpleHTTPRequestHandler.end_headers(self)
if __name__ == '__main__':
- BaseHTTPServer.test(CORSRequestHandler, BaseHTTPServer.HTTPServer)
+ test(CORSRequestHandler, HTTPServer)
| {"golden_diff": "diff --git a/mockserver/server.py b/mockserver/server.py\n--- a/mockserver/server.py\n+++ b/mockserver/server.py\n@@ -2,7 +2,7 @@\n \n # Usage: python __file__.py <port>\n \n-from SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer\n+from http.server import SimpleHTTPRequestHandler, HTTPServer, test\n \n class CORSRequestHandler(SimpleHTTPRequestHandler):\n def do_OPTIONS(self):\n@@ -15,4 +15,4 @@\n SimpleHTTPRequestHandler.end_headers(self)\n \n if __name__ == '__main__':\n- BaseHTTPServer.test(CORSRequestHandler, BaseHTTPServer.HTTPServer)\n+ test(CORSRequestHandler, HTTPServer)\n", "issue": "Mockserver server.py does not work in Python 3\n```mockserver_1 | Traceback (most recent call last):\r\nmockserver_1 | File \"../server.py\", line 5, in <module>\r\nmockserver_1 | from SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer\r\nmockserver_1 | ModuleNotFoundError: No module named 'SimpleHTTPServer'\r\n```\r\nLooks like some modules have been reorganized in Python 3. Hopefully this is just a matter of updating the imports.\n", "before_files": [{"content": "#! /usr/bin/env python\n\n# Usage: python __file__.py <port>\n\nfrom SimpleHTTPServer import SimpleHTTPRequestHandler, BaseHTTPServer\n\nclass CORSRequestHandler(SimpleHTTPRequestHandler):\n def do_OPTIONS(self):\n self.send_response(200, 'OK')\n self.end_headers()\n\n def end_headers(self):\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Access-Control-Allow-Headers', 'x-request-timestamp, x-signature, electricitymap-token')\n SimpleHTTPRequestHandler.end_headers(self)\n\nif __name__ == '__main__':\n BaseHTTPServer.test(CORSRequestHandler, BaseHTTPServer.HTTPServer)\n", "path": "mockserver/server.py"}]} | 817 | 151 |
gh_patches_debug_148 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-7583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: vae does not appear when clicking refresh button in models/VAE
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
Pressing the button to update the VAE list does not update the VAE list.
### Steps to reproduce the problem
1. Insert a new VAE file into models/VAE
2. Press the Refresh VAE list button
### What should have happened?
The new VAE file should appear in the list
### Commit where the problem happens
Latest
### What platforms do you use to access the UI ?
_No response_
### What browsers do you use to access the UI ?
_No response_
### Command Line Arguments
```Shell
No
```
### List of extensions
No
### Console logs
```Shell
Nothing
```
### Additional information
_No response_
</issue>
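One common cause of this behaviour is a refresh hook that returns a function object instead of calling it: nothing is rescanned, so the UI keeps showing the stale list. A tiny generic illustration of the difference — the names below are invented, not the webui's code:

```python
def reload_items():
    """Pretend to rescan a folder and rebuild a list."""
    print("rescanning...")
    return ["vae-a.pt", "vae-b.pt"]


def refresh_broken():
    # Returns the function object itself -- the rescan never runs.
    return reload_items


def refresh_fixed():
    # Calls it, so the rescan actually happens and the fresh list is returned.
    return reload_items()


print(refresh_broken())  # <function reload_items at 0x...>
print(refresh_fixed())   # prints "rescanning..." then ['vae-a.pt', 'vae-b.pt']
```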
<code>
[start of modules/shared_items.py]
1
2
3 def realesrgan_models_names():
4 import modules.realesrgan_model
5 return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
6
7
8 def postprocessing_scripts():
9 import modules.scripts
10
11 return modules.scripts.scripts_postproc.scripts
12
13
14 def sd_vae_items():
15 import modules.sd_vae
16
17 return ["Automatic", "None"] + list(modules.sd_vae.vae_dict)
18
19
20 def refresh_vae_list():
21 import modules.sd_vae
22
23 return modules.sd_vae.refresh_vae_list
24
[end of modules/shared_items.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modules/shared_items.py b/modules/shared_items.py
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -20,4 +20,4 @@
def refresh_vae_list():
import modules.sd_vae
- return modules.sd_vae.refresh_vae_list
+ return modules.sd_vae.refresh_vae_list()
| {"golden_diff": "diff --git a/modules/shared_items.py b/modules/shared_items.py\n--- a/modules/shared_items.py\n+++ b/modules/shared_items.py\n@@ -20,4 +20,4 @@\n def refresh_vae_list():\r\n import modules.sd_vae\r\n \r\n- return modules.sd_vae.refresh_vae_list\r\n+ return modules.sd_vae.refresh_vae_list()\n", "issue": "[Bug]: vae does not appear when clicking refresh button in models/VAE\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What happened?\n\nPressing the button to update the VAE list does not update the VAE list.\n\n### Steps to reproduce the problem\n\n1. Insert new VAE file to models/VAE\r\n2. Press buttion Refresh VAE list \n\n### What should have happened?\n\nApprear new VAE file in list\n\n### Commit where the problem happens\n\nLastest\n\n### What platforms do you use to access the UI ?\n\n_No response_\n\n### What browsers do you use to access the UI ?\n\n_No response_\n\n### Command Line Arguments\n\n```Shell\nNo\n```\n\n\n### List of extensions\n\nNo\n\n### Console logs\n\n```Shell\nNothing\n```\n\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "\r\n\r\ndef realesrgan_models_names():\r\n import modules.realesrgan_model\r\n return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]\r\n\r\n\r\ndef postprocessing_scripts():\r\n import modules.scripts\r\n\r\n return modules.scripts.scripts_postproc.scripts\r\n\r\n\r\ndef sd_vae_items():\r\n import modules.sd_vae\r\n\r\n return [\"Automatic\", \"None\"] + list(modules.sd_vae.vae_dict)\r\n\r\n\r\ndef refresh_vae_list():\r\n import modules.sd_vae\r\n\r\n return modules.sd_vae.refresh_vae_list\r\n", "path": "modules/shared_items.py"}]} | 886 | 79 |
gh_patches_debug_23129 | rasdani/github-patches | git_diff | saleor__saleor-1567 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove or block impersonate's "list" and "search" urls
Saleor uses the [django-impersonate](https://bitbucket.org/petersanchez/django-impersonate/overview) library for its client impersonation feature. While working on #1549 I've found out that in addition to the two views that we are using (start and stop impersonating the user), the library brings two additional views that we don't really want to support:
https://demo.getsaleor.com/impersonate/list/
https://demo.getsaleor.com/impersonate/search/?q=admin (note: this one 500's on link)
Ideally, the library would've provided us with a setting to disable those views, but this isn't the case.
So its worth asking ourselves what harm is there in keeping those views around, and if we really want to get rid of those two views, how would we go about it?
Looking at the [impersonate.urls](https://bitbucket.org/petersanchez/django-impersonate/src/f898c697b2bd9945187f8667d680e6d10d06dc33/impersonate/urls.py?at=default&fileviewer=file-view-default), it may be as simple as updating our `urls.py` to explicitly define `impersonate-start` and `impersonate-stop`, or perhaps we should open the issue upstream and see what the library's author thinks about it?
</issue>
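Following the idea above, the include of the library's full URL conf can be replaced by routing just the two views we actually use. A sketch of what that could look like — the view import path and URL patterns mirror django-impersonate's own `urls.py` at the time, so treat the exact regexes and names as assumptions to verify against the installed version:

```python
# urls.py -- wire up only start/stop instead of include('impersonate.urls'),
# so the library's list/ and search/ views are never exposed.
from django.conf.urls import url
from impersonate.views import impersonate, stop_impersonate

urlpatterns = [
    url(r'^impersonate/stop/$', stop_impersonate, name='impersonate-stop'),
    url(r'^impersonate/(?P<uid>\d+)/$', impersonate, name='impersonate-start'),
]
```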
<code>
[start of saleor/urls.py]
1 from django.conf import settings
2 from django.conf.urls import url, include
3 from django.conf.urls.static import static
4 from django.contrib.sitemaps.views import sitemap
5 from django.contrib.staticfiles.views import serve
6 from django.views.i18n import JavaScriptCatalog
7 from graphene_django.views import GraphQLView
8
9 from .cart.urls import urlpatterns as cart_urls
10 from .checkout.urls import urlpatterns as checkout_urls
11 from .core.sitemaps import sitemaps
12 from .core.urls import urlpatterns as core_urls
13 from .dashboard.urls import urlpatterns as dashboard_urls
14 from .data_feeds.urls import urlpatterns as feed_urls
15 from .order.urls import urlpatterns as order_urls
16 from .product.urls import urlpatterns as product_urls
17 from .registration.urls import urlpatterns as registration_urls
18 from .search.urls import urlpatterns as search_urls
19 from .userprofile.urls import urlpatterns as userprofile_urls
20
21 urlpatterns = [
22 url(r'^', include(core_urls)),
23 url(r'^account/', include(registration_urls)),
24 url(r'^cart/', include((cart_urls, 'cart'), namespace='cart')),
25 url(r'^checkout/',
26 include((checkout_urls, 'checkout'), namespace='checkout')),
27 url(r'^dashboard/',
28 include((dashboard_urls, 'dashboard'), namespace='dashboard')),
29 url(r'^graphql', GraphQLView.as_view(graphiql=settings.DEBUG)),
30 url(r'^impersonate/', include('impersonate.urls')),
31 url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),
32 url(r'^order/', include((order_urls, 'order'), namespace='order')),
33 url(r'^products/',
34 include((product_urls, 'product'), namespace='product')),
35 url(r'^profile/',
36 include((userprofile_urls, 'profile'), namespace='profile')),
37 url(r'^feeds/',
38 include((feed_urls, 'data_feeds'), namespace='data_feeds')),
39 url(r'^search/', include((search_urls, 'search'), namespace='search')),
40 url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps},
41 name='django.contrib.sitemaps.views.sitemap'),
42 url(r'', include('payments.urls')),
43 url('', include('social_django.urls', namespace='social')),
44 ]
45
46 if settings.DEBUG:
47 # static files (images, css, javascript, etc.)
48 urlpatterns += [
49 url(r'^static/(?P<path>.*)$', serve)
50 ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
51
[end of saleor/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/urls.py b/saleor/urls.py
--- a/saleor/urls.py
+++ b/saleor/urls.py
@@ -5,6 +5,7 @@
from django.contrib.staticfiles.views import serve
from django.views.i18n import JavaScriptCatalog
from graphene_django.views import GraphQLView
+from impersonate.views import impersonate, stop_impersonate
from .cart.urls import urlpatterns as cart_urls
from .checkout.urls import urlpatterns as checkout_urls
@@ -27,7 +28,8 @@
url(r'^dashboard/',
include((dashboard_urls, 'dashboard'), namespace='dashboard')),
url(r'^graphql', GraphQLView.as_view(graphiql=settings.DEBUG)),
- url(r'^impersonate/', include('impersonate.urls')),
+ url(r'^impersonate/stop/$', stop_impersonate, name='impersonate-stop'),
+ url(r'^impersonate/(?P<uid>\d+)/$', impersonate, name='impersonate-start'),
url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),
url(r'^order/', include((order_urls, 'order'), namespace='order')),
url(r'^products/',
| {"golden_diff": "diff --git a/saleor/urls.py b/saleor/urls.py\n--- a/saleor/urls.py\n+++ b/saleor/urls.py\n@@ -5,6 +5,7 @@\n from django.contrib.staticfiles.views import serve\n from django.views.i18n import JavaScriptCatalog\n from graphene_django.views import GraphQLView\n+from impersonate.views import impersonate, stop_impersonate\n \n from .cart.urls import urlpatterns as cart_urls\n from .checkout.urls import urlpatterns as checkout_urls\n@@ -27,7 +28,8 @@\n url(r'^dashboard/',\n include((dashboard_urls, 'dashboard'), namespace='dashboard')),\n url(r'^graphql', GraphQLView.as_view(graphiql=settings.DEBUG)),\n- url(r'^impersonate/', include('impersonate.urls')),\n+ url(r'^impersonate/stop/$', stop_impersonate, name='impersonate-stop'),\n+ url(r'^impersonate/(?P<uid>\\d+)/$', impersonate, name='impersonate-start'),\n url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),\n url(r'^order/', include((order_urls, 'order'), namespace='order')),\n url(r'^products/',\n", "issue": "Remove or block impersonate's \"list\" and \"search\" urls\nSaleor uses the [django-impersonate](https://bitbucket.org/petersanchez/django-impersonate/overview) for client impersonation feature. While working on #1549 I've found out that in addition to two views that we are using (start and stop impersonating the user), the library brings additional two views that we don't really want to support:\r\n\r\nhttps://demo.getsaleor.com/impersonate/list/\r\nhttps://demo.getsaleor.com/impersonate/search/?q=admin (note: this one 500's on link)\r\n\r\nIdeally, library would've provided us with a settings to disable those views, but this isn't the case.\r\n\r\nSo its worth asking ourselves what harm is there in keeping those views around, and if we really want to get rid of those two views, how would we go about it?\r\n\r\nLooking at the [imersonate.urls](https://bitbucket.org/petersanchez/django-impersonate/src/f898c697b2bd9945187f8667d680e6d10d06dc33/impersonate/urls.py?at=default&fileviewer=file-view-default), it may be as simple as updating our `urls.py` to explictly define `impersonate-start` and `impersonate-stop`, or perhaps we should open the issue upstream and see what library's author thinks about it?\r\n \n", "before_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.contrib.staticfiles.views import serve\nfrom django.views.i18n import JavaScriptCatalog\nfrom graphene_django.views import GraphQLView\n\nfrom .cart.urls import urlpatterns as cart_urls\nfrom .checkout.urls import urlpatterns as checkout_urls\nfrom .core.sitemaps import sitemaps\nfrom .core.urls import urlpatterns as core_urls\nfrom .dashboard.urls import urlpatterns as dashboard_urls\nfrom .data_feeds.urls import urlpatterns as feed_urls\nfrom .order.urls import urlpatterns as order_urls\nfrom .product.urls import urlpatterns as product_urls\nfrom .registration.urls import urlpatterns as registration_urls\nfrom .search.urls import urlpatterns as search_urls\nfrom .userprofile.urls import urlpatterns as userprofile_urls\n\nurlpatterns = [\n url(r'^', include(core_urls)),\n url(r'^account/', include(registration_urls)),\n url(r'^cart/', include((cart_urls, 'cart'), namespace='cart')),\n url(r'^checkout/',\n include((checkout_urls, 'checkout'), namespace='checkout')),\n url(r'^dashboard/',\n include((dashboard_urls, 'dashboard'), namespace='dashboard')),\n url(r'^graphql', 
GraphQLView.as_view(graphiql=settings.DEBUG)),\n url(r'^impersonate/', include('impersonate.urls')),\n url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),\n url(r'^order/', include((order_urls, 'order'), namespace='order')),\n url(r'^products/',\n include((product_urls, 'product'), namespace='product')),\n url(r'^profile/',\n include((userprofile_urls, 'profile'), namespace='profile')),\n url(r'^feeds/',\n include((feed_urls, 'data_feeds'), namespace='data_feeds')),\n url(r'^search/', include((search_urls, 'search'), namespace='search')),\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},\n name='django.contrib.sitemaps.views.sitemap'),\n url(r'', include('payments.urls')),\n url('', include('social_django.urls', namespace='social')),\n]\n\nif settings.DEBUG:\n # static files (images, css, javascript, etc.)\n urlpatterns += [\n url(r'^static/(?P<path>.*)$', serve)\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "saleor/urls.py"}]} | 1,468 | 267 |
gh_patches_debug_16929 | rasdani/github-patches | git_diff | pulp__pulpcore-306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix content app not showing file size for 0 byte files
fixes: #5100
</issue>
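A likely-looking culprit for "0 byte files" specifically (an assumption here, since the issue gives no detail) is Python truthiness: a size of `0` is falsy, so a guard like `if size:` treats an empty file the same as a missing value. A generic sketch of the pitfall and the explicit check — hypothetical functions, not pulpcore's code:

```python
def format_size_buggy(size):
    # 0 is falsy, so a 0-byte file falls into the "unknown" branch.
    if size:
        return f"{size} bytes"
    return "unknown"


def format_size_fixed(size):
    # Compare against None explicitly so 0 is treated as a real value.
    if size is not None:
        return f"{size} bytes"
    return "unknown"


print(format_size_buggy(0))     # unknown  (wrong)
print(format_size_fixed(0))     # 0 bytes
print(format_size_fixed(None))  # unknown
```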
<code>
[start of setup.py]
1 from setuptools import find_packages, setup
2
3 with open('README.md') as f:
4 long_description = f.read()
5
6 requirements = [
7 'coreapi~=2.3.3',
8 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to
9 'django-filter~=2.2.0',
10 'djangorestframework~=3.10.2',
11 'djangorestframework-queryfields~=1.0.0',
12 'drf-nested-routers~=0.91.0',
13 'drf-yasg~=1.16.1',
14 'gunicorn~=19.9.0',
15 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412
16 'PyYAML~=5.1.1',
17 'rq~=1.1.0',
18 'redis~=3.1.0',
19 'setuptools>=41.0.1,<41.3.0',
20 'dynaconf~=2.1.0',
21 'whitenoise~=4.1.3',
22 ]
23
24 setup(
25 name='pulpcore',
26 version='3.0.0rc6.dev',
27 description='Pulp Django Application and Related Modules',
28 long_description=long_description,
29 long_description_content_type="text/markdown",
30 license='GPLv2+',
31 packages=find_packages(exclude=['test']),
32 author='Pulp Team',
33 author_email='[email protected]',
34 url='http://www.pulpproject.org',
35 python_requires='>=3.6',
36 install_requires=requirements,
37 extras_require={
38 'postgres': ['psycopg2-binary'],
39 'mysql': ['mysqlclient']
40 },
41 include_package_data=True,
42 classifiers=(
43 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
44 'Operating System :: POSIX :: Linux',
45 'Development Status :: 4 - Beta',
46 'Framework :: Django',
47 'Programming Language :: Python',
48 'Programming Language :: Python :: 3',
49 'Programming Language :: Python :: 3.6',
50 'Programming Language :: Python :: 3.7',
51 ),
52 scripts=['bin/pulp-content'],
53 )
54
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,6 +13,7 @@
'drf-yasg~=1.16.1',
'gunicorn~=19.9.0',
'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412
+ 'psycopg2-binary',
'PyYAML~=5.1.1',
'rq~=1.1.0',
'redis~=3.1.0',
@@ -34,10 +35,6 @@
url='http://www.pulpproject.org',
python_requires='>=3.6',
install_requires=requirements,
- extras_require={
- 'postgres': ['psycopg2-binary'],
- 'mysql': ['mysqlclient']
- },
include_package_data=True,
classifiers=(
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,6 +13,7 @@\n 'drf-yasg~=1.16.1',\n 'gunicorn~=19.9.0',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n+ 'psycopg2-binary',\n 'PyYAML~=5.1.1',\n 'rq~=1.1.0',\n 'redis~=3.1.0',\n@@ -34,10 +35,6 @@\n url='http://www.pulpproject.org',\n python_requires='>=3.6',\n install_requires=requirements,\n- extras_require={\n- 'postgres': ['psycopg2-binary'],\n- 'mysql': ['mysqlclient']\n- },\n include_package_data=True,\n classifiers=(\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n", "issue": "Fix content app not showing file size for 0 byte files\nfixes: #5100\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nrequirements = [\n 'coreapi~=2.3.3',\n 'Django~=2.2.3', # LTS version, switch only if we have a compelling reason to\n 'django-filter~=2.2.0',\n 'djangorestframework~=3.10.2',\n 'djangorestframework-queryfields~=1.0.0',\n 'drf-nested-routers~=0.91.0',\n 'drf-yasg~=1.16.1',\n 'gunicorn~=19.9.0',\n 'packaging', # until drf-yasg 1.16.2 is out https://github.com/axnsan12/drf-yasg/issues/412\n 'PyYAML~=5.1.1',\n 'rq~=1.1.0',\n 'redis~=3.1.0',\n 'setuptools>=41.0.1,<41.3.0',\n 'dynaconf~=2.1.0',\n 'whitenoise~=4.1.3',\n]\n\nsetup(\n name='pulpcore',\n version='3.0.0rc6.dev',\n description='Pulp Django Application and Related Modules',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='GPLv2+',\n packages=find_packages(exclude=['test']),\n author='Pulp Team',\n author_email='[email protected]',\n url='http://www.pulpproject.org',\n python_requires='>=3.6',\n install_requires=requirements,\n extras_require={\n 'postgres': ['psycopg2-binary'],\n 'mysql': ['mysqlclient']\n },\n include_package_data=True,\n classifiers=(\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Operating System :: POSIX :: Linux',\n 'Development Status :: 4 - Beta',\n 'Framework :: Django',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ),\n scripts=['bin/pulp-content'],\n)\n", "path": "setup.py"}]} | 1,165 | 242 |
gh_patches_debug_23526 | rasdani/github-patches | git_diff | OpenMined__PySyft-3589 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sy.grid.register() should print useful information
**Is your feature request related to a problem? Please describe.**
When registering a node on OpenGrid, we want to convey some information to the user using sys.stdout.write().
A few things we thought to add:
- Information: connecting to opengrid...etc.
- Information: Can I connect to the main grid node... graceful error message if you can't.
- Disclaimer: OpenGrid is an experimental feature currently in alpha. Do not use this to protect real-world data.
- Where to get Help:
- Join our slack (slack.openmined.org) and ask for help in the #lib_syft channel.
- File a Github Issue: https://github.com/OpenMined/PySyft and add the string "#opengrid" in the issue title.
</issue>
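Before the file below, here is a rough sketch of the kind of start-up messages the issue asks for. The helper name and exact wording are illustrative assumptions, not the library's actual API:

```python
import sys


def announce_registration(peer_id, url):
    # Illustrative only: prints the information the issue lists when a peer
    # registers on OpenGrid.
    sys.stdout.write("Connecting to OpenGrid (%s) ... " % url)
    sys.stdout.write("OK\n")
    sys.stdout.write("Peer ID: %s\n" % peer_id)
    sys.stdout.write(
        "DISCLAIMER: OpenGrid is an experimental feature currently in alpha. "
        "Do not use this to protect real-world data.\n"
    )
    sys.stdout.write("Where to get help:\n")
    sys.stdout.write(
        " - Join our Slack (slack.openmined.org) and ask in the #lib_syft channel.\n"
    )
    sys.stdout.write(
        " - File a GitHub issue at https://github.com/OpenMined/PySyft "
        "and add '#opengrid' to the title.\n"
    )
```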
<code>
[start of syft/grid/__init__.py]
1 from .network import Network
2 import uuid
3
4 DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com"
5
6
7 def register(**kwargs):
8 """ Add this process as a new peer registering it in the grid network.
9
10 Returns:
11 peer: Peer Network instance.
12 """
13 if not kwargs:
14 args = args = {"max_size": None, "timeout": 444, "url": DEFAULT_NETWORK_URL}
15 else:
16 args = kwargs
17
18 peer_id = str(uuid.uuid4())
19 peer = Network(peer_id, **args)
20 peer.start()
21
22 return peer
23
[end of syft/grid/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/syft/grid/__init__.py b/syft/grid/__init__.py
--- a/syft/grid/__init__.py
+++ b/syft/grid/__init__.py
@@ -1,4 +1,5 @@
from .network import Network
+import sys
import uuid
DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com"
@@ -16,7 +17,32 @@
args = kwargs
peer_id = str(uuid.uuid4())
+ sys.stdout.write(
+ "Connecting to OpenGrid (" + "\033[94m" + DEFAULT_NETWORK_URL + "\033[0m" + ") ... "
+ )
peer = Network(peer_id, **args)
+
+ sys.stdout.write("\033[92m" + "OK" + "\033[0m" + "\n")
+ sys.stdout.write("Peer ID: " + peer_id + "\n")
+
+ sys.stdout.write(
+ "\033[93m" + "DISCLAIMER" + "\033[0m"
+ ":"
+ + "\033[1m"
+ + " OpenGrid is an experimental feature currently in alpha. Do not use this to protect real-world data.\n"
+ + "\033[0m"
+ )
+
+ sys.stdout.write("Where to get help: \n")
+ sys.stdout.write(
+ " - Join our slack (https://slack.openmined.org) and ask for help in the #lib_syft channel.\n"
+ )
+ sys.stdout.write(
+ " - File a Github Issue: https://github.com/OpenMined/PySyft and add the string '#opengrid' in the issue title.\n"
+ )
+ sys.stdout.write(
+ " - Want to join in our development team? Apply here: https://forms.gle/wcH1vxzvPyDSbSVW6\n"
+ )
peer.start()
return peer
| {"golden_diff": "diff --git a/syft/grid/__init__.py b/syft/grid/__init__.py\n--- a/syft/grid/__init__.py\n+++ b/syft/grid/__init__.py\n@@ -1,4 +1,5 @@\n from .network import Network\n+import sys\n import uuid\n \n DEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n@@ -16,7 +17,32 @@\n args = kwargs\n \n peer_id = str(uuid.uuid4())\n+ sys.stdout.write(\n+ \"Connecting to OpenGrid (\" + \"\\033[94m\" + DEFAULT_NETWORK_URL + \"\\033[0m\" + \") ... \"\n+ )\n peer = Network(peer_id, **args)\n+\n+ sys.stdout.write(\"\\033[92m\" + \"OK\" + \"\\033[0m\" + \"\\n\")\n+ sys.stdout.write(\"Peer ID: \" + peer_id + \"\\n\")\n+\n+ sys.stdout.write(\n+ \"\\033[93m\" + \"DISCLAIMER\" + \"\\033[0m\"\n+ \":\"\n+ + \"\\033[1m\"\n+ + \" OpenGrid is an experimental feature currently in alpha. Do not use this to protect real-world data.\\n\"\n+ + \"\\033[0m\"\n+ )\n+\n+ sys.stdout.write(\"Where to get help: \\n\")\n+ sys.stdout.write(\n+ \" - Join our slack (https://slack.openmined.org) and ask for help in the #lib_syft channel.\\n\"\n+ )\n+ sys.stdout.write(\n+ \" - File a Github Issue: https://github.com/OpenMined/PySyft and add the string '#opengrid' in the issue title.\\n\"\n+ )\n+ sys.stdout.write(\n+ \" - Want to join in our development team? Apply here: https://forms.gle/wcH1vxzvPyDSbSVW6\\n\"\n+ )\n peer.start()\n \n return peer\n", "issue": "sy.grid.register() should print useful information\n**Is your feature request related to a problem? Please describe.**\r\nWhen registering a node on OpenGrid, we want to convey some information to the user using sys.stdout.write()\r\n\r\nA few things we thought to add.\r\n\r\n- Information: connecting to opengrid...etc.\r\n - Information: Can I connect to the main grid node... graceful error message if you can't.\r\n- Disclaimer: OpenGrid is an experimental feature currently in alpha. Do not use this to protect real-world data.\r\n- Where to get Help:\r\n - Join our slack (slack.openmined.org) and ask for help in the #lib_syft channel.\r\n - File a Github Issue: https://github.com/OpenMined/PySyft and add the string \"#opengrid\" in the issue title.\r\n \r\n\n", "before_files": [{"content": "from .network import Network\nimport uuid\n\nDEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n\n\ndef register(**kwargs):\n \"\"\" Add this process as a new peer registering it in the grid network.\n \n Returns:\n peer: Peer Network instance.\n \"\"\"\n if not kwargs:\n args = args = {\"max_size\": None, \"timeout\": 444, \"url\": DEFAULT_NETWORK_URL}\n else:\n args = kwargs\n\n peer_id = str(uuid.uuid4())\n peer = Network(peer_id, **args)\n peer.start()\n\n return peer\n", "path": "syft/grid/__init__.py"}]} | 894 | 469 |
gh_patches_debug_23054 | rasdani/github-patches | git_diff | scikit-hep__pyhf-862 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update PyPI keywords and classifiers in setup.py
# Description
As JAX is now a supported backend, it should be added to the [list of keywords in `setup.py`](https://github.com/scikit-hep/pyhf/blob/917bd5127c1da023b279c076bb41614fbb859487/setup.py#L85). Additionally, the [classifiers](https://packaging.python.org/guides/distributing-packages-using-setuptools/#classifiers) should be updated to include a `Development Status`, `License`, `Intended Audience`, and `Topic`.
</issue>
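A sketch of what the requested metadata could look like. The keyword list simply appends `jax`, and the classifier strings below are standard PyPI trove classifiers chosen to match the categories named in the issue; the exact values are assumptions, not the project's final choice:

```python
# Illustrative values only - the project may word these differently.
keywords = "physics fitting numpy scipy tensorflow pytorch jax"

classifiers = [
    "Development Status :: 4 - Beta",
    "License :: OSI Approved :: Apache Software License",
    "Intended Audience :: Science/Research",
    "Topic :: Scientific/Engineering",
    "Topic :: Scientific/Engineering :: Physics",
    "Programming Language :: Python :: 3",
]
```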
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
83 author_email='[email protected], [email protected], [email protected]',
84 license='Apache',
85 keywords='physics fitting numpy scipy tensorflow pytorch',
86 classifiers=[
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.6",
89 "Programming Language :: Python :: 3.7",
90 "Programming Language :: Python :: 3.8",
91 ],
92 package_dir={'': 'src'},
93 packages=find_packages(where='src'),
94 include_package_data=True,
95 python_requires=">=3.6",
96 install_requires=[
97 'scipy', # requires numpy, which is required by pyhf and tensorflow
98 'click>=6.0', # for console scripts,
99 'tqdm', # for readxml
100 'jsonschema>=3.2.0', # for utils
101 'jsonpatch',
102 'pyyaml', # for parsing CLI equal-delimited options
103 ],
104 extras_require=extras_require,
105 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
106 dependency_links=[],
107 use_scm_version=lambda: {'local_scheme': lambda version: ''},
108 )
109
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -79,11 +79,21 @@
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://github.com/scikit-hep/pyhf',
+ project_urls={
+ "Documentation": "https://scikit-hep.org/pyhf/",
+ "Source": "https://github.com/scikit-hep/pyhf",
+ "Tracker": "https://github.com/scikit-hep/pyhf/issues",
+ },
author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
author_email='[email protected], [email protected], [email protected]',
license='Apache',
- keywords='physics fitting numpy scipy tensorflow pytorch',
+ keywords='physics fitting numpy scipy tensorflow pytorch jax',
classifiers=[
+ "Development Status :: 4 - Beta",
+ "License :: OSI Approved :: Apache Software License",
+ "Intended Audience :: Science/Research",
+ "Topic :: Scientific/Engineering",
+ "Topic :: Scientific/Engineering :: Physics",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -79,11 +79,21 @@\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n+ project_urls={\n+ \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n+ \"Source\": \"https://github.com/scikit-hep/pyhf\",\n+ \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n+ },\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n- keywords='physics fitting numpy scipy tensorflow pytorch',\n+ keywords='physics fitting numpy scipy tensorflow pytorch jax',\n classifiers=[\n+ \"Development Status :: 4 - Beta\",\n+ \"License :: OSI Approved :: Apache Software License\",\n+ \"Intended Audience :: Science/Research\",\n+ \"Topic :: Scientific/Engineering\",\n+ \"Topic :: Scientific/Engineering :: Physics\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n", "issue": "Update PyPI keywords and classifies in setup.py\n# Description\r\n\r\nAs JAX is now a supported backend then it should additionally be added to the [list of keywords in `setup.py`](https://github.com/scikit-hep/pyhf/blob/917bd5127c1da023b279c076bb41614fbb859487/setup.py#L85). Additionally, the [classifies](https://packaging.python.org/guides/distributing-packages-using-setuptools/#classifiers) should be updated as well to include a `Development Status`, `License`, `Intended Audience`, and `Topic`.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew 
Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,751 | 295 |
gh_patches_debug_2538 | rasdani/github-patches | git_diff | Parsl__parsl-328 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fatal: Not a git repository: '/homes/vvikraman/anaconda3/lib/python3.6/site-packages/.git
Hi
When I try to run parsl I am getting the following issue:
fatal: Not a git repository: '/homes/vvikraman/anaconda3/lib/python3.6/site-packages/.git
Is it a real issue?
I am using python3 and jupyter but run parsl in a terminal.
Issue in parsl.log
I tried to run a simple script given in the parsl documentation
```
import parsl
from parsl import *
import time
workers = ThreadPoolExecutor(max_workers=4)
dfk = DataFlowKernel(executors=[workers])
print(1)
@App('python', dfk)
def hello ():
import time
time.sleep(5)
return 'Hello World!'
print(2)
app_future = hello()
print ('Done: %s' % app_future.done())
print ('Result: %s' % app_future.result())
print ('Done: %s' % app_future.done())
```
However, in the parsl.log shows this issue
2018-06-07 21:45:37 parsl.utils:24 [ERROR] Unable to determine code state
Traceback (most recent call last):
File "/homes/vvikraman/anaconda3/lib/python3.6/site-packages/parsl/utils.py", line 19, in get_version
head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
File "/homes/vvikraman/anaconda3/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/homes/vvikraman/anaconda3/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.
</issue>
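The traceback happens because the installed package directory is not a git checkout, so `git rev-parse` fails. A hedged sketch of a version lookup that degrades gracefully in that case (function and argument names are illustrative, not parsl's API):

```python
import os
import shlex
import subprocess


def describe_version(base_version, package_dir):
    """Return base_version, extended with the git hash only when available."""
    work_tree = os.path.dirname(package_dir)
    git_dir = os.path.join(work_tree, ".git")
    if not os.path.isdir(git_dir):
        # e.g. installed under site-packages: no .git, so skip git entirely
        return base_version
    env = {"GIT_WORK_TREE": work_tree, "GIT_DIR": git_dir}
    try:
        head = (
            subprocess.check_output(shlex.split("git rev-parse --short HEAD"), env=env)
            .strip()
            .decode("utf-8")
        )
        return "{}-{}".format(base_version, head)
    except (subprocess.CalledProcessError, OSError):
        return base_version
```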
<code>
[start of parsl/utils.py]
1 import logging
2 import os
3 import shlex
4 import subprocess
5 import threading
6 import time
7 from contextlib import contextmanager
8 from functools import wraps
9
10 import parsl
11 from parsl.version import VERSION
12
13 logger = logging.getLogger(__name__)
14
15
16 def get_version():
17 version = parsl.__version__
18 work_tree = os.path.dirname(os.path.dirname(__file__))
19 git_dir = os.path.join(work_tree, '.git')
20 env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
21 try:
22 cmd = shlex.split('git rev-parse --short HEAD')
23 head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
24 diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
25 status = 'dirty' if diff else 'clean'
26 version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
27 except Exception as e:
28 logger.exception("Unable to determine code state")
29
30 return version
31
32
33 def get_all_checkpoints(rundir="runinfo"):
34 """Finds the checkpoints from all last runs.
35
36 Note that checkpoints are incremental, and this helper will not find
37 previous checkpoints from earlier than the most recent run. It probably
38 should be made to do so.
39
40 Kwargs:
41 - rundir(str) : Path to the runinfo directory
42
43 Returns:
44 - a list suitable for the checkpointFiles parameter of DataFlowKernel
45 constructor
46
47 """
48
49 if(not(os.path.isdir(rundir))):
50 return []
51
52 dirs = sorted(os.listdir(rundir))
53
54 checkpoints = []
55
56 for runid in dirs:
57
58 checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))
59
60 if(os.path.isdir(checkpoint)):
61 checkpoints.append(checkpoint)
62
63 return checkpoints
64
65
66 def get_last_checkpoint(rundir="runinfo"):
67 """Finds the checkpoint from the last run, if one exists.
68
69 Note that checkpoints are incremental, and this helper will not find
70 previous checkpoints from earlier than the most recent run. It probably
71 should be made to do so.
72
73 Kwargs:
74 - rundir(str) : Path to the runinfo directory
75
76 Returns:
77 - a list suitable for checkpointFiles parameter of DataFlowKernel
78 constructor, with 0 or 1 elements
79
80 """
81
82 if(not(os.path.isdir(rundir))):
83 return []
84
85 dirs = sorted(os.listdir(rundir))
86
87 if(len(dirs) == 0):
88 return []
89
90 last_runid = dirs[-1]
91 last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))
92
93 if(not(os.path.isdir(last_checkpoint))):
94 return []
95
96 return [last_checkpoint]
97
98
99 def timeout(seconds=None):
100 def decorator(func, *args, **kwargs):
101 @wraps(func)
102 def wrapper(*args, **kwargs):
103 t = threading.Thread(target=func, args=args, kwargs=kwargs)
104 t.start()
105 result = t.join(seconds)
106 if t.is_alive():
107 raise RuntimeError('timed out in {}'.format(func))
108 return result
109 return wrapper
110 return decorator
111
112
113 @contextmanager
114 def time_limited_open(path, mode, seconds=1):
115 @timeout(seconds)
116 def check_path(path):
117 while not os.path.exists(path):
118 time.sleep(0.1)
119 check_path(path)
120 f = open(path, mode)
121 yield f
122 f.close()
123
[end of parsl/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/utils.py b/parsl/utils.py
--- a/parsl/utils.py
+++ b/parsl/utils.py
@@ -25,7 +25,7 @@
status = 'dirty' if diff else 'clean'
version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
except Exception as e:
- logger.exception("Unable to determine code state")
+ pass
return version
| {"golden_diff": "diff --git a/parsl/utils.py b/parsl/utils.py\n--- a/parsl/utils.py\n+++ b/parsl/utils.py\n@@ -25,7 +25,7 @@\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n- logger.exception(\"Unable to determine code state\")\n+ pass\n \n return version\n", "issue": "fatal: Not a git repository: '/homes/vvikraman/anaconda3/lib/python3.6/site-packages/.git\nHi \r\nWhen I try to run parsl I am getting the following issue:\r\n\r\nfatal: Not a git repository: '/homes/vvikraman/anaconda3/lib/python3.6/site-packages/.git\r\n\r\nIs it a real issue?\r\n\r\nI am using python3 and jupyter but run parsl in a terminal. \nIssue in parsl.log\nI tried to run a simple script given in the parsl documentation \r\n\r\n```\r\nimport parsl\r\nfrom parsl import *\r\nimport time\r\n\r\nworkers = ThreadPoolExecutor(max_workers=4)\r\ndfk = DataFlowKernel(executors=[workers])\r\nprint(1)\r\n@App('python', dfk)\r\ndef hello ():\r\n import time\r\n time.sleep(5)\r\n return 'Hello World!'\r\nprint(2)\r\napp_future = hello()\r\nprint ('Done: %s' % app_future.done())\r\nprint ('Result: %s' % app_future.result())\r\nprint ('Done: %s' % app_future.done())\r\n```\r\nHowever, in the parsl.log shows this issue\r\n\r\n2018-06-07 21:45:37 parsl.utils:24 [ERROR] Unable to determine code state\r\nTraceback (most recent call last):\r\n File \"/homes/vvikraman/anaconda3/lib/python3.6/site-packages/parsl/utils.py\", line 19, in get_version\r\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\r\n File \"/homes/vvikraman/anaconda3/lib/python3.6/subprocess.py\", line 336, in check_output\r\n **kwargs).stdout\r\n File \"/homes/vvikraman/anaconda3/lib/python3.6/subprocess.py\", line 418, in run\r\n output=stdout, stderr=stderr)\r\nsubprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.\r\n\n", "before_files": [{"content": "import logging\nimport os\nimport shlex\nimport subprocess\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport parsl\nfrom parsl.version import VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_version():\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n try:\n cmd = shlex.split('git rev-parse --short HEAD')\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n logger.exception(\"Unable to determine code state\")\n\n return version\n\n\ndef get_all_checkpoints(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. 
It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n checkpoints = []\n\n for runid in dirs:\n\n checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))\n\n if(os.path.isdir(checkpoint)):\n checkpoints.append(checkpoint)\n\n return checkpoints\n\n\ndef get_last_checkpoint(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n if(len(dirs) == 0):\n return []\n\n last_runid = dirs[-1]\n last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))\n\n if(not(os.path.isdir(last_checkpoint))):\n return []\n\n return [last_checkpoint]\n\n\ndef timeout(seconds=None):\n def decorator(func, *args, **kwargs):\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.start()\n result = t.join(seconds)\n if t.is_alive():\n raise RuntimeError('timed out in {}'.format(func))\n return result\n return wrapper\n return decorator\n\n\n@contextmanager\ndef time_limited_open(path, mode, seconds=1):\n @timeout(seconds)\n def check_path(path):\n while not os.path.exists(path):\n time.sleep(0.1)\n check_path(path)\n f = open(path, mode)\n yield f\n f.close()\n", "path": "parsl/utils.py"}]} | 2,006 | 103 |
gh_patches_debug_9546 | rasdani/github-patches | git_diff | fossasia__open-event-server-5266 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use order_expiry_time as the parameter to expire orders
**Describe the bug**
Currently we are expiring orders after 10 minutes. We should change this to use the order_expiry_time parameter.
</issue>
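A small sketch of the intended check, assuming the event model exposes `order_expiry_time` in minutes (the attribute name follows the issue title; everything else is illustrative):

```python
from datetime import datetime, timedelta, timezone


def order_has_expired(order):
    # Pending orders expire `order_expiry_time` minutes after creation,
    # instead of after a hard-coded 10 minutes.
    deadline = order.created_at + timedelta(minutes=order.event.order_expiry_time)
    return order.status == 'pending' and deadline < datetime.now(timezone.utc)
```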
<code>
[start of app/api/helpers/order.py]
1 import logging
2 from datetime import timedelta, datetime, timezone
3
4 from flask import render_template
5
6 from app.api.helpers import ticketing
7 from app.api.helpers.db import save_to_db, safe_query_without_soft_deleted_entries, get_count
8 from app.api.helpers.exceptions import UnprocessableEntity, ConflictException
9 from app.api.helpers.files import create_save_pdf
10 from app.api.helpers.storage import UPLOAD_PATHS
11 from app.models import db
12 from app.models.ticket import Ticket
13 from app.models.ticket_holder import TicketHolder
14
15
16 def delete_related_attendees_for_order(order):
17 """
18 Delete the associated attendees of an order when it is cancelled/deleted/expired
19 :param order: Order whose attendees have to be deleted.
20 :return:
21 """
22 for ticket_holder in order.ticket_holders:
23 db.session.delete(ticket_holder)
24 try:
25 db.session.commit()
26 except Exception as e:
27 logging.error('DB Exception! %s' % e)
28 db.session.rollback()
29
30
31 def set_expiry_for_order(order, override=False):
32 """
33 Expire the order after the time slot(10 minutes) if the order is pending.
34 Also expires the order if we want to expire an order regardless of the state and time.
35 :param order: Order to be expired.
36 :param override: flag to force expiry.
37 :return:
38 """
39 if order and not order.paid_via and (override or (order.status == 'pending' and (
40 order.created_at +
41 timedelta(minutes=ticketing.TicketingManager.get_order_expiry())) < datetime.now(timezone.utc))):
42 order.status = 'expired'
43 delete_related_attendees_for_order(order)
44 save_to_db(order)
45 return order
46
47
48 def create_pdf_tickets_for_holder(order):
49 """
50 Create tickets for the holders of an order.
51 :param order: The order for which to create tickets for.
52 """
53 if order.status == 'completed':
54 pdf = create_save_pdf(render_template('pdf/ticket_purchaser.html', order=order),
55 UPLOAD_PATHS['pdf']['ticket_attendee'],
56 dir_path='/static/uploads/pdf/tickets/')
57 order.tickets_pdf_url = pdf
58
59 for holder in order.ticket_holders:
60 if (not holder.user) or holder.user.id != order.user_id:
61 # holder is not the order buyer.
62 pdf = create_save_pdf(render_template('pdf/ticket_attendee.html', order=order, holder=holder),
63 UPLOAD_PATHS['pdf']['ticket_attendee'],
64 dir_path='/static/uploads/pdf/tickets/')
65 else:
66 # holder is the order buyer.
67 pdf = order.tickets_pdf_url
68 holder.pdf_url = pdf
69 save_to_db(holder)
70
71 save_to_db(order)
72
73
74 def create_onsite_attendees_for_order(data):
75 """
76 Creates on site ticket holders for an order and adds it into the request data.
77 :param data: data initially passed in the POST request for order.
78 :return:
79 """
80 on_site_tickets = data.get('on_site_tickets')
81
82 if not on_site_tickets:
83 raise UnprocessableEntity({'pointer': 'data/attributes/on_site_tickets'}, 'on_site_tickets info missing')
84
85 data['ticket_holders'] = []
86
87 for on_site_ticket in on_site_tickets:
88 ticket_id = on_site_ticket['id']
89 quantity = int(on_site_ticket['quantity'])
90
91 ticket = safe_query_without_soft_deleted_entries(db, Ticket, 'id', ticket_id, 'ticket_id')
92
93 ticket_sold_count = get_count(db.session.query(TicketHolder.id).
94 filter_by(ticket_id=int(ticket.id), deleted_at=None))
95
96 # Check if the ticket is already sold out or not.
97 if ticket_sold_count + quantity > ticket.quantity:
98 # delete the already created attendees.
99 for holder in data['ticket_holders']:
100 ticket_holder = db.session.query(TicketHolder).filter(id == int(holder)).one()
101 db.session.delete(ticket_holder)
102 try:
103 db.session.commit()
104 except Exception as e:
105 logging.error('DB Exception! %s' % e)
106 db.session.rollback()
107
108 raise ConflictException(
109 {'pointer': '/data/attributes/on_site_tickets'},
110 "Ticket with id: {} already sold out. You can buy at most {} tickets".format(ticket_id,
111 ticket.quantity -
112 ticket_sold_count)
113 )
114
115 for _ in range(1, quantity):
116 ticket_holder = TicketHolder(firstname='onsite', lastname='attendee', email='[email protected]',
117 ticket_id=ticket.id, event_id=data.get('event'))
118 save_to_db(ticket_holder)
119 data['ticket_holders'].append(ticket_holder.id)
120
121 # delete from the data.
122 del data['on_site_tickets']
123
[end of app/api/helpers/order.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/helpers/order.py b/app/api/helpers/order.py
--- a/app/api/helpers/order.py
+++ b/app/api/helpers/order.py
@@ -38,7 +38,7 @@
"""
if order and not order.paid_via and (override or (order.status == 'pending' and (
order.created_at +
- timedelta(minutes=ticketing.TicketingManager.get_order_expiry())) < datetime.now(timezone.utc))):
+ timedelta(minutes=order.event.order_expiry_time)) < datetime.now(timezone.utc))):
order.status = 'expired'
delete_related_attendees_for_order(order)
save_to_db(order)
| {"golden_diff": "diff --git a/app/api/helpers/order.py b/app/api/helpers/order.py\n--- a/app/api/helpers/order.py\n+++ b/app/api/helpers/order.py\n@@ -38,7 +38,7 @@\n \"\"\"\n if order and not order.paid_via and (override or (order.status == 'pending' and (\n order.created_at +\n- timedelta(minutes=ticketing.TicketingManager.get_order_expiry())) < datetime.now(timezone.utc))):\n+ timedelta(minutes=order.event.order_expiry_time)) < datetime.now(timezone.utc))):\n order.status = 'expired'\n delete_related_attendees_for_order(order)\n save_to_db(order)\n", "issue": "User order_expiry_time as the parameter to expire orders\n**Describe the bug**\r\nCurrently we are expiring orders after 10 minutes. We should change it to order_expiry_time parameter. \n", "before_files": [{"content": "import logging\nfrom datetime import timedelta, datetime, timezone\n\nfrom flask import render_template\n\nfrom app.api.helpers import ticketing\nfrom app.api.helpers.db import save_to_db, safe_query_without_soft_deleted_entries, get_count\nfrom app.api.helpers.exceptions import UnprocessableEntity, ConflictException\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.models import db\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import TicketHolder\n\n\ndef delete_related_attendees_for_order(order):\n \"\"\"\n Delete the associated attendees of an order when it is cancelled/deleted/expired\n :param order: Order whose attendees have to be deleted.\n :return:\n \"\"\"\n for ticket_holder in order.ticket_holders:\n db.session.delete(ticket_holder)\n try:\n db.session.commit()\n except Exception as e:\n logging.error('DB Exception! %s' % e)\n db.session.rollback()\n\n\ndef set_expiry_for_order(order, override=False):\n \"\"\"\n Expire the order after the time slot(10 minutes) if the order is pending.\n Also expires the order if we want to expire an order regardless of the state and time.\n :param order: Order to be expired.\n :param override: flag to force expiry.\n :return:\n \"\"\"\n if order and not order.paid_via and (override or (order.status == 'pending' and (\n order.created_at +\n timedelta(minutes=ticketing.TicketingManager.get_order_expiry())) < datetime.now(timezone.utc))):\n order.status = 'expired'\n delete_related_attendees_for_order(order)\n save_to_db(order)\n return order\n\n\ndef create_pdf_tickets_for_holder(order):\n \"\"\"\n Create tickets for the holders of an order.\n :param order: The order for which to create tickets for.\n \"\"\"\n if order.status == 'completed':\n pdf = create_save_pdf(render_template('pdf/ticket_purchaser.html', order=order),\n UPLOAD_PATHS['pdf']['ticket_attendee'],\n dir_path='/static/uploads/pdf/tickets/')\n order.tickets_pdf_url = pdf\n\n for holder in order.ticket_holders:\n if (not holder.user) or holder.user.id != order.user_id:\n # holder is not the order buyer.\n pdf = create_save_pdf(render_template('pdf/ticket_attendee.html', order=order, holder=holder),\n UPLOAD_PATHS['pdf']['ticket_attendee'],\n dir_path='/static/uploads/pdf/tickets/')\n else:\n # holder is the order buyer.\n pdf = order.tickets_pdf_url\n holder.pdf_url = pdf\n save_to_db(holder)\n\n save_to_db(order)\n\n\ndef create_onsite_attendees_for_order(data):\n \"\"\"\n Creates on site ticket holders for an order and adds it into the request data.\n :param data: data initially passed in the POST request for order.\n :return:\n \"\"\"\n on_site_tickets = data.get('on_site_tickets')\n\n if not on_site_tickets:\n raise 
UnprocessableEntity({'pointer': 'data/attributes/on_site_tickets'}, 'on_site_tickets info missing')\n\n data['ticket_holders'] = []\n\n for on_site_ticket in on_site_tickets:\n ticket_id = on_site_ticket['id']\n quantity = int(on_site_ticket['quantity'])\n\n ticket = safe_query_without_soft_deleted_entries(db, Ticket, 'id', ticket_id, 'ticket_id')\n\n ticket_sold_count = get_count(db.session.query(TicketHolder.id).\n filter_by(ticket_id=int(ticket.id), deleted_at=None))\n\n # Check if the ticket is already sold out or not.\n if ticket_sold_count + quantity > ticket.quantity:\n # delete the already created attendees.\n for holder in data['ticket_holders']:\n ticket_holder = db.session.query(TicketHolder).filter(id == int(holder)).one()\n db.session.delete(ticket_holder)\n try:\n db.session.commit()\n except Exception as e:\n logging.error('DB Exception! %s' % e)\n db.session.rollback()\n\n raise ConflictException(\n {'pointer': '/data/attributes/on_site_tickets'},\n \"Ticket with id: {} already sold out. You can buy at most {} tickets\".format(ticket_id,\n ticket.quantity -\n ticket_sold_count)\n )\n\n for _ in range(1, quantity):\n ticket_holder = TicketHolder(firstname='onsite', lastname='attendee', email='[email protected]',\n ticket_id=ticket.id, event_id=data.get('event'))\n save_to_db(ticket_holder)\n data['ticket_holders'].append(ticket_holder.id)\n\n # delete from the data.\n del data['on_site_tickets']\n", "path": "app/api/helpers/order.py"}]} | 1,841 | 137 |
gh_patches_debug_18443 | rasdani/github-patches | git_diff | sunpy__sunpy-3398 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add missing ASDF schemas for new coordinate frames in 1.1
Whoops
</issue>
<code>
[start of sunpy/io/special/asdf/tags/coordinates/frames.py]
1 import os
2 import glob
3
4 from astropy.io.misc.asdf.tags.coordinates.frames import BaseCoordType
5
6 import sunpy.coordinates
7
8 from ...types import SunPyType
9
10 __all__ = ['SunPyCoordType']
11
12
13 SCHEMA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
14 '..', '..',
15 'schemas',
16 'sunpy.org',
17 'sunpy'))
18
19
20 def _get_frames():
21 """
22 By reading the schema files, get the list of all the frames we can
23 save/load.
24 """
25 search = os.path.join(SCHEMA_PATH, 'coordinates', 'frames', '*.yaml')
26 files = glob.glob(search)
27
28 names = []
29 for fpath in files:
30 path, fname = os.path.split(fpath)
31 frame, _ = fname.split('-')
32 exclude_schemas = []
33 if frame not in exclude_schemas:
34 names.append(frame)
35
36 return names
37
38
39 class SunPyCoordType(BaseCoordType, SunPyType):
40 _tag_prefix = "coordinates/frames/"
41 name = ["coordinates/frames/" + f for f in _get_frames()]
42 types = [
43 sunpy.coordinates.HeliographicCarrington,
44 sunpy.coordinates.HeliographicStonyhurst,
45 sunpy.coordinates.Heliocentric,
46 sunpy.coordinates.Helioprojective,
47 ]
48 requires = ['sunpy', 'astropy>=3.1']
49 version = "1.0.0"
50
51 @classmethod
52 def assert_equal(cls, old, new):
53 assert isinstance(new, type(old))
54
[end of sunpy/io/special/asdf/tags/coordinates/frames.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/io/special/asdf/tags/coordinates/frames.py b/sunpy/io/special/asdf/tags/coordinates/frames.py
--- a/sunpy/io/special/asdf/tags/coordinates/frames.py
+++ b/sunpy/io/special/asdf/tags/coordinates/frames.py
@@ -3,7 +3,9 @@
from astropy.io.misc.asdf.tags.coordinates.frames import BaseCoordType
-import sunpy.coordinates
+from sunpy.coordinates import frames
+
+sunpy_frames = list(map(lambda name: getattr(frames, name), frames.__all__))
from ...types import SunPyType
@@ -39,12 +41,7 @@
class SunPyCoordType(BaseCoordType, SunPyType):
_tag_prefix = "coordinates/frames/"
name = ["coordinates/frames/" + f for f in _get_frames()]
- types = [
- sunpy.coordinates.HeliographicCarrington,
- sunpy.coordinates.HeliographicStonyhurst,
- sunpy.coordinates.Heliocentric,
- sunpy.coordinates.Helioprojective,
- ]
+ types = sunpy_frames
requires = ['sunpy', 'astropy>=3.1']
version = "1.0.0"
| {"golden_diff": "diff --git a/sunpy/io/special/asdf/tags/coordinates/frames.py b/sunpy/io/special/asdf/tags/coordinates/frames.py\n--- a/sunpy/io/special/asdf/tags/coordinates/frames.py\n+++ b/sunpy/io/special/asdf/tags/coordinates/frames.py\n@@ -3,7 +3,9 @@\n \n from astropy.io.misc.asdf.tags.coordinates.frames import BaseCoordType\n \n-import sunpy.coordinates\n+from sunpy.coordinates import frames\n+\n+sunpy_frames = list(map(lambda name: getattr(frames, name), frames.__all__))\n \n from ...types import SunPyType\n \n@@ -39,12 +41,7 @@\n class SunPyCoordType(BaseCoordType, SunPyType):\n _tag_prefix = \"coordinates/frames/\"\n name = [\"coordinates/frames/\" + f for f in _get_frames()]\n- types = [\n- sunpy.coordinates.HeliographicCarrington,\n- sunpy.coordinates.HeliographicStonyhurst,\n- sunpy.coordinates.Heliocentric,\n- sunpy.coordinates.Helioprojective,\n- ]\n+ types = sunpy_frames\n requires = ['sunpy', 'astropy>=3.1']\n version = \"1.0.0\"\n", "issue": "Add missing ASDF schemas for new coordinate frames in 1.1\nWhoops\n", "before_files": [{"content": "import os\nimport glob\n\nfrom astropy.io.misc.asdf.tags.coordinates.frames import BaseCoordType\n\nimport sunpy.coordinates\n\nfrom ...types import SunPyType\n\n__all__ = ['SunPyCoordType']\n\n\nSCHEMA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', '..',\n 'schemas',\n 'sunpy.org',\n 'sunpy'))\n\n\ndef _get_frames():\n \"\"\"\n By reading the schema files, get the list of all the frames we can\n save/load.\n \"\"\"\n search = os.path.join(SCHEMA_PATH, 'coordinates', 'frames', '*.yaml')\n files = glob.glob(search)\n\n names = []\n for fpath in files:\n path, fname = os.path.split(fpath)\n frame, _ = fname.split('-')\n exclude_schemas = []\n if frame not in exclude_schemas:\n names.append(frame)\n\n return names\n\n\nclass SunPyCoordType(BaseCoordType, SunPyType):\n _tag_prefix = \"coordinates/frames/\"\n name = [\"coordinates/frames/\" + f for f in _get_frames()]\n types = [\n sunpy.coordinates.HeliographicCarrington,\n sunpy.coordinates.HeliographicStonyhurst,\n sunpy.coordinates.Heliocentric,\n sunpy.coordinates.Helioprojective,\n ]\n requires = ['sunpy', 'astropy>=3.1']\n version = \"1.0.0\"\n\n @classmethod\n def assert_equal(cls, old, new):\n assert isinstance(new, type(old))\n", "path": "sunpy/io/special/asdf/tags/coordinates/frames.py"}]} | 1,006 | 275 |
gh_patches_debug_789 | rasdani/github-patches | git_diff | geopandas__geopandas-372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bbox filter from read_file doesn't take advantage of fiona filtering
In line: https://github.com/geopandas/geopandas/blob/master/geopandas/io/file.py#L28
The function goes through the trouble of checking if `bbox` is not null, but then calls `from_features` with the unfiltered `f` all the same.
Line 28 just needs to be changed to the intended `f_filt` to return filtered results or non-filtered if no bbox is passed in.
</issue>
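A condensed sketch of the intended behaviour described above: the filtered collection, not the original handle, is what should reach `from_features` (the helper name is illustrative; the real function lives in the file below):

```python
import fiona
from geopandas import GeoDataFrame


def read_file_sketch(filename, bbox=None, **kwargs):
    with fiona.open(filename, **kwargs) as f:
        crs = f.crs
        # use the bbox-filtered iterator when a bbox is given
        f_filt = f.filter(bbox=bbox) if bbox is not None else f
        return GeoDataFrame.from_features(f_filt, crs=crs)
```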
<code>
[start of geopandas/io/file.py]
1 import os
2
3 import fiona
4 import numpy as np
5 from shapely.geometry import mapping
6
7 from six import iteritems
8 from geopandas import GeoDataFrame
9
10
11 def read_file(filename, **kwargs):
12 """
13 Returns a GeoDataFrame from a file.
14
15 *filename* is either the absolute or relative path to the file to be
16 opened and *kwargs* are keyword args to be passed to the `open` method
17 in the fiona library when opening the file. For more information on
18 possible keywords, type: ``import fiona; help(fiona.open)``
19 """
20 bbox = kwargs.pop('bbox', None)
21 with fiona.open(filename, **kwargs) as f:
22 crs = f.crs
23 if bbox is not None:
24 assert len(bbox)==4
25 f_filt = f.filter(bbox=bbox)
26 else:
27 f_filt = f
28 gdf = GeoDataFrame.from_features(f, crs=crs)
29
30 return gdf
31
32
33 def to_file(df, filename, driver="ESRI Shapefile", schema=None,
34 **kwargs):
35 """
36 Write this GeoDataFrame to an OGR data source
37
38 A dictionary of supported OGR providers is available via:
39 >>> import fiona
40 >>> fiona.supported_drivers
41
42 Parameters
43 ----------
44 df : GeoDataFrame to be written
45 filename : string
46 File path or file handle to write to.
47 driver : string, default 'ESRI Shapefile'
48 The OGR format driver used to write the vector file.
49 schema : dict, default None
50 If specified, the schema dictionary is passed to Fiona to
51 better control how the file is written. If None, GeoPandas
52 will determine the schema based on each column's dtype
53
54 The *kwargs* are passed to fiona.open and can be used to write
55 to multi-layer data, store data within archives (zip files), etc.
56 """
57 if schema is None:
58 schema = infer_schema(df)
59 filename = os.path.abspath(os.path.expanduser(filename))
60 with fiona.open(filename, 'w', driver=driver, crs=df.crs,
61 schema=schema, **kwargs) as c:
62 for feature in df.iterfeatures():
63 c.write(feature)
64
65
66 def infer_schema(df):
67 try:
68 from collections import OrderedDict
69 except ImportError:
70 from ordereddict import OrderedDict
71
72 def convert_type(in_type):
73 if in_type == object:
74 return 'str'
75 out_type = type(np.asscalar(np.zeros(1, in_type))).__name__
76 if out_type == 'long':
77 out_type = 'int'
78 return out_type
79
80 properties = OrderedDict([
81 (col, convert_type(_type)) for col, _type in
82 zip(df.columns, df.dtypes) if col != df._geometry_column_name
83 ])
84
85 geom_type = _common_geom_type(df)
86 if not geom_type:
87 raise ValueError("Geometry column cannot contain mutiple "
88 "geometry types when writing to file.")
89
90 schema = {'geometry': geom_type, 'properties': properties}
91
92 return schema
93
94
95 def _common_geom_type(df):
96 # Need to check geom_types before we write to file...
97 # Some (most?) providers expect a single geometry type:
98 # Point, LineString, or Polygon
99 geom_types = df.geometry.geom_type.unique()
100
101 from os.path import commonprefix # To find longest common prefix
102 geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse
103 if not geom_type:
104 geom_type = None
105
106 return geom_type
107
[end of geopandas/io/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geopandas/io/file.py b/geopandas/io/file.py
--- a/geopandas/io/file.py
+++ b/geopandas/io/file.py
@@ -25,7 +25,7 @@
f_filt = f.filter(bbox=bbox)
else:
f_filt = f
- gdf = GeoDataFrame.from_features(f, crs=crs)
+ gdf = GeoDataFrame.from_features(f_filt, crs=crs)
return gdf
| {"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -25,7 +25,7 @@\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n- gdf = GeoDataFrame.from_features(f, crs=crs)\n+ gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n \n return gdf\n", "issue": "bbox filter from read_file doesn't take advantage of fiona filtering\nIn line: https://github.com/geopandas/geopandas/blob/master/geopandas/io/file.py#L28\n\nThe function goes through the trouble of checking if `bbox` is not null, but just calls `f` in `from_features` just the same.\n\nLine 28 just needs to be changed to the intended `f_filt` to return filtered results or non-filtered if no bbox is passed in.\n\n", "before_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nfrom shapely.geometry import mapping\n\nfrom six import iteritems\nfrom geopandas import GeoDataFrame\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file.\n\n *filename* is either the absolute or relative path to the file to be\n opened and *kwargs* are keyword args to be passed to the `open` method\n in the fiona library when opening the file. For more information on \n possible keywords, type: ``import fiona; help(fiona.open)``\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox)==4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f, crs=crs)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as c:\n for feature in df.iterfeatures():\n c.write(feature)\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) 
providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py"}]} | 1,644 | 109 |
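A side note on the geopandas fix above: it is a "built the filtered handle, consumed the raw one" bug — `f_filt` is created from `f.filter(bbox=bbox)` but never passed on. A library-free sketch of the same failure mode (function and variable names here are invented purely for illustration):

```python
def select_rows(rows, predicate=None):
    if predicate is not None:
        rows_filt = (r for r in rows if predicate(r))   # filtered iterator
    else:
        rows_filt = rows
    # Returning list(rows) here would silently ignore the filter;
    # the fix is to consume the filtered handle instead.
    return list(rows_filt)

assert select_rows([1, 2, 3, 4], predicate=lambda r: r > 2) == [3, 4]
```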
gh_patches_debug_18008 | rasdani/github-patches | git_diff | comic__grand-challenge.org-3330 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Google logins broken with django-allauth 0.62+
# Recipe
- Open incognito window (just in case it matters)
- Navigate to grand-challenge.org
- Click Third party auth -> Google to login

- Acknowledge that you are sent to a "third party" by clicking continue on the next page.

# Result

> Unexpected Error
No login possible.
@amickan reported that no sentry errors are being recorded. I cannot login, presumably many other people cannot login either.
</issue>
<code>
[start of app/grandchallenge/profiles/providers/gmail/views.py]
1 from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
2 from allauth.socialaccount.providers.oauth2.views import (
3 OAuth2CallbackView,
4 OAuth2LoginView,
5 )
6
7 from grandchallenge.profiles.providers.gmail.provider import GmailProvider
8
9
10 class GmailOAuth2Adapter(GoogleOAuth2Adapter):
11 provider_id = GmailProvider.id
12
13
14 oauth2_login = OAuth2LoginView.adapter_view(GmailOAuth2Adapter)
15 oauth2_callback = OAuth2CallbackView.adapter_view(GmailOAuth2Adapter)
16
[end of app/grandchallenge/profiles/providers/gmail/views.py]
[start of app/grandchallenge/profiles/providers/gmail/provider.py]
1 from allauth.socialaccount.providers.google.provider import GoogleProvider
2
3
4 class GmailProvider(GoogleProvider):
5 id = "gmail"
6 name = "Google"
7
8 def extract_uid(self, data):
9 return str(data["email"])
10
11
12 provider_classes = [GmailProvider]
13
[end of app/grandchallenge/profiles/providers/gmail/provider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/profiles/providers/gmail/provider.py b/app/grandchallenge/profiles/providers/gmail/provider.py
--- a/app/grandchallenge/profiles/providers/gmail/provider.py
+++ b/app/grandchallenge/profiles/providers/gmail/provider.py
@@ -1,9 +1,12 @@
from allauth.socialaccount.providers.google.provider import GoogleProvider
+from grandchallenge.profiles.providers.gmail.views import GmailOAuth2Adapter
+
class GmailProvider(GoogleProvider):
id = "gmail"
name = "Google"
+ oauth2_adapter_class = GmailOAuth2Adapter
def extract_uid(self, data):
return str(data["email"])
diff --git a/app/grandchallenge/profiles/providers/gmail/views.py b/app/grandchallenge/profiles/providers/gmail/views.py
--- a/app/grandchallenge/profiles/providers/gmail/views.py
+++ b/app/grandchallenge/profiles/providers/gmail/views.py
@@ -4,11 +4,9 @@
OAuth2LoginView,
)
-from grandchallenge.profiles.providers.gmail.provider import GmailProvider
-
class GmailOAuth2Adapter(GoogleOAuth2Adapter):
- provider_id = GmailProvider.id
+ provider_id = "gmail"
oauth2_login = OAuth2LoginView.adapter_view(GmailOAuth2Adapter)
| {"golden_diff": "diff --git a/app/grandchallenge/profiles/providers/gmail/provider.py b/app/grandchallenge/profiles/providers/gmail/provider.py\n--- a/app/grandchallenge/profiles/providers/gmail/provider.py\n+++ b/app/grandchallenge/profiles/providers/gmail/provider.py\n@@ -1,9 +1,12 @@\n from allauth.socialaccount.providers.google.provider import GoogleProvider\n \n+from grandchallenge.profiles.providers.gmail.views import GmailOAuth2Adapter\n+\n \n class GmailProvider(GoogleProvider):\n id = \"gmail\"\n name = \"Google\"\n+ oauth2_adapter_class = GmailOAuth2Adapter\n \n def extract_uid(self, data):\n return str(data[\"email\"])\ndiff --git a/app/grandchallenge/profiles/providers/gmail/views.py b/app/grandchallenge/profiles/providers/gmail/views.py\n--- a/app/grandchallenge/profiles/providers/gmail/views.py\n+++ b/app/grandchallenge/profiles/providers/gmail/views.py\n@@ -4,11 +4,9 @@\n OAuth2LoginView,\n )\n \n-from grandchallenge.profiles.providers.gmail.provider import GmailProvider\n-\n \n class GmailOAuth2Adapter(GoogleOAuth2Adapter):\n- provider_id = GmailProvider.id\n+ provider_id = \"gmail\"\n \n \n oauth2_login = OAuth2LoginView.adapter_view(GmailOAuth2Adapter)\n", "issue": "Google logins broken with django-allauth 0.62+\n# Recipe\r\n\r\n- Open incognito window (just in case it matters)\r\n- Navigate to grand-challenge.org\r\n- Click Third party auth -> Google to login\r\n \r\n\r\n\r\n- Acknowledge that you are sent to a \"third party\" by clicking continue on the next page.\r\n\r\n\r\n\r\n# Result\r\n\r\n\r\n\r\n> Unexpected Error\r\n\r\nNo login possible.\r\n\r\n@amickan reported that no sentry errors are being recorded. I cannot login, presumably many other people cannot login either.\r\n\n", "before_files": [{"content": "from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter\nfrom allauth.socialaccount.providers.oauth2.views import (\n OAuth2CallbackView,\n OAuth2LoginView,\n)\n\nfrom grandchallenge.profiles.providers.gmail.provider import GmailProvider\n\n\nclass GmailOAuth2Adapter(GoogleOAuth2Adapter):\n provider_id = GmailProvider.id\n\n\noauth2_login = OAuth2LoginView.adapter_view(GmailOAuth2Adapter)\noauth2_callback = OAuth2CallbackView.adapter_view(GmailOAuth2Adapter)\n", "path": "app/grandchallenge/profiles/providers/gmail/views.py"}, {"content": "from allauth.socialaccount.providers.google.provider import GoogleProvider\n\n\nclass GmailProvider(GoogleProvider):\n id = \"gmail\"\n name = \"Google\"\n\n def extract_uid(self, data):\n return str(data[\"email\"])\n\n\nprovider_classes = [GmailProvider]\n", "path": "app/grandchallenge/profiles/providers/gmail/provider.py"}]} | 1,062 | 277 |
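Reconstructed from the diff above, the patched `provider.py` ends up as below; the key change is that the provider class itself now points at its OAuth2 adapter (presumably how django-allauth 0.62+ resolves the adapter), while `views.py` keeps only a plain string `provider_id`:

```python
from allauth.socialaccount.providers.google.provider import GoogleProvider

from grandchallenge.profiles.providers.gmail.views import GmailOAuth2Adapter


class GmailProvider(GoogleProvider):
    id = "gmail"
    name = "Google"
    oauth2_adapter_class = GmailOAuth2Adapter  # provider -> adapter link added by the patch

    def extract_uid(self, data):
        return str(data["email"])


provider_classes = [GmailProvider]
```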
gh_patches_debug_16795 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3042 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[chatgpt] change critic input as state
> ## 📌 Checklist before creating the PR
> * [x] I have created an issue for this PR for traceability
> * [x] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
> * [ ] I have added relevant tags if possible for us to better distinguish different PRs
>
> ## 🚨 Issue number
> > Link this PR to your issue with words like fixed to automatically close the linked issue upon merge
> > e.g. `fixed #1234`, `closed #1234`, `resolved #1234`
> > fixed #3042
>
> ## 📝 What does this PR do?
> > Summarize your work here.
> > if you have any plots/diagrams/screenshots/tables, please attach them here.
>
> This commit fix chatgpt critic input as state according to A2C RL algorithm.
>
> ## 💥 Checklist before requesting a review
> * [x] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
> * [x] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
> * [x] I have performed a self-review of my code
> * [ ] I have added thorough tests.
> * [ ] I have added docstrings for all the functions/methods I implemented
>
> ## ⭐️ Do you enjoy contributing to Colossal-AI?
> * [x] 🌝 Yes, I do.
> * [ ] 🌚 No, I don't.
>
> Tell us more if you don't enjoy contributing to Colossal-AI.
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/ChatGPT/chatgpt/models/base/critic.py]
1 from typing import Optional
2
3 import torch
4 import torch.nn as nn
5
6 from ..lora import LoRAModule
7 from ..utils import masked_mean
8
9
10 class Critic(LoRAModule):
11 """
12 Critic model base class.
13
14 Args:
15 model (nn.Module): Critic model.
16 value_head (nn.Module): Value head to get value.
17 lora_rank (int): LoRA rank.
18 lora_train_bias (str): LoRA bias training mode.
19 """
20
21 def __init__(self,
22 model: nn.Module,
23 value_head: nn.Module,
24 lora_rank: int = 0,
25 lora_train_bias: str = 'none') -> None:
26
27 super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)
28 self.model = model
29 self.value_head = value_head
30 self.convert_to_lora()
31
32 def forward(self,
33 sequences: torch.LongTensor,
34 action_mask: Optional[torch.Tensor] = None,
35 attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
36 outputs = self.model(sequences, attention_mask=attention_mask)
37 last_hidden_states = outputs['last_hidden_state']
38
39 values = self.value_head(last_hidden_states).squeeze(-1)[:, :-1]
40
41 if action_mask is not None:
42 num_actions = action_mask.size(1)
43 values = values[:, -num_actions:]
44 value = masked_mean(values, action_mask, dim=1)
45 return value
46 value = values.mean(dim=1).squeeze(1)
47 return value
48
[end of applications/ChatGPT/chatgpt/models/base/critic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/ChatGPT/chatgpt/models/base/critic.py b/applications/ChatGPT/chatgpt/models/base/critic.py
--- a/applications/ChatGPT/chatgpt/models/base/critic.py
+++ b/applications/ChatGPT/chatgpt/models/base/critic.py
@@ -36,12 +36,15 @@
outputs = self.model(sequences, attention_mask=attention_mask)
last_hidden_states = outputs['last_hidden_state']
- values = self.value_head(last_hidden_states).squeeze(-1)[:, :-1]
+ values = self.value_head(last_hidden_states).squeeze(-1)
if action_mask is not None:
num_actions = action_mask.size(1)
- values = values[:, -num_actions:]
- value = masked_mean(values, action_mask, dim=1)
+ prompt_mask = attention_mask[:, :-num_actions]
+ values = values[:, :-num_actions]
+ value = masked_mean(values, prompt_mask, dim=1)
return value
+
+ values = values[:, :-1]
value = values.mean(dim=1).squeeze(1)
return value
| {"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/models/base/critic.py b/applications/ChatGPT/chatgpt/models/base/critic.py\n--- a/applications/ChatGPT/chatgpt/models/base/critic.py\n+++ b/applications/ChatGPT/chatgpt/models/base/critic.py\n@@ -36,12 +36,15 @@\n outputs = self.model(sequences, attention_mask=attention_mask)\n last_hidden_states = outputs['last_hidden_state']\n \n- values = self.value_head(last_hidden_states).squeeze(-1)[:, :-1]\n+ values = self.value_head(last_hidden_states).squeeze(-1)\n \n if action_mask is not None:\n num_actions = action_mask.size(1)\n- values = values[:, -num_actions:]\n- value = masked_mean(values, action_mask, dim=1)\n+ prompt_mask = attention_mask[:, :-num_actions]\n+ values = values[:, :-num_actions]\n+ value = masked_mean(values, prompt_mask, dim=1)\n return value\n+\n+ values = values[:, :-1]\n value = values.mean(dim=1).squeeze(1)\n return value\n", "issue": "[chatgpt] change critic input as state\n> ## \ud83d\udccc Checklist before creating the PR\r\n> * [x] I have created an issue for this PR for traceability\r\n> * [x] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`\r\n> * [ ] I have added relevant tags if possible for us to better distinguish different PRs\r\n> \r\n> ## \ud83d\udea8 Issue number\r\n> > Link this PR to your issue with words like fixed to automatically close the linked issue upon merge\r\n> > e.g. `fixed #1234`, `closed #1234`, `resolved #1234`\r\n> > fixed #3042\r\n> \r\n> ## \ud83d\udcdd What does this PR do?\r\n> > Summarize your work here.\r\n> > if you have any plots/diagrams/screenshots/tables, please attach them here.\r\n> \r\n> This commit fix chatgpt critic input as state according to A2C RL algorithm.\r\n> \r\n> ## \ud83d\udca5 Checklist before requesting a review\r\n> * [x] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))\r\n> * [x] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible\r\n> * [x] I have performed a self-review of my code\r\n> * [ ] I have added thorough tests.\r\n> * [ ] I have added docstrings for all the functions/methods I implemented\r\n> \r\n> ## \u2b50\ufe0f Do you enjoy contributing to Colossal-AI?\r\n> * [x] \ud83c\udf1d Yes, I do.\r\n> * [ ] \ud83c\udf1a No, I don't.\r\n> \r\n> Tell us more if you don't enjoy contributing to Colossal-AI.\r\n\r\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\n\nfrom ..lora import LoRAModule\nfrom ..utils import masked_mean\n\n\nclass Critic(LoRAModule):\n \"\"\"\n Critic model base class.\n\n Args:\n model (nn.Module): Critic model.\n value_head (nn.Module): Value head to get value.\n lora_rank (int): LoRA rank.\n lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n\n def __init__(self,\n model: nn.Module,\n value_head: nn.Module,\n lora_rank: int = 0,\n lora_train_bias: str = 'none') -> None:\n\n super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)\n self.model = model\n self.value_head = value_head\n self.convert_to_lora()\n\n def forward(self,\n sequences: torch.LongTensor,\n action_mask: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n outputs = self.model(sequences, attention_mask=attention_mask)\n last_hidden_states = outputs['last_hidden_state']\n\n values = 
self.value_head(last_hidden_states).squeeze(-1)[:, :-1]\n\n if action_mask is not None:\n num_actions = action_mask.size(1)\n values = values[:, -num_actions:]\n value = masked_mean(values, action_mask, dim=1)\n return value\n value = values.mean(dim=1).squeeze(1)\n return value\n", "path": "applications/ChatGPT/chatgpt/models/base/critic.py"}]} | 1,405 | 255 |
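The patched critic averages the per-token values over the prompt tokens with `masked_mean`. The helper itself is not shown above, but a masked mean is conventionally computed as below (a sketch, assuming the repo's helper behaves the same way):

```python
import torch

def masked_mean(x: torch.Tensor, mask: torch.Tensor, dim: int = 1) -> torch.Tensor:
    # Average only the positions where mask == 1 (here: the prompt/state tokens).
    mask = mask.to(x.dtype)
    return (x * mask).sum(dim=dim) / mask.sum(dim=dim).clamp(min=1e-8)

values = torch.tensor([[0.5, 1.0, 2.0, 3.0]])   # value head output per token
prompt_mask = torch.tensor([[1, 1, 0, 0]])      # first two tokens are the prompt
print(masked_mean(values, prompt_mask))         # tensor([0.7500])
```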
gh_patches_debug_36284 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2966 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider albert_heijn is broken
During the global build at 2021-06-02-14-42-40, spider **albert_heijn** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/albert_heijn.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/albert_heijn.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/albert_heijn.geojson))
</issue>
<code>
[start of locations/spiders/albert_heijn.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 import json
5
6 class AlbertHeijnSpider(scrapy.Spider):
7 name = 'albert_heijn'
8 item_attributes = {'brand': "Albert Heijn"}
9 allowed_domains = ['www.ah.nl']
10
11 def start_requests(self):
12 url = 'https://www.ah.nl/data/winkelinformatie/winkels/json'
13 yield scrapy.Request(url, callback=self.parse)
14
15 def parse(self, response):
16 stores = json.loads(response.body_as_unicode())
17 for store in stores['stores']:
18 try:
19 phone_number = store['phoneNumber']
20 except:
21 phone_number = ""
22 yield GeojsonPointItem(
23 lat=store['lat'],
24 lon=store['lng'],
25 addr_full="%s %s" % (store['street'], store["housenr"]),
26 city=store['city'],
27 phone=phone_number,
28 state="",
29 postcode=store['zip'],
30 ref=store['no'],
31 country="Netherlands",
32 website="https://www.ah.nl/winkel/albert-heijn/%s/%s/%s" % (store['city'], store['street'], store['no'])
33 )
34
[end of locations/spiders/albert_heijn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/albert_heijn.py b/locations/spiders/albert_heijn.py
--- a/locations/spiders/albert_heijn.py
+++ b/locations/spiders/albert_heijn.py
@@ -1,33 +1,53 @@
# -*- coding: utf-8 -*-
+import json
+import re
+
import scrapy
+
+from locations.hours import OpeningHours
from locations.items import GeojsonPointItem
-import json
-class AlbertHeijnSpider(scrapy.Spider):
- name = 'albert_heijn'
- item_attributes = {'brand': "Albert Heijn"}
- allowed_domains = ['www.ah.nl']
- def start_requests(self):
- url = 'https://www.ah.nl/data/winkelinformatie/winkels/json'
- yield scrapy.Request(url, callback=self.parse)
+class AlbertHeijnSpider(scrapy.Spider):
+ name = "albert_heijn"
+ item_attributes = {"brand": "Albert Heijn", "brand_wikidata": "Q1653985"}
+ allowed_domains = ["www.ah.nl", "www.ah.be"]
+ start_urls = (
+ "https://www.ah.nl/sitemaps/entities/stores/stores.xml",
+ "https://www.ah.be/sitemaps/entities/stores/stores.xml",
+ )
def parse(self, response):
- stores = json.loads(response.body_as_unicode())
- for store in stores['stores']:
- try:
- phone_number = store['phoneNumber']
- except:
- phone_number = ""
- yield GeojsonPointItem(
- lat=store['lat'],
- lon=store['lng'],
- addr_full="%s %s" % (store['street'], store["housenr"]),
- city=store['city'],
- phone=phone_number,
- state="",
- postcode=store['zip'],
- ref=store['no'],
- country="Netherlands",
- website="https://www.ah.nl/winkel/albert-heijn/%s/%s/%s" % (store['city'], store['street'], store['no'])
- )
+ response.selector.remove_namespaces()
+ for url in response.xpath("//loc/text()").extract():
+ if re.search("/winkel/albert-heijn/", url):
+ yield scrapy.Request(url, callback=self.parse_store)
+
+ def parse_store(self, response):
+ for ldjson in response.xpath(
+ '//script[@type="application/ld+json"]/text()'
+ ).extract():
+ data = json.loads(ldjson)
+ if data["@type"] != "GroceryStore":
+ continue
+
+ opening_hours = OpeningHours()
+ for spec in data["openingHoursSpecification"]:
+ opening_hours.add_range(
+ spec["dayOfWeek"][:2], spec["opens"], spec["closes"]
+ )
+
+ properties = {
+ "ref": response.url,
+ "website": response.url,
+ "name": data["name"],
+ "phone": data["telephone"],
+ "lat": data["geo"]["latitude"],
+ "lon": data["geo"]["longitude"],
+ "addr_full": data["address"]["streetAddress"],
+ "city": data["address"]["addressLocality"],
+ "postcode": data["address"]["postalCode"],
+ "country": data["address"]["addressCountry"],
+ "opening_hours": opening_hours.as_opening_hours(),
+ }
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/albert_heijn.py b/locations/spiders/albert_heijn.py\n--- a/locations/spiders/albert_heijn.py\n+++ b/locations/spiders/albert_heijn.py\n@@ -1,33 +1,53 @@\n # -*- coding: utf-8 -*-\n+import json\n+import re\n+\n import scrapy\n+\n+from locations.hours import OpeningHours\n from locations.items import GeojsonPointItem\n-import json\n \n-class AlbertHeijnSpider(scrapy.Spider):\n- name = 'albert_heijn'\n- item_attributes = {'brand': \"Albert Heijn\"}\n- allowed_domains = ['www.ah.nl']\n \n- def start_requests(self):\n- url = 'https://www.ah.nl/data/winkelinformatie/winkels/json'\n- yield scrapy.Request(url, callback=self.parse)\n+class AlbertHeijnSpider(scrapy.Spider):\n+ name = \"albert_heijn\"\n+ item_attributes = {\"brand\": \"Albert Heijn\", \"brand_wikidata\": \"Q1653985\"}\n+ allowed_domains = [\"www.ah.nl\", \"www.ah.be\"]\n+ start_urls = (\n+ \"https://www.ah.nl/sitemaps/entities/stores/stores.xml\",\n+ \"https://www.ah.be/sitemaps/entities/stores/stores.xml\",\n+ )\n \n def parse(self, response):\n- stores = json.loads(response.body_as_unicode())\n- for store in stores['stores']:\n- try:\n- phone_number = store['phoneNumber']\n- except:\n- phone_number = \"\"\n- yield GeojsonPointItem(\n- lat=store['lat'],\n- lon=store['lng'],\n- addr_full=\"%s %s\" % (store['street'], store[\"housenr\"]),\n- city=store['city'],\n- phone=phone_number,\n- state=\"\",\n- postcode=store['zip'],\n- ref=store['no'],\n- country=\"Netherlands\",\n- website=\"https://www.ah.nl/winkel/albert-heijn/%s/%s/%s\" % (store['city'], store['street'], store['no'])\n- )\n+ response.selector.remove_namespaces()\n+ for url in response.xpath(\"//loc/text()\").extract():\n+ if re.search(\"/winkel/albert-heijn/\", url):\n+ yield scrapy.Request(url, callback=self.parse_store)\n+\n+ def parse_store(self, response):\n+ for ldjson in response.xpath(\n+ '//script[@type=\"application/ld+json\"]/text()'\n+ ).extract():\n+ data = json.loads(ldjson)\n+ if data[\"@type\"] != \"GroceryStore\":\n+ continue\n+\n+ opening_hours = OpeningHours()\n+ for spec in data[\"openingHoursSpecification\"]:\n+ opening_hours.add_range(\n+ spec[\"dayOfWeek\"][:2], spec[\"opens\"], spec[\"closes\"]\n+ )\n+\n+ properties = {\n+ \"ref\": response.url,\n+ \"website\": response.url,\n+ \"name\": data[\"name\"],\n+ \"phone\": data[\"telephone\"],\n+ \"lat\": data[\"geo\"][\"latitude\"],\n+ \"lon\": data[\"geo\"][\"longitude\"],\n+ \"addr_full\": data[\"address\"][\"streetAddress\"],\n+ \"city\": data[\"address\"][\"addressLocality\"],\n+ \"postcode\": data[\"address\"][\"postalCode\"],\n+ \"country\": data[\"address\"][\"addressCountry\"],\n+ \"opening_hours\": opening_hours.as_opening_hours(),\n+ }\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider albert_heijn is broken\nDuring the global build at 2021-06-02-14-42-40, spider **albert_heijn** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/albert_heijn.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/albert_heijn.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/albert_heijn.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\n\nclass AlbertHeijnSpider(scrapy.Spider):\n name = 'albert_heijn'\n item_attributes = {'brand': \"Albert Heijn\"}\n allowed_domains = ['www.ah.nl']\n\n 
def start_requests(self):\n url = 'https://www.ah.nl/data/winkelinformatie/winkels/json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n stores = json.loads(response.body_as_unicode())\n for store in stores['stores']:\n try:\n phone_number = store['phoneNumber']\n except:\n phone_number = \"\"\n yield GeojsonPointItem(\n lat=store['lat'],\n lon=store['lng'],\n addr_full=\"%s %s\" % (store['street'], store[\"housenr\"]),\n city=store['city'],\n phone=phone_number,\n state=\"\",\n postcode=store['zip'],\n ref=store['no'],\n country=\"Netherlands\",\n website=\"https://www.ah.nl/winkel/albert-heijn/%s/%s/%s\" % (store['city'], store['street'], store['no'])\n )\n", "path": "locations/spiders/albert_heijn.py"}]} | 1,056 | 783 |
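The rewritten spider pulls its data from `application/ld+json` blocks on the store pages instead of the removed JSON endpoint. A stripped-down version of that extraction step, with an invented payload standing in for a real store page:

```python
import json

ldjson = """{"@type": "GroceryStore", "name": "Example store",
             "openingHoursSpecification": [
                 {"dayOfWeek": "Monday", "opens": "08:00", "closes": "20:00"}]}"""

data = json.loads(ldjson)
if data["@type"] == "GroceryStore":
    for spec in data["openingHoursSpecification"]:
        # Same slicing as the patch: "Monday" -> "Mo" for OpeningHours.add_range
        print(spec["dayOfWeek"][:2], spec["opens"], spec["closes"])   # Mo 08:00 20:00
```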
gh_patches_debug_25605 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1878 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider tesla is broken
During the global build at 2021-05-26-14-42-23, spider **tesla** failed with **486 features** and **5 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tesla.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tesla.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tesla.geojson))
</issue>
<code>
[start of locations/spiders/tesla.py]
1 # -*- coding: utf-8 -*-
2 import re
3 import scrapy
4 import urllib.parse
5 from locations.items import GeojsonPointItem
6
7
8 class TeslaSpider(scrapy.Spider):
9 name = "tesla"
10 item_attributes = { 'brand': "Tesla" }
11 allowed_domains = ['www.tesla.com']
12 start_urls = [
13 'https://www.tesla.com/findus/list',
14 ]
15 download_delay = 0.5
16 custom_settings = {
17 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
18 }
19
20 def parse(self, response):
21 # Only scrape stores and service centers
22 country_urls = response.xpath('//a[contains(@href,"stores") or contains(@href,"services")]/@href').extract()
23 for country_url in country_urls:
24 yield scrapy.Request(response.urljoin(country_url), callback=self.parse_store_list)
25
26 def parse_store_list(self, response):
27 store_urls = response.xpath('//a[@class="fn org url"]/@href').extract()
28 for store_url in store_urls:
29 yield scrapy.Request(response.urljoin(store_url), callback=self.parse_store)
30
31 def parse_store(self, response):
32 # Skip if "Coming Soon" - no content to capture yet
33 if response.xpath('//span[@class="coming-soon"]/text()').extract_first() == "Coming Soon":
34 pass
35 else:
36 ref = re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1)
37
38 # city, state, and zip do not have separate classes - contained together in locality class as text
39 name = response.xpath('normalize-space(//header/h1/text())').extract_first()
40 common_name = response.xpath('normalize-space(//span[@class="common-name"]//text())').extract_first()
41 street_address = response.xpath('normalize-space(//span[@class="street-address"]//text())').extract_first()
42 city_state_zip = response.xpath('normalize-space(//span[@class="locality"]//text())').extract_first()
43
44 if common_name and street_address and city_state_zip:
45 addr_full = common_name + ' ' + street_address + ', ' + city_state_zip
46 elif street_address and not city_state_zip:
47 addr_full = street_address
48 elif city_state_zip and not street_address:
49 addr_full = city_state_zip
50 elif street_address and city_state_zip:
51 addr_full = street_address + ', ' + city_state_zip
52
53 country_url = response.xpath('//header[@class="findus-list-header"]/a/@href').extract_first()
54 country = urllib.parse.unquote_plus(re.search(r'.+/(.+?)/?(?:\.html|$)', country_url).group(1))
55 phone = response.xpath('normalize-space(//span[@class="tel"]/span[2]/text())').extract_first()
56 location_type = re.search(r".+/(.+?)/(.+?)/?(?:\.html|$)", response.url).group(1)
57
58 # map link varies across store pages
59 if response.xpath('normalize-space(//a[contains(@href,"maps.google")]/@href)').extract_first():
60 map_link = response.xpath('normalize-space(//a[contains(@href,"maps.google")]/@href)').extract_first()
61 else:
62 map_link = response.xpath('normalize-space(//img[contains(@src,"maps.google")]/@src)').extract_first()
63
64 # extract coordinates from map link
65 if re.search(r'.+=([0-9.-]+),\s?([0-9.-]+)', map_link):
66 lat = re.search(r'.+=([0-9.-]+),\s?([0-9.-]+)', map_link).group(1)
67 lon = re.search(r'.+=([0-9.-]+),\s?([0-9.-]+)', map_link).group(2)
68 elif re.search(r'.+@([0-9.-]+),\s?([0-9.-]+)', map_link):
69 lat = re.search(r'.+@([0-9.-]+),\s?([0-9.-]+)', map_link).group(1)
70 lon = re.search(r'.+@([0-9.-]+),\s?([0-9.-]+)', map_link).group(2)
71 else:
72 lat = None
73 lon = None
74
75 properties = {
76 'ref': ref,
77 'name': name,
78 'addr_full': addr_full,
79 'country': country,
80 'phone': phone,
81 'website': response.url,
82 'lat': lat,
83 'lon': lon,
84 'extras':
85 {
86 'location_type': location_type # Is this a service center or store/gallery
87 }
88 }
89
90 yield GeojsonPointItem(**properties)
91
[end of locations/spiders/tesla.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/tesla.py b/locations/spiders/tesla.py
--- a/locations/spiders/tesla.py
+++ b/locations/spiders/tesla.py
@@ -19,7 +19,7 @@
def parse(self, response):
# Only scrape stores and service centers
- country_urls = response.xpath('//a[contains(@href,"stores") or contains(@href,"services")]/@href').extract()
+ country_urls = response.xpath('//a[contains(@href,"stores") or contains(@href,"services") or contains(@href,"superchargers")]/@href').extract()
for country_url in country_urls:
yield scrapy.Request(response.urljoin(country_url), callback=self.parse_store_list)
@@ -41,6 +41,7 @@
street_address = response.xpath('normalize-space(//span[@class="street-address"]//text())').extract_first()
city_state_zip = response.xpath('normalize-space(//span[@class="locality"]//text())').extract_first()
+ addr_full = ""
if common_name and street_address and city_state_zip:
addr_full = common_name + ' ' + street_address + ', ' + city_state_zip
elif street_address and not city_state_zip:
| {"golden_diff": "diff --git a/locations/spiders/tesla.py b/locations/spiders/tesla.py\n--- a/locations/spiders/tesla.py\n+++ b/locations/spiders/tesla.py\n@@ -19,7 +19,7 @@\n \n def parse(self, response):\n # Only scrape stores and service centers\n- country_urls = response.xpath('//a[contains(@href,\"stores\") or contains(@href,\"services\")]/@href').extract()\n+ country_urls = response.xpath('//a[contains(@href,\"stores\") or contains(@href,\"services\") or contains(@href,\"superchargers\")]/@href').extract()\n for country_url in country_urls:\n yield scrapy.Request(response.urljoin(country_url), callback=self.parse_store_list)\n \n@@ -41,6 +41,7 @@\n street_address = response.xpath('normalize-space(//span[@class=\"street-address\"]//text())').extract_first()\n city_state_zip = response.xpath('normalize-space(//span[@class=\"locality\"]//text())').extract_first()\n \n+ addr_full = \"\"\n if common_name and street_address and city_state_zip:\n addr_full = common_name + ' ' + street_address + ', ' + city_state_zip\n elif street_address and not city_state_zip:\n", "issue": "Spider tesla is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tesla** failed with **486 features** and **5 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tesla.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tesla.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tesla.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\nimport scrapy\nimport urllib.parse\nfrom locations.items import GeojsonPointItem\n\n\nclass TeslaSpider(scrapy.Spider):\n name = \"tesla\"\n item_attributes = { 'brand': \"Tesla\" }\n allowed_domains = ['www.tesla.com']\n start_urls = [\n 'https://www.tesla.com/findus/list',\n ]\n download_delay = 0.5\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n\n def parse(self, response):\n # Only scrape stores and service centers\n country_urls = response.xpath('//a[contains(@href,\"stores\") or contains(@href,\"services\")]/@href').extract()\n for country_url in country_urls:\n yield scrapy.Request(response.urljoin(country_url), callback=self.parse_store_list)\n\n def parse_store_list(self, response):\n store_urls = response.xpath('//a[@class=\"fn org url\"]/@href').extract()\n for store_url in store_urls:\n yield scrapy.Request(response.urljoin(store_url), callback=self.parse_store)\n\n def parse_store(self, response):\n # Skip if \"Coming Soon\" - no content to capture yet\n if response.xpath('//span[@class=\"coming-soon\"]/text()').extract_first() == \"Coming Soon\":\n pass\n else:\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n # city, state, and zip do not have separate classes - contained together in locality class as text\n name = response.xpath('normalize-space(//header/h1/text())').extract_first()\n common_name = response.xpath('normalize-space(//span[@class=\"common-name\"]//text())').extract_first()\n street_address = response.xpath('normalize-space(//span[@class=\"street-address\"]//text())').extract_first()\n city_state_zip = response.xpath('normalize-space(//span[@class=\"locality\"]//text())').extract_first()\n\n if common_name and street_address and city_state_zip:\n addr_full = common_name + ' ' + street_address + ', ' + city_state_zip\n elif street_address and not 
city_state_zip:\n addr_full = street_address\n elif city_state_zip and not street_address:\n addr_full = city_state_zip\n elif street_address and city_state_zip:\n addr_full = street_address + ', ' + city_state_zip\n\n country_url = response.xpath('//header[@class=\"findus-list-header\"]/a/@href').extract_first()\n country = urllib.parse.unquote_plus(re.search(r'.+/(.+?)/?(?:\\.html|$)', country_url).group(1))\n phone = response.xpath('normalize-space(//span[@class=\"tel\"]/span[2]/text())').extract_first()\n location_type = re.search(r\".+/(.+?)/(.+?)/?(?:\\.html|$)\", response.url).group(1)\n\n # map link varies across store pages\n if response.xpath('normalize-space(//a[contains(@href,\"maps.google\")]/@href)').extract_first():\n map_link = response.xpath('normalize-space(//a[contains(@href,\"maps.google\")]/@href)').extract_first()\n else:\n map_link = response.xpath('normalize-space(//img[contains(@src,\"maps.google\")]/@src)').extract_first()\n\n # extract coordinates from map link\n if re.search(r'.+=([0-9.-]+),\\s?([0-9.-]+)', map_link):\n lat = re.search(r'.+=([0-9.-]+),\\s?([0-9.-]+)', map_link).group(1)\n lon = re.search(r'.+=([0-9.-]+),\\s?([0-9.-]+)', map_link).group(2)\n elif re.search(r'.+@([0-9.-]+),\\s?([0-9.-]+)', map_link):\n lat = re.search(r'.+@([0-9.-]+),\\s?([0-9.-]+)', map_link).group(1)\n lon = re.search(r'.+@([0-9.-]+),\\s?([0-9.-]+)', map_link).group(2)\n else:\n lat = None\n lon = None\n\n properties = {\n 'ref': ref,\n 'name': name,\n 'addr_full': addr_full,\n 'country': country,\n 'phone': phone,\n 'website': response.url,\n 'lat': lat,\n 'lon': lon,\n 'extras':\n {\n 'location_type': location_type # Is this a service center or store/gallery\n }\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tesla.py"}]} | 1,971 | 276 |
gh_patches_debug_19241 | rasdani/github-patches | git_diff | Gallopsled__pwntools-2240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not overwrite global `bytes` in code or examples
It looks like there's a few places we overwrite `bytes` (the type identifier) with a local variable.
```
$ git grep -E -e '^ +bytes *=' -- '*.py'
pwnlib/commandline/disasm.py:81: bytes = disasm(dat, vma=safeeval.const(args.address), instructions=False, offset=False)
pwnlib/commandline/elfpatch.py:29: bytes = unhex(a.bytes)
pwnlib/elf/elf.py:195: bytes = 4
```
And a few cases we do it in tests, which could have cross-test impact if the global state isn't reset (hint: it isn't).
```
~/pwntools $ git grep -E -e '^ +>>> bytes *=' -- '*.py'
pwnlib/runner.py:42: >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
pwnlib/runner.py:48: >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')
pwnlib/runner.py:87: >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
```
</issue>
<code>
[start of pwnlib/runner.py]
1 from __future__ import absolute_import
2 from __future__ import division
3
4 import os
5 import tempfile
6
7 from pwnlib.context import LocalContext
8 from pwnlib.elf import ELF
9 from pwnlib.tubes.process import process
10
11 __all__ = ['run_assembly', 'run_shellcode', 'run_assembly_exitcode', 'run_shellcode_exitcode']
12
13 @LocalContext
14 def run_assembly(assembly):
15 """
16 Given an assembly listing, assemble and execute it.
17
18 Returns:
19
20 A :class:`pwnlib.tubes.process.process` tube to interact with the process.
21
22 Example:
23
24 >>> p = run_assembly('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
25 >>> p.wait_for_close()
26 >>> p.poll()
27 3
28
29 >>> p = run_assembly('mov r0, #12; mov r7, #1; svc #0', arch='arm')
30 >>> p.wait_for_close()
31 >>> p.poll()
32 12
33 """
34 return ELF.from_assembly(assembly).process()
35
36 @LocalContext
37 def run_shellcode(bytes, **kw):
38 """Given assembled machine code bytes, execute them.
39
40 Example:
41
42 >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
43 >>> p = run_shellcode(bytes)
44 >>> p.wait_for_close()
45 >>> p.poll()
46 3
47
48 >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')
49 >>> p = run_shellcode(bytes, arch='arm')
50 >>> p.wait_for_close()
51 >>> p.poll()
52 12
53 """
54 return ELF.from_bytes(bytes, **kw).process()
55
56 @LocalContext
57 def run_assembly_exitcode(assembly):
58 """
59 Given an assembly listing, assemble and execute it, and wait for
60 the process to die.
61
62 Returns:
63
64 The exit code of the process.
65
66 Example:
67
68 >>> run_assembly_exitcode('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
69 3
70 """
71 p = run_assembly(assembly)
72 p.wait_for_close()
73 return p.poll()
74
75 @LocalContext
76 def run_shellcode_exitcode(bytes):
77 """
78 Given assembled machine code bytes, execute them, and wait for
79 the process to die.
80
81 Returns:
82
83 The exit code of the process.
84
85 Example:
86
87 >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
88 >>> run_shellcode_exitcode(bytes)
89 3
90 """
91 p = run_shellcode(bytes)
92 p.wait_for_close()
93 return p.poll()
94
[end of pwnlib/runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/runner.py b/pwnlib/runner.py
--- a/pwnlib/runner.py
+++ b/pwnlib/runner.py
@@ -39,14 +39,14 @@
Example:
- >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
- >>> p = run_shellcode(bytes)
+ >>> insn_bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
+ >>> p = run_shellcode(insn_bytes)
>>> p.wait_for_close()
>>> p.poll()
3
- >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')
- >>> p = run_shellcode(bytes, arch='arm')
+ >>> insn_bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')
+ >>> p = run_shellcode(insn_bytes, arch='arm')
>>> p.wait_for_close()
>>> p.poll()
12
@@ -84,8 +84,8 @@
Example:
- >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
- >>> run_shellcode_exitcode(bytes)
+ >>> insn_bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')
+ >>> run_shellcode_exitcode(insn_bytes)
3
"""
p = run_shellcode(bytes)
| {"golden_diff": "diff --git a/pwnlib/runner.py b/pwnlib/runner.py\n--- a/pwnlib/runner.py\n+++ b/pwnlib/runner.py\n@@ -39,14 +39,14 @@\n \n Example:\n \n- >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n- >>> p = run_shellcode(bytes)\n+ >>> insn_bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n+ >>> p = run_shellcode(insn_bytes)\n >>> p.wait_for_close()\n >>> p.poll()\n 3\n \n- >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')\n- >>> p = run_shellcode(bytes, arch='arm')\n+ >>> insn_bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')\n+ >>> p = run_shellcode(insn_bytes, arch='arm')\n >>> p.wait_for_close()\n >>> p.poll()\n 12\n@@ -84,8 +84,8 @@\n \n Example:\n \n- >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n- >>> run_shellcode_exitcode(bytes)\n+ >>> insn_bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n+ >>> run_shellcode_exitcode(insn_bytes)\n 3\n \"\"\"\n p = run_shellcode(bytes)\n", "issue": "Do not overwrite global `bytes` in code or examples\nIt looks like there's a few places we overwrite `bytes` (the type identifier) with a local variable.\r\n\r\n```\r\n$ git grep -E -e '^ +bytes *=' -- '*.py'\r\npwnlib/commandline/disasm.py:81: bytes = disasm(dat, vma=safeeval.const(args.address), instructions=False, offset=False)\r\npwnlib/commandline/elfpatch.py:29: bytes = unhex(a.bytes)\r\npwnlib/elf/elf.py:195: bytes = 4\r\n```\r\n\r\nAnd a few cases we do it in tests, which could have cross-test impact if the global state isn't reset (hint: it isn't).\r\n\r\n```\r\n~/pwntools $ git grep -E -e '^ +>>> bytes *=' -- '*.py'\r\npwnlib/runner.py:42: >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\r\npwnlib/runner.py:48: >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')\r\npwnlib/runner.py:87: >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport os\nimport tempfile\n\nfrom pwnlib.context import LocalContext\nfrom pwnlib.elf import ELF\nfrom pwnlib.tubes.process import process\n\n__all__ = ['run_assembly', 'run_shellcode', 'run_assembly_exitcode', 'run_shellcode_exitcode']\n\n@LocalContext\ndef run_assembly(assembly):\n \"\"\"\n Given an assembly listing, assemble and execute it.\n\n Returns:\n\n A :class:`pwnlib.tubes.process.process` tube to interact with the process.\n\n Example:\n\n >>> p = run_assembly('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n >>> p.wait_for_close()\n >>> p.poll()\n 3\n\n >>> p = run_assembly('mov r0, #12; mov r7, #1; svc #0', arch='arm')\n >>> p.wait_for_close()\n >>> p.poll()\n 12\n \"\"\"\n return ELF.from_assembly(assembly).process()\n\n@LocalContext\ndef run_shellcode(bytes, **kw):\n \"\"\"Given assembled machine code bytes, execute them.\n\n Example:\n\n >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n >>> p = run_shellcode(bytes)\n >>> p.wait_for_close()\n >>> p.poll()\n 3\n\n >>> bytes = asm('mov r0, #12; mov r7, #1; svc #0', arch='arm')\n >>> p = run_shellcode(bytes, arch='arm')\n >>> p.wait_for_close()\n >>> p.poll()\n 12\n \"\"\"\n return ELF.from_bytes(bytes, **kw).process()\n\n@LocalContext\ndef run_assembly_exitcode(assembly):\n \"\"\"\n Given an assembly listing, assemble and execute it, and wait for\n the process to die.\n\n Returns:\n\n The exit code of the process.\n\n Example:\n\n >>> run_assembly_exitcode('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n 3\n \"\"\"\n p = run_assembly(assembly)\n p.wait_for_close()\n return 
p.poll()\n\n@LocalContext\ndef run_shellcode_exitcode(bytes):\n \"\"\"\n Given assembled machine code bytes, execute them, and wait for\n the process to die.\n\n Returns:\n\n The exit code of the process.\n\n Example:\n\n >>> bytes = asm('mov ebx, 3; mov eax, SYS_exit; int 0x80;')\n >>> run_shellcode_exitcode(bytes)\n 3\n \"\"\"\n p = run_shellcode(bytes)\n p.wait_for_close()\n return p.poll()\n", "path": "pwnlib/runner.py"}]} | 1,633 | 365 |
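The rename to `insn_bytes` in the doctests is not only cosmetic: while a name `bytes` is bound in scope, the builtin type of the same name becomes unreachable there, so later code (or a doctest sharing those globals) that calls `bytes(...)` breaks. A minimal demonstration:

```python
def run(bytes):                    # parameter shadows the builtin, as in the old doctests
    return bytes(8)                # tries to call the bytes *object* that was passed in

try:
    run(b"\x90\x90\x90\x90")
except TypeError as err:
    print(err)                     # 'bytes' object is not callable

def run_fixed(insn_bytes):
    return bytes(8)                # the builtin is still reachable here

print(run_fixed(b"\x90\x90\x90\x90"))   # b'\x00\x00\x00\x00\x00\x00\x00\x00'
```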
gh_patches_debug_35111 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-2854 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
King Island: battery never seems to discharge
I've been keeping an eye on AUS-TAS-KI since it was added to the map. Charging works fine, discharging doesn't show up.
</issue>
<code>
[start of parsers/AUS_TAS_KI.py]
1 # Initial PR https://github.com/tmrowco/electricitymap-contrib/pull/2456
2 # Discussion thread https://github.com/tmrowco/electricitymap-contrib/issues/636
3 # A promotion webpage for King's Island energy production is here : https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island
4 # As of 09/2020, it embeds with <iframe> the URI https://data.ajenti.com.au/KIREIP/index.html
5 # About the data, the feed we get seems to be counters with a 2 seconds interval.
6 # That means that if we fetch these counters every 15 minutes, we only are reading "instantaneous" metters that could differ from the total quantity of energies at play. To get the very exact data, we would need to have a parser running constanty to collect those 2-sec interval counters.
7
8 import asyncio
9 import json
10 import logging
11 import arrow
12 from signalr import Connection
13 from requests import Session
14
15 class SignalR:
16 def __init__(self, url):
17 self.url = url
18
19 def update_res(self, msg):
20 if (msg != {}):
21 self.res = msg
22
23 def get_value(self, hub, method):
24 self.res = {}
25 with Session() as session:
26 #create a connection
27 connection = Connection(self.url, session)
28 chat = connection.register_hub(hub)
29 chat.client.on(method, self.update_res)
30 connection.start()
31 connection.wait(3)
32 connection.close()
33 return self.res
34
35 def parse_payload(logger, payload):
36 technologies_parsed = {}
37 if not 'technologies' in payload:
38 raise KeyError(
39 f"No 'technologies' in payload\n"
40 f"serie : {json.dumps(payload)}"
41 )
42 else:
43 logger.debug(f"serie : {json.dumps(payload)}")
44 for technology in payload['technologies']:
45 assert technology['unit'] == 'kW'
46 # The upstream API gives us kW, we need MW
47 technologies_parsed[technology['id']] = int(technology['value'])/1000
48 logger.debug(f"production : {json.dumps(technologies_parsed)}")
49
50 biodiesel_percent = payload['biodiesel']['percent']
51
52 return technologies_parsed, biodiesel_percent
53
54 # Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid
55 def format_storage_techs(technologies_parsed):
56 storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']
57 battery_production = storage_techs if storage_techs > 0 else 0
58 battery_storage = storage_techs if storage_techs < 0 else 0
59
60 return battery_production, battery_storage
61
62 def fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):
63
64 if target_datetime is not None:
65 raise NotImplementedError('The datasource currently implemented is only real time')
66
67 payload = SignalR("https://data.ajenti.com.au/live/signalr").get_value("TagHub", "Dashboard")
68 technologies_parsed, biodiesel_percent = parse_payload(logger, payload)
69 battery_production, battery_storage = format_storage_techs(technologies_parsed)
70 return {
71 'zoneKey': zone_key,
72 'datetime': arrow.now(tz='Australia/Currie').datetime,
73 'production': {
74 'battery discharge': battery_production,
75 'biomass': technologies_parsed['diesel']*biodiesel_percent/100,
76 'coal': 0,
77 'gas': 0,
78 'hydro': 0,
79 'nuclear': 0,
80 'oil': technologies_parsed['diesel']*(100-biodiesel_percent)/100,
81 'solar': technologies_parsed['solar'],
82 'wind': 0 if technologies_parsed['wind'] < 0 and technologies_parsed['wind'] > -0.1 else technologies_parsed['wind'], #If wind between 0 and -0.1 set to 0 to ignore self-consumption
83 'geothermal': 0,
84 'unknown': 0
85 },
86 'storage': {
87 'battery': battery_storage*-1
88 },
89 'source': 'https://data.ajenti.com.au/KIREIP/index.html'
90 }
91
92 if __name__ == '__main__':
93 print(fetch_production())
94
[end of parsers/AUS_TAS_KI.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/AUS_TAS_KI.py b/parsers/AUS_TAS_KI.py
--- a/parsers/AUS_TAS_KI.py
+++ b/parsers/AUS_TAS_KI.py
@@ -52,12 +52,10 @@
return technologies_parsed, biodiesel_percent
# Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid
-def format_storage_techs(technologies_parsed):
+def sum_storage_techs(technologies_parsed):
storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']
- battery_production = storage_techs if storage_techs > 0 else 0
- battery_storage = storage_techs if storage_techs < 0 else 0
- return battery_production, battery_storage
+ return storage_techs
def fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):
@@ -66,12 +64,11 @@
payload = SignalR("https://data.ajenti.com.au/live/signalr").get_value("TagHub", "Dashboard")
technologies_parsed, biodiesel_percent = parse_payload(logger, payload)
- battery_production, battery_storage = format_storage_techs(technologies_parsed)
+ storage_techs = sum_storage_techs(technologies_parsed)
return {
'zoneKey': zone_key,
'datetime': arrow.now(tz='Australia/Currie').datetime,
'production': {
- 'battery discharge': battery_production,
'biomass': technologies_parsed['diesel']*biodiesel_percent/100,
'coal': 0,
'gas': 0,
@@ -84,9 +81,9 @@
'unknown': 0
},
'storage': {
- 'battery': battery_storage*-1
+ 'battery': storage_techs*-1 #Somewhat counterintuitively,to ElectricityMap positive means charging and negative means discharging
},
- 'source': 'https://data.ajenti.com.au/KIREIP/index.html'
+ 'source': 'https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island' #Iframe: https://data.ajenti.com.au/KIREIP/index.html
}
if __name__ == '__main__':
| {"golden_diff": "diff --git a/parsers/AUS_TAS_KI.py b/parsers/AUS_TAS_KI.py\n--- a/parsers/AUS_TAS_KI.py\n+++ b/parsers/AUS_TAS_KI.py\n@@ -52,12 +52,10 @@\n return technologies_parsed, biodiesel_percent\n \n # Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid\n-def format_storage_techs(technologies_parsed):\n+def sum_storage_techs(technologies_parsed):\n storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']\n- battery_production = storage_techs if storage_techs > 0 else 0\n- battery_storage = storage_techs if storage_techs < 0 else 0\n \n- return battery_production, battery_storage\n+ return storage_techs\n \n def fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):\n \n@@ -66,12 +64,11 @@\n \n payload = SignalR(\"https://data.ajenti.com.au/live/signalr\").get_value(\"TagHub\", \"Dashboard\")\n technologies_parsed, biodiesel_percent = parse_payload(logger, payload)\n- battery_production, battery_storage = format_storage_techs(technologies_parsed)\n+ storage_techs = sum_storage_techs(technologies_parsed)\n return {\n 'zoneKey': zone_key,\n 'datetime': arrow.now(tz='Australia/Currie').datetime,\n 'production': {\n- 'battery discharge': battery_production,\n 'biomass': technologies_parsed['diesel']*biodiesel_percent/100,\n 'coal': 0,\n 'gas': 0,\n@@ -84,9 +81,9 @@\n 'unknown': 0\n },\n 'storage': {\n- 'battery': battery_storage*-1\n+ 'battery': storage_techs*-1 #Somewhat counterintuitively,to ElectricityMap positive means charging and negative means discharging\n },\n- 'source': 'https://data.ajenti.com.au/KIREIP/index.html'\n+ 'source': 'https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island' #Iframe: https://data.ajenti.com.au/KIREIP/index.html\n }\n \n if __name__ == '__main__':\n", "issue": "King Island: battery never seems to discharge \nI've been keeping an eye on AUS-TAS-KI since it was added to the map. Charging works fine, discharging doesn't show up.\n", "before_files": [{"content": "# Initial PR https://github.com/tmrowco/electricitymap-contrib/pull/2456\n# Discussion thread https://github.com/tmrowco/electricitymap-contrib/issues/636\n# A promotion webpage for King's Island energy production is here : https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island\n# As of 09/2020, it embeds with <iframe> the URI https://data.ajenti.com.au/KIREIP/index.html\n# About the data, the feed we get seems to be counters with a 2 seconds interval.\n# That means that if we fetch these counters every 15 minutes, we only are reading \"instantaneous\" metters that could differ from the total quantity of energies at play. 
To get the very exact data, we would need to have a parser running constanty to collect those 2-sec interval counters.\n\nimport asyncio\nimport json\nimport logging\nimport arrow\nfrom signalr import Connection\nfrom requests import Session\n\nclass SignalR:\n def __init__(self, url):\n self.url = url\n \n def update_res(self, msg):\n if (msg != {}):\n self.res = msg\n\n def get_value(self, hub, method):\n self.res = {}\n with Session() as session:\n #create a connection\n connection = Connection(self.url, session)\n chat = connection.register_hub(hub)\n chat.client.on(method, self.update_res)\n connection.start()\n connection.wait(3)\n connection.close()\n return self.res\n \ndef parse_payload(logger, payload):\n technologies_parsed = {}\n if not 'technologies' in payload:\n raise KeyError(\n f\"No 'technologies' in payload\\n\"\n f\"serie : {json.dumps(payload)}\"\n )\n else:\n logger.debug(f\"serie : {json.dumps(payload)}\")\n for technology in payload['technologies']:\n assert technology['unit'] == 'kW'\n # The upstream API gives us kW, we need MW\n technologies_parsed[technology['id']] = int(technology['value'])/1000\n logger.debug(f\"production : {json.dumps(technologies_parsed)}\")\n\n biodiesel_percent = payload['biodiesel']['percent']\n\n return technologies_parsed, biodiesel_percent\n\n# Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid\ndef format_storage_techs(technologies_parsed):\n storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']\n battery_production = storage_techs if storage_techs > 0 else 0\n battery_storage = storage_techs if storage_techs < 0 else 0\n\n return battery_production, battery_storage\n\ndef fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):\n\n if target_datetime is not None:\n raise NotImplementedError('The datasource currently implemented is only real time')\n \n payload = SignalR(\"https://data.ajenti.com.au/live/signalr\").get_value(\"TagHub\", \"Dashboard\")\n technologies_parsed, biodiesel_percent = parse_payload(logger, payload)\n battery_production, battery_storage = format_storage_techs(technologies_parsed)\n return {\n 'zoneKey': zone_key,\n 'datetime': arrow.now(tz='Australia/Currie').datetime,\n 'production': {\n 'battery discharge': battery_production,\n 'biomass': technologies_parsed['diesel']*biodiesel_percent/100,\n 'coal': 0,\n 'gas': 0,\n 'hydro': 0,\n 'nuclear': 0,\n 'oil': technologies_parsed['diesel']*(100-biodiesel_percent)/100,\n 'solar': technologies_parsed['solar'],\n 'wind': 0 if technologies_parsed['wind'] < 0 and technologies_parsed['wind'] > -0.1 else technologies_parsed['wind'], #If wind between 0 and -0.1 set to 0 to ignore self-consumption\n 'geothermal': 0,\n 'unknown': 0\n },\n 'storage': {\n 'battery': battery_storage*-1\n },\n 'source': 'https://data.ajenti.com.au/KIREIP/index.html'\n }\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/AUS_TAS_KI.py"}]} | 1,727 | 541 |
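The patched parser drops the separate `battery discharge` production key and reports the summed battery + flywheel figure only under `storage`, flipping the sign because the feed and the map use opposite conventions: the upstream counters are positive when feeding the grid, while the map treats positive storage as charging. A small numeric check of that mapping (values invented):

```python
def storage_value(battery_kw, flywheel_kw):
    # Upstream convention: positive = feeding the grid, negative = storing.
    storage_techs = battery_kw + flywheel_kw
    # Map convention: positive = charging, negative = discharging.
    return storage_techs * -1

print(storage_value(-120, -30))   # 150  -> shown as charging
print(storage_value(80, 20))      # -100 -> shown as discharging
```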