Dataset columns (types and observed value ranges):

    problem_id          string, length 18–22
    source              string, 1 distinct value
    task_type           string, 1 distinct value
    in_source_id        string, length 13–58
    prompt              string, length 1.71k–9.01k
    golden_diff         string, length 151–4.94k
    verification_info   string, length 465–11.3k
    num_tokens_prompt   int64, 557–2.05k
    num_tokens_diff     int64, 48–1.02k
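Each row pairs a GitHub issue statement and the relevant source file(s) with the golden diff that resolved it, plus token counts for the prompt and the diff. Below is a minimal sketch of loading the rows with the Hugging Face `datasets` library; the repo id is inferred from the `source` field and the split name is an assumption.

```python
# Sketch only: the repo id and split name are assumptions, not confirmed here.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["golden_diff"][:200])  # the reference patch for the issue
```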
gh_patches_debug_38716
rasdani/github-patches
git_diff
psychopy__psychopy-1542
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> no documentation for trialHandler.data.addDataType The TrialHandler.py demo uses the function addDataType: ``` trials.data.addDataType('choice') # this will help store things with the stimuli trials.data.addDataType('RT') # add as many types as you like ``` But I haven't been able to find any documentation for addDataType in the psychopy.data page (http://www.psychopy.org/api/data.html). (I'd like to know what it does) </issue> <code> [start of psychopy/demos/coder/experiment control/TrialHandler.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 Demo of TrialHandler 6 """ 7 8 from __future__ import division 9 from __future__ import print_function 10 11 from builtins import range 12 from random import random 13 from psychopy import data 14 15 # create your list of stimuli 16 # NB as of version 1.62 you could simply import an excel spreadsheet with this 17 # using data.importConditions('someFile.xlsx') 18 stimList = [] 19 for ori in range(90, 180, 30): 20 for sf in [0.5, 1.0, 2.0]: 21 # append a python 'dictionary' to the list 22 stimList.append({'sf':sf, 'ori':ori}) 23 24 # organize them with the trial handler 25 trials = data.TrialHandler(stimList, 10, extraInfo= {'participant':"Nobody", 'session':1}) 26 trials.data.addDataType('choice') # this will help store things with the stimuli 27 trials.data.addDataType('RT') # add as many types as you like 28 29 # run the experiment 30 nDone = 0 31 for thisTrial in trials: # handler can act like a for loop 32 # simulate some data 33 thisReactionTime = random() + float(thisTrial['sf']) / 2.0 34 thisChoice = round(random()) 35 trials.data.add('RT', thisReactionTime) # add the data to our set 36 trials.data.add('choice', thisChoice) 37 nDone += 1 # just for a quick reference 38 39 msg = 'trial %i had position %s in the list (sf=%.1f)' 40 print(msg % (nDone, trials.thisIndex, thisTrial['sf'])) 41 42 # after the experiment 43 print('\n') 44 trials.printAsText(stimOut=['sf', 'ori'], # write summary data to screen 45 dataOut=['RT_mean', 'RT_std', 'choice_raw']) 46 trials.saveAsText(fileName='testData', # also write summary data to a text file 47 stimOut=['sf', 'ori'], 48 dataOut=['RT_mean', 'RT_std', 'choice_raw']) 49 trials.saveAsExcel(fileName='testData', # ...or an xlsx file (which supports sheets) 50 sheetName = 'rawData', 51 stimOut=['sf', 'ori'], 52 dataOut=['RT_mean', 'RT_std', 'choice_raw']) 53 trials.saveAsPickle(fileName = 'testData') # this saves a copy of the whole object 54 df = trials.saveAsWideText("testDataWide.txt") # wide is useful for analysis with R or SPSS. Also returns dataframe df 55 56 # The contents of this file are in the public domain. 57 [end of psychopy/demos/coder/experiment control/TrialHandler.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/psychopy/demos/coder/experiment control/TrialHandler.py b/psychopy/demos/coder/experiment control/TrialHandler.py --- a/psychopy/demos/coder/experiment control/TrialHandler.py +++ b/psychopy/demos/coder/experiment control/TrialHandler.py @@ -3,6 +3,9 @@ """ Demo of TrialHandler + +The contents of this file are in the public domain. + """ from __future__ import division @@ -10,6 +13,7 @@ from builtins import range from random import random + from psychopy import data # create your list of stimuli @@ -19,12 +23,11 @@ for ori in range(90, 180, 30): for sf in [0.5, 1.0, 2.0]: # append a python 'dictionary' to the list - stimList.append({'sf':sf, 'ori':ori}) + stimList.append({'sf': sf, 'ori': ori}) # organize them with the trial handler -trials = data.TrialHandler(stimList, 10, extraInfo= {'participant':"Nobody", 'session':1}) -trials.data.addDataType('choice') # this will help store things with the stimuli -trials.data.addDataType('RT') # add as many types as you like +trials = data.TrialHandler(stimList, 10, + extraInfo={'participant': "Nobody", 'session': 1}) # run the experiment nDone = 0 @@ -39,18 +42,27 @@ msg = 'trial %i had position %s in the list (sf=%.1f)' print(msg % (nDone, trials.thisIndex, thisTrial['sf'])) -# after the experiment +# After the experiment, print a new line print('\n') -trials.printAsText(stimOut=['sf', 'ori'], # write summary data to screen - dataOut=['RT_mean', 'RT_std', 'choice_raw']) -trials.saveAsText(fileName='testData', # also write summary data to a text file - stimOut=['sf', 'ori'], - dataOut=['RT_mean', 'RT_std', 'choice_raw']) -trials.saveAsExcel(fileName='testData', # ...or an xlsx file (which supports sheets) - sheetName = 'rawData', + +# Write summary data to screen +trials.printAsText(stimOut=['sf', 'ori'], + dataOut=['RT_mean', 'RT_std', 'choice_raw']) + +# Write summary data to a text file ... +trials.saveAsText(fileName='testData', stimOut=['sf', 'ori'], dataOut=['RT_mean', 'RT_std', 'choice_raw']) -trials.saveAsPickle(fileName = 'testData') # this saves a copy of the whole object -df = trials.saveAsWideText("testDataWide.txt") # wide is useful for analysis with R or SPSS. Also returns dataframe df -# The contents of this file are in the public domain. +# ... or an xlsx file (which supports sheets) +trials.saveAsExcel(fileName='testData', + sheetName='rawData', + stimOut=['sf', 'ori'], + dataOut=['RT_mean', 'RT_std', 'choice_raw']) + +# Save a copy of the whole TrialHandler object, which can be reloaded later to +# re-create the experiment. +trials.saveAsPickle(fileName='testData') + +# Wide format is useful for analysis with R or SPSS. +df = trials.saveAsWideText('testDataWide.txt')
{"golden_diff": "diff --git a/psychopy/demos/coder/experiment control/TrialHandler.py b/psychopy/demos/coder/experiment control/TrialHandler.py\n--- a/psychopy/demos/coder/experiment control/TrialHandler.py\t\n+++ b/psychopy/demos/coder/experiment control/TrialHandler.py\t\n@@ -3,6 +3,9 @@\n \n \"\"\"\n Demo of TrialHandler\n+\n+The contents of this file are in the public domain.\n+\n \"\"\"\n \n from __future__ import division\n@@ -10,6 +13,7 @@\n \n from builtins import range\n from random import random\n+\n from psychopy import data\n \n # create your list of stimuli\n@@ -19,12 +23,11 @@\n for ori in range(90, 180, 30):\n for sf in [0.5, 1.0, 2.0]:\n # append a python 'dictionary' to the list\n- stimList.append({'sf':sf, 'ori':ori})\n+ stimList.append({'sf': sf, 'ori': ori})\n \n # organize them with the trial handler\n-trials = data.TrialHandler(stimList, 10, extraInfo= {'participant':\"Nobody\", 'session':1})\n-trials.data.addDataType('choice') # this will help store things with the stimuli\n-trials.data.addDataType('RT') # add as many types as you like\n+trials = data.TrialHandler(stimList, 10,\n+ extraInfo={'participant': \"Nobody\", 'session': 1})\n \n # run the experiment\n nDone = 0\n@@ -39,18 +42,27 @@\n msg = 'trial %i had position %s in the list (sf=%.1f)'\n print(msg % (nDone, trials.thisIndex, thisTrial['sf']))\n \n-# after the experiment\n+# After the experiment, print a new line\n print('\\n')\n-trials.printAsText(stimOut=['sf', 'ori'], # write summary data to screen\n- dataOut=['RT_mean', 'RT_std', 'choice_raw'])\n-trials.saveAsText(fileName='testData', # also write summary data to a text file\n- stimOut=['sf', 'ori'],\n- dataOut=['RT_mean', 'RT_std', 'choice_raw'])\n-trials.saveAsExcel(fileName='testData', # ...or an xlsx file (which supports sheets)\n- sheetName = 'rawData',\n+\n+# Write summary data to screen\n+trials.printAsText(stimOut=['sf', 'ori'],\n+ dataOut=['RT_mean', 'RT_std', 'choice_raw'])\n+\n+# Write summary data to a text file ...\n+trials.saveAsText(fileName='testData',\n stimOut=['sf', 'ori'],\n dataOut=['RT_mean', 'RT_std', 'choice_raw'])\n-trials.saveAsPickle(fileName = 'testData') # this saves a copy of the whole object\n-df = trials.saveAsWideText(\"testDataWide.txt\") # wide is useful for analysis with R or SPSS. Also returns dataframe df\n \n-# The contents of this file are in the public domain.\n+# ... or an xlsx file (which supports sheets)\n+trials.saveAsExcel(fileName='testData',\n+ sheetName='rawData',\n+ stimOut=['sf', 'ori'],\n+ dataOut=['RT_mean', 'RT_std', 'choice_raw'])\n+\n+# Save a copy of the whole TrialHandler object, which can be reloaded later to\n+# re-create the experiment.\n+trials.saveAsPickle(fileName='testData')\n+\n+# Wide format is useful for analysis with R or SPSS.\n+df = trials.saveAsWideText('testDataWide.txt')\n", "issue": "no documentation for trialHandler.data.addDataType\nThe TrialHandler.py demo uses the function addDataType:\r\n\r\n```\r\ntrials.data.addDataType('choice') # this will help store things with the stimuli\r\ntrials.data.addDataType('RT') # add as many types as you like\r\n```\r\n\r\nBut I haven't been able to find any documentation for addDataType in the psychopy.data page (http://www.psychopy.org/api/data.html). 
(I'd like to know what it does)\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDemo of TrialHandler\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom builtins import range\nfrom random import random\nfrom psychopy import data\n\n# create your list of stimuli\n# NB as of version 1.62 you could simply import an excel spreadsheet with this\n# using data.importConditions('someFile.xlsx')\nstimList = []\nfor ori in range(90, 180, 30):\n for sf in [0.5, 1.0, 2.0]:\n # append a python 'dictionary' to the list\n stimList.append({'sf':sf, 'ori':ori})\n\n# organize them with the trial handler\ntrials = data.TrialHandler(stimList, 10, extraInfo= {'participant':\"Nobody\", 'session':1})\ntrials.data.addDataType('choice') # this will help store things with the stimuli\ntrials.data.addDataType('RT') # add as many types as you like\n\n# run the experiment\nnDone = 0\nfor thisTrial in trials: # handler can act like a for loop\n # simulate some data\n thisReactionTime = random() + float(thisTrial['sf']) / 2.0\n thisChoice = round(random())\n trials.data.add('RT', thisReactionTime) # add the data to our set\n trials.data.add('choice', thisChoice)\n nDone += 1 # just for a quick reference\n\n msg = 'trial %i had position %s in the list (sf=%.1f)'\n print(msg % (nDone, trials.thisIndex, thisTrial['sf']))\n\n# after the experiment\nprint('\\n')\ntrials.printAsText(stimOut=['sf', 'ori'], # write summary data to screen\n dataOut=['RT_mean', 'RT_std', 'choice_raw'])\ntrials.saveAsText(fileName='testData', # also write summary data to a text file\n stimOut=['sf', 'ori'],\n dataOut=['RT_mean', 'RT_std', 'choice_raw'])\ntrials.saveAsExcel(fileName='testData', # ...or an xlsx file (which supports sheets)\n sheetName = 'rawData',\n stimOut=['sf', 'ori'],\n dataOut=['RT_mean', 'RT_std', 'choice_raw'])\ntrials.saveAsPickle(fileName = 'testData') # this saves a copy of the whole object\ndf = trials.saveAsWideText(\"testDataWide.txt\") # wide is useful for analysis with R or SPSS. Also returns dataframe df\n\n# The contents of this file are in the public domain.\n", "path": "psychopy/demos/coder/experiment control/TrialHandler.py"}]}
1,345
821
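Every prompt in this dataset asks for a patch that can be applied with `git apply`, so a natural verification step is a dry run before the real apply. A short sketch follows; the patch file name is hypothetical:

```python
# Hypothetical check that a generated patch applies cleanly before applying it.
import subprocess

subprocess.run(["git", "apply", "--check", "fix.patch"], check=True)  # dry run only
subprocess.run(["git", "apply", "fix.patch"], check=True)             # actually apply
```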
gh_patches_debug_2575
rasdani/github-patches
git_diff
coala__coala-3608
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove call_without_output from Shell.py L7 This line was used by the requirement classes, it isnt used anymore as they use sarge, so it should be removed. difficulty/newcomer </issue> <code> [start of coalib/misc/Shell.py] 1 from contextlib import contextmanager 2 import functools 3 import shlex 4 from subprocess import PIPE, Popen, call, DEVNULL 5 6 7 call_without_output = functools.partial(call, stdout=DEVNULL, stderr=DEVNULL) 8 """ 9 Uses subprocess.call to execute a command, but suppresses the output and 10 the errors. 11 """ 12 13 14 @contextmanager 15 def run_interactive_shell_command(command, **kwargs): 16 """ 17 Runs a single command in shell and provides stdout, stderr and stdin 18 streams. 19 20 This function creates a context manager that sets up the process (using 21 ``subprocess.Popen()``), returns to caller and waits for process to exit on 22 leaving. 23 24 By default the process is opened in ``universal_newlines`` mode and creates 25 pipes for all streams (stdout, stderr and stdin) using ``subprocess.PIPE`` 26 special value. These pipes are closed automatically, so if you want to get 27 the contents of the streams you should retrieve them before the context 28 manager exits. 29 30 >>> with run_interactive_shell_command(["echo", "TEXT"]) as p: 31 ... stdout = p.stdout 32 ... stdout_text = stdout.read() 33 >>> stdout_text 34 'TEXT\\n' 35 >>> stdout.closed 36 True 37 38 Custom streams provided are not closed except of ``subprocess.PIPE``. 39 40 >>> from tempfile import TemporaryFile 41 >>> stream = TemporaryFile() 42 >>> with run_interactive_shell_command(["echo", "TEXT"], 43 ... stdout=stream) as p: 44 ... stderr = p.stderr 45 >>> stderr.closed 46 True 47 >>> stream.closed 48 False 49 50 :param command: The command to run on shell. This parameter can either 51 be a sequence of arguments that are directly passed to 52 the process or a string. A string gets splitted beforehand 53 using ``shlex.split()``. If providing ``shell=True`` as a 54 keyword-argument, no ``shlex.split()`` is performed and the 55 command string goes directly to ``subprocess.Popen()``. 56 :param kwargs: Additional keyword arguments to pass to 57 ``subprocess.Popen`` that are used to spawn the process. 58 :return: A context manager yielding the process started from the 59 command. 60 """ 61 if not kwargs.get('shell', False) and isinstance(command, str): 62 command = shlex.split(command) 63 64 args = {'stdout': PIPE, 65 'stderr': PIPE, 66 'stdin': PIPE, 67 'universal_newlines': True} 68 args.update(kwargs) 69 70 process = Popen(command, **args) 71 try: 72 yield process 73 finally: 74 if args['stdout'] is PIPE: 75 process.stdout.close() 76 if args['stderr'] is PIPE: 77 process.stderr.close() 78 if args['stdin'] is PIPE: 79 process.stdin.close() 80 81 process.wait() 82 83 84 def run_shell_command(command, stdin=None, **kwargs): 85 """ 86 Runs a single command in shell and returns the read stdout and stderr data. 87 88 This function waits for the process (created using ``subprocess.Popen()``) 89 to exit. Effectively it wraps ``run_interactive_shell_command()`` and uses 90 ``communicate()`` on the process. 91 92 See also ``run_interactive_shell_command()``. 93 94 :param command: The command to run on shell. This parameter can either 95 be a sequence of arguments that are directly passed to 96 the process or a string. A string gets splitted beforehand 97 using ``shlex.split()``. 
98 :param stdin: Initial input to send to the process. 99 :param kwargs: Additional keyword arguments to pass to 100 ``subprocess.Popen`` that is used to spawn the process. 101 :return: A tuple with ``(stdoutstring, stderrstring)``. 102 """ 103 with run_interactive_shell_command(command, **kwargs) as p: 104 ret = p.communicate(stdin) 105 return ret 106 107 108 def get_shell_type(): # pragma: no cover 109 """ 110 Finds the current shell type based on the outputs of common pre-defined 111 variables in them. This is useful to identify which sort of escaping 112 is required for strings. 113 114 :return: The shell type. This can be either "powershell" if Windows 115 Powershell is detected, "cmd" if command prompt is been 116 detected or "sh" if it's neither of these. 117 """ 118 out = run_shell_command('echo $host.name', shell=True)[0] 119 if out.strip() == 'ConsoleHost': 120 return 'powershell' 121 out = run_shell_command('echo $0', shell=True)[0] 122 if out.strip() == '$0': 123 return 'cmd' 124 return 'sh' 125 [end of coalib/misc/Shell.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/coalib/misc/Shell.py b/coalib/misc/Shell.py --- a/coalib/misc/Shell.py +++ b/coalib/misc/Shell.py @@ -1,14 +1,6 @@ from contextlib import contextmanager -import functools import shlex -from subprocess import PIPE, Popen, call, DEVNULL - - -call_without_output = functools.partial(call, stdout=DEVNULL, stderr=DEVNULL) -""" -Uses subprocess.call to execute a command, but suppresses the output and -the errors. -""" +from subprocess import PIPE, Popen @contextmanager
{"golden_diff": "diff --git a/coalib/misc/Shell.py b/coalib/misc/Shell.py\n--- a/coalib/misc/Shell.py\n+++ b/coalib/misc/Shell.py\n@@ -1,14 +1,6 @@\n from contextlib import contextmanager\n-import functools\n import shlex\n-from subprocess import PIPE, Popen, call, DEVNULL\n-\n-\n-call_without_output = functools.partial(call, stdout=DEVNULL, stderr=DEVNULL)\n-\"\"\"\n-Uses subprocess.call to execute a command, but suppresses the output and\n-the errors.\n-\"\"\"\n+from subprocess import PIPE, Popen\n \n \n @contextmanager\n", "issue": "Remove call_without_output from Shell.py L7\nThis line was used by the requirement classes, it isnt used anymore as they use sarge, so it should be removed.\r\ndifficulty/newcomer\n", "before_files": [{"content": "from contextlib import contextmanager\nimport functools\nimport shlex\nfrom subprocess import PIPE, Popen, call, DEVNULL\n\n\ncall_without_output = functools.partial(call, stdout=DEVNULL, stderr=DEVNULL)\n\"\"\"\nUses subprocess.call to execute a command, but suppresses the output and\nthe errors.\n\"\"\"\n\n\n@contextmanager\ndef run_interactive_shell_command(command, **kwargs):\n \"\"\"\n Runs a single command in shell and provides stdout, stderr and stdin\n streams.\n\n This function creates a context manager that sets up the process (using\n ``subprocess.Popen()``), returns to caller and waits for process to exit on\n leaving.\n\n By default the process is opened in ``universal_newlines`` mode and creates\n pipes for all streams (stdout, stderr and stdin) using ``subprocess.PIPE``\n special value. These pipes are closed automatically, so if you want to get\n the contents of the streams you should retrieve them before the context\n manager exits.\n\n >>> with run_interactive_shell_command([\"echo\", \"TEXT\"]) as p:\n ... stdout = p.stdout\n ... stdout_text = stdout.read()\n >>> stdout_text\n 'TEXT\\\\n'\n >>> stdout.closed\n True\n\n Custom streams provided are not closed except of ``subprocess.PIPE``.\n\n >>> from tempfile import TemporaryFile\n >>> stream = TemporaryFile()\n >>> with run_interactive_shell_command([\"echo\", \"TEXT\"],\n ... stdout=stream) as p:\n ... stderr = p.stderr\n >>> stderr.closed\n True\n >>> stream.closed\n False\n\n :param command: The command to run on shell. This parameter can either\n be a sequence of arguments that are directly passed to\n the process or a string. A string gets splitted beforehand\n using ``shlex.split()``. If providing ``shell=True`` as a\n keyword-argument, no ``shlex.split()`` is performed and the\n command string goes directly to ``subprocess.Popen()``.\n :param kwargs: Additional keyword arguments to pass to\n ``subprocess.Popen`` that are used to spawn the process.\n :return: A context manager yielding the process started from the\n command.\n \"\"\"\n if not kwargs.get('shell', False) and isinstance(command, str):\n command = shlex.split(command)\n\n args = {'stdout': PIPE,\n 'stderr': PIPE,\n 'stdin': PIPE,\n 'universal_newlines': True}\n args.update(kwargs)\n\n process = Popen(command, **args)\n try:\n yield process\n finally:\n if args['stdout'] is PIPE:\n process.stdout.close()\n if args['stderr'] is PIPE:\n process.stderr.close()\n if args['stdin'] is PIPE:\n process.stdin.close()\n\n process.wait()\n\n\ndef run_shell_command(command, stdin=None, **kwargs):\n \"\"\"\n Runs a single command in shell and returns the read stdout and stderr data.\n\n This function waits for the process (created using ``subprocess.Popen()``)\n to exit. 
Effectively it wraps ``run_interactive_shell_command()`` and uses\n ``communicate()`` on the process.\n\n See also ``run_interactive_shell_command()``.\n\n :param command: The command to run on shell. This parameter can either\n be a sequence of arguments that are directly passed to\n the process or a string. A string gets splitted beforehand\n using ``shlex.split()``.\n :param stdin: Initial input to send to the process.\n :param kwargs: Additional keyword arguments to pass to\n ``subprocess.Popen`` that is used to spawn the process.\n :return: A tuple with ``(stdoutstring, stderrstring)``.\n \"\"\"\n with run_interactive_shell_command(command, **kwargs) as p:\n ret = p.communicate(stdin)\n return ret\n\n\ndef get_shell_type(): # pragma: no cover\n \"\"\"\n Finds the current shell type based on the outputs of common pre-defined\n variables in them. This is useful to identify which sort of escaping\n is required for strings.\n\n :return: The shell type. This can be either \"powershell\" if Windows\n Powershell is detected, \"cmd\" if command prompt is been\n detected or \"sh\" if it's neither of these.\n \"\"\"\n out = run_shell_command('echo $host.name', shell=True)[0]\n if out.strip() == 'ConsoleHost':\n return 'powershell'\n out = run_shell_command('echo $0', shell=True)[0]\n if out.strip() == '$0':\n return 'cmd'\n return 'sh'\n", "path": "coalib/misc/Shell.py"}]}
1,857
136
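The helper removed by this golden diff suppressed a subprocess's output and errors. For reference, a present-day equivalent using `subprocess.run`; this is a sketch, not code from the coala repository:

```python
from subprocess import DEVNULL, run

def call_without_output(args):
    """Run a command, discarding stdout and stderr; return the exit code."""
    return run(args, stdout=DEVNULL, stderr=DEVNULL).returncode
```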
gh_patches_debug_52259
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-6127
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `view.flows.add` command does not exist but the examples reference it #### Problem Description The `view.flows.add` command does not exist but the example `duplicate-modify-replay.py` shows this command being used. `replay.client` seems to perform both the "add to view" and "replay" function. </issue> <code> [start of examples/addons/duplicate-modify-replay.py] 1 """Take incoming HTTP requests and replay them with modified parameters.""" 2 from mitmproxy import ctx 3 4 5 def request(flow): 6 # Avoid an infinite loop by not replaying already replayed requests 7 if flow.is_replay == "request": 8 return 9 flow = flow.copy() 10 # Only interactive tools have a view. If we have one, add a duplicate entry 11 # for our flow. 12 if "view" in ctx.master.addons: 13 ctx.master.commands.call("view.flows.add", [flow]) 14 flow.request.path = "/changed" 15 ctx.master.commands.call("replay.client", [flow]) 16 [end of examples/addons/duplicate-modify-replay.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/addons/duplicate-modify-replay.py b/examples/addons/duplicate-modify-replay.py --- a/examples/addons/duplicate-modify-replay.py +++ b/examples/addons/duplicate-modify-replay.py @@ -10,6 +10,6 @@ # Only interactive tools have a view. If we have one, add a duplicate entry # for our flow. if "view" in ctx.master.addons: - ctx.master.commands.call("view.flows.add", [flow]) + ctx.master.commands.call("view.flows.duplicate", [flow]) flow.request.path = "/changed" ctx.master.commands.call("replay.client", [flow])
{"golden_diff": "diff --git a/examples/addons/duplicate-modify-replay.py b/examples/addons/duplicate-modify-replay.py\n--- a/examples/addons/duplicate-modify-replay.py\n+++ b/examples/addons/duplicate-modify-replay.py\n@@ -10,6 +10,6 @@\n # Only interactive tools have a view. If we have one, add a duplicate entry\n # for our flow.\n if \"view\" in ctx.master.addons:\n- ctx.master.commands.call(\"view.flows.add\", [flow])\n+ ctx.master.commands.call(\"view.flows.duplicate\", [flow])\n flow.request.path = \"/changed\"\n ctx.master.commands.call(\"replay.client\", [flow])\n", "issue": "`view.flows.add` command does not exist but the examples reference it\n#### Problem Description\r\n\r\nThe `view.flows.add` command does not exist but the example `duplicate-modify-replay.py` shows this command being used.\r\n\r\n`replay.client` seems to perform both the \"add to view\" and \"replay\" function.\n", "before_files": [{"content": "\"\"\"Take incoming HTTP requests and replay them with modified parameters.\"\"\"\nfrom mitmproxy import ctx\n\n\ndef request(flow):\n # Avoid an infinite loop by not replaying already replayed requests\n if flow.is_replay == \"request\":\n return\n flow = flow.copy()\n # Only interactive tools have a view. If we have one, add a duplicate entry\n # for our flow.\n if \"view\" in ctx.master.addons:\n ctx.master.commands.call(\"view.flows.add\", [flow])\n flow.request.path = \"/changed\"\n ctx.master.commands.call(\"replay.client\", [flow])\n", "path": "examples/addons/duplicate-modify-replay.py"}]}
769
153
gh_patches_debug_33382
rasdani/github-patches
git_diff
conan-io__conan-center-index-438
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [package] sqlite3/all: Discussion of default options Having a look at the options of the sqlite3 recipe you see most of them defaulted to false. However, there are some recipes like qt from bincrafters that already require this package with specific option enabled: https://github.com/bincrafters/conan-qt/blob/594e40f036adcbf22d23dc2da228f389d45a76dc/conanfile.py#L256 Moreover, the sqlite3 is provided with a custom *CMakeLists.txt* with the CMake options, so we could say there are *no defaults* for these options recommended by the library authors/maintainers. Additionally, these options are enabled by default in vcpkg repo: ``` -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_ENABLE_COLUMN_METADATA ``` This issue is open for discussion regarding the options of this package and its default values. cc/ @ericLemanissier @SSE4 @uilianries </issue> <code> [start of recipes/sqlite3/all/conanfile.py] 1 import os 2 from conans import ConanFile, CMake, tools 3 4 5 class ConanSqlite3(ConanFile): 6 name = "sqlite3" 7 description = "Self-contained, serverless, in-process SQL database engine." 8 url = "https://github.com/conan-io/conan-center-index" 9 homepage = "https://www.sqlite.org" 10 topics = ("conan", "sqlite", "database", "sql", "serverless") 11 license = "Public Domain" 12 generators = "cmake" 13 settings = "os", "compiler", "arch", "build_type" 14 exports_sources = ["CMakeLists.txt"] 15 options = {"shared": [True, False], 16 "fPIC": [True, False], 17 "threadsafe": [0, 1, 2], 18 "enable_column_metadata": [True, False], 19 "enable_explain_comments": [True, False], 20 "enable_fts3": [True, False], 21 "enable_fts4": [True, False], 22 "enable_fts5": [True, False], 23 "enable_json1": [True, False], 24 "enable_rtree": [True, False], 25 "omit_load_extension": [True, False] 26 } 27 default_options = {"shared": False, 28 "fPIC": True, 29 "threadsafe": 1, 30 "enable_column_metadata": False, 31 "enable_explain_comments": False, 32 "enable_fts3": False, 33 "enable_fts4": False, 34 "enable_fts5": False, 35 "enable_json1": False, 36 "enable_rtree": False, 37 "omit_load_extension": False 38 } 39 _source_subfolder = "source_subfolder" 40 41 def source(self): 42 tools.get(**self.conan_data["sources"][self.version]) 43 url = self.conan_data["sources"][self.version]["url"] 44 archive_name = os.path.basename(url) 45 archive_name = os.path.splitext(archive_name)[0] 46 os.rename(archive_name, self._source_subfolder) 47 48 def config_options(self): 49 if self.settings.os == "Windows": 50 del self.options.fPIC 51 52 def configure(self): 53 del self.settings.compiler.libcxx 54 del self.settings.compiler.cppstd 55 56 def _configure_cmake(self): 57 cmake = CMake(self) 58 cmake.definitions["THREADSAFE"] = self.options.threadsafe 59 cmake.definitions["ENABLE_COLUMN_METADATA"] = self.options.enable_column_metadata 60 cmake.definitions["ENABLE_EXPLAIN_COMMENTS"] = self.options.enable_explain_comments 61 cmake.definitions["ENABLE_FTS3"] = self.options.enable_fts3 62 cmake.definitions["ENABLE_FTS4"] = self.options.enable_fts4 63 cmake.definitions["ENABLE_FTS5"] = self.options.enable_fts5 64 cmake.definitions["ENABLE_JSON1"] = self.options.enable_json1 65 cmake.definitions["ENABLE_RTREE"] = self.options.enable_rtree 66 cmake.definitions["OMIT_LOAD_EXTENSION"] = self.options.omit_load_extension 67 cmake.definitions["HAVE_FDATASYNC"] = True 68 cmake.definitions["HAVE_GMTIME_R"] = True 69 
cmake.definitions["HAVE_LOCALTIME_R"] = True 70 cmake.definitions["HAVE_POSIX_FALLOCATE"] = True 71 cmake.definitions["HAVE_STRERROR_R"] = True 72 cmake.definitions["HAVE_USLEEP"] = True 73 if self.settings.os == "Windows": 74 cmake.definitions["HAVE_LOCALTIME_R"] = False 75 cmake.definitions["HAVE_POSIX_FALLOCATE"] = False 76 if tools.is_apple_os(self.settings.os): 77 cmake.definitions["HAVE_POSIX_FALLOCATE"] = False 78 if self.settings.os == "Android": 79 cmake.definitions["HAVE_POSIX_FALLOCATE"] = False 80 cmake.configure() 81 return cmake 82 83 def build(self): 84 cmake = self._configure_cmake() 85 cmake.build() 86 87 def package(self): 88 header = tools.load(os.path.join(self._source_subfolder, "sqlite3.h")) 89 license_content = header[3:header.find("***", 1)] 90 tools.save("LICENSE", license_content) 91 92 self.copy("LICENSE", dst="licenses") 93 94 cmake = self._configure_cmake() 95 cmake.install() 96 97 def package_info(self): 98 self.cpp_info.libs = tools.collect_libs(self) 99 if self.settings.os == "Linux": 100 if self.options.threadsafe: 101 self.cpp_info.libs.append("pthread") 102 if self.options.omit_load_extension == "False": 103 self.cpp_info.libs.append("dl") 104 [end of recipes/sqlite3/all/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/sqlite3/all/conanfile.py b/recipes/sqlite3/all/conanfile.py --- a/recipes/sqlite3/all/conanfile.py +++ b/recipes/sqlite3/all/conanfile.py @@ -22,19 +22,21 @@ "enable_fts5": [True, False], "enable_json1": [True, False], "enable_rtree": [True, False], - "omit_load_extension": [True, False] + "omit_load_extension": [True, False], + "enable_unlock_notify": [True, False] } default_options = {"shared": False, "fPIC": True, "threadsafe": 1, - "enable_column_metadata": False, + "enable_column_metadata": True, "enable_explain_comments": False, "enable_fts3": False, "enable_fts4": False, "enable_fts5": False, "enable_json1": False, - "enable_rtree": False, - "omit_load_extension": False + "enable_rtree": True, + "omit_load_extension": False, + "enable_unlock_notify": True } _source_subfolder = "source_subfolder" @@ -64,6 +66,7 @@ cmake.definitions["ENABLE_JSON1"] = self.options.enable_json1 cmake.definitions["ENABLE_RTREE"] = self.options.enable_rtree cmake.definitions["OMIT_LOAD_EXTENSION"] = self.options.omit_load_extension + cmake.definitions["SQLITE_ENABLE_UNLOCK_NOTIFY"] = self.options.enable_unlock_notify cmake.definitions["HAVE_FDATASYNC"] = True cmake.definitions["HAVE_GMTIME_R"] = True cmake.definitions["HAVE_LOCALTIME_R"] = True
{"golden_diff": "diff --git a/recipes/sqlite3/all/conanfile.py b/recipes/sqlite3/all/conanfile.py\n--- a/recipes/sqlite3/all/conanfile.py\n+++ b/recipes/sqlite3/all/conanfile.py\n@@ -22,19 +22,21 @@\n \"enable_fts5\": [True, False],\n \"enable_json1\": [True, False],\n \"enable_rtree\": [True, False],\n- \"omit_load_extension\": [True, False]\n+ \"omit_load_extension\": [True, False],\n+ \"enable_unlock_notify\": [True, False]\n }\n default_options = {\"shared\": False,\n \"fPIC\": True,\n \"threadsafe\": 1,\n- \"enable_column_metadata\": False,\n+ \"enable_column_metadata\": True,\n \"enable_explain_comments\": False,\n \"enable_fts3\": False,\n \"enable_fts4\": False,\n \"enable_fts5\": False,\n \"enable_json1\": False,\n- \"enable_rtree\": False,\n- \"omit_load_extension\": False\n+ \"enable_rtree\": True,\n+ \"omit_load_extension\": False,\n+ \"enable_unlock_notify\": True\n }\n _source_subfolder = \"source_subfolder\"\n \n@@ -64,6 +66,7 @@\n cmake.definitions[\"ENABLE_JSON1\"] = self.options.enable_json1\n cmake.definitions[\"ENABLE_RTREE\"] = self.options.enable_rtree\n cmake.definitions[\"OMIT_LOAD_EXTENSION\"] = self.options.omit_load_extension\n+ cmake.definitions[\"SQLITE_ENABLE_UNLOCK_NOTIFY\"] = self.options.enable_unlock_notify \n cmake.definitions[\"HAVE_FDATASYNC\"] = True\n cmake.definitions[\"HAVE_GMTIME_R\"] = True\n cmake.definitions[\"HAVE_LOCALTIME_R\"] = True\n", "issue": "[package] sqlite3/all: Discussion of default options\nHaving a look at the options of the sqlite3 recipe you see most of them defaulted to false. However, there are some recipes like qt from bincrafters that already require this package with specific option enabled:\r\n\r\nhttps://github.com/bincrafters/conan-qt/blob/594e40f036adcbf22d23dc2da228f389d45a76dc/conanfile.py#L256\r\n\r\nMoreover, the sqlite3 is provided with a custom *CMakeLists.txt* with the CMake options, so we could say there are *no defaults* for these options recommended by the library authors/maintainers.\r\n\r\nAdditionally, these options are enabled by default in vcpkg repo:\r\n\r\n```\r\n-DSQLITE_ENABLE_RTREE\r\n-DSQLITE_ENABLE_UNLOCK_NOTIFY\r\n-DSQLITE_ENABLE_COLUMN_METADATA\r\n```\r\n\r\nThis issue is open for discussion regarding the options of this package and its default values.\r\n\r\ncc/ @ericLemanissier @SSE4 @uilianries \n", "before_files": [{"content": "import os\nfrom conans import ConanFile, CMake, tools\n\n\nclass ConanSqlite3(ConanFile):\n name = \"sqlite3\"\n description = \"Self-contained, serverless, in-process SQL database engine.\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.sqlite.org\"\n topics = (\"conan\", \"sqlite\", \"database\", \"sql\", \"serverless\")\n license = \"Public Domain\"\n generators = \"cmake\"\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n exports_sources = [\"CMakeLists.txt\"]\n options = {\"shared\": [True, False],\n \"fPIC\": [True, False],\n \"threadsafe\": [0, 1, 2],\n \"enable_column_metadata\": [True, False],\n \"enable_explain_comments\": [True, False],\n \"enable_fts3\": [True, False],\n \"enable_fts4\": [True, False],\n \"enable_fts5\": [True, False],\n \"enable_json1\": [True, False],\n \"enable_rtree\": [True, False],\n \"omit_load_extension\": [True, False]\n }\n default_options = {\"shared\": False,\n \"fPIC\": True,\n \"threadsafe\": 1,\n \"enable_column_metadata\": False,\n \"enable_explain_comments\": False,\n \"enable_fts3\": False,\n \"enable_fts4\": False,\n \"enable_fts5\": False,\n \"enable_json1\": False,\n 
\"enable_rtree\": False,\n \"omit_load_extension\": False\n }\n _source_subfolder = \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n url = self.conan_data[\"sources\"][self.version][\"url\"]\n archive_name = os.path.basename(url)\n archive_name = os.path.splitext(archive_name)[0]\n os.rename(archive_name, self._source_subfolder)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions[\"THREADSAFE\"] = self.options.threadsafe\n cmake.definitions[\"ENABLE_COLUMN_METADATA\"] = self.options.enable_column_metadata\n cmake.definitions[\"ENABLE_EXPLAIN_COMMENTS\"] = self.options.enable_explain_comments\n cmake.definitions[\"ENABLE_FTS3\"] = self.options.enable_fts3\n cmake.definitions[\"ENABLE_FTS4\"] = self.options.enable_fts4\n cmake.definitions[\"ENABLE_FTS5\"] = self.options.enable_fts5\n cmake.definitions[\"ENABLE_JSON1\"] = self.options.enable_json1\n cmake.definitions[\"ENABLE_RTREE\"] = self.options.enable_rtree\n cmake.definitions[\"OMIT_LOAD_EXTENSION\"] = self.options.omit_load_extension\n cmake.definitions[\"HAVE_FDATASYNC\"] = True\n cmake.definitions[\"HAVE_GMTIME_R\"] = True\n cmake.definitions[\"HAVE_LOCALTIME_R\"] = True\n cmake.definitions[\"HAVE_POSIX_FALLOCATE\"] = True\n cmake.definitions[\"HAVE_STRERROR_R\"] = True\n cmake.definitions[\"HAVE_USLEEP\"] = True\n if self.settings.os == \"Windows\":\n cmake.definitions[\"HAVE_LOCALTIME_R\"] = False\n cmake.definitions[\"HAVE_POSIX_FALLOCATE\"] = False\n if tools.is_apple_os(self.settings.os):\n cmake.definitions[\"HAVE_POSIX_FALLOCATE\"] = False\n if self.settings.os == \"Android\":\n cmake.definitions[\"HAVE_POSIX_FALLOCATE\"] = False\n cmake.configure()\n return cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n header = tools.load(os.path.join(self._source_subfolder, \"sqlite3.h\"))\n license_content = header[3:header.find(\"***\", 1)]\n tools.save(\"LICENSE\", license_content)\n\n self.copy(\"LICENSE\", dst=\"licenses\")\n\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n if self.options.threadsafe:\n self.cpp_info.libs.append(\"pthread\")\n if self.options.omit_load_extension == \"False\":\n self.cpp_info.libs.append(\"dl\")\n", "path": "recipes/sqlite3/all/conanfile.py"}]}
1,970
406
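The motivating case in the issue is a consumer recipe that needs the sqlite3 options switched on. In Conan 1.x syntax that looks roughly like the sketch below; the option names come from the recipe above, while the consumer class name, the required version string, and the chosen values are assumptions:

```python
# Hypothetical consumer conanfile (Conan 1.x style).
from conans import ConanFile

class MyAppConan(ConanFile):
    requires = "sqlite3/3.30.1"  # version string is an assumption
    default_options = {
        "sqlite3:enable_column_metadata": True,
        "sqlite3:enable_rtree": True,
    }
```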
gh_patches_debug_26402
rasdani/github-patches
git_diff
statsmodels__statsmodels-6654
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Handle pathlib paths with `get_file_obj` #### Is your feature request related to a problem? Please describe I'm trying to read a statsmodels.tsa.statespace.sarimax.SARIMAXResults pickle file I've saved. I'd like to pass a pathlib.Path object as I find it easier to handle (I work with different machines, Mac and Windows). #### Describe the solution you'd like The function `open` already supports pathlib objects. Why not using it? You could replace `statsmodels.iolib.openfile.get_file_obj` with: ```python if _is_string_like(fname): return _open(fname, mode, encoding) try: # try opening it with 'open' return open(fname, mode, encoding) except TypeError: # else, maybe return context manager (please check if it's always TypeError) try: # Make sure the object has the write methods if 'r' in mode: fname.read if 'w' in mode or 'a' in mode: fname.write except AttributeError: raise ValueError('fname must be a string or a file-like object') return EmptyContextManager(fname) ``` This is just a suggestion though :) </issue> <code> [start of statsmodels/iolib/openfile.py] 1 """ 2 Handle file opening for read/write 3 """ 4 from numpy.lib._iotools import _is_string_like 5 6 7 class EmptyContextManager(object): 8 """ 9 This class is needed to allow file-like object to be used as 10 context manager, but without getting closed. 11 """ 12 def __init__(self, obj): 13 self._obj = obj 14 15 def __enter__(self): 16 '''When entering, return the embedded object''' 17 return self._obj 18 19 def __exit__(self, *args): 20 '''Do not hide anything''' 21 return False 22 23 def __getattr__(self, name): 24 return getattr(self._obj, name) 25 26 27 def _open(fname, mode, encoding): 28 if fname.endswith('.gz'): 29 import gzip 30 return gzip.open(fname, mode, encoding=encoding) 31 else: 32 return open(fname, mode, encoding=encoding) 33 34 35 def get_file_obj(fname, mode='r', encoding=None): 36 """ 37 Light wrapper to handle strings and let files (anything else) pass through. 38 39 It also handle '.gz' files. 40 41 Parameters 42 ---------- 43 fname : str or file-like object 44 File to open / forward 45 mode : str 46 Argument passed to the 'open' or 'gzip.open' function 47 encoding : str 48 For Python 3 only, specify the encoding of the file 49 50 Returns 51 ------- 52 A file-like object that is always a context-manager. If the `fname` was 53 already a file-like object, the returned context manager *will not 54 close the file*. 55 """ 56 if _is_string_like(fname): 57 return _open(fname, mode, encoding) 58 try: 59 # Make sure the object has the write methods 60 if 'r' in mode: 61 fname.read 62 if 'w' in mode or 'a' in mode: 63 fname.write 64 except AttributeError: 65 raise ValueError('fname must be a string or a file-like object') 66 return EmptyContextManager(fname) 67 [end of statsmodels/iolib/openfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/statsmodels/iolib/openfile.py b/statsmodels/iolib/openfile.py --- a/statsmodels/iolib/openfile.py +++ b/statsmodels/iolib/openfile.py @@ -34,13 +34,14 @@ def get_file_obj(fname, mode='r', encoding=None): """ - Light wrapper to handle strings and let files (anything else) pass through. + Light wrapper to handle strings, path objects and let files (anything else) + pass through. It also handle '.gz' files. Parameters ---------- - fname : str or file-like object + fname : str, path object or file-like object File to open / forward mode : str Argument passed to the 'open' or 'gzip.open' function @@ -56,11 +57,14 @@ if _is_string_like(fname): return _open(fname, mode, encoding) try: - # Make sure the object has the write methods - if 'r' in mode: - fname.read - if 'w' in mode or 'a' in mode: - fname.write - except AttributeError: - raise ValueError('fname must be a string or a file-like object') - return EmptyContextManager(fname) + return open(fname, mode, encoding=encoding) # handle pathlib-like objs + except TypeError: + try: + # Make sure the object has the write methods + if "r" in mode: + fname.read + if "w" in mode or "a" in mode: + fname.write + except AttributeError: + raise ValueError("fname must be a string or a file-like object") + return EmptyContextManager(fname)
{"golden_diff": "diff --git a/statsmodels/iolib/openfile.py b/statsmodels/iolib/openfile.py\n--- a/statsmodels/iolib/openfile.py\n+++ b/statsmodels/iolib/openfile.py\n@@ -34,13 +34,14 @@\n \n def get_file_obj(fname, mode='r', encoding=None):\n \"\"\"\n- Light wrapper to handle strings and let files (anything else) pass through.\n+ Light wrapper to handle strings, path objects and let files (anything else)\n+ pass through.\n \n It also handle '.gz' files.\n \n Parameters\n ----------\n- fname : str or file-like object\n+ fname : str, path object or file-like object\n File to open / forward\n mode : str\n Argument passed to the 'open' or 'gzip.open' function\n@@ -56,11 +57,14 @@\n if _is_string_like(fname):\n return _open(fname, mode, encoding)\n try:\n- # Make sure the object has the write methods\n- if 'r' in mode:\n- fname.read\n- if 'w' in mode or 'a' in mode:\n- fname.write\n- except AttributeError:\n- raise ValueError('fname must be a string or a file-like object')\n- return EmptyContextManager(fname)\n+ return open(fname, mode, encoding=encoding) # handle pathlib-like objs\n+ except TypeError:\n+ try:\n+ # Make sure the object has the write methods\n+ if \"r\" in mode:\n+ fname.read\n+ if \"w\" in mode or \"a\" in mode:\n+ fname.write\n+ except AttributeError:\n+ raise ValueError(\"fname must be a string or a file-like object\")\n+ return EmptyContextManager(fname)\n", "issue": "Handle pathlib paths with `get_file_obj`\n#### Is your feature request related to a problem? Please describe\r\nI'm trying to read a statsmodels.tsa.statespace.sarimax.SARIMAXResults pickle file I've saved. I'd like to pass a pathlib.Path object as I find it easier to handle (I work with different machines, Mac and Windows).\r\n\r\n#### Describe the solution you'd like\r\nThe function `open` already supports pathlib objects. 
Why not using it?\r\n\r\nYou could replace `statsmodels.iolib.openfile.get_file_obj` with:\r\n\r\n```python\r\nif _is_string_like(fname):\r\n return _open(fname, mode, encoding)\r\ntry: # try opening it with 'open'\r\n return open(fname, mode, encoding)\r\nexcept TypeError: # else, maybe return context manager (please check if it's always TypeError)\r\n try:\r\n # Make sure the object has the write methods\r\n if 'r' in mode:\r\n fname.read\r\n if 'w' in mode or 'a' in mode:\r\n fname.write\r\n except AttributeError:\r\n raise ValueError('fname must be a string or a file-like object')\r\n return EmptyContextManager(fname)\r\n```\r\n\r\nThis is just a suggestion though :)\n", "before_files": [{"content": "\"\"\"\nHandle file opening for read/write\n\"\"\"\nfrom numpy.lib._iotools import _is_string_like\n\n\nclass EmptyContextManager(object):\n \"\"\"\n This class is needed to allow file-like object to be used as\n context manager, but without getting closed.\n \"\"\"\n def __init__(self, obj):\n self._obj = obj\n\n def __enter__(self):\n '''When entering, return the embedded object'''\n return self._obj\n\n def __exit__(self, *args):\n '''Do not hide anything'''\n return False\n\n def __getattr__(self, name):\n return getattr(self._obj, name)\n\n\ndef _open(fname, mode, encoding):\n if fname.endswith('.gz'):\n import gzip\n return gzip.open(fname, mode, encoding=encoding)\n else:\n return open(fname, mode, encoding=encoding)\n\n\ndef get_file_obj(fname, mode='r', encoding=None):\n \"\"\"\n Light wrapper to handle strings and let files (anything else) pass through.\n\n It also handle '.gz' files.\n\n Parameters\n ----------\n fname : str or file-like object\n File to open / forward\n mode : str\n Argument passed to the 'open' or 'gzip.open' function\n encoding : str\n For Python 3 only, specify the encoding of the file\n\n Returns\n -------\n A file-like object that is always a context-manager. If the `fname` was\n already a file-like object, the returned context manager *will not\n close the file*.\n \"\"\"\n if _is_string_like(fname):\n return _open(fname, mode, encoding)\n try:\n # Make sure the object has the write methods\n if 'r' in mode:\n fname.read\n if 'w' in mode or 'a' in mode:\n fname.write\n except AttributeError:\n raise ValueError('fname must be a string or a file-like object')\n return EmptyContextManager(fname)\n", "path": "statsmodels/iolib/openfile.py"}]}
1,346
389
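The pattern adopted in this golden diff relies on the built-in `open` accepting `os.PathLike` objects while raising `TypeError` for file-like ones. A standalone, runnable sketch of the same dispatch, independent of statsmodels:

```python
import io

def open_or_passthrough(fname, mode="r", encoding=None):
    try:
        # Handles str and pathlib.Path alike; open() accepts os.PathLike.
        return open(fname, mode, encoding=encoding)
    except TypeError:
        # Already a file-like object: hand it back unchanged.
        return fname

buf = open_or_passthrough(io.StringIO("hello"))  # file-like passes through
assert buf.read() == "hello"
```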
gh_patches_debug_29404
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-2151
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ID sequence isn't being created for tables ## Description <!-- A clear and concise description of what the bug is. --> I cannot add a new record to a new table because the ID does not have a default value. I set up this table by: 1. Importing [library_books.tsv.zip](https://github.com/centerofci/mathesar/files/10273401/library_books.tsv.zip) as "Items" 2. Pulling "Author First Name" and "Author First Name" into an "Authors" table 3. Moving "Author Website" to "Authors" table 4. Pulling "Publishers" into a "Publishers" table 5. Pulling "Title", "Publication Year", "ISBN", "Authors_id", and "Publishers_id" into a "Books" table ## Expected behavior <!-- A clear and concise description of what you expected to happen. --> I should be able to add a new record! ## To Reproduce <!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. --> ~Try adding a new record on https://library.mathesar.org/mathesar_tables/4/tables/34/~ I needed to nuke this table since it was blocking my work. To reproduce, follow the steps under Description. ## Additional context <!-- Add any other context about the problem or screenshots here. --> This seems related to - #2116 - #1356 </issue> <code> [start of db/tables/operations/split.py] 1 from sqlalchemy import exists, func, literal, select 2 3 from db import constants 4 from db.columns.base import MathesarColumn 5 from db.columns.operations.alter import batch_alter_table_drop_columns 6 from db.columns.operations.select import get_column_names_from_attnums 7 from db.links.operations.create import create_foreign_key_link 8 from db.tables.operations.create import create_mathesar_table 9 from db.tables.operations.select import get_oid_from_table, reflect_table, reflect_table_from_oid 10 from db.metadata import get_empty_metadata 11 12 13 def _create_split_tables(extracted_table_name, extracted_columns, remainder_table_name, schema, engine, fk_column_name=None): 14 extracted_table = create_mathesar_table( 15 extracted_table_name, 16 schema, 17 extracted_columns, 18 engine, 19 ) 20 fk_column_name = fk_column_name if fk_column_name else f"{extracted_table.name}_{constants.ID}" 21 remainder_table_oid = get_oid_from_table(remainder_table_name, schema, engine) 22 extracted_table_oid = get_oid_from_table(extracted_table_name, schema, engine) 23 create_foreign_key_link(engine, schema, fk_column_name, remainder_table_oid, extracted_table_oid) 24 # TODO reuse metadata 25 remainder_table_with_fk_key = reflect_table(remainder_table_name, schema, engine, metadata=get_empty_metadata()) 26 return extracted_table, remainder_table_with_fk_key, fk_column_name 27 28 29 def _create_split_insert_stmt(old_table, extracted_table, extracted_columns, remainder_fk_name): 30 SPLIT_ID = f"{constants.MATHESAR_PREFIX}_split_column_alias" 31 extracted_column_names = [col.name for col in extracted_columns] 32 split_cte = select( 33 [ 34 old_table, 35 func.dense_rank().over(order_by=extracted_columns).label(SPLIT_ID) 36 ] 37 ).cte() 38 cte_extraction_columns = ( 39 [split_cte.columns[SPLIT_ID]] 40 + [split_cte.columns[n] for n in extracted_column_names] 41 ) 42 extract_sel = select( 43 cte_extraction_columns, 44 distinct=True 45 ) 46 extract_ins_cte = ( 47 extracted_table 48 .insert() 49 .from_select([constants.ID] + extracted_column_names, extract_sel) 50 .returning(literal(1)) 51 .cte() 52 ) 53 
fk_update_dict = {remainder_fk_name: split_cte.c[SPLIT_ID]} 54 split_ins = ( 55 old_table 56 .update().values(**fk_update_dict). 57 where(old_table.c[constants.ID] == split_cte.c[constants.ID], 58 exists(extract_ins_cte.select())) 59 ) 60 return split_ins 61 62 63 def extract_columns_from_table(old_table_oid, extracted_column_attnums, extracted_table_name, schema, engine, relationship_fk_column_name=None): 64 # TODO reuse metadata 65 old_table = reflect_table_from_oid(old_table_oid, engine, metadata=get_empty_metadata()) 66 old_table_name = old_table.name 67 old_columns = (MathesarColumn.from_column(col) for col in old_table.columns) 68 old_non_default_columns = [ 69 col for col in old_columns if not col.is_default 70 ] 71 # TODO reuse metadata 72 extracted_column_names = get_column_names_from_attnums(old_table_oid, extracted_column_attnums, engine, metadata=get_empty_metadata()) 73 extracted_columns = [ 74 col for col in old_non_default_columns if col.name in extracted_column_names 75 ] 76 with engine.begin() as conn: 77 extracted_table, remainder_table_with_fk_column, fk_column_name = _create_split_tables( 78 extracted_table_name, 79 extracted_columns, 80 old_table_name, 81 schema, 82 engine, 83 relationship_fk_column_name 84 ) 85 split_ins = _create_split_insert_stmt( 86 remainder_table_with_fk_column, 87 extracted_table, 88 extracted_columns, 89 fk_column_name, 90 ) 91 conn.execute(split_ins) 92 remainder_table_oid = get_oid_from_table(remainder_table_with_fk_column.name, schema, engine) 93 deletion_column_data = [ 94 {'attnum': column_attnum, 'delete': True} 95 for column_attnum in extracted_column_attnums 96 ] 97 batch_alter_table_drop_columns(remainder_table_oid, deletion_column_data, conn, engine) 98 return extracted_table, remainder_table_with_fk_column, fk_column_name 99 [end of db/tables/operations/split.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/db/tables/operations/split.py b/db/tables/operations/split.py --- a/db/tables/operations/split.py +++ b/db/tables/operations/split.py @@ -89,6 +89,8 @@ fk_column_name, ) conn.execute(split_ins) + update_pk_sequence_to_latest(conn, engine, extracted_table) + remainder_table_oid = get_oid_from_table(remainder_table_with_fk_column.name, schema, engine) deletion_column_data = [ {'attnum': column_attnum, 'delete': True} @@ -96,3 +98,28 @@ ] batch_alter_table_drop_columns(remainder_table_oid, deletion_column_data, conn, engine) return extracted_table, remainder_table_with_fk_column, fk_column_name + + +def update_pk_sequence_to_latest(conn, engine, extracted_table): + _preparer = engine.dialect.identifier_preparer + quoted_table_name = _preparer.quote(extracted_table.schema) + "." + _preparer.quote(extracted_table.name) + update_pk_sequence_stmt = func.setval( + # `pg_get_serial_sequence needs a string of the Table name + func.pg_get_serial_sequence( + quoted_table_name, + extracted_table.c[constants.ID].name + ), + # If the table can be empty, start from 1 instead of using Null + func.coalesce( + func.max(extracted_table.c[constants.ID]) + 1, + 1 + ), + # Set the sequence to use the last value of the sequence + # Setting is_called field to false, meaning that the next nextval will not advance the sequence before returning a value. + # We need to do it as our default coalesce value is 1 instead of 0 + # Refer the postgres docs https://www.postgresql.org/docs/current/functions-sequence.html + False + ) + conn.execute( + select(update_pk_sequence_stmt) + )
{"golden_diff": "diff --git a/db/tables/operations/split.py b/db/tables/operations/split.py\n--- a/db/tables/operations/split.py\n+++ b/db/tables/operations/split.py\n@@ -89,6 +89,8 @@\n fk_column_name,\n )\n conn.execute(split_ins)\n+ update_pk_sequence_to_latest(conn, engine, extracted_table)\n+\n remainder_table_oid = get_oid_from_table(remainder_table_with_fk_column.name, schema, engine)\n deletion_column_data = [\n {'attnum': column_attnum, 'delete': True}\n@@ -96,3 +98,28 @@\n ]\n batch_alter_table_drop_columns(remainder_table_oid, deletion_column_data, conn, engine)\n return extracted_table, remainder_table_with_fk_column, fk_column_name\n+\n+\n+def update_pk_sequence_to_latest(conn, engine, extracted_table):\n+ _preparer = engine.dialect.identifier_preparer\n+ quoted_table_name = _preparer.quote(extracted_table.schema) + \".\" + _preparer.quote(extracted_table.name)\n+ update_pk_sequence_stmt = func.setval(\n+ # `pg_get_serial_sequence needs a string of the Table name\n+ func.pg_get_serial_sequence(\n+ quoted_table_name,\n+ extracted_table.c[constants.ID].name\n+ ),\n+ # If the table can be empty, start from 1 instead of using Null\n+ func.coalesce(\n+ func.max(extracted_table.c[constants.ID]) + 1,\n+ 1\n+ ),\n+ # Set the sequence to use the last value of the sequence\n+ # Setting is_called field to false, meaning that the next nextval will not advance the sequence before returning a value.\n+ # We need to do it as our default coalesce value is 1 instead of 0\n+ # Refer the postgres docs https://www.postgresql.org/docs/current/functions-sequence.html\n+ False\n+ )\n+ conn.execute(\n+ select(update_pk_sequence_stmt)\n+ )\n", "issue": "ID sequence isn't being created for tables\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nI cannot add a new record to a new table because the ID does not have a default value. I set up this table by:\r\n1. Importing [library_books.tsv.zip](https://github.com/centerofci/mathesar/files/10273401/library_books.tsv.zip) as \"Items\"\r\n2. Pulling \"Author First Name\" and \"Author First Name\" into an \"Authors\" table\r\n3. Moving \"Author Website\" to \"Authors\" table\r\n4. Pulling \"Publishers\" into a \"Publishers\" table\r\n5. Pulling \"Title\", \"Publication Year\", \"ISBN\", \"Authors_id\", and \"Publishers_id\" into a \"Books\" table\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nI should be able to add a new record!\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n~Try adding a new record on https://library.mathesar.org/mathesar_tables/4/tables/34/~\r\n\r\nI needed to nuke this table since it was blocking my work. To reproduce, follow the steps under Description.\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem or screenshots here. 
-->\r\nThis seems related to\r\n- #2116 \r\n- #1356 \r\n\n", "before_files": [{"content": "from sqlalchemy import exists, func, literal, select\n\nfrom db import constants\nfrom db.columns.base import MathesarColumn\nfrom db.columns.operations.alter import batch_alter_table_drop_columns\nfrom db.columns.operations.select import get_column_names_from_attnums\nfrom db.links.operations.create import create_foreign_key_link\nfrom db.tables.operations.create import create_mathesar_table\nfrom db.tables.operations.select import get_oid_from_table, reflect_table, reflect_table_from_oid\nfrom db.metadata import get_empty_metadata\n\n\ndef _create_split_tables(extracted_table_name, extracted_columns, remainder_table_name, schema, engine, fk_column_name=None):\n extracted_table = create_mathesar_table(\n extracted_table_name,\n schema,\n extracted_columns,\n engine,\n )\n fk_column_name = fk_column_name if fk_column_name else f\"{extracted_table.name}_{constants.ID}\"\n remainder_table_oid = get_oid_from_table(remainder_table_name, schema, engine)\n extracted_table_oid = get_oid_from_table(extracted_table_name, schema, engine)\n create_foreign_key_link(engine, schema, fk_column_name, remainder_table_oid, extracted_table_oid)\n # TODO reuse metadata\n remainder_table_with_fk_key = reflect_table(remainder_table_name, schema, engine, metadata=get_empty_metadata())\n return extracted_table, remainder_table_with_fk_key, fk_column_name\n\n\ndef _create_split_insert_stmt(old_table, extracted_table, extracted_columns, remainder_fk_name):\n SPLIT_ID = f\"{constants.MATHESAR_PREFIX}_split_column_alias\"\n extracted_column_names = [col.name for col in extracted_columns]\n split_cte = select(\n [\n old_table,\n func.dense_rank().over(order_by=extracted_columns).label(SPLIT_ID)\n ]\n ).cte()\n cte_extraction_columns = (\n [split_cte.columns[SPLIT_ID]]\n + [split_cte.columns[n] for n in extracted_column_names]\n )\n extract_sel = select(\n cte_extraction_columns,\n distinct=True\n )\n extract_ins_cte = (\n extracted_table\n .insert()\n .from_select([constants.ID] + extracted_column_names, extract_sel)\n .returning(literal(1))\n .cte()\n )\n fk_update_dict = {remainder_fk_name: split_cte.c[SPLIT_ID]}\n split_ins = (\n old_table\n .update().values(**fk_update_dict).\n where(old_table.c[constants.ID] == split_cte.c[constants.ID],\n exists(extract_ins_cte.select()))\n )\n return split_ins\n\n\ndef extract_columns_from_table(old_table_oid, extracted_column_attnums, extracted_table_name, schema, engine, relationship_fk_column_name=None):\n # TODO reuse metadata\n old_table = reflect_table_from_oid(old_table_oid, engine, metadata=get_empty_metadata())\n old_table_name = old_table.name\n old_columns = (MathesarColumn.from_column(col) for col in old_table.columns)\n old_non_default_columns = [\n col for col in old_columns if not col.is_default\n ]\n # TODO reuse metadata\n extracted_column_names = get_column_names_from_attnums(old_table_oid, extracted_column_attnums, engine, metadata=get_empty_metadata())\n extracted_columns = [\n col for col in old_non_default_columns if col.name in extracted_column_names\n ]\n with engine.begin() as conn:\n extracted_table, remainder_table_with_fk_column, fk_column_name = _create_split_tables(\n extracted_table_name,\n extracted_columns,\n old_table_name,\n schema,\n engine,\n relationship_fk_column_name\n )\n split_ins = _create_split_insert_stmt(\n remainder_table_with_fk_column,\n extracted_table,\n extracted_columns,\n fk_column_name,\n )\n conn.execute(split_ins)\n 
remainder_table_oid = get_oid_from_table(remainder_table_with_fk_column.name, schema, engine)\n deletion_column_data = [\n {'attnum': column_attnum, 'delete': True}\n for column_attnum in extracted_column_attnums\n ]\n batch_alter_table_drop_columns(remainder_table_oid, deletion_column_data, conn, engine)\n return extracted_table, remainder_table_with_fk_column, fk_column_name\n", "path": "db/tables/operations/split.py"}]}
1954
444
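Aside — a minimal sketch of the sequence-reset technique the golden diff above applies. It assumes SQLAlchemy 1.4+ against a reachable PostgreSQL database; the DSN, table, and column names are hypothetical stand-ins, not Mathesar's real schema.

```python
from sqlalchemy import Column, Integer, MetaData, Table, create_engine, func, select

engine = create_engine("postgresql+psycopg2://user:pass@localhost/demo")  # assumed DSN
metadata = MetaData()
items = Table("items", metadata, Column("id", Integer, primary_key=True))

with engine.begin() as conn:
    # pg_get_serial_sequence() resolves the sequence backing items.id;
    # setval() moves it past the largest id already present, so the next
    # DEFAULT-generated id cannot collide with rows inserted with explicit ids.
    conn.execute(
        select(
            func.setval(
                func.pg_get_serial_sequence("items", "id"),
                # coalesce covers the empty-table case: restart at 1
                func.coalesce(func.max(items.c.id) + 1, 1),
                # is_called=False: the next nextval() returns this value as-is
                False,
            )
        )
    )
```

Without a step like this, a table populated by inserts that supply explicit ids keeps its sequence at its initial position, and the first row added afterwards through a DEFAULT id fails — the kind of failure described in the issue.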
gh_patches_debug_12548
rasdani/github-patches
git_diff
mabel-dev__opteryx-1691
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 🪲 VIEWs load error should be in debug mode only ### Thank you for taking the time to report a problem with Opteryx. _To help us to respond to your request we ask that you try to provide the below detail about the bug._ **Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._ **Expected behaviour** _A clear and concise description of what you expected to happen._ **Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._ ~~~sql ~~~ **Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._ </issue> <code> [start of opteryx/planner/views/__init__.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import orjson 14 15 from opteryx.planner.logical_planner import LogicalPlan 16 17 18 def _load_views(): 19 try: 20 with open("views.json", "rb") as defs: 21 return orjson.loads(defs.read()) 22 except Exception as err: 23 # DEBUG:: log (f"[OPTERYX] Unable to open views definition file. {err}") 24 return {} 25 26 27 VIEWS = _load_views() 28 29 30 def is_view(view_name: str) -> bool: 31 return view_name in VIEWS 32 33 34 def view_as_plan(view_name: str) -> LogicalPlan: 35 from opteryx.planner.logical_planner import do_logical_planning_phase 36 from opteryx.third_party import sqloxide 37 from opteryx.utils.sql import clean_statement 38 from opteryx.utils.sql import remove_comments 39 40 operation = VIEWS.get(view_name)["statement"] 41 42 clean_sql = clean_statement(remove_comments(operation)) 43 parsed_statements = sqloxide.parse_sql(clean_sql, dialect="mysql") 44 logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements)) 45 46 return logical_plan 47 [end of opteryx/planner/views/__init__.py] [start of opteryx/__version__.py] 1 __build__ = 523 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 """ 16 Store the version here so: 17 1) we don't load dependencies by storing it in __init__.py 18 2) we can import it in setup.py for the same reason 19 """ 20 from enum import Enum # isort: skip 21 22 23 class VersionStatus(Enum): 24 ALPHA = "alpha" 25 BETA = "beta" 26 RELEASE = "release" 27 28 29 _major = 0 30 _minor = 16 31 _revision = 0 32 _status = VersionStatus.ALPHA 33 34 __author__ = "@joocer" 35 __version__ = f"{_major}.{_minor}.{_revision}" + ( 36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else "" 37 ) 38 [end of opteryx/__version__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opteryx/__version__.py b/opteryx/__version__.py --- a/opteryx/__version__.py +++ b/opteryx/__version__.py @@ -1,4 +1,4 @@ -__build__ = 523 +__build__ = 526 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py --- a/opteryx/planner/views/__init__.py +++ b/opteryx/planner/views/__init__.py @@ -20,7 +20,7 @@ with open("views.json", "rb") as defs: return orjson.loads(defs.read()) except Exception as err: - print(f"[OPTERYX] Unable to open views definition file. {err}") + # DEBUG:: log (f"[OPTERYX] Unable to open views definition file. {err}") return {}
{"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 523\n+__build__ = 526\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py\n--- a/opteryx/planner/views/__init__.py\n+++ b/opteryx/planner/views/__init__.py\n@@ -20,7 +20,7 @@\n with open(\"views.json\", \"rb\") as defs:\n return orjson.loads(defs.read())\n except Exception as err:\n- print(f\"[OPTERYX] Unable to open views definition file. {err}\")\n+ # DEBUG:: log (f\"[OPTERYX] Unable to open views definition file. {err}\")\n return {}\n", "issue": "\ud83e\udeb2 VIEWs load error should be in debug mode only\n### Thank you for taking the time to report a problem with Opteryx.\r\n_To help us to respond to your request we ask that you try to provide the below detail about the bug._\r\n\r\n**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._\r\n\r\n\r\n**Expected behaviour** _A clear and concise description of what you expected to happen._\r\n\r\n\r\n**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._\r\n\r\n~~~sql\r\n\r\n~~~\r\n\r\n**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport orjson\n\nfrom opteryx.planner.logical_planner import LogicalPlan\n\n\ndef _load_views():\n try:\n with open(\"views.json\", \"rb\") as defs:\n return orjson.loads(defs.read())\n except Exception as err:\n # DEBUG:: log (f\"[OPTERYX] Unable to open views definition file. 
{err}\")\n return {}\n\n\nVIEWS = _load_views()\n\n\ndef is_view(view_name: str) -> bool:\n return view_name in VIEWS\n\n\ndef view_as_plan(view_name: str) -> LogicalPlan:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n from opteryx.utils.sql import remove_comments\n\n operation = VIEWS.get(view_name)[\"statement\"]\n\n clean_sql = clean_statement(remove_comments(operation))\n parsed_statements = sqloxide.parse_sql(clean_sql, dialect=\"mysql\")\n logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))\n\n return logical_plan\n", "path": "opteryx/planner/views/__init__.py"}, {"content": "__build__ = 523\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 16\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]}
1533
239
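Aside — the fix above simply downgrades an unconditional print to the project's commented-out `# DEBUG:: log` marker. For illustration only, here is one generic way to make such a message debug-only at runtime; the `OPTERYX_DEBUG` flag name is an assumption, not opteryx's actual mechanism.

```python
import os

DEBUG = os.environ.get("OPTERYX_DEBUG", "") not in ("", "0", "false")  # assumed flag name

def debug_log(message: str) -> None:
    # Emit diagnostics only when the debug flag is set; silent otherwise.
    if DEBUG:
        print(f"[OPTERYX] {message}")

try:
    with open("views.json", "rb") as defs:
        data = defs.read()
except Exception as err:
    # Mirrors the call site in the diff: a missing views.json is expected
    # and should not produce noise in normal (non-debug) runs.
    debug_log(f"Unable to open views definition file. {err}")
```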
gh_patches_debug_20772
rasdani/github-patches
git_diff
nvaccess__nvda-10144
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error in alpha with built-in audacity appmod ### Steps to reproduce: 1. open audacity. 2. select audio. ctrl+a works. 3. press alt+c, and press c until "compresser" is found. 4. press enter. 5. press tab. ### Actual behavior: ``` ERROR - eventHandler.executeEvent (23:07:33.553): error executing event: gainFocus on <NVDAObjects.IAccessible.IAccessible object at 0x03318FF0> with extra args of {} Traceback (most recent call last): File "eventHandler.pyc", line 150, in executeEvent File "eventHandler.pyc", line 163, in doPreGainFocus File "api.pyc", line 109, in setFocusObject File "baseObject.pyc", line 46, in __get__ File "baseObject.pyc", line 149, in _getPropertyViaCache File "NVDAObjects\IAccessible\__init__.pyc", line 1695, in _get_container File "IAccessibleHandler.pyc", line 950, in findGroupboxObject File "NVDAObjects\IAccessible\__init__.pyc", line 48, in getNVDAObjectFromEvent File "NVDAObjects\__init__.pyc", line 137, in __call__ File "appModules\audacity.pyc", line 15, in event_NVDAObject_init AttributeError: 'NoneType' object has no attribute 'replace' ``` ### Expected behavior: Items in the UI read. ### System configuration #### NVDA installed/portable/running from source: installed #### NVDA version: Version: alpha-18399,26485a09 #### Windows version: ^10 #### Name and version of other software in use when reproducing the issue: Audacity: 2.3.1 </issue> <code> [start of source/appModules/audacity.py] 1 # -*- coding: UTF-8 -*- 2 #appModules/audacity.py 3 #A part of NonVisual Desktop Access (NVDA) 4 #Copyright (C) 2006-2018 NV Access Limited, Robert Hänggi 5 #This file is covered by the GNU General Public License. 6 #See the file COPYING for more details. 7 8 import appModuleHandler 9 import controlTypes 10 11 class AppModule(appModuleHandler.AppModule): 12 13 def event_NVDAObject_init(self,obj): 14 if obj.windowClassName=="Button" and not obj.role in [controlTypes.ROLE_MENUBAR, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_POPUPMENU]: 15 obj.name=obj.name.replace('&','') 16 [end of source/appModules/audacity.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/source/appModules/audacity.py b/source/appModules/audacity.py --- a/source/appModules/audacity.py +++ b/source/appModules/audacity.py @@ -1,9 +1,8 @@ # -*- coding: UTF-8 -*- -#appModules/audacity.py -#A part of NonVisual Desktop Access (NVDA) -#Copyright (C) 2006-2018 NV Access Limited, Robert Hänggi -#This file is covered by the GNU General Public License. -#See the file COPYING for more details. +# A part of NonVisual Desktop Access (NVDA) +# Copyright (C) 2006-2019 NV Access Limited, Robert Hänggi, Łukasz Golonka +# This file is covered by the GNU General Public License. +# See the file COPYING for more details. import appModuleHandler import controlTypes @@ -11,5 +10,9 @@ class AppModule(appModuleHandler.AppModule): def event_NVDAObject_init(self,obj): - if obj.windowClassName=="Button" and not obj.role in [controlTypes.ROLE_MENUBAR, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_POPUPMENU]: + if ( + obj.windowClassName == "Button" + and obj.role not in [controlTypes.ROLE_MENUBAR, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_POPUPMENU] + and obj.name is not None + ): obj.name=obj.name.replace('&','')
{"golden_diff": "diff --git a/source/appModules/audacity.py b/source/appModules/audacity.py\n--- a/source/appModules/audacity.py\n+++ b/source/appModules/audacity.py\n@@ -1,9 +1,8 @@\n # -*- coding: UTF-8 -*-\r\n-#appModules/audacity.py\r\n-#A part of NonVisual Desktop Access (NVDA)\r\n-#Copyright (C) 2006-2018 NV Access Limited, Robert H\u00e4nggi\r\n-#This file is covered by the GNU General Public License.\r\n-#See the file COPYING for more details.\r\n+# A part of NonVisual Desktop Access (NVDA)\r\n+# Copyright (C) 2006-2019 NV Access Limited, Robert H\u00e4nggi, \u0141ukasz Golonka\r\n+# This file is covered by the GNU General Public License.\r\n+# See the file COPYING for more details.\r\n \r\n import appModuleHandler\r\n import controlTypes\r\n@@ -11,5 +10,9 @@\n class AppModule(appModuleHandler.AppModule):\r\n \r\n \tdef event_NVDAObject_init(self,obj):\r\n-\t\tif obj.windowClassName==\"Button\" and not obj.role in [controlTypes.ROLE_MENUBAR, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_POPUPMENU]:\r\n+\t\tif (\r\n+\t\t\tobj.windowClassName == \"Button\"\r\n+\t\t\tand obj.role not in [controlTypes.ROLE_MENUBAR, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_POPUPMENU]\r\n+\t\t\tand obj.name is not None\r\n+\t\t):\r\n \t\t\tobj.name=obj.name.replace('&','')\n", "issue": "Error in alpha with built-in audacity appmod\n### Steps to reproduce:\r\n\r\n1. open audacity.\r\n2. select audio. ctrl+a works.\r\n3. press alt+c, and press c until \"compresser\" is found.\r\n4. press enter.\r\n5. press tab.\r\n\r\n### Actual behavior:\r\n\r\n```\r\nERROR - eventHandler.executeEvent (23:07:33.553):\r\nerror executing event: gainFocus on <NVDAObjects.IAccessible.IAccessible object at 0x03318FF0> with extra args of {}\r\nTraceback (most recent call last):\r\n File \"eventHandler.pyc\", line 150, in executeEvent\r\n File \"eventHandler.pyc\", line 163, in doPreGainFocus\r\n File \"api.pyc\", line 109, in setFocusObject\r\n File \"baseObject.pyc\", line 46, in __get__\r\n File \"baseObject.pyc\", line 149, in _getPropertyViaCache\r\n File \"NVDAObjects\\IAccessible\\__init__.pyc\", line 1695, in _get_container\r\n File \"IAccessibleHandler.pyc\", line 950, in findGroupboxObject\r\n File \"NVDAObjects\\IAccessible\\__init__.pyc\", line 48, in getNVDAObjectFromEvent\r\n File \"NVDAObjects\\__init__.pyc\", line 137, in __call__\r\n File \"appModules\\audacity.pyc\", line 15, in event_NVDAObject_init\r\nAttributeError: 'NoneType' object has no attribute 'replace'\r\n```\r\n\r\n### Expected behavior:\r\n\r\nItems in the UI read.\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\ninstalled\r\n\r\n#### NVDA version:\r\nVersion: alpha-18399,26485a09\r\n\r\n\r\n#### Windows version:\r\n^10\r\n\r\n#### Name and version of other software in use when reproducing the issue:\r\nAudacity: 2.3.1\r\n\r\n\n", "before_files": [{"content": "# -*- coding: UTF-8 -*-\r\n#appModules/audacity.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2006-2018 NV Access Limited, Robert H\u00e4nggi\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport appModuleHandler\r\nimport controlTypes\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\tdef event_NVDAObject_init(self,obj):\r\n\t\tif obj.windowClassName==\"Button\" and not obj.role in [controlTypes.ROLE_MENUBAR, controlTypes.ROLE_MENUITEM, controlTypes.ROLE_POPUPMENU]:\r\n\t\t\tobj.name=obj.name.replace('&','')\r\n", "path": "source/appModules/audacity.py"}]}
1,160
340
gh_patches_debug_20232
rasdani/github-patches
git_diff
ansible__ansible-17394
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> network _template not checking arguments correctly ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME lib/ansible/plugins/action/net_template.py ##### ANSIBLE VERSION devel ##### CONFIGURATION ##### OS / ENVIRONMENT ##### SUMMARY ##### STEPS TO REPRODUCE ``` --- - name: Backup existing configuration eos_template: backup: yes ``` No other `vars` set same for `ios_template` ##### EXPECTED RESULTS `msg": "missing required arguments: host"` (or src) ##### ACTUAL RESULTS ``` File "/home/gundalow/git/ansible-inc/ansible/lib/ansible/plugins/action/net_template.py", line 43, in run self._handle_template() File "/home/gundalow/git/ansible-inc/ansible/lib/ansible/plugins/action/net_template.py", line 80, in _handle_template if os.path.isabs(src) or urlparse.urlsplit('src').scheme: File "/usr/lib/python2.7/posixpath.py", line 54, in isabs return s.startswith('/') AttributeError: 'NoneType' object has no attribute 'startswith' ``` </issue> <code> [start of lib/ansible/plugins/action/net_template.py] 1 # 2 # Copyright 2015 Peter Sprygada <[email protected]> 3 # 4 # This file is part of Ansible 5 # 6 # Ansible is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # Ansible is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 18 # 19 from __future__ import (absolute_import, division, print_function) 20 __metaclass__ = type 21 22 import sys 23 import os 24 import time 25 import glob 26 import urlparse 27 28 from ansible.plugins.action import ActionBase 29 from ansible.utils.boolean import boolean 30 from ansible.utils.unicode import to_unicode 31 32 BOOLEANS = ('true', 'false', 'yes', 'no') 33 34 class ActionModule(ActionBase): 35 36 TRANSFERS_FILES = False 37 38 def run(self, tmp=None, task_vars=None): 39 result = super(ActionModule, self).run(tmp, task_vars) 40 result['changed'] = False 41 42 try: 43 self._handle_template() 44 except ValueError as exc: 45 return dict(failed=True, msg=exc.message) 46 47 result.update(self._execute_module(module_name=self._task.action, 48 module_args=self._task.args, task_vars=task_vars)) 49 50 if self._task.args.get('backup') and result.get('_backup'): 51 # User requested backup and no error occurred in module. 52 # NOTE: If there is a parameter error, _backup key may not be in results. 
53 self._write_backup(task_vars['inventory_hostname'], result['_backup']) 54 55 if '_backup' in result: 56 del result['_backup'] 57 58 return result 59 60 def _get_working_path(self): 61 cwd = self._loader.get_basedir() 62 if self._task._role is not None: 63 cwd = self._task._role._role_path 64 return cwd 65 66 def _write_backup(self, host, contents): 67 backup_path = self._get_working_path() + '/backup' 68 if not os.path.exists(backup_path): 69 os.mkdir(backup_path) 70 for fn in glob.glob('%s/%s*' % (backup_path, host)): 71 os.remove(fn) 72 tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time())) 73 filename = '%s/%s_config.%s' % (backup_path, host, tstamp) 74 open(filename, 'w').write(contents) 75 76 def _handle_template(self): 77 src = self._task.args.get('src') 78 working_path = self._get_working_path() 79 80 if os.path.isabs(src) or urlparse.urlsplit('src').scheme: 81 source = src 82 else: 83 source = self._loader.path_dwim_relative(working_path, 'templates', src) 84 if not source: 85 source = self._loader.path_dwim_relative(working_path, src) 86 87 if not os.path.exists(source): 88 return 89 90 try: 91 with open(source, 'r') as f: 92 template_data = to_unicode(f.read()) 93 except IOError: 94 return dict(failed=True, msg='unable to load src file') 95 96 # Create a template search path in the following order: 97 # [working_path, self_role_path, dependent_role_paths, dirname(source)] 98 searchpath = [working_path] 99 if self._task._role is not None: 100 searchpath.append(self._task._role._role_path) 101 dep_chain = self._task._block.get_dep_chain() 102 if dep_chain is not None: 103 for role in dep_chain: 104 searchpath.append(role._role_path) 105 searchpath.append(os.path.dirname(source)) 106 self._templar.environment.loader.searchpath = searchpath 107 self._task.args['src'] = self._templar.template(template_data) 108 109 110 [end of lib/ansible/plugins/action/net_template.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/ansible/plugins/action/net_template.py b/lib/ansible/plugins/action/net_template.py --- a/lib/ansible/plugins/action/net_template.py +++ b/lib/ansible/plugins/action/net_template.py @@ -41,7 +41,7 @@ try: self._handle_template() - except ValueError as exc: + except (ValueError, AttributeError) as exc: return dict(failed=True, msg=exc.message) result.update(self._execute_module(module_name=self._task.action, @@ -75,9 +75,12 @@ def _handle_template(self): src = self._task.args.get('src') + if not src: + raise ValueError('missing required arguments: src') + working_path = self._get_working_path() - if os.path.isabs(src) or urlparse.urlsplit('src').scheme: + if os.path.isabs(src) or urlparse.urlsplit(src).scheme: source = src else: source = self._loader.path_dwim_relative(working_path, 'templates', src)
{"golden_diff": "diff --git a/lib/ansible/plugins/action/net_template.py b/lib/ansible/plugins/action/net_template.py\n--- a/lib/ansible/plugins/action/net_template.py\n+++ b/lib/ansible/plugins/action/net_template.py\n@@ -41,7 +41,7 @@\n \n try:\n self._handle_template()\n- except ValueError as exc:\n+ except (ValueError, AttributeError) as exc:\n return dict(failed=True, msg=exc.message)\n \n result.update(self._execute_module(module_name=self._task.action,\n@@ -75,9 +75,12 @@\n \n def _handle_template(self):\n src = self._task.args.get('src')\n+ if not src:\n+ raise ValueError('missing required arguments: src')\n+\n working_path = self._get_working_path()\n \n- if os.path.isabs(src) or urlparse.urlsplit('src').scheme:\n+ if os.path.isabs(src) or urlparse.urlsplit(src).scheme:\n source = src\n else:\n source = self._loader.path_dwim_relative(working_path, 'templates', src)\n", "issue": "network _template not checking arguments correctly\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\nlib/ansible/plugins/action/net_template.py\n##### ANSIBLE VERSION\n\ndevel\n##### CONFIGURATION\n##### OS / ENVIRONMENT\n##### SUMMARY\n##### STEPS TO REPRODUCE\n\n```\n\n---\n- name: Backup existing configuration\n eos_template:\n backup: yes\n```\n\nNo other `vars` set\n\nsame for `ios_template`\n##### EXPECTED RESULTS\n\n`msg\": \"missing required arguments: host\"` (or src)\n##### ACTUAL RESULTS\n\n```\n File \"/home/gundalow/git/ansible-inc/ansible/lib/ansible/plugins/action/net_template.py\", line 43, in run\n self._handle_template()\n File \"/home/gundalow/git/ansible-inc/ansible/lib/ansible/plugins/action/net_template.py\", line 80, in _handle_template\n if os.path.isabs(src) or urlparse.urlsplit('src').scheme:\n File \"/usr/lib/python2.7/posixpath.py\", line 54, in isabs\n return s.startswith('/')\nAttributeError: 'NoneType' object has no attribute 'startswith'\n```\n\n", "before_files": [{"content": "#\n# Copyright 2015 Peter Sprygada <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n#\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport sys\nimport os\nimport time\nimport glob\nimport urlparse\n\nfrom ansible.plugins.action import ActionBase\nfrom ansible.utils.boolean import boolean\nfrom ansible.utils.unicode import to_unicode\n\nBOOLEANS = ('true', 'false', 'yes', 'no')\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = False\n\n def run(self, tmp=None, task_vars=None):\n result = super(ActionModule, self).run(tmp, task_vars)\n result['changed'] = False\n\n try:\n self._handle_template()\n except ValueError as exc:\n return dict(failed=True, msg=exc.message)\n\n result.update(self._execute_module(module_name=self._task.action,\n module_args=self._task.args, task_vars=task_vars))\n\n if self._task.args.get('backup') and result.get('_backup'):\n # User requested backup and no error occurred in module.\n # NOTE: If there is a parameter error, _backup key may not be in results.\n self._write_backup(task_vars['inventory_hostname'], result['_backup'])\n\n if '_backup' in result:\n del result['_backup']\n\n return result\n\n def _get_working_path(self):\n cwd = self._loader.get_basedir()\n if self._task._role is not None:\n cwd = self._task._role._role_path\n return cwd\n\n def _write_backup(self, host, contents):\n backup_path = self._get_working_path() + '/backup'\n if not os.path.exists(backup_path):\n os.mkdir(backup_path)\n for fn in glob.glob('%s/%s*' % (backup_path, host)):\n os.remove(fn)\n tstamp = time.strftime(\"%Y-%m-%d@%H:%M:%S\", time.localtime(time.time()))\n filename = '%s/%s_config.%s' % (backup_path, host, tstamp)\n open(filename, 'w').write(contents)\n\n def _handle_template(self):\n src = self._task.args.get('src')\n working_path = self._get_working_path()\n\n if os.path.isabs(src) or urlparse.urlsplit('src').scheme:\n source = src\n else:\n source = self._loader.path_dwim_relative(working_path, 'templates', src)\n if not source:\n source = self._loader.path_dwim_relative(working_path, src)\n\n if not os.path.exists(source):\n return\n\n try:\n with open(source, 'r') as f:\n template_data = to_unicode(f.read())\n except IOError:\n return dict(failed=True, msg='unable to load src file')\n\n # Create a template search path in the following order:\n # [working_path, self_role_path, dependent_role_paths, dirname(source)]\n searchpath = [working_path]\n if self._task._role is not None:\n searchpath.append(self._task._role._role_path)\n dep_chain = self._task._block.get_dep_chain()\n if dep_chain is not None:\n for role in dep_chain:\n searchpath.append(role._role_path)\n searchpath.append(os.path.dirname(source))\n self._templar.environment.loader.searchpath = searchpath\n self._task.args['src'] = self._templar.template(template_data)\n\n\n", "path": "lib/ansible/plugins/action/net_template.py"}]}
1918
236
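Aside — a self-contained reproduction of the two defects the golden diff above fixes: the literal string `'src'` was passed to `urlsplit()` instead of the variable, and a missing `src` argument reached `os.path.isabs(None)`. The Python 3 spelling of `urlsplit` is used here; the plugin itself imports the Python 2 `urlparse` module.

```python
import os.path
from urllib.parse import urlsplit

def resolve_src(src):
    if not src:
        # Fail early in the module's own error style rather than with an
        # AttributeError from os.path.isabs(None).
        raise ValueError("missing required arguments: src")
    # urlsplit(src), not urlsplit('src'): the literal never has a scheme,
    # so remote template URLs were silently treated as relative paths.
    return os.path.isabs(src) or bool(urlsplit(src).scheme)

assert resolve_src("https://example.com/router.j2") is True
assert resolve_src("/etc/ansible/templates/router.j2") is True
assert resolve_src("templates/router.j2") is False
```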
gh_patches_debug_28348
rasdani/github-patches
git_diff
ibis-project__ibis-3263
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> docs: add blurb about the datafusion backend After #2918 lands, we need to mention it in the docs. </issue> <code> [start of ibis/backends/datafusion/__init__.py] 1 from __future__ import annotations 2 3 import re 4 from typing import Mapping 5 6 import datafusion as df 7 import pyarrow as pa 8 9 import ibis.common.exceptions as com 10 import ibis.expr.schema as sch 11 import ibis.expr.types as ir 12 from ibis.backends.base import BaseBackend 13 14 from .compiler import translate 15 16 17 def _to_pyarrow_table(frame): 18 batches = frame.collect() 19 if batches: 20 return pa.Table.from_batches(batches) 21 else: 22 # TODO(kszucs): file a bug to datafusion because the fields' 23 # nullability from frame.schema() is not always consistent 24 # with the first record batch's schema 25 return pa.Table.from_batches(batches, schema=frame.schema()) 26 27 28 class Backend(BaseBackend): 29 name = 'datafusion' 30 builder = None 31 32 @property 33 def version(self): 34 try: 35 import importlib.metadata as importlib_metadata 36 except ImportError: 37 # TODO: remove this when Python 3.7 support is dropped 38 import importlib_metadata 39 return importlib_metadata.version("datafusion") 40 41 def do_connect(self, config): 42 """ 43 Create a DataFusionClient for use with Ibis 44 45 Parameters 46 ---------- 47 config : DataFusionContext or dict 48 49 Returns 50 ------- 51 DataFusionClient 52 """ 53 if isinstance(config, df.ExecutionContext): 54 self._context = config 55 else: 56 self._context = df.ExecutionContext() 57 58 for name, path in config.items(): 59 strpath = str(path) 60 if strpath.endswith('.csv'): 61 self.register_csv(name, path) 62 elif strpath.endswith('.parquet'): 63 self.register_parquet(name, path) 64 else: 65 raise ValueError( 66 "Currently the DataFusion backend only supports CSV " 67 "files with the extension .csv and Parquet files with " 68 "the .parquet extension." 
69 ) 70 71 def current_database(self): 72 raise NotImplementedError() 73 74 def list_databases(self, like: str = None) -> list[str]: 75 raise NotImplementedError() 76 77 def list_tables(self, like: str = None, database: str = None) -> list[str]: 78 """List the available tables.""" 79 tables = list(self._context.tables()) 80 if like is not None: 81 pattern = re.compile(like) 82 return list(filter(lambda t: pattern.findall(t), tables)) 83 return tables 84 85 def table(self, name, schema=None): 86 catalog = self._context.catalog() 87 database = catalog.database('public') 88 table = database.table(name) 89 schema = sch.infer(table.schema) 90 return self.table_class(name, schema, self).to_expr() 91 92 def register_csv(self, name, path, schema=None): 93 self._context.register_csv(name, path, schema=schema) 94 95 def register_parquet(self, name, path, schema=None): 96 self._context.register_parquet(name, path, schema=schema) 97 98 def execute( 99 self, 100 expr: ir.Expr, 101 params: Mapping[ir.Expr, object] = None, 102 limit: str = 'default', 103 **kwargs, 104 ): 105 if isinstance(expr, ir.TableExpr): 106 frame = self.compile(expr, params, **kwargs) 107 table = _to_pyarrow_table(frame) 108 return table.to_pandas() 109 elif isinstance(expr, ir.ColumnExpr): 110 # expression must be named for the projection 111 expr = expr.name('tmp').to_projection() 112 frame = self.compile(expr, params, **kwargs) 113 table = _to_pyarrow_table(frame) 114 return table['tmp'].to_pandas() 115 elif isinstance(expr, ir.ScalarExpr): 116 if expr.op().root_tables(): 117 # there are associated datafusion tables so convert the expr 118 # to a selection which we can directly convert to a datafusion 119 # plan 120 expr = expr.name('tmp').to_projection() 121 frame = self.compile(expr, params, **kwargs) 122 else: 123 # doesn't have any tables associated so create a plan from a 124 # dummy datafusion table 125 compiled = self.compile(expr, params, **kwargs) 126 frame = self._context.empty_table().select(compiled) 127 table = _to_pyarrow_table(frame) 128 return table[0][0].as_py() 129 else: 130 raise com.IbisError( 131 f"Cannot execute expression of type: {type(expr)}" 132 ) 133 134 def compile( 135 self, expr: ir.Expr, params: Mapping[ir.Expr, object] = None, **kwargs 136 ): 137 return translate(expr) 138 [end of ibis/backends/datafusion/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ibis/backends/datafusion/__init__.py b/ibis/backends/datafusion/__init__.py --- a/ibis/backends/datafusion/__init__.py +++ b/ibis/backends/datafusion/__init__.py @@ -83,6 +83,20 @@ return tables def table(self, name, schema=None): + """Get an ibis expression representing a DataFusion table. + + Parameters + --------- + name + The name of the table to retreive + schema + An optional schema + + Returns + ------- + ibis.expr.types.TableExpr + A table expression + """ catalog = self._context.catalog() database = catalog.database('public') table = database.table(name) @@ -90,9 +104,31 @@ return self.table_class(name, schema, self).to_expr() def register_csv(self, name, path, schema=None): + """Register a CSV file with with `name` located at `path`. + + Parameters + ---------- + name + The name of the table + path + The path to the CSV file + schema + An optional schema + """ self._context.register_csv(name, path, schema=schema) def register_parquet(self, name, path, schema=None): + """Register a parquet file with with `name` located at `path`. + + Parameters + ---------- + name + The name of the table + path + The path to the parquet file + schema + An optional schema + """ self._context.register_parquet(name, path, schema=schema) def execute(
{"golden_diff": "diff --git a/ibis/backends/datafusion/__init__.py b/ibis/backends/datafusion/__init__.py\n--- a/ibis/backends/datafusion/__init__.py\n+++ b/ibis/backends/datafusion/__init__.py\n@@ -83,6 +83,20 @@\n return tables\n \n def table(self, name, schema=None):\n+ \"\"\"Get an ibis expression representing a DataFusion table.\n+\n+ Parameters\n+ ---------\n+ name\n+ The name of the table to retreive\n+ schema\n+ An optional schema\n+\n+ Returns\n+ -------\n+ ibis.expr.types.TableExpr\n+ A table expression\n+ \"\"\"\n catalog = self._context.catalog()\n database = catalog.database('public')\n table = database.table(name)\n@@ -90,9 +104,31 @@\n return self.table_class(name, schema, self).to_expr()\n \n def register_csv(self, name, path, schema=None):\n+ \"\"\"Register a CSV file with with `name` located at `path`.\n+\n+ Parameters\n+ ----------\n+ name\n+ The name of the table\n+ path\n+ The path to the CSV file\n+ schema\n+ An optional schema\n+ \"\"\"\n self._context.register_csv(name, path, schema=schema)\n \n def register_parquet(self, name, path, schema=None):\n+ \"\"\"Register a parquet file with with `name` located at `path`.\n+\n+ Parameters\n+ ----------\n+ name\n+ The name of the table\n+ path\n+ The path to the parquet file\n+ schema\n+ An optional schema\n+ \"\"\"\n self._context.register_parquet(name, path, schema=schema)\n \n def execute(\n", "issue": "docs: add blurb about the datafusion backend\nAfter #2918 lands, we need to mention it in the docs.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom typing import Mapping\n\nimport datafusion as df\nimport pyarrow as pa\n\nimport ibis.common.exceptions as com\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nfrom ibis.backends.base import BaseBackend\n\nfrom .compiler import translate\n\n\ndef _to_pyarrow_table(frame):\n batches = frame.collect()\n if batches:\n return pa.Table.from_batches(batches)\n else:\n # TODO(kszucs): file a bug to datafusion because the fields'\n # nullability from frame.schema() is not always consistent\n # with the first record batch's schema\n return pa.Table.from_batches(batches, schema=frame.schema())\n\n\nclass Backend(BaseBackend):\n name = 'datafusion'\n builder = None\n\n @property\n def version(self):\n try:\n import importlib.metadata as importlib_metadata\n except ImportError:\n # TODO: remove this when Python 3.7 support is dropped\n import importlib_metadata\n return importlib_metadata.version(\"datafusion\")\n\n def do_connect(self, config):\n \"\"\"\n Create a DataFusionClient for use with Ibis\n\n Parameters\n ----------\n config : DataFusionContext or dict\n\n Returns\n -------\n DataFusionClient\n \"\"\"\n if isinstance(config, df.ExecutionContext):\n self._context = config\n else:\n self._context = df.ExecutionContext()\n\n for name, path in config.items():\n strpath = str(path)\n if strpath.endswith('.csv'):\n self.register_csv(name, path)\n elif strpath.endswith('.parquet'):\n self.register_parquet(name, path)\n else:\n raise ValueError(\n \"Currently the DataFusion backend only supports CSV \"\n \"files with the extension .csv and Parquet files with \"\n \"the .parquet extension.\"\n )\n\n def current_database(self):\n raise NotImplementedError()\n\n def list_databases(self, like: str = None) -> list[str]:\n raise NotImplementedError()\n\n def list_tables(self, like: str = None, database: str = None) -> list[str]:\n \"\"\"List the available tables.\"\"\"\n tables = list(self._context.tables())\n if like is not None:\n 
pattern = re.compile(like)\n return list(filter(lambda t: pattern.findall(t), tables))\n return tables\n\n def table(self, name, schema=None):\n catalog = self._context.catalog()\n database = catalog.database('public')\n table = database.table(name)\n schema = sch.infer(table.schema)\n return self.table_class(name, schema, self).to_expr()\n\n def register_csv(self, name, path, schema=None):\n self._context.register_csv(name, path, schema=schema)\n\n def register_parquet(self, name, path, schema=None):\n self._context.register_parquet(name, path, schema=schema)\n\n def execute(\n self,\n expr: ir.Expr,\n params: Mapping[ir.Expr, object] = None,\n limit: str = 'default',\n **kwargs,\n ):\n if isinstance(expr, ir.TableExpr):\n frame = self.compile(expr, params, **kwargs)\n table = _to_pyarrow_table(frame)\n return table.to_pandas()\n elif isinstance(expr, ir.ColumnExpr):\n # expression must be named for the projection\n expr = expr.name('tmp').to_projection()\n frame = self.compile(expr, params, **kwargs)\n table = _to_pyarrow_table(frame)\n return table['tmp'].to_pandas()\n elif isinstance(expr, ir.ScalarExpr):\n if expr.op().root_tables():\n # there are associated datafusion tables so convert the expr\n # to a selection which we can directly convert to a datafusion\n # plan\n expr = expr.name('tmp').to_projection()\n frame = self.compile(expr, params, **kwargs)\n else:\n # doesn't have any tables associated so create a plan from a\n # dummy datafusion table\n compiled = self.compile(expr, params, **kwargs)\n frame = self._context.empty_table().select(compiled)\n table = _to_pyarrow_table(frame)\n return table[0][0].as_py()\n else:\n raise com.IbisError(\n f\"Cannot execute expression of type: {type(expr)}\"\n )\n\n def compile(\n self, expr: ir.Expr, params: Mapping[ir.Expr, object] = None, **kwargs\n ):\n return translate(expr)\n", "path": "ibis/backends/datafusion/__init__.py"}]}
1854
402
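Aside — a short usage sketch of the API the new docstrings describe, assuming the entry point follows ibis's usual `ibis.<backend>.connect` convention and that the `datafusion` dependency is installed. The file paths are hypothetical.

```python
import ibis

# Registering files up front mirrors do_connect()'s config handling above.
con = ibis.datafusion.connect({"trips": "trips.parquet"})
con.register_csv("zones", "zones.csv")  # tables can also be registered later

print(con.list_tables())        # ['trips', 'zones']
trips = con.table("trips")      # an ibis TableExpr backed by DataFusion
print(trips.count().execute())  # executed through DataFusion, returns a scalar
```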
gh_patches_debug_2007
rasdani/github-patches
git_diff
googleapis__google-cloud-python-5683
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 'datastore 1.7.0' Major changes are: - Add support for Python 3.7. - Drop support for Python 3.4. - Bugfix: query offsets (#4675). </issue> <code> [start of datastore/setup.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import io 16 import os 17 18 import setuptools 19 20 21 # Package metadata. 22 23 name = 'google-cloud-datastore' 24 description = 'Google Cloud Datastore API client library' 25 version = '1.6.0' 26 # Should be one of: 27 # 'Development Status :: 3 - Alpha' 28 # 'Development Status :: 4 - Beta' 29 # 'Development Status :: 5 - Production/Stable' 30 release_status = 'Development Status :: 5 - Production/Stable' 31 dependencies = [ 32 'google-cloud-core<0.29dev,>=0.28.0', 33 'google-api-core[grpc]<2.0.0dev,>=1.0.0', 34 ] 35 extras = { 36 } 37 38 39 # Setup boilerplate below this line. 40 41 package_root = os.path.abspath(os.path.dirname(__file__)) 42 43 readme_filename = os.path.join(package_root, 'README.rst') 44 with io.open(readme_filename, encoding='utf-8') as readme_file: 45 readme = readme_file.read() 46 47 # Only include packages under the 'google' namespace. Do not include tests, 48 # benchmarks, etc. 49 packages = [ 50 package for package in setuptools.find_packages() 51 if package.startswith('google')] 52 53 # Determine which namespaces are needed. 54 namespaces = ['google'] 55 if 'google.cloud' in packages: 56 namespaces.append('google.cloud') 57 58 59 setuptools.setup( 60 name=name, 61 version=version, 62 description=description, 63 long_description=readme, 64 author='Google LLC', 65 author_email='[email protected]', 66 license='Apache 2.0', 67 url='https://github.com/GoogleCloudPlatform/google-cloud-python', 68 classifiers=[ 69 release_status, 70 'Intended Audience :: Developers', 71 'License :: OSI Approved :: Apache Software License', 72 'Programming Language :: Python', 73 'Programming Language :: Python :: 2', 74 'Programming Language :: Python :: 2.7', 75 'Programming Language :: Python :: 3', 76 'Programming Language :: Python :: 3.4', 77 'Programming Language :: Python :: 3.5', 78 'Programming Language :: Python :: 3.6', 79 'Operating System :: OS Independent', 80 'Topic :: Internet', 81 ], 82 platforms='Posix; MacOS X; Windows', 83 packages=packages, 84 namespace_packages=namespaces, 85 install_requires=dependencies, 86 extras_require=extras, 87 include_package_data=True, 88 zip_safe=False, 89 ) 90 [end of datastore/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/datastore/setup.py b/datastore/setup.py --- a/datastore/setup.py +++ b/datastore/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-datastore' description = 'Google Cloud Datastore API client library' -version = '1.6.0' +version = '1.7.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta'
{"golden_diff": "diff --git a/datastore/setup.py b/datastore/setup.py\n--- a/datastore/setup.py\n+++ b/datastore/setup.py\n@@ -22,7 +22,7 @@\n \n name = 'google-cloud-datastore'\n description = 'Google Cloud Datastore API client library'\n-version = '1.6.0'\n+version = '1.7.0'\n # Should be one of:\n # 'Development Status :: 3 - Alpha'\n # 'Development Status :: 4 - Beta'\n", "issue": "Release 'datastore 1.7.0'\nMajor changes are:\r\n\r\n- Add support for Python 3.7.\r\n- Drop support for Python 3.4.\r\n- Bugfix: query offsets (#4675).\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-cloud-datastore'\ndescription = 'Google Cloud Datastore API client library'\nversion = '1.6.0'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 5 - Production/Stable'\ndependencies = [\n 'google-cloud-core<0.29dev,>=0.28.0',\n 'google-api-core[grpc]<2.0.0dev,>=1.0.0',\n]\nextras = {\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "datastore/setup.py"}]}
1389
106
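Editor's note on the `datastore/setup.py` row above: its golden diff is a one-line release bump (`version = '1.6.0'` to `'1.7.0'`). As a hedged sketch, not code from that repository, the snippet below shows the same rewrite applied mechanically; the helper name, the sample snippet, and the regex are assumptions for illustration only.

```python
import re

# Illustrative stand-in for the head of datastore/setup.py
SETUP_SNIPPET = "name = 'google-cloud-datastore'\nversion = '1.6.0'\n"

def bump_version(text: str, new_version: str) -> str:
    """Rewrite the single module-level `version = '...'` assignment."""
    updated, count = re.subn(
        r"^version = '[^']*'$",      # match exactly one version line
        f"version = '{new_version}'",
        text,
        count=1,
        flags=re.MULTILINE,
    )
    if count != 1:
        raise ValueError("expected exactly one version line")
    return updated

print(bump_version(SETUP_SNIPPET, "1.7.0"))  # mirrors the golden diff's change
```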
gh_patches_debug_2550
rasdani/github-patches
git_diff
semgrep__semgrep-rules-1051
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> python tempfile-without-flush: Don't require .write() for exception? **Describe the bug** The `python.lang.correctness.tempfile.flush.tempfile-without-flush` rule has [exceptions](https://github.com/returntocorp/semgrep-rules/blob/35e9f350a5d0190c936502fff179ba2c465c4438/python/lang/correctness/tempfile/flush.yaml#L5-L20) if `.write()` and then `.close()` or `.flush()` is used on a tempfile. However, this doesn't cover code which calls `.close()` immediately, without writing: ``` $ semgrep --config=p/r2c-ci --verbose test.py [...] severity:error rule:python.lang.correctness.tempfile.flush.tempfile-without-flush: possibly missing a .flush() or .close() call to temporary file handle; file may or may not exist when handle.name is used 3:handle = tempfile.NamedTemporaryFile(delete=False) 4:handle.close() 5:print(handle.name) ``` **To Reproduce** ```python3 import tempfile handle = tempfile.NamedTemporaryFile(delete=False) handle.close() print(handle.name) ``` **Expected behavior** It seems to me like the exception should trigger whenever `.close()` is invoked, no matter if `.write()` is called before that or not? python tempfile-without-flush: Don't require .write() for exception? **Describe the bug** The `python.lang.correctness.tempfile.flush.tempfile-without-flush` rule has [exceptions](https://github.com/returntocorp/semgrep-rules/blob/35e9f350a5d0190c936502fff179ba2c465c4438/python/lang/correctness/tempfile/flush.yaml#L5-L20) if `.write()` and then `.close()` or `.flush()` is used on a tempfile. However, this doesn't cover code which calls `.close()` immediately, without writing: ``` $ semgrep --config=p/r2c-ci --verbose test.py [...] severity:error rule:python.lang.correctness.tempfile.flush.tempfile-without-flush: possibly missing a .flush() or .close() call to temporary file handle; file may or may not exist when handle.name is used 3:handle = tempfile.NamedTemporaryFile(delete=False) 4:handle.close() 5:print(handle.name) ``` **To Reproduce** ```python3 import tempfile handle = tempfile.NamedTemporaryFile(delete=False) handle.close() print(handle.name) ``` **Expected behavior** It seems to me like the exception should trigger whenever `.close()` is invoked, no matter if `.write()` is called before that or not? 
</issue> <code> [start of python/lang/correctness/tempfile/flush.py] 1 import tempfile 2 3 import at 4 import tf 5 6 7 def main(): 8 with tempfile.NamedTemporaryFile("w") as fout: 9 debug_print(astr) 10 fout.write(astr) 11 # ok:tempfile-without-flush 12 fout.flush() 13 cmd = [binary_name, fout.name, *[str(path) for path in targets]] 14 15 16 def main_b(): 17 with tempfile.NamedTemporaryFile("w") as fout: 18 debug_print(astr) 19 fout.write(astr) 20 # ok:tempfile-without-flush 21 fout.close() 22 cmd = [binary_name, fout.name, *[str(path) for path in targets]] 23 24 25 def main_c(): 26 with tempfile.NamedTemporaryFile("w") as fout: 27 debug_print(astr) 28 fout.write(astr) 29 30 # ok:tempfile-without-flush 31 cmd = [binary_name, fout.name, *[str(path) for path in targets]] 32 33 34 def main_c(): 35 with tempfile.NamedTemporaryFile("w") as fout: 36 debug_print(astr) 37 fout.write(astr) 38 debug_print('wrote file') 39 40 # ruleid:tempfile-without-flush 41 cmd = [binary_name, fout.name, *[str(path) for path in targets]] 42 43 44 def main_d(): 45 fout = tempfile.NamedTemporaryFile('w') 46 debug_print(astr) 47 fout.write(astr) 48 49 # ruleid:tempfile-without-flush 50 fout.name 51 # ruleid:tempfile-without-flush 52 cmd = [binary_name, fout.name, *[str(path) for path in targets]] 53 54 55 def main_e(): 56 fout = tempfile.NamedTemporaryFile('w') 57 debug_print(astr) 58 fout.write(astr) 59 60 # ruleid:tempfile-without-flush 61 print(fout.name) 62 # ruleid:tempfile-without-flush 63 cmd = [binary_name, fout.name, *[str(path) for path in targets]] 64 [end of python/lang/correctness/tempfile/flush.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/lang/correctness/tempfile/flush.py b/python/lang/correctness/tempfile/flush.py --- a/python/lang/correctness/tempfile/flush.py +++ b/python/lang/correctness/tempfile/flush.py @@ -61,3 +61,12 @@ print(fout.name) # ruleid:tempfile-without-flush cmd = [binary_name, fout.name, *[str(path) for path in targets]] + + +def main_f(): + fout = tempfile.NamedTemporaryFile('w', delete=False) + debug_print(astr) + fout.close() + + # ok:tempfile-without-flush + print(fout.name)
{"golden_diff": "diff --git a/python/lang/correctness/tempfile/flush.py b/python/lang/correctness/tempfile/flush.py\n--- a/python/lang/correctness/tempfile/flush.py\n+++ b/python/lang/correctness/tempfile/flush.py\n@@ -61,3 +61,12 @@\n print(fout.name)\n # ruleid:tempfile-without-flush\n cmd = [binary_name, fout.name, *[str(path) for path in targets]]\n+\n+\n+def main_f():\n+ fout = tempfile.NamedTemporaryFile('w', delete=False)\n+ debug_print(astr)\n+ fout.close()\n+\n+ # ok:tempfile-without-flush\n+ print(fout.name)\n", "issue": "python tempfile-without-flush: Don't require .write() for exception?\n**Describe the bug**\r\nThe `python.lang.correctness.tempfile.flush.tempfile-without-flush` rule has [exceptions](https://github.com/returntocorp/semgrep-rules/blob/35e9f350a5d0190c936502fff179ba2c465c4438/python/lang/correctness/tempfile/flush.yaml#L5-L20) if `.write()` and then `.close()` or `.flush()` is used on a tempfile. However, this doesn't cover code which calls `.close()` immediately, without writing:\r\n\r\n```\r\n$ semgrep --config=p/r2c-ci --verbose test.py\r\n[...]\r\nseverity:error rule:python.lang.correctness.tempfile.flush.tempfile-without-flush: possibly missing a .flush() or .close() call to temporary file handle; file may or may not exist when handle.name is used\r\n3:handle = tempfile.NamedTemporaryFile(delete=False)\r\n4:handle.close()\r\n5:print(handle.name)\r\n```\r\n\r\n**To Reproduce**\r\n```python3\r\nimport tempfile\r\n\r\nhandle = tempfile.NamedTemporaryFile(delete=False)\r\nhandle.close()\r\nprint(handle.name)\r\n```\r\n\r\n**Expected behavior**\r\nIt seems to me like the exception should trigger whenever `.close()` is invoked, no matter if `.write()` is called before that or not?\npython tempfile-without-flush: Don't require .write() for exception?\n**Describe the bug**\r\nThe `python.lang.correctness.tempfile.flush.tempfile-without-flush` rule has [exceptions](https://github.com/returntocorp/semgrep-rules/blob/35e9f350a5d0190c936502fff179ba2c465c4438/python/lang/correctness/tempfile/flush.yaml#L5-L20) if `.write()` and then `.close()` or `.flush()` is used on a tempfile. 
However, this doesn't cover code which calls `.close()` immediately, without writing:\r\n\r\n```\r\n$ semgrep --config=p/r2c-ci --verbose test.py\r\n[...]\r\nseverity:error rule:python.lang.correctness.tempfile.flush.tempfile-without-flush: possibly missing a .flush() or .close() call to temporary file handle; file may or may not exist when handle.name is used\r\n3:handle = tempfile.NamedTemporaryFile(delete=False)\r\n4:handle.close()\r\n5:print(handle.name)\r\n```\r\n\r\n**To Reproduce**\r\n```python3\r\nimport tempfile\r\n\r\nhandle = tempfile.NamedTemporaryFile(delete=False)\r\nhandle.close()\r\nprint(handle.name)\r\n```\r\n\r\n**Expected behavior**\r\nIt seems to me like the exception should trigger whenever `.close()` is invoked, no matter if `.write()` is called before that or not?\n", "before_files": [{"content": "import tempfile\n\nimport at\nimport tf\n\n\ndef main():\n with tempfile.NamedTemporaryFile(\"w\") as fout:\n debug_print(astr)\n fout.write(astr)\n # ok:tempfile-without-flush\n fout.flush()\n cmd = [binary_name, fout.name, *[str(path) for path in targets]]\n\n\ndef main_b():\n with tempfile.NamedTemporaryFile(\"w\") as fout:\n debug_print(astr)\n fout.write(astr)\n # ok:tempfile-without-flush\n fout.close()\n cmd = [binary_name, fout.name, *[str(path) for path in targets]]\n\n\ndef main_c():\n with tempfile.NamedTemporaryFile(\"w\") as fout:\n debug_print(astr)\n fout.write(astr)\n\n # ok:tempfile-without-flush\n cmd = [binary_name, fout.name, *[str(path) for path in targets]]\n\n\ndef main_c():\n with tempfile.NamedTemporaryFile(\"w\") as fout:\n debug_print(astr)\n fout.write(astr)\n debug_print('wrote file')\n\n # ruleid:tempfile-without-flush\n cmd = [binary_name, fout.name, *[str(path) for path in targets]]\n\n\ndef main_d():\n fout = tempfile.NamedTemporaryFile('w')\n debug_print(astr)\n fout.write(astr)\n\n # ruleid:tempfile-without-flush\n fout.name\n # ruleid:tempfile-without-flush\n cmd = [binary_name, fout.name, *[str(path) for path in targets]]\n\n\ndef main_e():\n fout = tempfile.NamedTemporaryFile('w')\n debug_print(astr)\n fout.write(astr)\n\n # ruleid:tempfile-without-flush\n print(fout.name)\n # ruleid:tempfile-without-flush\n cmd = [binary_name, fout.name, *[str(path) for path in targets]]\n", "path": "python/lang/correctness/tempfile/flush.py"}]}
1686
154
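Editor's note on the semgrep `tempfile-without-flush` row above: the golden diff adds a `main_f` test case blessing `.close()` on a `NamedTemporaryFile(delete=False)` with no prior `.write()`. The standalone sketch below demonstrates why that pattern is safe; it is an illustration, not part of the rule's test file.

```python
import os
import tempfile

def make_placeholder_file() -> str:
    """Create an empty temp file and return its path once the handle is closed."""
    handle = tempfile.NamedTemporaryFile("w", delete=False)
    handle.close()  # the file already exists on disk; .name is now safe to use
    return handle.name

path = make_placeholder_file()
assert os.path.exists(path)  # exists even though nothing was written
os.unlink(path)              # caller must clean up because delete=False
```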
gh_patches_debug_14362
rasdani/github-patches
git_diff
python-poetry__poetry-620
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Arbitrary equality in pip freeze causes crash <!-- Hi there! Thank you for discovering and submitting an issue. Before you submit this; let's make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill these first. --> <!-- Checked checkbox should look like this: [x] --> - [ x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version. - [x ] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate. - [x ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). <!-- Once those are done, if you're able to fill in the following list with your information, it'd be very helpful to whoever handles the issue. --> - **MacOS High Sierra 10.13.6**: <!-- Replace with version + name --> - **Poetry 0.11.5**: <!-- Replace with version --> ## Issue <!-- Now feel free to write your issue, but please be descriptive! Thanks again 🙌 ❤️ --> Running `poetry install` when a package is installed such that pip freeze lists it with an arbitrary equality operator (ex. `package===1.6.1-642-g1e03d5e853`) causes an exception ``` [ValueError] Unable to parse "=1.6.1-642-g1e03d5e853". ``` Looks like the issue is the strict splitting on `==` in `poetry/repositories/installed_repository.py`. </issue> <code> [start of poetry/repositories/installed_repository.py] 1 from poetry.packages import Package 2 from poetry.utils.env import Env 3 4 from .repository import Repository 5 6 7 class InstalledRepository(Repository): 8 @classmethod 9 def load(cls, env): # type: (Env) -> InstalledRepository 10 """ 11 Load installed packages. 12 13 For now, it uses the pip "freeze" command. 14 """ 15 repo = cls() 16 17 freeze_output = env.run("pip", "freeze") 18 for line in freeze_output.split("\n"): 19 if "==" in line: 20 name, version = line.split("==") 21 repo.add_package(Package(name, version, version)) 22 elif line.startswith("-e "): 23 line = line[3:].strip() 24 if line.startswith("git+"): 25 url = line.lstrip("git+") 26 if "@" in url: 27 url, rev = url.rsplit("@", 1) 28 else: 29 rev = "master" 30 31 name = url.split("/")[-1].rstrip(".git") 32 if "#egg=" in rev: 33 rev, name = rev.split("#egg=") 34 35 package = Package(name, "0.0.0") 36 package.source_type = "git" 37 package.source_url = url 38 package.source_reference = rev 39 40 repo.add_package(package) 41 42 return repo 43 [end of poetry/repositories/installed_repository.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py --- a/poetry/repositories/installed_repository.py +++ b/poetry/repositories/installed_repository.py @@ -1,3 +1,5 @@ +import re + from poetry.packages import Package from poetry.utils.env import Env @@ -17,7 +19,7 @@ freeze_output = env.run("pip", "freeze") for line in freeze_output.split("\n"): if "==" in line: - name, version = line.split("==") + name, version = re.split("={2,3}", line) repo.add_package(Package(name, version, version)) elif line.startswith("-e "): line = line[3:].strip()
{"golden_diff": "diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py\n--- a/poetry/repositories/installed_repository.py\n+++ b/poetry/repositories/installed_repository.py\n@@ -1,3 +1,5 @@\n+import re\n+\n from poetry.packages import Package\n from poetry.utils.env import Env\n \n@@ -17,7 +19,7 @@\n freeze_output = env.run(\"pip\", \"freeze\")\n for line in freeze_output.split(\"\\n\"):\n if \"==\" in line:\n- name, version = line.split(\"==\")\n+ name, version = re.split(\"={2,3}\", line)\n repo.add_package(Package(name, version, version))\n elif line.startswith(\"-e \"):\n line = line[3:].strip()\n", "issue": "Arbitrary equality in pip freeze causes crash\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [ x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x ] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **MacOS High Sierra 10.13.6**: <!-- Replace with version + name -->\r\n- **Poetry 0.11.5**: <!-- Replace with version -->\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, but please be descriptive! Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\nRunning `poetry install` when a package is installed such that pip freeze lists it with an arbitrary equality operator (ex. `package===1.6.1-642-g1e03d5e853`) causes an exception\r\n\r\n```\r\n[ValueError]\r\nUnable to parse \"=1.6.1-642-g1e03d5e853\".\r\n```\r\n\r\nLooks like the issue is the strict splitting on `==` in `poetry/repositories/installed_repository.py`.\r\n\n", "before_files": [{"content": "from poetry.packages import Package\nfrom poetry.utils.env import Env\n\nfrom .repository import Repository\n\n\nclass InstalledRepository(Repository):\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n Load installed packages.\n\n For now, it uses the pip \"freeze\" command.\n \"\"\"\n repo = cls()\n\n freeze_output = env.run(\"pip\", \"freeze\")\n for line in freeze_output.split(\"\\n\"):\n if \"==\" in line:\n name, version = line.split(\"==\")\n repo.add_package(Package(name, version, version))\n elif line.startswith(\"-e \"):\n line = line[3:].strip()\n if line.startswith(\"git+\"):\n url = line.lstrip(\"git+\")\n if \"@\" in url:\n url, rev = url.rsplit(\"@\", 1)\n else:\n rev = \"master\"\n\n name = url.split(\"/\")[-1].rstrip(\".git\")\n if \"#egg=\" in rev:\n rev, name = rev.split(\"#egg=\")\n\n package = Package(name, \"0.0.0\")\n package.source_type = \"git\"\n package.source_url = url\n package.source_reference = rev\n\n repo.add_package(package)\n\n return repo\n", "path": "poetry/repositories/installed_repository.py"}]}
1273
175
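Editor's note on the poetry row above: the fix replaces a strict `line.split("==")` with `re.split("={2,3}", line)` so PEP 440 arbitrary-equality pins (`===`) parse too. A minimal, runnable sketch of that change, with made-up freeze lines:

```python
import re

freeze_lines = [
    "requests==2.19.1",                 # ordinary pinned release
    "package===1.6.1-642-g1e03d5e853",  # PEP 440 arbitrary equality
]

for line in freeze_lines:
    if "==" in line:
        # "={2,3}" consumes either "==" or "===" in a single greedy match
        name, version = re.split("={2,3}", line)
        print(name, version)
```

With the old `split("==")`, the second line would yield three fields and crash version parsing on the leftover `"=1.6.1-..."`.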
gh_patches_debug_94
rasdani/github-patches
git_diff
open-mmlab__mmsegmentation-658
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> error in train.py error in line 134 in mmsegmentation/tools/train.py model.init_weights() is error model.init_weight() is ok </issue> <code> [start of mmseg/__init__.py] 1 import mmcv 2 3 from .version import __version__, version_info 4 5 MMCV_MIN = '1.3.1' 6 MMCV_MAX = '1.4.0' 7 8 9 def digit_version(version_str): 10 digit_version = [] 11 for x in version_str.split('.'): 12 if x.isdigit(): 13 digit_version.append(int(x)) 14 elif x.find('rc') != -1: 15 patch_version = x.split('rc') 16 digit_version.append(int(patch_version[0]) - 1) 17 digit_version.append(int(patch_version[1])) 18 return digit_version 19 20 21 mmcv_min_version = digit_version(MMCV_MIN) 22 mmcv_max_version = digit_version(MMCV_MAX) 23 mmcv_version = digit_version(mmcv.__version__) 24 25 26 assert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \ 27 f'MMCV=={mmcv.__version__} is used but incompatible. ' \ 28 f'Please install mmcv>={mmcv_min_version}, <={mmcv_max_version}.' 29 30 __all__ = ['__version__', 'version_info'] 31 [end of mmseg/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mmseg/__init__.py b/mmseg/__init__.py --- a/mmseg/__init__.py +++ b/mmseg/__init__.py @@ -2,7 +2,7 @@ from .version import __version__, version_info -MMCV_MIN = '1.3.1' +MMCV_MIN = '1.3.7' MMCV_MAX = '1.4.0'
{"golden_diff": "diff --git a/mmseg/__init__.py b/mmseg/__init__.py\n--- a/mmseg/__init__.py\n+++ b/mmseg/__init__.py\n@@ -2,7 +2,7 @@\n \n from .version import __version__, version_info\n \n-MMCV_MIN = '1.3.1'\n+MMCV_MIN = '1.3.7'\n MMCV_MAX = '1.4.0'\n", "issue": "error in train.py\nerror in line 134 in mmsegmentation/tools/train.py \r\nmodel.init_weights() is error \r\nmodel.init_weight() is ok\r\n\n", "before_files": [{"content": "import mmcv\n\nfrom .version import __version__, version_info\n\nMMCV_MIN = '1.3.1'\nMMCV_MAX = '1.4.0'\n\n\ndef digit_version(version_str):\n digit_version = []\n for x in version_str.split('.'):\n if x.isdigit():\n digit_version.append(int(x))\n elif x.find('rc') != -1:\n patch_version = x.split('rc')\n digit_version.append(int(patch_version[0]) - 1)\n digit_version.append(int(patch_version[1]))\n return digit_version\n\n\nmmcv_min_version = digit_version(MMCV_MIN)\nmmcv_max_version = digit_version(MMCV_MAX)\nmmcv_version = digit_version(mmcv.__version__)\n\n\nassert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \\\n f'MMCV=={mmcv.__version__} is used but incompatible. ' \\\n f'Please install mmcv>={mmcv_min_version}, <={mmcv_max_version}.'\n\n__all__ = ['__version__', 'version_info']\n", "path": "mmseg/__init__.py"}]}
859
93
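Editor's note on the mmsegmentation row above: the golden diff only raises `MMCV_MIN` to `'1.3.7'` (the mmcv release that provides `init_weights()`), relying on the module's existing version gate. The sketch below mirrors that gate's `digit_version` logic so the comparison behavior can be checked in isolation; the sample version strings are examples, not pins from the repository.

```python
def digit_version(version_str):
    # Same parsing idea as mmseg/__init__.py: "1.3.7" -> [1, 3, 7];
    # release candidates like "1.4.0rc1" -> [1, 4, -1, 1], so an rc
    # sorts below its final release in list comparison.
    parts = []
    for x in version_str.split("."):
        if x.isdigit():
            parts.append(int(x))
        elif "rc" in x:
            major, rc = x.split("rc")
            parts.append(int(major) - 1)
            parts.append(int(rc))
    return parts

assert digit_version("1.3.7") <= digit_version("1.3.9") <= digit_version("1.4.0")
assert not (digit_version("1.3.7") <= digit_version("1.3.1"))  # old mmcv now rejected
```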
gh_patches_debug_1570
rasdani/github-patches
git_diff
statsmodels__statsmodels-578
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PatsyData detection buglet If exog is None, then is_using_patsy data utility function returns False. It's a quick fix, but I'm starting to use patsy a bit more internally for convenience (e.g., passing in names to plotting functions rather than having to pull out the data) so I'll make a ticket. </issue> <code> [start of statsmodels/tools/data.py] 1 """ 2 Compatibility tools for various data structure inputs 3 """ 4 5 #TODO: question: interpret_data 6 # looks good and could/should be merged with other check convertion functions we also have 7 # similar also to what Nathaniel mentioned for Formula 8 # good: if ndarray check passes then loading pandas is not triggered, 9 10 11 import numpy as np 12 13 def have_pandas(): 14 try: 15 import pandas 16 return True 17 except ImportError: 18 return False 19 except Exception: 20 return False 21 22 def have_patsy(): 23 try: 24 import patsy 25 return True 26 except ImportError: 27 return False 28 except Exception: 29 return False 30 31 def is_data_frame(obj): 32 if not have_pandas(): 33 return False 34 35 import pandas as pn 36 37 return isinstance(obj, pn.DataFrame) 38 39 def is_design_matrix(obj): 40 if not have_patsy(): 41 return False 42 43 from patsy import DesignMatrix 44 return isinstance(obj, DesignMatrix) 45 46 def _is_structured_ndarray(obj): 47 return isinstance(obj, np.ndarray) and obj.dtype.names is not None 48 49 def interpret_data(data, colnames=None, rownames=None): 50 """ 51 Convert passed data structure to form required by estimation classes 52 53 Parameters 54 ---------- 55 data : ndarray-like 56 colnames : sequence or None 57 May be part of data structure 58 rownames : sequence or None 59 60 Returns 61 ------- 62 (values, colnames, rownames) : (homogeneous ndarray, list) 63 """ 64 if isinstance(data, np.ndarray): 65 if _is_structured_ndarray(data): 66 if colnames is None: 67 colnames = data.dtype.names 68 values = struct_to_ndarray(data) 69 else: 70 values = data 71 72 if colnames is None: 73 colnames = ['Y_%d' % i for i in range(values.shape[1])] 74 elif is_data_frame(data): 75 # XXX: hack 76 data = data.dropna() 77 values = data.values 78 colnames = data.columns 79 rownames = data.index 80 else: # pragma: no cover 81 raise Exception('cannot handle other input types at the moment') 82 83 if not isinstance(colnames, list): 84 colnames = list(colnames) 85 86 # sanity check 87 if len(colnames) != values.shape[1]: 88 raise ValueError('length of colnames does not match number ' 89 'of columns in data') 90 91 if rownames is not None and len(rownames) != len(values): 92 raise ValueError('length of rownames does not match number ' 93 'of rows in data') 94 95 return values, colnames, rownames 96 97 def struct_to_ndarray(arr): 98 return arr.view((float, len(arr.dtype.names))) 99 100 def _is_using_ndarray_type(endog, exog): 101 return (type(endog) is np.ndarray and 102 (type(exog) is np.ndarray or exog is None)) 103 104 def _is_using_ndarray(endog, exog): 105 return (isinstance(endog, np.ndarray) and 106 (isinstance(exog, np.ndarray) or exog is None)) 107 108 def _is_using_pandas(endog, exog): 109 if not have_pandas(): 110 return False 111 from pandas import Series, DataFrame, WidePanel 112 klasses = (Series, DataFrame, WidePanel) 113 return (isinstance(endog, klasses) or isinstance(exog, klasses)) 114 115 def _is_array_like(endog, exog): 116 try: # do it like this in case of mixed types, ie., ndarray and list 117 endog = 
np.asarray(endog) 118 exog = np.asarray(exog) 119 return True 120 except: 121 return False 122 123 def _is_using_patsy(endog, exog): 124 # we get this when a structured array is passed through a formula 125 return is_design_matrix(endog) and is_design_matrix(exog) 126 127 [end of statsmodels/tools/data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/statsmodels/tools/data.py b/statsmodels/tools/data.py --- a/statsmodels/tools/data.py +++ b/statsmodels/tools/data.py @@ -122,5 +122,6 @@ def _is_using_patsy(endog, exog): # we get this when a structured array is passed through a formula - return is_design_matrix(endog) and is_design_matrix(exog) + return (is_design_matrix(endog) and + (is_design_matrix(exog) or exog is None))
{"golden_diff": "diff --git a/statsmodels/tools/data.py b/statsmodels/tools/data.py\n--- a/statsmodels/tools/data.py\n+++ b/statsmodels/tools/data.py\n@@ -122,5 +122,6 @@\n \n def _is_using_patsy(endog, exog):\n # we get this when a structured array is passed through a formula\n- return is_design_matrix(endog) and is_design_matrix(exog)\n+ return (is_design_matrix(endog) and\n+ (is_design_matrix(exog) or exog is None))\n", "issue": "PatsyData detection buglet\nIf exog is None, then is_using_patsy data utility function returns False. It's a quick fix, but I'm starting to use patsy a bit more internally for convenience (e.g., passing in names to plotting functions rather than having to pull out the data) so I'll make a ticket.\n\n", "before_files": [{"content": "\"\"\"\nCompatibility tools for various data structure inputs\n\"\"\"\n\n#TODO: question: interpret_data\n# looks good and could/should be merged with other check convertion functions we also have\n# similar also to what Nathaniel mentioned for Formula\n# good: if ndarray check passes then loading pandas is not triggered,\n\n\nimport numpy as np\n\ndef have_pandas():\n try:\n import pandas\n return True\n except ImportError:\n return False\n except Exception:\n return False\n\ndef have_patsy():\n try:\n import patsy\n return True\n except ImportError:\n return False\n except Exception:\n return False\n\ndef is_data_frame(obj):\n if not have_pandas():\n return False\n\n import pandas as pn\n\n return isinstance(obj, pn.DataFrame)\n\ndef is_design_matrix(obj):\n if not have_patsy():\n return False\n\n from patsy import DesignMatrix\n return isinstance(obj, DesignMatrix)\n\ndef _is_structured_ndarray(obj):\n return isinstance(obj, np.ndarray) and obj.dtype.names is not None\n\ndef interpret_data(data, colnames=None, rownames=None):\n \"\"\"\n Convert passed data structure to form required by estimation classes\n\n Parameters\n ----------\n data : ndarray-like\n colnames : sequence or None\n May be part of data structure\n rownames : sequence or None\n\n Returns\n -------\n (values, colnames, rownames) : (homogeneous ndarray, list)\n \"\"\"\n if isinstance(data, np.ndarray):\n if _is_structured_ndarray(data):\n if colnames is None:\n colnames = data.dtype.names\n values = struct_to_ndarray(data)\n else:\n values = data\n\n if colnames is None:\n colnames = ['Y_%d' % i for i in range(values.shape[1])]\n elif is_data_frame(data):\n # XXX: hack\n data = data.dropna()\n values = data.values\n colnames = data.columns\n rownames = data.index\n else: # pragma: no cover\n raise Exception('cannot handle other input types at the moment')\n\n if not isinstance(colnames, list):\n colnames = list(colnames)\n\n # sanity check\n if len(colnames) != values.shape[1]:\n raise ValueError('length of colnames does not match number '\n 'of columns in data')\n\n if rownames is not None and len(rownames) != len(values):\n raise ValueError('length of rownames does not match number '\n 'of rows in data')\n\n return values, colnames, rownames\n\ndef struct_to_ndarray(arr):\n return arr.view((float, len(arr.dtype.names)))\n\ndef _is_using_ndarray_type(endog, exog):\n return (type(endog) is np.ndarray and\n (type(exog) is np.ndarray or exog is None))\n\ndef _is_using_ndarray(endog, exog):\n return (isinstance(endog, np.ndarray) and\n (isinstance(exog, np.ndarray) or exog is None))\n\ndef _is_using_pandas(endog, exog):\n if not have_pandas():\n return False\n from pandas import Series, DataFrame, WidePanel\n klasses = (Series, DataFrame, WidePanel)\n return 
(isinstance(endog, klasses) or isinstance(exog, klasses))\n\ndef _is_array_like(endog, exog):\n try: # do it like this in case of mixed types, ie., ndarray and list\n endog = np.asarray(endog)\n exog = np.asarray(exog)\n return True\n except:\n return False\n\ndef _is_using_patsy(endog, exog):\n # we get this when a structured array is passed through a formula\n return is_design_matrix(endog) and is_design_matrix(exog)\n\n", "path": "statsmodels/tools/data.py"}]}
1728
119
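Editor's note on the statsmodels row above: the fix widens `_is_using_patsy` to accept `exog is None`. The self-contained sketch below reproduces the corrected predicate; the `FakeDesignMatrix` stub stands in for patsy's `DesignMatrix` so the example runs without patsy installed, whereas statsmodels uses the real `is_design_matrix()`.

```python
class FakeDesignMatrix:
    """Stub for patsy.DesignMatrix, for demonstration only."""

def is_design_matrix(obj):
    return isinstance(obj, FakeDesignMatrix)

def _is_using_patsy(endog, exog):
    # exog is None for models without regressors, so it must pass the check too
    return is_design_matrix(endog) and (is_design_matrix(exog) or exog is None)

y = FakeDesignMatrix()
assert _is_using_patsy(y, FakeDesignMatrix())  # both sides came from a formula
assert _is_using_patsy(y, None)                # endog-only formula no longer fails
assert not _is_using_patsy([1.0, 2.0], None)   # plain array data is still rejected
```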
gh_patches_debug_29826
rasdani/github-patches
git_diff
getsentry__sentry-python-593
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Memory leak introduced in 0.12.3 Dear maintainers, I recently noticed that my application containers started going out of memory fairly quickly. Upon investigation, I found that recent upgrade of sentry-sdk was causing this. Tested with a few versions of sentry-sdk using Python 3.7.6: sentry-sdk==0.12.2 -> memory usage stays at ~70 MiB, does not raise sentry-sdk==0.12.3 sentry-sdk==0.13.5 sentry-sdk @ master -> memory usage starts from ~70MiB and raises steadily until reaching memory limit and crashes container. Please see if you can reproduce this behaviour. If not, I'll try to put up a minimal reproducible example, but this should be fairly simple to see. My application does not do anything special really, just receives some millions of messages over MQ and out of those, some few thousands end up getting logged to Sentry due to exceptions occurring. This is using pretty much the default Sentry config, no integrations. Thanks for your help! </issue> <code> [start of sentry_sdk/worker.py] 1 import os 2 3 from threading import Thread, Lock 4 from time import sleep, time 5 from sentry_sdk._compat import queue, check_thread_support 6 from sentry_sdk.utils import logger 7 8 9 from sentry_sdk._types import MYPY 10 11 if MYPY: 12 from queue import Queue 13 from typing import Any 14 from typing import Optional 15 from typing import Callable 16 17 18 _TERMINATOR = object() 19 20 21 class BackgroundWorker(object): 22 def __init__(self): 23 # type: () -> None 24 check_thread_support() 25 self._queue = queue.Queue(-1) # type: Queue[Any] 26 self._lock = Lock() 27 self._thread = None # type: Optional[Thread] 28 self._thread_for_pid = None # type: Optional[int] 29 30 @property 31 def is_alive(self): 32 # type: () -> bool 33 if self._thread_for_pid != os.getpid(): 34 return False 35 if not self._thread: 36 return False 37 return self._thread.is_alive() 38 39 def _ensure_thread(self): 40 # type: () -> None 41 if not self.is_alive: 42 self.start() 43 44 def _timed_queue_join(self, timeout): 45 # type: (float) -> bool 46 deadline = time() + timeout 47 queue = self._queue 48 49 real_all_tasks_done = getattr( 50 queue, "all_tasks_done", None 51 ) # type: Optional[Any] 52 if real_all_tasks_done is not None: 53 real_all_tasks_done.acquire() 54 all_tasks_done = real_all_tasks_done # type: Optional[Any] 55 elif queue.__module__.startswith("eventlet."): 56 all_tasks_done = getattr(queue, "_cond", None) 57 else: 58 all_tasks_done = None 59 60 try: 61 while queue.unfinished_tasks: 62 delay = deadline - time() 63 if delay <= 0: 64 return False 65 if all_tasks_done is not None: 66 all_tasks_done.wait(timeout=delay) 67 else: 68 # worst case, we just poll the number of remaining tasks 69 sleep(0.1) 70 71 return True 72 finally: 73 if real_all_tasks_done is not None: 74 real_all_tasks_done.release() 75 76 def start(self): 77 # type: () -> None 78 with self._lock: 79 if not self.is_alive: 80 self._thread = Thread( 81 target=self._target, name="raven-sentry.BackgroundWorker" 82 ) 83 self._thread.setDaemon(True) 84 self._thread.start() 85 self._thread_for_pid = os.getpid() 86 87 def kill(self): 88 # type: () -> None 89 logger.debug("background worker got kill request") 90 with self._lock: 91 if self._thread: 92 self._queue.put_nowait(_TERMINATOR) 93 self._thread = None 94 self._thread_for_pid = None 95 96 def flush(self, timeout, callback=None): 97 # type: (float, Optional[Any]) -> None 98 logger.debug("background 
worker got flush request") 99 with self._lock: 100 if self.is_alive and timeout > 0.0: 101 self._wait_flush(timeout, callback) 102 logger.debug("background worker flushed") 103 104 def _wait_flush(self, timeout, callback): 105 # type: (float, Optional[Any]) -> None 106 initial_timeout = min(0.1, timeout) 107 if not self._timed_queue_join(initial_timeout): 108 pending = self._queue.qsize() 109 logger.debug("%d event(s) pending on flush", pending) 110 if callback is not None: 111 callback(pending, timeout) 112 self._timed_queue_join(timeout - initial_timeout) 113 114 def submit(self, callback): 115 # type: (Callable[[], None]) -> None 116 self._ensure_thread() 117 self._queue.put_nowait(callback) 118 119 def _target(self): 120 # type: () -> None 121 while True: 122 callback = self._queue.get() 123 try: 124 if callback is _TERMINATOR: 125 break 126 try: 127 callback() 128 except Exception: 129 logger.error("Failed processing job", exc_info=True) 130 finally: 131 self._queue.task_done() 132 sleep(0) 133 [end of sentry_sdk/worker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py --- a/sentry_sdk/worker.py +++ b/sentry_sdk/worker.py @@ -22,7 +22,7 @@ def __init__(self): # type: () -> None check_thread_support() - self._queue = queue.Queue(-1) # type: Queue[Any] + self._queue = queue.Queue(30) # type: Queue[Any] self._lock = Lock() self._thread = None # type: Optional[Thread] self._thread_for_pid = None # type: Optional[int] @@ -86,10 +86,18 @@ def kill(self): # type: () -> None + """ + Kill worker thread. Returns immediately. Not useful for + waiting on shutdown for events, use `flush` for that. + """ logger.debug("background worker got kill request") with self._lock: if self._thread: - self._queue.put_nowait(_TERMINATOR) + try: + self._queue.put_nowait(_TERMINATOR) + except queue.Full: + logger.debug("background worker queue full, kill failed") + self._thread = None self._thread_for_pid = None @@ -114,7 +122,10 @@ def submit(self, callback): # type: (Callable[[], None]) -> None self._ensure_thread() - self._queue.put_nowait(callback) + try: + self._queue.put_nowait(callback) + except queue.Full: + logger.debug("background worker queue full, dropping event") def _target(self): # type: () -> None
{"golden_diff": "diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py\n--- a/sentry_sdk/worker.py\n+++ b/sentry_sdk/worker.py\n@@ -22,7 +22,7 @@\n def __init__(self):\n # type: () -> None\n check_thread_support()\n- self._queue = queue.Queue(-1) # type: Queue[Any]\n+ self._queue = queue.Queue(30) # type: Queue[Any]\n self._lock = Lock()\n self._thread = None # type: Optional[Thread]\n self._thread_for_pid = None # type: Optional[int]\n@@ -86,10 +86,18 @@\n \n def kill(self):\n # type: () -> None\n+ \"\"\"\n+ Kill worker thread. Returns immediately. Not useful for\n+ waiting on shutdown for events, use `flush` for that.\n+ \"\"\"\n logger.debug(\"background worker got kill request\")\n with self._lock:\n if self._thread:\n- self._queue.put_nowait(_TERMINATOR)\n+ try:\n+ self._queue.put_nowait(_TERMINATOR)\n+ except queue.Full:\n+ logger.debug(\"background worker queue full, kill failed\")\n+\n self._thread = None\n self._thread_for_pid = None\n \n@@ -114,7 +122,10 @@\n def submit(self, callback):\n # type: (Callable[[], None]) -> None\n self._ensure_thread()\n- self._queue.put_nowait(callback)\n+ try:\n+ self._queue.put_nowait(callback)\n+ except queue.Full:\n+ logger.debug(\"background worker queue full, dropping event\")\n \n def _target(self):\n # type: () -> None\n", "issue": "Memory leak introduced in 0.12.3\nDear maintainers,\r\n\r\nI recently noticed that my application containers started going out of memory fairly quickly. Upon investigation, I found that recent upgrade of sentry-sdk was causing this.\r\n\r\nTested with a few versions of sentry-sdk using Python 3.7.6:\r\nsentry-sdk==0.12.2 -> memory usage stays at ~70 MiB, does not raise\r\n\r\nsentry-sdk==0.12.3\r\nsentry-sdk==0.13.5\r\nsentry-sdk @ master\r\n-> memory usage starts from ~70MiB and raises steadily until reaching memory limit and crashes container.\r\n\r\nPlease see if you can reproduce this behaviour. If not, I'll try to put up a minimal reproducible example, but this should be fairly simple to see. My application does not do anything special really, just receives some millions of messages over MQ and out of those, some few thousands end up getting logged to Sentry due to exceptions occurring. 
This is using pretty much the default Sentry config, no integrations.\r\n\r\nThanks for your help!\n", "before_files": [{"content": "import os\n\nfrom threading import Thread, Lock\nfrom time import sleep, time\nfrom sentry_sdk._compat import queue, check_thread_support\nfrom sentry_sdk.utils import logger\n\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from queue import Queue\n from typing import Any\n from typing import Optional\n from typing import Callable\n\n\n_TERMINATOR = object()\n\n\nclass BackgroundWorker(object):\n def __init__(self):\n # type: () -> None\n check_thread_support()\n self._queue = queue.Queue(-1) # type: Queue[Any]\n self._lock = Lock()\n self._thread = None # type: Optional[Thread]\n self._thread_for_pid = None # type: Optional[int]\n\n @property\n def is_alive(self):\n # type: () -> bool\n if self._thread_for_pid != os.getpid():\n return False\n if not self._thread:\n return False\n return self._thread.is_alive()\n\n def _ensure_thread(self):\n # type: () -> None\n if not self.is_alive:\n self.start()\n\n def _timed_queue_join(self, timeout):\n # type: (float) -> bool\n deadline = time() + timeout\n queue = self._queue\n\n real_all_tasks_done = getattr(\n queue, \"all_tasks_done\", None\n ) # type: Optional[Any]\n if real_all_tasks_done is not None:\n real_all_tasks_done.acquire()\n all_tasks_done = real_all_tasks_done # type: Optional[Any]\n elif queue.__module__.startswith(\"eventlet.\"):\n all_tasks_done = getattr(queue, \"_cond\", None)\n else:\n all_tasks_done = None\n\n try:\n while queue.unfinished_tasks:\n delay = deadline - time()\n if delay <= 0:\n return False\n if all_tasks_done is not None:\n all_tasks_done.wait(timeout=delay)\n else:\n # worst case, we just poll the number of remaining tasks\n sleep(0.1)\n\n return True\n finally:\n if real_all_tasks_done is not None:\n real_all_tasks_done.release()\n\n def start(self):\n # type: () -> None\n with self._lock:\n if not self.is_alive:\n self._thread = Thread(\n target=self._target, name=\"raven-sentry.BackgroundWorker\"\n )\n self._thread.setDaemon(True)\n self._thread.start()\n self._thread_for_pid = os.getpid()\n\n def kill(self):\n # type: () -> None\n logger.debug(\"background worker got kill request\")\n with self._lock:\n if self._thread:\n self._queue.put_nowait(_TERMINATOR)\n self._thread = None\n self._thread_for_pid = None\n\n def flush(self, timeout, callback=None):\n # type: (float, Optional[Any]) -> None\n logger.debug(\"background worker got flush request\")\n with self._lock:\n if self.is_alive and timeout > 0.0:\n self._wait_flush(timeout, callback)\n logger.debug(\"background worker flushed\")\n\n def _wait_flush(self, timeout, callback):\n # type: (float, Optional[Any]) -> None\n initial_timeout = min(0.1, timeout)\n if not self._timed_queue_join(initial_timeout):\n pending = self._queue.qsize()\n logger.debug(\"%d event(s) pending on flush\", pending)\n if callback is not None:\n callback(pending, timeout)\n self._timed_queue_join(timeout - initial_timeout)\n\n def submit(self, callback):\n # type: (Callable[[], None]) -> None\n self._ensure_thread()\n self._queue.put_nowait(callback)\n\n def _target(self):\n # type: () -> None\n while True:\n callback = self._queue.get()\n try:\n if callback is _TERMINATOR:\n break\n try:\n callback()\n except Exception:\n logger.error(\"Failed processing job\", exc_info=True)\n finally:\n self._queue.task_done()\n sleep(0)\n", "path": "sentry_sdk/worker.py"}]}
1961
391
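Editor's note on the sentry-sdk row above: the leak fix bounds the worker queue at 30 entries and drops submissions on `queue.Full` instead of growing without limit. A hedged, standalone sketch of that core idea; the size 30 mirrors the golden diff, everything else is illustrative.

```python
import queue

work = queue.Queue(30)  # bounded: at most 30 pending events, as in the patch

def submit(callback) -> bool:
    """Enqueue a job, silently dropping it when the worker is backed up."""
    try:
        work.put_nowait(callback)
        return True
    except queue.Full:
        # Dropping trades completeness for bounded memory, which is the
        # whole point of the 0.12.3 leak fix.
        return False

accepted = sum(submit(lambda: None) for _ in range(100))
print(f"accepted {accepted} of 100 jobs")  # prints: accepted 30 of 100 jobs
```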
gh_patches_debug_7727
rasdani/github-patches
git_diff
bridgecrewio__checkov-5696
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Terraform check "CKV_AZURE_158" - Typo within the name **Describe the issue** It seems that there is a typo within the name of the check CKV_AZURE_158 for Terraform resources. **Examples** ````python from checkov.common.models.enums import CheckCategories, CheckResult from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck from typing import List, Any class DatabricksWorkspaceIsNotPublic(BaseResourceNegativeValueCheck): def __init__(self): name = "Ensure that databricks workspace has not public" id = "CKV_AZURE_158" supported_resources = ['azurerm_databricks_workspace'] categories = [CheckCategories.NETWORKING] super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources, missing_attribute_result=CheckResult.FAILED) def get_inspected_key(self) -> str: return 'public_network_access_enabled' def get_forbidden_values(self) -> List[Any]: return [True] check = DatabricksWorkspaceIsNotPublic() ```` **Version (please complete the following information):** Current **Additional context** N/A </issue> <code> [start of checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py] 1 from checkov.common.models.enums import CheckCategories, CheckResult 2 from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck 3 from typing import List, Any 4 5 6 class DatabricksWorkspaceIsNotPublic(BaseResourceNegativeValueCheck): 7 def __init__(self): 8 name = "Ensure that databricks workspace has not public" 9 id = "CKV_AZURE_158" 10 supported_resources = ['azurerm_databricks_workspace'] 11 categories = [CheckCategories.NETWORKING] 12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources, 13 missing_attribute_result=CheckResult.FAILED) 14 15 def get_inspected_key(self) -> str: 16 return 'public_network_access_enabled' 17 18 def get_forbidden_values(self) -> List[Any]: 19 return [True] 20 21 22 check = DatabricksWorkspaceIsNotPublic() 23 [end of checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py b/checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py --- a/checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py +++ b/checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py @@ -5,7 +5,7 @@ class DatabricksWorkspaceIsNotPublic(BaseResourceNegativeValueCheck): def __init__(self): - name = "Ensure that databricks workspace has not public" + name = "Ensure that databricks workspace is not public" id = "CKV_AZURE_158" supported_resources = ['azurerm_databricks_workspace'] categories = [CheckCategories.NETWORKING]
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py b/checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py\n--- a/checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py\n+++ b/checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py\n@@ -5,7 +5,7 @@\n \n class DatabricksWorkspaceIsNotPublic(BaseResourceNegativeValueCheck):\n def __init__(self):\n- name = \"Ensure that databricks workspace has not public\"\n+ name = \"Ensure that databricks workspace is not public\"\n id = \"CKV_AZURE_158\"\n supported_resources = ['azurerm_databricks_workspace']\n categories = [CheckCategories.NETWORKING]\n", "issue": "Terraform check \"CKV_AZURE_158\" - Typo within the name\n**Describe the issue**\r\nIt seems that there is a typo within the name of the check CKV_AZURE_158 for Terraform resources.\r\n\r\n**Examples**\r\n````python\r\nfrom checkov.common.models.enums import CheckCategories, CheckResult\r\nfrom checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck\r\nfrom typing import List, Any\r\n\r\n\r\nclass DatabricksWorkspaceIsNotPublic(BaseResourceNegativeValueCheck):\r\n def __init__(self):\r\n name = \"Ensure that databricks workspace has not public\"\r\n id = \"CKV_AZURE_158\"\r\n supported_resources = ['azurerm_databricks_workspace']\r\n categories = [CheckCategories.NETWORKING]\r\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,\r\n missing_attribute_result=CheckResult.FAILED)\r\n\r\n def get_inspected_key(self) -> str:\r\n return 'public_network_access_enabled'\r\n\r\n def get_forbidden_values(self) -> List[Any]:\r\n return [True]\r\n\r\n\r\ncheck = DatabricksWorkspaceIsNotPublic()\r\n````\r\n\r\n\r\n**Version (please complete the following information):**\r\n Current\r\n \r\n \r\n**Additional context**\r\nN/A\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck\nfrom typing import List, Any\n\n\nclass DatabricksWorkspaceIsNotPublic(BaseResourceNegativeValueCheck):\n def __init__(self):\n name = \"Ensure that databricks workspace has not public\"\n id = \"CKV_AZURE_158\"\n supported_resources = ['azurerm_databricks_workspace']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,\n missing_attribute_result=CheckResult.FAILED)\n\n def get_inspected_key(self) -> str:\n return 'public_network_access_enabled'\n\n def get_forbidden_values(self) -> List[Any]:\n return [True]\n\n\ncheck = DatabricksWorkspaceIsNotPublic()\n", "path": "checkov/terraform/checks/resource/azure/DatabricksWorkspaceIsNotPublic.py"}]}
1066
178
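Editor's note on the checkov row above: the golden diff is purely a wording fix to the check's `name` ("has not public" to "is not public"). For context, the sketch below restates the negative-value logic such a check expresses; it is an illustration of the pattern, not checkov internals.

```python
# Evaluate a CKV_AZURE_158-style negative-value check against a sample
# terraform resource config (the config dict here is made up).
resource_conf = {"public_network_access_enabled": True}

inspected_key = "public_network_access_enabled"
forbidden_values = [True]
missing_attribute_result = "FAILED"  # the check also fails if the key is absent

if inspected_key not in resource_conf:
    result = missing_attribute_result
elif resource_conf[inspected_key] in forbidden_values:
    result = "FAILED"
else:
    result = "PASSED"

print(result)  # FAILED: the workspace is publicly reachable
```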
gh_patches_debug_63151
rasdani/github-patches
git_diff
frappe__frappe-7705
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Hard coded localhost for pgsql setup The postgresql setup is using `localhost` instead of the `db_host` from the site configuration: https://github.com/frappe/frappe/blob/17c43b4e1d2506e5b1498e52f3aa6c81277d67be/frappe/database/postgres/setup_db.py#L20 This makes it impossible to use Postgres on a remote host :/ I can try to provide a PR for that. I do not guaranty I will have the time to test it but it's fairly simple. </issue> <code> [start of frappe/database/postgres/setup_db.py] 1 import frappe, subprocess, os 2 from six.moves import input 3 4 def setup_database(force, source_sql, verbose): 5 root_conn = get_root_connection() 6 root_conn.commit() 7 root_conn.sql("DROP DATABASE IF EXISTS `{0}`".format(frappe.conf.db_name)) 8 root_conn.sql("DROP USER IF EXISTS {0}".format(frappe.conf.db_name)) 9 root_conn.sql("CREATE DATABASE `{0}`".format(frappe.conf.db_name)) 10 root_conn.sql("CREATE user {0} password '{1}'".format(frappe.conf.db_name, 11 frappe.conf.db_password)) 12 root_conn.sql("GRANT ALL PRIVILEGES ON DATABASE `{0}` TO {0}".format(frappe.conf.db_name)) 13 14 # we can't pass psql password in arguments in postgresql as mysql. So 15 # set password connection parameter in environment variable 16 subprocess_env = os.environ.copy() 17 subprocess_env['PGPASSWORD'] = str(frappe.conf.db_password) 18 # bootstrap db 19 subprocess.check_output([ 20 'psql', frappe.conf.db_name, '-h', 'localhost', '-U', 21 frappe.conf.db_name, '-f', 22 os.path.join(os.path.dirname(__file__), 'framework_postgres.sql') 23 ], env=subprocess_env) 24 25 frappe.connect() 26 27 def setup_help_database(help_db_name): 28 root_conn = get_root_connection() 29 root_conn.sql("DROP DATABASE IF EXISTS `{0}`".format(help_db_name)) 30 root_conn.sql("DROP USER IF EXISTS {0}".format(help_db_name)) 31 root_conn.sql("CREATE DATABASE `{0}`".format(help_db_name)) 32 root_conn.sql("CREATE user {0} password '{1}'".format(help_db_name, help_db_name)) 33 root_conn.sql("GRANT ALL PRIVILEGES ON DATABASE `{0}` TO {0}".format(help_db_name)) 34 35 def get_root_connection(root_login=None, root_password=None): 36 import getpass 37 if not frappe.local.flags.root_connection: 38 if not root_login: 39 root_login = frappe.conf.get("root_login") or None 40 41 if not root_login: 42 root_login = input("Enter postgres super user: ") 43 44 if not root_password: 45 root_password = frappe.conf.get("root_password") or None 46 47 if not root_password: 48 root_password = getpass.getpass("Postgres super user password: ") 49 50 frappe.local.flags.root_connection = frappe.database.get_db(user=root_login, password=root_password) 51 52 return frappe.local.flags.root_connection 53 [end of frappe/database/postgres/setup_db.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/frappe/database/postgres/setup_db.py b/frappe/database/postgres/setup_db.py --- a/frappe/database/postgres/setup_db.py +++ b/frappe/database/postgres/setup_db.py @@ -17,7 +17,7 @@ subprocess_env['PGPASSWORD'] = str(frappe.conf.db_password) # bootstrap db subprocess.check_output([ - 'psql', frappe.conf.db_name, '-h', 'localhost', '-U', + 'psql', frappe.conf.db_name, '-h', frappe.conf.db_host, '-U', frappe.conf.db_name, '-f', os.path.join(os.path.dirname(__file__), 'framework_postgres.sql') ], env=subprocess_env)
{"golden_diff": "diff --git a/frappe/database/postgres/setup_db.py b/frappe/database/postgres/setup_db.py\n--- a/frappe/database/postgres/setup_db.py\n+++ b/frappe/database/postgres/setup_db.py\n@@ -17,7 +17,7 @@\n \tsubprocess_env['PGPASSWORD'] = str(frappe.conf.db_password)\n \t# bootstrap db\n \tsubprocess.check_output([\n-\t\t'psql', frappe.conf.db_name, '-h', 'localhost', '-U',\n+\t\t'psql', frappe.conf.db_name, '-h', frappe.conf.db_host, '-U',\n \t\tfrappe.conf.db_name, '-f',\n \t\tos.path.join(os.path.dirname(__file__), 'framework_postgres.sql')\n \t], env=subprocess_env)\n", "issue": "Hard coded localhost for pgsql setup\nThe postgresql setup is using `localhost` instead of the `db_host` from the site configuration:\r\nhttps://github.com/frappe/frappe/blob/17c43b4e1d2506e5b1498e52f3aa6c81277d67be/frappe/database/postgres/setup_db.py#L20\r\n\r\nThis makes it impossible to use Postgres on a remote host :/\r\n\r\nI can try to provide a PR for that.\r\nI do not guaranty I will have the time to test it but it's fairly simple.\n", "before_files": [{"content": "import frappe, subprocess, os\nfrom six.moves import input\n\ndef setup_database(force, source_sql, verbose):\n\troot_conn = get_root_connection()\n\troot_conn.commit()\n\troot_conn.sql(\"DROP DATABASE IF EXISTS `{0}`\".format(frappe.conf.db_name))\n\troot_conn.sql(\"DROP USER IF EXISTS {0}\".format(frappe.conf.db_name))\n\troot_conn.sql(\"CREATE DATABASE `{0}`\".format(frappe.conf.db_name))\n\troot_conn.sql(\"CREATE user {0} password '{1}'\".format(frappe.conf.db_name,\n\t\tfrappe.conf.db_password))\n\troot_conn.sql(\"GRANT ALL PRIVILEGES ON DATABASE `{0}` TO {0}\".format(frappe.conf.db_name))\n\n\t# we can't pass psql password in arguments in postgresql as mysql. So\n\t# set password connection parameter in environment variable\n\tsubprocess_env = os.environ.copy()\n\tsubprocess_env['PGPASSWORD'] = str(frappe.conf.db_password)\n\t# bootstrap db\n\tsubprocess.check_output([\n\t\t'psql', frappe.conf.db_name, '-h', 'localhost', '-U',\n\t\tfrappe.conf.db_name, '-f',\n\t\tos.path.join(os.path.dirname(__file__), 'framework_postgres.sql')\n\t], env=subprocess_env)\n\n\tfrappe.connect()\n\ndef setup_help_database(help_db_name):\n\troot_conn = get_root_connection()\n\troot_conn.sql(\"DROP DATABASE IF EXISTS `{0}`\".format(help_db_name))\n\troot_conn.sql(\"DROP USER IF EXISTS {0}\".format(help_db_name))\n\troot_conn.sql(\"CREATE DATABASE `{0}`\".format(help_db_name))\n\troot_conn.sql(\"CREATE user {0} password '{1}'\".format(help_db_name, help_db_name))\n\troot_conn.sql(\"GRANT ALL PRIVILEGES ON DATABASE `{0}` TO {0}\".format(help_db_name))\n\ndef get_root_connection(root_login=None, root_password=None):\n\timport getpass\n\tif not frappe.local.flags.root_connection:\n\t\tif not root_login:\n\t\t\troot_login = frappe.conf.get(\"root_login\") or None\n\n\t\tif not root_login:\n\t\t\troot_login = input(\"Enter postgres super user: \")\n\n\t\tif not root_password:\n\t\t\troot_password = frappe.conf.get(\"root_password\") or None\n\n\t\tif not root_password:\n\t\t\troot_password = getpass.getpass(\"Postgres super user password: \")\n\n\t\tfrappe.local.flags.root_connection = frappe.database.get_db(user=root_login, password=root_password)\n\n\treturn frappe.local.flags.root_connection\n", "path": "frappe/database/postgres/setup_db.py"}]}
1,325
167
gh_patches_debug_1782
rasdani/github-patches
git_diff
python__python-docs-es-1714
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Translate 'library/http.server.po' This needs to reach 100% translated. The rendered version of this file will be available at https://docs.python.org/es/3.10/library/http.server.html once translated. Meanwhile, the English version is shown. Current stats for `library/http.server.po`: * Fuzzy: 4 * Percent translated: 97.8% * Entries: 87 / 89 * Untranslated: 2 Please, comment here if you want this file to be assigned to you and an member will assign it to you as soon as possible, so you can start working on it. Remember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html). </issue> <code> [start of scripts/translate.py] 1 import os 2 import re 3 import sys 4 from typing import Dict, Tuple 5 6 import polib 7 8 VERBOSE = False 9 DEBUG = False 10 SKIP_TRANSLATED_ENTRIES = True 11 12 try: 13 from deep_translator import GoogleTranslator 14 except ImportError: 15 print("Error: This util script needs `deep_translator` to be installed") 16 sys.exit(1) 17 18 _patterns = [ 19 ":c:func:`[^`]+`", 20 ":c:type:`[^`]+`", 21 ":c:macro:`[^`]+`", 22 ":c:member:`[^`]+`", 23 ":c:data:`[^`]+`", 24 ":py:data:`[^`]+`", 25 ":py:mod:`[^`]+`", 26 ":func:`[^`]+`", 27 ":mod:`[^`]+`", 28 ":ref:`[^`]+`", 29 ":class:`[^`]+`", 30 ":pep:`[^`]+`", 31 ":data:`[^`]+`", 32 ":exc:`[^`]+`", 33 ":term:`[^`]+`", 34 ":meth:`[^`]+`", 35 ":envvar:`[^`]+`", 36 ":file:`[^`]+`", 37 ":attr:`[^`]+`", 38 ":const:`[^`]+`", 39 ":issue:`[^`]+`", 40 ":opcode:`[^`]+`", 41 ":option:`[^`]+`", 42 ":program:`[^`]+`", 43 ":keyword:`[^`]+`", 44 ":RFC:`[^`]+`", 45 ":doc:`[^`]+`", 46 "``[^`]+``", 47 "`[^`]+`__", 48 "`[^`]+`_", 49 "\*\*.+\*\*", # bold text between ** 50 "\*.+\*", # italic text between * 51 ] 52 53 _exps = [re.compile(e) for e in _patterns] 54 55 def protect_sphinx_directives(s: str) -> Tuple[dict, str]: 56 """ 57 Parameters: 58 string containing the text to translate 59 60 Returns: 61 dictionary containing all the placeholder text as keys 62 and the correct value. 63 """ 64 65 i = 0 66 d: Dict[str, str] = {} 67 for exp in _exps: 68 matches = exp.findall(s) 69 if DEBUG: 70 print(exp, matches) 71 for match in matches: 72 ph = f"XASDF{str(i).zfill(2)}" 73 s = s.replace(match, ph) 74 if ph in d and VERBOSE: 75 print(f"Error: {ph} is already in the dictionary") 76 print("new", match) 77 print("old", d[ph]) 78 d[ph] = match 79 i += 1 80 return d, s 81 82 83 def undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str: 84 for ph, value in placeholders.items(): 85 translated_text = translated_text.replace(ph, value) 86 if DEBUG: 87 print(ph, value) 88 print(translated_text) 89 return translated_text 90 91 92 if __name__ == "__main__": 93 filename = sys.argv[1] 94 if not os.path.isfile(filename): 95 print(f"File not found: '{filename}'") 96 sys.exit(-1) 97 98 po = polib.pofile(filename) 99 translator = GoogleTranslator(source="en", target="es") 100 101 for entry in po: 102 # If the entry has already a translation, skip. 
103 if SKIP_TRANSLATED_ENTRIES and entry.msgstr: 104 continue 105 106 print("\nEN|", entry.msgid) 107 placeholders, temp_text = protect_sphinx_directives(entry.msgid) 108 if VERBOSE: 109 print(temp_text) 110 print(placeholders) 111 112 # Translate the temporary text without sphinx statements 113 translated_text = translator.translate(temp_text) 114 115 # Recover sphinx statements 116 real_text = undo_sphinx_directives_protection(placeholders, translated_text) 117 print("ES|", real_text) 118 119 # Replace the po file translated entry 120 entry.msgstr = real_text 121 122 # Save the file after all the entries are translated 123 po.save() 124 [end of scripts/translate.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/translate.py b/scripts/translate.py --- a/scripts/translate.py +++ b/scripts/translate.py @@ -46,8 +46,8 @@ "``[^`]+``", "`[^`]+`__", "`[^`]+`_", - "\*\*.+\*\*", # bold text between ** - "\*.+\*", # italic text between * + "\*\*[^\*]+\*\*", # bold text between ** + "\*[^\*]+\*", # italic text between * ] _exps = [re.compile(e) for e in _patterns]
{"golden_diff": "diff --git a/scripts/translate.py b/scripts/translate.py\n--- a/scripts/translate.py\n+++ b/scripts/translate.py\n@@ -46,8 +46,8 @@\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n- \"\\*\\*.+\\*\\*\", # bold text between **\n- \"\\*.+\\*\", # italic text between *\n+ \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n+ \"\\*[^\\*]+\\*\", # italic text between *\n ]\n \n _exps = [re.compile(e) for e in _patterns]\n", "issue": "Translate 'library/http.server.po'\nThis needs to reach 100% translated.\n\nThe rendered version of this file will be available at https://docs.python.org/es/3.10/library/http.server.html once translated.\nMeanwhile, the English version is shown.\n\nCurrent stats for `library/http.server.po`:\n\n* Fuzzy: 4\n* Percent translated: 97.8%\n* Entries: 87 / 89\n* Untranslated: 2\n\nPlease, comment here if you want this file to be assigned to you and an member will assign it to you as soon as possible, so you can start working on it.\n\nRemember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).\n", "before_files": [{"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":doc:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*.+\\*\\*\", # bold text between **\n \"\\*.+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text 
= protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n", "path": "scripts/translate.py"}]}
1,845
138
gh_patches_debug_8128
rasdani/github-patches
git_diff
ethereum__web3.py-1930
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Changed pypiwin32 to pywin32 ### What was wrong? pypiwin32 is just an empty package requiring pywin32, so this can just be required directly. As far as I know, pypiwin32 is only there for historical reasons. Related to Issue #1721 ### How was it fixed? change it in setup.py ### Todo: - [ ] Add entry to the [release notes](https://github.com/ethereum/web3.py/blob/master/newsfragments/README.md) #### Cute Animal Picture ![Put a link to a cute animal picture inside the parenthesis-->](https://i.pinimg.com/originals/0d/3f/50/0d3f507974868a3be56d25424a7bf0a9.jpg) </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from setuptools import ( 4 find_packages, 5 setup, 6 ) 7 8 extras_require = { 9 'tester': [ 10 "eth-tester[py-evm]==v0.5.0-beta.3", 11 "py-geth>=2.4.0,<3", 12 ], 13 'linter': [ 14 "flake8==3.8.3", 15 "isort>=4.2.15,<4.3.5", 16 "mypy==0.730", 17 ], 18 'docs': [ 19 "mock", 20 "sphinx-better-theme>=0.1.4", 21 "click>=5.1", 22 "configparser==3.5.0", 23 "contextlib2>=0.5.4", 24 "py-geth>=2.4.0,<3", 25 "py-solc>=0.4.0", 26 "pytest>=4.4.0,<5.0.0", 27 "sphinx>=2.4.4,<3", 28 "sphinx_rtd_theme>=0.1.9", 29 "toposort>=1.4", 30 "towncrier>=19.2.0,<20", 31 "urllib3", 32 "web3>=2.1.0", 33 "wheel" 34 ], 35 'dev': [ 36 "bumpversion", 37 "flaky>=3.3.0", 38 "hypothesis>=3.31.2,<6", 39 "pytest>=4.4.0,<5.0.0", 40 "pytest-asyncio>=0.10.0,<0.11", 41 "pytest-mock>=1.10,<2", 42 "pytest-pythonpath>=0.3", 43 "pytest-watch>=4.2,<5", 44 "pytest-xdist>=1.29,<2", 45 "setuptools>=38.6.0", 46 "tox>=1.8.0", 47 "tqdm>4.32,<5", 48 "twine>=1.13,<2", 49 "when-changed>=0.3.0,<0.4" 50 ] 51 } 52 53 extras_require['dev'] = ( 54 extras_require['tester'] 55 + extras_require['linter'] 56 + extras_require['docs'] 57 + extras_require['dev'] 58 ) 59 60 with open('./README.md') as readme: 61 long_description = readme.read() 62 63 setup( 64 name='web3', 65 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility. 
66 version='5.17.0', 67 description="""Web3.py""", 68 long_description_content_type='text/markdown', 69 long_description=long_description, 70 author='Piper Merriam', 71 author_email='[email protected]', 72 url='https://github.com/ethereum/web3.py', 73 include_package_data=True, 74 install_requires=[ 75 "eth-abi>=2.0.0b6,<3.0.0", 76 "eth-account>=0.5.3,<0.6.0", 77 "eth-hash[pycryptodome]>=0.2.0,<1.0.0", 78 "eth-typing>=2.0.0,<3.0.0", 79 "eth-utils>=1.9.5,<2.0.0", 80 "hexbytes>=0.1.0,<1.0.0", 81 "ipfshttpclient==0.7.0a1", 82 "jsonschema>=3.2.0,<4.0.0", 83 "lru-dict>=1.1.6,<2.0.0", 84 "protobuf>=3.10.0,<4", 85 "pypiwin32>=223;platform_system=='Windows'", 86 "requests>=2.16.0,<3.0.0", 87 # remove typing_extensions after python_requires>=3.8, see web3._utils.compat 88 "typing-extensions>=3.7.4.1,<4;python_version<'3.8'", 89 "websockets>=8.1.0,<9.0.0", 90 ], 91 python_requires='>=3.6,<4', 92 extras_require=extras_require, 93 py_modules=['web3', 'ens', 'ethpm'], 94 entry_points={"pytest11": ["pytest_ethereum = web3.tools.pytest_ethereum.plugins"]}, 95 license="MIT", 96 zip_safe=False, 97 keywords='ethereum', 98 packages=find_packages(exclude=["tests", "tests.*"]), 99 package_data={"web3": ["py.typed"]}, 100 classifiers=[ 101 'Development Status :: 5 - Production/Stable', 102 'Intended Audience :: Developers', 103 'License :: OSI Approved :: MIT License', 104 'Natural Language :: English', 105 'Programming Language :: Python :: 3', 106 'Programming Language :: Python :: 3.6', 107 'Programming Language :: Python :: 3.7', 108 'Programming Language :: Python :: 3.8', 109 ], 110 ) 111 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -82,7 +82,7 @@ "jsonschema>=3.2.0,<4.0.0", "lru-dict>=1.1.6,<2.0.0", "protobuf>=3.10.0,<4", - "pypiwin32>=223;platform_system=='Windows'", + "pywin32>=223;platform_system=='Windows'", "requests>=2.16.0,<3.0.0", # remove typing_extensions after python_requires>=3.8, see web3._utils.compat "typing-extensions>=3.7.4.1,<4;python_version<'3.8'",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -82,7 +82,7 @@\n \"jsonschema>=3.2.0,<4.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"protobuf>=3.10.0,<4\",\n- \"pypiwin32>=223;platform_system=='Windows'\",\n+ \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n \"typing-extensions>=3.7.4.1,<4;python_version<'3.8'\",\n", "issue": "Changed pypiwin32 to pywin32\n### What was wrong?\r\npypiwin32 is just an empty package requiring pywin32, so this can just be required directly. As far as I know, pypiwin32 is only there for historical reasons.\r\n\r\nRelated to Issue #1721\r\n\r\n### How was it fixed?\r\nchange it in setup.py\r\n\r\n### Todo:\r\n- [ ] Add entry to the [release notes](https://github.com/ethereum/web3.py/blob/master/newsfragments/README.md)\r\n\r\n#### Cute Animal Picture\r\n\r\n![Put a link to a cute animal picture inside the parenthesis-->](https://i.pinimg.com/originals/0d/3f/50/0d3f507974868a3be56d25424a7bf0a9.jpg)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==v0.5.0-beta.3\",\n \"py-geth>=2.4.0,<3\",\n ],\n 'linter': [\n \"flake8==3.8.3\",\n \"isort>=4.2.15,<4.3.5\",\n \"mypy==0.730\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n \"py-geth>=2.4.0,<3\",\n \"py-solc>=0.4.0\",\n \"pytest>=4.4.0,<5.0.0\",\n \"sphinx>=2.4.4,<3\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"towncrier>=19.2.0,<20\",\n \"urllib3\",\n \"web3>=2.1.0\",\n \"wheel\"\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.3.0\",\n \"hypothesis>=3.31.2,<6\",\n \"pytest>=4.4.0,<5.0.0\",\n \"pytest-asyncio>=0.10.0,<0.11\",\n \"pytest-mock>=1.10,<2\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch>=4.2,<5\",\n \"pytest-xdist>=1.29,<2\",\n \"setuptools>=38.6.0\",\n \"tox>=1.8.0\",\n \"tqdm>4.32,<5\",\n \"twine>=1.13,<2\",\n \"when-changed>=0.3.0,<0.4\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester']\n + extras_require['linter']\n + extras_require['docs']\n + extras_require['dev']\n)\n\nwith open('./README.md') as readme:\n long_description = readme.read()\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='5.17.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_content_type='text/markdown',\n long_description=long_description,\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"eth-abi>=2.0.0b6,<3.0.0\",\n \"eth-account>=0.5.3,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"ipfshttpclient==0.7.0a1\",\n \"jsonschema>=3.2.0,<4.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"protobuf>=3.10.0,<4\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n \"typing-extensions>=3.7.4.1,<4;python_version<'3.8'\",\n \"websockets>=8.1.0,<9.0.0\",\n ],\n python_requires='>=3.6,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens', 'ethpm'],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n", "path": "setup.py"}]}
2,038
177
gh_patches_debug_9273
rasdani/github-patches
git_diff
google__fuzzbench-309
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> master cannot pass "make presubmit" Current master branch has one failed at "``make presubmit``". To reproduce: ``git clone https://github.com/google/fuzzbench ; cd fuzzbench ; git submodule update --init ; make install-dependencies ; source ./vene/bin/activate ; make presubmit`` https://github.com/google/fuzzbench/blob/41aa9b4267907185bd51514a6fb335ff70f4990d/experiment/test_measurer.py#L347 E assert 0 == 3798 Reference: https://google.github.io/fuzzbench/getting-started/prerequisites/ </issue> <code> [start of conftest.py] 1 # Copyright 2020 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """A pytest conftest.py file that defines fixtures and does other things many 15 tests might need (such as using an in-memory SQLite database).""" 16 17 import os 18 import sqlite3 19 from unittest import mock 20 21 import pytest 22 import sqlalchemy 23 24 from common import new_process 25 26 # Never wait for a timeout so that tests don't take any longer than they need 27 # to. 28 new_process.WAIT_SECONDS = 0 29 30 # Set this to an in-memory instance of SQLite so that db_utils can be imported 31 # without running a real Postgres database. 32 # pylint: disable=wrong-import-position 33 os.environ['SQL_DATABASE_URL'] = 'sqlite://' 34 35 from database import utils as db_utils 36 from database import models 37 38 39 # Give this a short name since it is a fixture. 40 @pytest.yield_fixture 41 def db(): # pylint: disable=invalid-name 42 """Connect to the SQLite database and create all the expected tables.""" 43 db_utils.initialize() 44 models.Base.metadata.create_all(db_utils.engine) 45 with mock.patch('database.utils.cleanup'): 46 yield 47 db_utils.cleanup() 48 49 50 @sqlalchemy.event.listens_for(sqlalchemy.engine.Engine, 'connect') 51 def set_sqlite_pragma(connection, _): 52 """Force SQLite to enforce non-null foreign key relationships. 53 Based on 54 https://docs.sqlalchemy.org/en/13/dialects/sqlite.html#foreign-key-support 55 """ 56 if not isinstance(connection, sqlite3.Connection): 57 return 58 59 cursor = connection.cursor() 60 cursor.execute('PRAGMA foreign_keys=ON') 61 cursor.close() 62 63 64 @pytest.yield_fixture 65 def environ(): 66 """Patch environment.""" 67 # TODO(metzman): Make sure this is used by all tests that modify the 68 # environment. 
69 patcher = mock.patch.dict(os.environ, {}) 70 patcher.start() 71 yield 72 patcher.stop() 73 74 75 @pytest.fixture 76 def experiment(tmp_path, environ): # pylint: disable=redefined-outer-name,unused-argument 77 """Mock an experiment.""" 78 os.environ['WORK'] = str(tmp_path) 79 os.environ['EXPERIMENT'] = 'test-experiment' 80 os.environ['CLOUD_EXPERIMENT_BUCKET'] = 'gs://experiment-data' 81 os.environ['CLOUD_WEB_BUCKET'] = 'gs://web-bucket' 82 os.environ['CLOUD_PROJECT'] = 'fuzzbench' 83 [end of conftest.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conftest.py b/conftest.py --- a/conftest.py +++ b/conftest.py @@ -73,9 +73,9 @@ @pytest.fixture -def experiment(tmp_path, environ): # pylint: disable=redefined-outer-name,unused-argument +def experiment(environ): # pylint: disable=redefined-outer-name,unused-argument """Mock an experiment.""" - os.environ['WORK'] = str(tmp_path) + os.environ['WORK'] = '/work' os.environ['EXPERIMENT'] = 'test-experiment' os.environ['CLOUD_EXPERIMENT_BUCKET'] = 'gs://experiment-data' os.environ['CLOUD_WEB_BUCKET'] = 'gs://web-bucket'
{"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -73,9 +73,9 @@\n \n \n @pytest.fixture\n-def experiment(tmp_path, environ): # pylint: disable=redefined-outer-name,unused-argument\n+def experiment(environ): # pylint: disable=redefined-outer-name,unused-argument\n \"\"\"Mock an experiment.\"\"\"\n- os.environ['WORK'] = str(tmp_path)\n+ os.environ['WORK'] = '/work'\n os.environ['EXPERIMENT'] = 'test-experiment'\n os.environ['CLOUD_EXPERIMENT_BUCKET'] = 'gs://experiment-data'\n os.environ['CLOUD_WEB_BUCKET'] = 'gs://web-bucket'\n", "issue": "master cannot pass \"make presubmit\"\nCurrent master branch has one failed at \"``make presubmit``\".\r\n\r\nTo reproduce: ``git clone https://github.com/google/fuzzbench ; cd fuzzbench ; git submodule update --init ; make install-dependencies ; source ./vene/bin/activate ; make presubmit``\r\n\r\nhttps://github.com/google/fuzzbench/blob/41aa9b4267907185bd51514a6fb335ff70f4990d/experiment/test_measurer.py#L347\r\n\r\nE assert 0 == 3798\r\n\r\nReference: https://google.github.io/fuzzbench/getting-started/prerequisites/\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A pytest conftest.py file that defines fixtures and does other things many\ntests might need (such as using an in-memory SQLite database).\"\"\"\n\nimport os\nimport sqlite3\nfrom unittest import mock\n\nimport pytest\nimport sqlalchemy\n\nfrom common import new_process\n\n# Never wait for a timeout so that tests don't take any longer than they need\n# to.\nnew_process.WAIT_SECONDS = 0\n\n# Set this to an in-memory instance of SQLite so that db_utils can be imported\n# without running a real Postgres database.\n# pylint: disable=wrong-import-position\nos.environ['SQL_DATABASE_URL'] = 'sqlite://'\n\nfrom database import utils as db_utils\nfrom database import models\n\n\n# Give this a short name since it is a fixture.\[email protected]_fixture\ndef db(): # pylint: disable=invalid-name\n \"\"\"Connect to the SQLite database and create all the expected tables.\"\"\"\n db_utils.initialize()\n models.Base.metadata.create_all(db_utils.engine)\n with mock.patch('database.utils.cleanup'):\n yield\n db_utils.cleanup()\n\n\[email protected]_for(sqlalchemy.engine.Engine, 'connect')\ndef set_sqlite_pragma(connection, _):\n \"\"\"Force SQLite to enforce non-null foreign key relationships.\n Based on\n https://docs.sqlalchemy.org/en/13/dialects/sqlite.html#foreign-key-support\n \"\"\"\n if not isinstance(connection, sqlite3.Connection):\n return\n\n cursor = connection.cursor()\n cursor.execute('PRAGMA foreign_keys=ON')\n cursor.close()\n\n\[email protected]_fixture\ndef environ():\n \"\"\"Patch environment.\"\"\"\n # TODO(metzman): Make sure this is used by all tests that modify the\n # environment.\n patcher = mock.patch.dict(os.environ, {})\n patcher.start()\n yield\n patcher.stop()\n\n\[email protected]\ndef experiment(tmp_path, environ): # pylint: 
disable=redefined-outer-name,unused-argument\n \"\"\"Mock an experiment.\"\"\"\n os.environ['WORK'] = str(tmp_path)\n os.environ['EXPERIMENT'] = 'test-experiment'\n os.environ['CLOUD_EXPERIMENT_BUCKET'] = 'gs://experiment-data'\n os.environ['CLOUD_WEB_BUCKET'] = 'gs://web-bucket'\n os.environ['CLOUD_PROJECT'] = 'fuzzbench'\n", "path": "conftest.py"}]}
1,481
167
gh_patches_debug_9572
rasdani/github-patches
git_diff
ESMCI__cime-1428
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Clean up python in unit_testing Clean up code in unit_testing/python </issue> <code> [start of scripts/lib/CIME/code_checker.py] 1 """ 2 Libraries for checking python code with pylint 3 """ 4 5 from CIME.XML.standard_module_setup import * 6 7 from CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, is_python_executable 8 9 from multiprocessing.dummy import Pool as ThreadPool 10 from distutils.spawn import find_executable 11 12 logger = logging.getLogger(__name__) 13 14 ############################################################################### 15 def _run_pylint(on_file, interactive): 16 ############################################################################### 17 pylint = find_executable("pylint") 18 19 cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement" 20 cimeroot = get_cime_root() 21 22 if "scripts/Tools" in on_file: 23 cmd_options +=",relative-import" 24 25 # add init-hook option 26 cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\",\"%s\"))'"%\ 27 (os.path.join(cimeroot,"scripts","lib"), 28 os.path.join(cimeroot,"scripts","Tools"), 29 os.path.join(cimeroot,"tools","unit_testing","python")) 30 31 cmd = "%s %s %s" % (pylint, cmd_options, on_file) 32 logger.debug("pylint command is %s"%cmd) 33 stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot) 34 if stat != 0: 35 if interactive: 36 logger.info("File %s has pylint problems, please fix\n Use command: %s" % (on_file, cmd)) 37 logger.info(out + "\n" + err) 38 return (on_file, out + "\n" + err) 39 else: 40 if interactive: 41 logger.info("File %s has no pylint problems" % on_file) 42 return (on_file, "") 43 44 ############################################################################### 45 def _matches(file_path, file_ends): 46 ############################################################################### 47 for file_end in file_ends: 48 if file_path.endswith(file_end): 49 return True 50 51 return False 52 53 ############################################################################### 54 def _should_pylint_skip(filepath): 55 ############################################################################### 56 # TODO - get rid of this 57 list_of_directories_to_ignore = ("xmlconvertors", "pointclm", "point_clm", "tools", "machines", "apidocs") 58 for dir_to_skip in list_of_directories_to_ignore: 59 if dir_to_skip in filepath: 60 return True 61 62 return False 63 64 ############################################################################### 65 def get_all_checkable_files(): 66 ############################################################################### 67 cimeroot = get_cime_root() 68 all_git_files = run_cmd_no_fail("git ls-files", from_dir=cimeroot, verbose=False).splitlines() 69 70 files_to_test = [item for item in all_git_files 71 if ((item.endswith(".py") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))] 72 return files_to_test 73 74 ############################################################################### 75 def check_code(files, num_procs=10, interactive=False): 76 ############################################################################### 77 """ 78 Check all python files in the given directory 79 80 Returns True if all files had no problems 81 """ 82 # Get list of files to check, we look to see if user-provided file 
argument 83 # is a valid file, if not, we search the repo for a file with similar name. 84 repo_files = run_cmd_no_fail('git ls-files --full-name %s' % get_cime_root(), verbose=False).splitlines() 85 files_to_check = [] 86 if files: 87 for filearg in files: 88 if os.path.exists(filearg): 89 files_to_check.append(os.path.abspath(filearg)) 90 else: 91 found = False 92 for repo_file in repo_files: 93 if repo_file.endswith(filearg): 94 found = True 95 files_to_check.append(repo_file) # could have multiple matches 96 97 if not found: 98 logger.warning("Could not find file matching argument '%s'" % filearg) 99 else: 100 # Check every python file 101 files_to_check = get_all_checkable_files() 102 103 expect(len(files_to_check) > 0, "No matching files found") 104 105 # No point in using more threads than files 106 if len(files_to_check) < num_procs: 107 num_procs = len(files_to_check) 108 109 pool = ThreadPool(num_procs) 110 results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check) 111 pool.close() 112 pool.join() 113 return dict(results) 114 [end of scripts/lib/CIME/code_checker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py --- a/scripts/lib/CIME/code_checker.py +++ b/scripts/lib/CIME/code_checker.py @@ -26,7 +26,7 @@ cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\",\"%s\"))'"%\ (os.path.join(cimeroot,"scripts","lib"), os.path.join(cimeroot,"scripts","Tools"), - os.path.join(cimeroot,"tools","unit_testing","python")) + os.path.join(cimeroot,"scripts","fortran_unit_testing","python")) cmd = "%s %s %s" % (pylint, cmd_options, on_file) logger.debug("pylint command is %s"%cmd)
{"golden_diff": "diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py\n--- a/scripts/lib/CIME/code_checker.py\n+++ b/scripts/lib/CIME/code_checker.py\n@@ -26,7 +26,7 @@\n cmd_options += \" --init-hook='sys.path.extend((\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"))'\"%\\\n (os.path.join(cimeroot,\"scripts\",\"lib\"),\n os.path.join(cimeroot,\"scripts\",\"Tools\"),\n- os.path.join(cimeroot,\"tools\",\"unit_testing\",\"python\"))\n+ os.path.join(cimeroot,\"scripts\",\"fortran_unit_testing\",\"python\"))\n \n cmd = \"%s %s %s\" % (pylint, cmd_options, on_file)\n logger.debug(\"pylint command is %s\"%cmd)\n", "issue": "Clean up python in unit_testing\nClean up code in unit_testing/python\n", "before_files": [{"content": "\"\"\"\nLibraries for checking python code with pylint\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, is_python_executable\n\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom distutils.spawn import find_executable\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _run_pylint(on_file, interactive):\n###############################################################################\n pylint = find_executable(\"pylint\")\n\n cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement\"\n cimeroot = get_cime_root()\n\n if \"scripts/Tools\" in on_file:\n cmd_options +=\",relative-import\"\n\n # add init-hook option\n cmd_options += \" --init-hook='sys.path.extend((\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"))'\"%\\\n (os.path.join(cimeroot,\"scripts\",\"lib\"),\n os.path.join(cimeroot,\"scripts\",\"Tools\"),\n os.path.join(cimeroot,\"tools\",\"unit_testing\",\"python\"))\n\n cmd = \"%s %s %s\" % (pylint, cmd_options, on_file)\n logger.debug(\"pylint command is %s\"%cmd)\n stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)\n if stat != 0:\n if interactive:\n logger.info(\"File %s has pylint problems, please fix\\n Use command: %s\" % (on_file, cmd))\n logger.info(out + \"\\n\" + err)\n return (on_file, out + \"\\n\" + err)\n else:\n if interactive:\n logger.info(\"File %s has no pylint problems\" % on_file)\n return (on_file, \"\")\n\n###############################################################################\ndef _matches(file_path, file_ends):\n###############################################################################\n for file_end in file_ends:\n if file_path.endswith(file_end):\n return True\n\n return False\n\n###############################################################################\ndef _should_pylint_skip(filepath):\n###############################################################################\n # TODO - get rid of this\n list_of_directories_to_ignore = (\"xmlconvertors\", \"pointclm\", \"point_clm\", \"tools\", \"machines\", \"apidocs\")\n for dir_to_skip in list_of_directories_to_ignore:\n if dir_to_skip in filepath:\n return True\n\n return False\n\n###############################################################################\ndef get_all_checkable_files():\n###############################################################################\n cimeroot = get_cime_root()\n all_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=cimeroot, verbose=False).splitlines()\n\n files_to_test = [item for item in all_git_files\n if ((item.endswith(\".py\") or 
is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]\n return files_to_test\n\n###############################################################################\ndef check_code(files, num_procs=10, interactive=False):\n###############################################################################\n \"\"\"\n Check all python files in the given directory\n\n Returns True if all files had no problems\n \"\"\"\n # Get list of files to check, we look to see if user-provided file argument\n # is a valid file, if not, we search the repo for a file with similar name.\n repo_files = run_cmd_no_fail('git ls-files --full-name %s' % get_cime_root(), verbose=False).splitlines()\n files_to_check = []\n if files:\n for filearg in files:\n if os.path.exists(filearg):\n files_to_check.append(os.path.abspath(filearg))\n else:\n found = False\n for repo_file in repo_files:\n if repo_file.endswith(filearg):\n found = True\n files_to_check.append(repo_file) # could have multiple matches\n\n if not found:\n logger.warning(\"Could not find file matching argument '%s'\" % filearg)\n else:\n # Check every python file\n files_to_check = get_all_checkable_files()\n\n expect(len(files_to_check) > 0, \"No matching files found\")\n\n # No point in using more threads than files\n if len(files_to_check) < num_procs:\n num_procs = len(files_to_check)\n\n pool = ThreadPool(num_procs)\n results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)\n pool.close()\n pool.join()\n return dict(results)\n", "path": "scripts/lib/CIME/code_checker.py"}]}
1,748
174
gh_patches_debug_11938
rasdani/github-patches
git_diff
inventree__InvenTree-1890
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Can't start inventree server It seems that error at `https://api.exchangerate.host/latest` blocks server startup. ``` # curl https://api.exchangerate.host/latest <html> <head><title>521 Origin Down</title></head> <body bgcolor="white"> <center><h1>521 Origin Down</h1></center> <hr><center>cloudflare-nginx</center> </body> </html> ``` <details> ``` # inv server InvenTree Database Configuration ================================ ENGINE: django.db.backends.mysql NAME: inventree HOST: localhost Base currency changed from USD to RUB Updating exchange rates from https://api.exchangerate.host/latest Using base currency 'RUB' InvenTree Database Configuration ================================ ENGINE: django.db.backends.mysql NAME: inventree HOST: localhost Base currency changed from USD to RUB Updating exchange rates from https://api.exchangerate.host/latest Using base currency 'RUB' Watching for file changes with StatReloader Watching for file changes with StatReloader Exception in thread django-main-thread: Traceback (most recent call last): File "/usr/lib64/python3.6/threading.py", line 916, in _bootstrap_inner self.run() File "/usr/lib64/python3.6/threading.py", line 864, in run self._target(*self._args, **self._kwargs) File "/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/utils/autoreload.py", line 64, in wrapper fn(*args, **kwargs) File "/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/core/management/commands/runserver.py", line 110, in inner_run autoreload.raise_last_exception() File "/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/utils/autoreload.py", line 87, in raise_last_exception raise _exception[1] File "/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute autoreload.check_errors(django.setup)() File "/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/utils/autoreload.py", line 64, in wrapper fn(*args, **kwargs) File "/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/__init__.py", line 24, in setup apps.populate(settings.INSTALLED_APPS) File "/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/apps/registry.py", line 122, in populate app_config.ready() File "/var/projects/inventree/inventree/InvenTree/InvenTree/apps.py", line 24, in ready self.update_exchange_rates() File "/var/projects/inventree/inventree/InvenTree/InvenTree/apps.py", line 105, in update_exchange_rates update_exchange_rates() File "/var/projects/inventree/inventree/InvenTree/InvenTree/tasks.py", line 199, in update_exchange_rates backend.update_rates(base_currency=base) File "/var/projects/inventree/inventree/InvenTree/InvenTree/exchange.py", line 29, in update_rates super().update_rates(base=base_currency, symbols=symbols) File "/usr/lib64/python3.6/contextlib.py", line 52, in inner return func(*args, **kwds) File "/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/djmoney/contrib/exchange/backends/base.py", line 68, in update_rates for currency, value in self.get_rates(**params).items() File "/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/djmoney/contrib/exchange/backends/base.py", line 80, in get_rates response = self.get_response(**params) File 
"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/djmoney/contrib/exchange/backends/base.py", line 48, in get_response response = urlopen(url, cafile=certifi.where()) File "/usr/lib64/python3.6/urllib/request.py", line 223, in urlopen return opener.open(url, data, timeout) File "/usr/lib64/python3.6/urllib/request.py", line 532, in open response = meth(req, response) File "/usr/lib64/python3.6/urllib/request.py", line 642, in http_response 'http', request, response, code, msg, hdrs) File "/usr/lib64/python3.6/urllib/request.py", line 570, in error return self._call_chain(*args) File "/usr/lib64/python3.6/urllib/request.py", line 504, in _call_chain result = func(*args) File "/usr/lib64/python3.6/urllib/request.py", line 650, in http_error_default raise HTTPError(req.full_url, code, msg, hdrs, fp) urllib.error.HTTPError: HTTP Error 521: Origin Down ``` </details> </issue> <code> [start of InvenTree/InvenTree/exchange.py] 1 from common.settings import currency_code_default, currency_codes 2 3 from djmoney.contrib.exchange.backends.base import SimpleExchangeBackend 4 5 6 class InvenTreeExchange(SimpleExchangeBackend): 7 """ 8 Backend for automatically updating currency exchange rates. 9 10 Uses the exchangerate.host service API 11 """ 12 13 name = "InvenTreeExchange" 14 15 def __init__(self): 16 self.url = "https://api.exchangerate.host/latest" 17 18 super().__init__() 19 20 def get_params(self): 21 # No API key is required 22 return { 23 } 24 25 def update_rates(self, base_currency=currency_code_default()): 26 27 symbols = ','.join(currency_codes()) 28 29 super().update_rates(base=base_currency, symbols=symbols) 30 [end of InvenTree/InvenTree/exchange.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/InvenTree/InvenTree/exchange.py b/InvenTree/InvenTree/exchange.py --- a/InvenTree/InvenTree/exchange.py +++ b/InvenTree/InvenTree/exchange.py @@ -1,4 +1,5 @@ from common.settings import currency_code_default, currency_codes +from urllib.error import HTTPError, URLError from djmoney.contrib.exchange.backends.base import SimpleExchangeBackend @@ -26,4 +27,8 @@ symbols = ','.join(currency_codes()) - super().update_rates(base=base_currency, symbols=symbols) + try: + super().update_rates(base=base_currency, symbols=symbols) + # catch connection errors + except (HTTPError, URLError): + print('Encountered connection error while updating')
{"golden_diff": "diff --git a/InvenTree/InvenTree/exchange.py b/InvenTree/InvenTree/exchange.py\n--- a/InvenTree/InvenTree/exchange.py\n+++ b/InvenTree/InvenTree/exchange.py\n@@ -1,4 +1,5 @@\n from common.settings import currency_code_default, currency_codes\n+from urllib.error import HTTPError, URLError\n \n from djmoney.contrib.exchange.backends.base import SimpleExchangeBackend\n \n@@ -26,4 +27,8 @@\n \n symbols = ','.join(currency_codes())\n \n- super().update_rates(base=base_currency, symbols=symbols)\n+ try:\n+ super().update_rates(base=base_currency, symbols=symbols)\n+ # catch connection errors\n+ except (HTTPError, URLError):\n+ print('Encountered connection error while updating')\n", "issue": "Can't start inventree server\nIt seems that error at `https://api.exchangerate.host/latest` blocks server startup.\r\n\r\n```\r\n# curl https://api.exchangerate.host/latest\r\n<html>\r\n<head><title>521 Origin Down</title></head>\r\n<body bgcolor=\"white\">\r\n<center><h1>521 Origin Down</h1></center>\r\n<hr><center>cloudflare-nginx</center>\r\n</body>\r\n</html>\r\n```\r\n\r\n<details>\r\n\r\n```\r\n# inv server\r\nInvenTree Database Configuration\r\n================================\r\nENGINE: django.db.backends.mysql\r\nNAME: inventree\r\nHOST: localhost\r\nBase currency changed from USD to RUB\r\nUpdating exchange rates from https://api.exchangerate.host/latest\r\nUsing base currency 'RUB'\r\nInvenTree Database Configuration\r\n================================\r\nENGINE: django.db.backends.mysql\r\nNAME: inventree\r\nHOST: localhost\r\nBase currency changed from USD to RUB\r\nUpdating exchange rates from https://api.exchangerate.host/latest\r\nUsing base currency 'RUB'\r\nWatching for file changes with StatReloader\r\nWatching for file changes with StatReloader\r\nException in thread django-main-thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib64/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/utils/autoreload.py\", line 64, in wrapper\r\n fn(*args, **kwargs)\r\n File \"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/core/management/commands/runserver.py\", line 110, in inner_run\r\n autoreload.raise_last_exception()\r\n File \"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/utils/autoreload.py\", line 87, in raise_last_exception\r\n raise _exception[1]\r\n File \"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/core/management/__init__.py\", line 375, in execute\r\n autoreload.check_errors(django.setup)()\r\n File \"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/utils/autoreload.py\", line 64, in wrapper\r\n fn(*args, **kwargs)\r\n File \"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/django/apps/registry.py\", line 122, in populate\r\n app_config.ready()\r\n File \"/var/projects/inventree/inventree/InvenTree/InvenTree/apps.py\", line 24, in ready\r\n self.update_exchange_rates()\r\n File \"/var/projects/inventree/inventree/InvenTree/InvenTree/apps.py\", line 105, in update_exchange_rates\r\n update_exchange_rates()\r\n File 
\"/var/projects/inventree/inventree/InvenTree/InvenTree/tasks.py\", line 199, in update_exchange_rates\r\n backend.update_rates(base_currency=base)\r\n File \"/var/projects/inventree/inventree/InvenTree/InvenTree/exchange.py\", line 29, in update_rates\r\n super().update_rates(base=base_currency, symbols=symbols)\r\n File \"/usr/lib64/python3.6/contextlib.py\", line 52, in inner\r\n return func(*args, **kwds)\r\n File \"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/djmoney/contrib/exchange/backends/base.py\", line 68, in update_rates\r\n for currency, value in self.get_rates(**params).items()\r\n File \"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/djmoney/contrib/exchange/backends/base.py\", line 80, in get_rates\r\n response = self.get_response(**params)\r\n File \"/var/projects/inventree/inventree-env/lib64/python3.6/site-packages/djmoney/contrib/exchange/backends/base.py\", line 48, in get_response\r\n response = urlopen(url, cafile=certifi.where())\r\n File \"/usr/lib64/python3.6/urllib/request.py\", line 223, in urlopen\r\n return opener.open(url, data, timeout)\r\n File \"/usr/lib64/python3.6/urllib/request.py\", line 532, in open\r\n response = meth(req, response)\r\n File \"/usr/lib64/python3.6/urllib/request.py\", line 642, in http_response\r\n 'http', request, response, code, msg, hdrs)\r\n File \"/usr/lib64/python3.6/urllib/request.py\", line 570, in error\r\n return self._call_chain(*args)\r\n File \"/usr/lib64/python3.6/urllib/request.py\", line 504, in _call_chain\r\n result = func(*args)\r\n File \"/usr/lib64/python3.6/urllib/request.py\", line 650, in http_error_default\r\n raise HTTPError(req.full_url, code, msg, hdrs, fp)\r\nurllib.error.HTTPError: HTTP Error 521: Origin Down\r\n```\r\n\r\n</details>\n", "before_files": [{"content": "from common.settings import currency_code_default, currency_codes\n\nfrom djmoney.contrib.exchange.backends.base import SimpleExchangeBackend\n\n\nclass InvenTreeExchange(SimpleExchangeBackend):\n \"\"\"\n Backend for automatically updating currency exchange rates.\n\n Uses the exchangerate.host service API\n \"\"\"\n\n name = \"InvenTreeExchange\"\n\n def __init__(self):\n self.url = \"https://api.exchangerate.host/latest\"\n\n super().__init__()\n\n def get_params(self):\n # No API key is required\n return {\n }\n\n def update_rates(self, base_currency=currency_code_default()):\n\n symbols = ','.join(currency_codes())\n\n super().update_rates(base=base_currency, symbols=symbols)\n", "path": "InvenTree/InvenTree/exchange.py"}]}
1,998
186
gh_patches_debug_29185
rasdani/github-patches
git_diff
fossasia__open-event-server-4267
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Implement User Emails Parent Issue: #4196 Requires #4210 to be merged - [x] Email verification email - To the user who registered - [x] A welcome email - After registration. Before email verification. - To the user who registered - [x] Forgot password reset email - To the user who registered - [x] Email change notification (#4210) - To both the old email address and the new email address - [ ] Password change notification - To the user </issue> <code> [start of app/api/auth.py] 1 import base64 2 3 from flask import Blueprint, abort, make_response 4 from flask import request, jsonify 5 6 from app.api.helpers.db import save_to_db 7 8 from app.api.helpers.utilities import get_serializer 9 from app.models.user import User 10 11 auth_routes = Blueprint('auth', __name__, url_prefix='/v1/auth') 12 13 14 @auth_routes.route('/verify-email', methods=['POST']) 15 def verify_email(): 16 token = base64.b64decode(request.json['data']['token']) 17 s = get_serializer() 18 19 try: 20 data = s.loads(token) 21 except Exception: 22 return abort( 23 make_response(jsonify(error="Invalid Token"), 400) 24 ) 25 26 try: 27 user = User.query.filter_by(email=data[0]).one() 28 except Exception: 29 return abort( 30 make_response(jsonify(error="Invalid Token"), 400) 31 ) 32 else: 33 user.is_verified = True 34 save_to_db(user) 35 36 return jsonify({ 37 "id": user.id, 38 "email": user.email, 39 "name": user.name if user.get('name') else None 40 }) 41 [end of app/api/auth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/auth.py b/app/api/auth.py --- a/app/api/auth.py +++ b/app/api/auth.py @@ -1,11 +1,14 @@ import base64 +from flask import request, jsonify, abort, make_response, Blueprint +from sqlalchemy.orm.exc import NoResultFound -from flask import Blueprint, abort, make_response -from flask import request, jsonify - +from app import get_settings from app.api.helpers.db import save_to_db +from app.api.helpers.files import make_frontend_url +from app.api.helpers.mail import send_email_with_action from app.api.helpers.utilities import get_serializer +from app.models.mail import PASSWORD_RESET from app.models.user import User auth_routes = Blueprint('auth', __name__, url_prefix='/v1/auth') @@ -31,6 +34,38 @@ ) else: user.is_verified = True + + +@auth_routes.route('/reset-password', methods=['POST']) +def reset_password_post(): + email = request.json['data']['email'] + + try: + user = User.query.filter_by(email=email).one() + except NoResultFound: + return abort( + make_response(jsonify(error="User not found"), 404) + ) + else: + link = make_frontend_url('/reset-password', {'token': user.reset_password}) + send_email_with_action(user, PASSWORD_RESET, app_name=get_settings()['app_name'], link=link) + + return make_response(jsonify(message="Email Sent"), 200) + + +@auth_routes.route('/reset-password', methods=['PATCH']) +def reset_password_patch(): + token = request.json['data']['token'] + password = request.json['data']['password'] + + try: + user = User.query.filter_by(reset_password=token).one() + except NoResultFound: + return abort( + make_response(jsonify(error="User not found"), 404) + ) + else: + user.password = password save_to_db(user) return jsonify({
{"golden_diff": "diff --git a/app/api/auth.py b/app/api/auth.py\n--- a/app/api/auth.py\n+++ b/app/api/auth.py\n@@ -1,11 +1,14 @@\n import base64\n+from flask import request, jsonify, abort, make_response, Blueprint\n+from sqlalchemy.orm.exc import NoResultFound\n \n-from flask import Blueprint, abort, make_response\n-from flask import request, jsonify\n-\n+from app import get_settings\n from app.api.helpers.db import save_to_db\n+from app.api.helpers.files import make_frontend_url\n+from app.api.helpers.mail import send_email_with_action\n \n from app.api.helpers.utilities import get_serializer\n+from app.models.mail import PASSWORD_RESET\n from app.models.user import User\n \n auth_routes = Blueprint('auth', __name__, url_prefix='/v1/auth')\n@@ -31,6 +34,38 @@\n )\n else:\n user.is_verified = True\n+\n+\n+@auth_routes.route('/reset-password', methods=['POST'])\n+def reset_password_post():\n+ email = request.json['data']['email']\n+\n+ try:\n+ user = User.query.filter_by(email=email).one()\n+ except NoResultFound:\n+ return abort(\n+ make_response(jsonify(error=\"User not found\"), 404)\n+ )\n+ else:\n+ link = make_frontend_url('/reset-password', {'token': user.reset_password})\n+ send_email_with_action(user, PASSWORD_RESET, app_name=get_settings()['app_name'], link=link)\n+\n+ return make_response(jsonify(message=\"Email Sent\"), 200)\n+\n+\n+@auth_routes.route('/reset-password', methods=['PATCH'])\n+def reset_password_patch():\n+ token = request.json['data']['token']\n+ password = request.json['data']['password']\n+\n+ try:\n+ user = User.query.filter_by(reset_password=token).one()\n+ except NoResultFound:\n+ return abort(\n+ make_response(jsonify(error=\"User not found\"), 404)\n+ )\n+ else:\n+ user.password = password\n save_to_db(user)\n \n return jsonify({\n", "issue": "Implement User Emails\nParent Issue: #4196 \r\n\r\nRequires #4210 to be merged\r\n\r\n- [x] Email verification email - To the user who registered\r\n- [x] A welcome email - After registration. Before email verification. - To the user who registered\r\n- [x] Forgot password reset email - To the user who registered\r\n- [x] Email change notification (#4210) - To both the old email address and the new email address\r\n- [ ] Password change notification - To the user\n", "before_files": [{"content": "import base64\n\nfrom flask import Blueprint, abort, make_response\nfrom flask import request, jsonify\n\nfrom app.api.helpers.db import save_to_db\n\nfrom app.api.helpers.utilities import get_serializer\nfrom app.models.user import User\n\nauth_routes = Blueprint('auth', __name__, url_prefix='/v1/auth')\n\n\n@auth_routes.route('/verify-email', methods=['POST'])\ndef verify_email():\n token = base64.b64decode(request.json['data']['token'])\n s = get_serializer()\n\n try:\n data = s.loads(token)\n except Exception:\n return abort(\n make_response(jsonify(error=\"Invalid Token\"), 400)\n )\n\n try:\n user = User.query.filter_by(email=data[0]).one()\n except Exception:\n return abort(\n make_response(jsonify(error=\"Invalid Token\"), 400)\n )\n else:\n user.is_verified = True\n save_to_db(user)\n\n return jsonify({\n \"id\": user.id,\n \"email\": user.email,\n \"name\": user.name if user.get('name') else None\n })\n", "path": "app/api/auth.py"}]}
959
459
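The two endpoints added by the diff above take JSON:API-style payloads nested under a `data` key. A minimal client-side sketch of the round trip, assuming a local dev server at `http://localhost:5000` and the `requests` library (both assumptions; only the `/v1/auth` prefix and the payload shapes come from the diff):

```python
import requests

BASE = "http://localhost:5000/v1/auth"  # assumed dev host; URL prefix from the Blueprint

# POST: ask for a reset email -- mirrors request.json['data']['email'] in the diff
resp = requests.post(
    f"{BASE}/reset-password",
    json={"data": {"email": "[email protected]"}},
)
print(resp.status_code, resp.json())  # expect 200 {"message": "Email Sent"}

# PATCH: redeem the emailed token -- mirrors request.json['data']['token'/'password']
resp = requests.patch(
    f"{BASE}/reset-password",
    json={"data": {"token": "token-from-email-link", "password": "n3w-passw0rd"}},
)
print(resp.status_code)  # 404 with {"error": "User not found"} if the token is stale
```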
gh_patches_debug_19
rasdani/github-patches
git_diff
pwndbg__pwndbg-381
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Broken `entry` command ### Description The `entry` command pass arguments differently then the `run` command. ### Steps to reproduce ``` [dc@dc:pwndbg|dev *$%]$ gdb python Loaded 113 commands. Type pwndbg [filter] for a list. Reading symbols from python...(no debugging symbols found)...done. pwndbg> set exception-verbose on Set whether to print a full stacktracefor exceptions raised in Pwndbg commands to True pwndbg> run -c "print(1); print(2)" Starting program: /usr/bin/python -c "print(1); print(2)" [Thread debugging using libthread_db enabled] Using host libthread_db library "/usr/lib/libthread_db.so.1". 1 2 [Inferior 1 (process 20590) exited normally] pwndbg> entry -c "print(1); print(2)" ('-c', 'print(1); print(2)') Running '%s' run -c print(1); print(2) /bin/bash: -c: line 0: syntax error near unexpected token `(' /bin/bash: -c: line 0: `exec /usr/bin/python -c print(1); print(2)' Traceback (most recent call last): File "/home/dc/installed/pwndbg/pwndbg/commands/__init__.py", line 100, in __call__ return self.function(*args, **kwargs) File "/home/dc/installed/pwndbg/pwndbg/commands/__init__.py", line 181, in _OnlyWithFile return function(*a, **kw) File "/home/dc/installed/pwndbg/pwndbg/commands/start.py", line 72, in entry gdb.execute(run, from_tty=False) gdb.error: During startup program exited with code 1. If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues (Please don't forget to search if it hasn't been reported before) PS: Pull requests are welcome ``` ### My version ``` pwndbg> version Gdb: GNU gdb (GDB) 8.0.1 Python: 3.6.3 (default, Oct 24 2017, 14:48:20) [GCC 7.2.0] Pwndbg: 1.0.0 build: 5811010 ``` </issue> <code> [start of pwndbg/prompt.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import gdb 9 10 import pwndbg.color as C 11 import pwndbg.events 12 import pwndbg.gdbutils 13 import pwndbg.memoize 14 15 funcs_list_str = ', '.join(C.purple('$' + f.name) for f in pwndbg.gdbutils.functions.functions) 16 17 hint_lines = ( 18 'loaded %i commands. Type %s for a list.' % (len(pwndbg.commands.commands), C.purple('pwndbg [filter]')), 19 'created %s gdb functions (can be used with print/break)' % funcs_list_str 20 ) 21 22 for line in hint_lines: 23 print(C.light_red(pwndbg.color.bold('pwndbg: ') + line)) 24 25 cur = (gdb.selected_inferior(), gdb.selected_thread()) 26 27 28 def prompt_hook(*a): 29 global cur 30 new = (gdb.selected_inferior(), gdb.selected_thread()) 31 32 if cur != new: 33 pwndbg.events.after_reload(start=False) 34 cur = new 35 36 if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped: 37 prompt_hook_on_stop(*a) 38 39 40 @pwndbg.memoize.reset_on_stop 41 def prompt_hook_on_stop(*a): 42 pwndbg.commands.context.context() 43 44 45 46 gdb.prompt_hook = prompt_hook 47 [end of pwndbg/prompt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/prompt.py b/pwndbg/prompt.py --- a/pwndbg/prompt.py +++ b/pwndbg/prompt.py @@ -43,4 +43,5 @@ + gdb.prompt_hook = prompt_hook
{"golden_diff": "diff --git a/pwndbg/prompt.py b/pwndbg/prompt.py\n--- a/pwndbg/prompt.py\n+++ b/pwndbg/prompt.py\n@@ -43,4 +43,5 @@\n \n \n \n+\n gdb.prompt_hook = prompt_hook\n", "issue": "Broken `entry` command\n### Description\r\n\r\nThe `entry` command pass arguments differently then the `run` command.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\n[dc@dc:pwndbg|dev *$%]$ gdb python\r\nLoaded 113 commands. Type pwndbg [filter] for a list.\r\nReading symbols from python...(no debugging symbols found)...done.\r\npwndbg> set exception-verbose on\r\nSet whether to print a full stacktracefor exceptions raised in Pwndbg commands to True\r\npwndbg> run -c \"print(1); print(2)\"\r\nStarting program: /usr/bin/python -c \"print(1); print(2)\"\r\n[Thread debugging using libthread_db enabled]\r\nUsing host libthread_db library \"/usr/lib/libthread_db.so.1\".\r\n1\r\n2\r\n[Inferior 1 (process 20590) exited normally]\r\npwndbg> entry -c \"print(1); print(2)\"\r\n('-c', 'print(1); print(2)')\r\nRunning '%s' run -c print(1); print(2)\r\n/bin/bash: -c: line 0: syntax error near unexpected token `('\r\n/bin/bash: -c: line 0: `exec /usr/bin/python -c print(1); print(2)'\r\nTraceback (most recent call last):\r\n File \"/home/dc/installed/pwndbg/pwndbg/commands/__init__.py\", line 100, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/home/dc/installed/pwndbg/pwndbg/commands/__init__.py\", line 181, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/home/dc/installed/pwndbg/pwndbg/commands/start.py\", line 72, in entry\r\n gdb.execute(run, from_tty=False)\r\ngdb.error: During startup program exited with code 1.\r\n\r\nIf that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\r\n(Please don't forget to search if it hasn't been reported before)\r\nPS: Pull requests are welcome\r\n```\r\n\r\n### My version\r\n\r\n```\r\npwndbg> version\r\nGdb: GNU gdb (GDB) 8.0.1\r\nPython: 3.6.3 (default, Oct 24 2017, 14:48:20) [GCC 7.2.0]\r\nPwndbg: 1.0.0 build: 5811010\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gdb\n\nimport pwndbg.color as C\nimport pwndbg.events\nimport pwndbg.gdbutils\nimport pwndbg.memoize\n\nfuncs_list_str = ', '.join(C.purple('$' + f.name) for f in pwndbg.gdbutils.functions.functions)\n\nhint_lines = (\n 'loaded %i commands. Type %s for a list.' % (len(pwndbg.commands.commands), C.purple('pwndbg [filter]')),\n 'created %s gdb functions (can be used with print/break)' % funcs_list_str\n)\n\nfor line in hint_lines:\n print(C.light_red(pwndbg.color.bold('pwndbg: ') + line))\n\ncur = (gdb.selected_inferior(), gdb.selected_thread())\n\n\ndef prompt_hook(*a):\n global cur\n new = (gdb.selected_inferior(), gdb.selected_thread())\n\n if cur != new:\n pwndbg.events.after_reload(start=False)\n cur = new\n\n if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:\n prompt_hook_on_stop(*a)\n\n\[email protected]_on_stop\ndef prompt_hook_on_stop(*a):\n pwndbg.commands.context.context()\n\n\n\ngdb.prompt_hook = prompt_hook\n", "path": "pwndbg/prompt.py"}]}
1,505
59
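The failure in the transcript above is a quoting problem: `entry` rebuilds the argument tuple into a shell command with a bare join, so `-c "print(1); print(2)"` loses its quotes before `/bin/bash` sees it. A standalone sketch of the failure mode and of one conventional repair with `shlex.quote` (the repair is illustrative; the project's actual fix may differ):

```python
import shlex

args = ("-c", "print(1); print(2)")  # the argv tuple printed in the transcript

broken = "run " + " ".join(args)                         # quotes are gone
fixed = "run " + " ".join(shlex.quote(a) for a in args)  # each arg re-quoted

print(broken)  # run -c print(1); print(2)    -> bash: syntax error near '('
print(fixed)   # run -c 'print(1); print(2)'  -> reaches python intact
```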
gh_patches_debug_57501
rasdani/github-patches
git_diff
dotkom__onlineweb4-1652
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Application for extending membership marks field of study as guest When someone applies to get their membership prolonged their field of study is set to "guest" instead of what they previously were registered as. </issue> <code> [start of apps/approval/views.py] 1 # -*- encoding: utf-8 -*- 2 3 import datetime 4 5 from django.contrib import messages 6 from django.contrib.auth.decorators import login_required 7 from django.http import Http404 8 from django.shortcuts import get_object_or_404, redirect 9 from django.utils import timezone 10 from django.utils.translation import ugettext as _ 11 12 from apps.approval.forms import FieldOfStudyApplicationForm 13 from apps.approval.models import MembershipApproval 14 from apps.authentication.models import AllowedUsername, get_length_of_field_of_study 15 16 17 @login_required 18 def create_fos_application(request): 19 if request.method == 'POST': 20 if not request.user.ntnu_username: 21 messages.error(request, _("Du må knytte et NTNU-brukernavn til kontoen din.")) 22 return redirect('profiles_active', active_tab='membership') 23 24 form = FieldOfStudyApplicationForm(request.POST) 25 if form.is_valid(): 26 cleaned = form.cleaned_data 27 28 field_of_study = int(cleaned['field_of_study']) 29 30 if field_of_study == 0: 31 messages.warning(request, _("Denne studieretningen (Gjest) er ikke et gyldig alternativ.")) 32 return redirect('profiles_active', active_tab='membership') 33 34 started_day = 1 35 started_month = 0 36 started_year = int(cleaned['started_year']) 37 38 if cleaned['started_semester'] == "h": 39 started_month = 7 40 if cleaned['started_semester'] == "v": 41 started_month = 1 42 43 started_date = datetime.date(started_year, started_month, started_day) 44 45 # Does the user already have a field of study and started date? 
46 if request.user.started_date and request.user.field_of_study: 47 # If there is no change from the current settings, ignore the request 48 if request.user.started_date == started_date and request.user.field_of_study == field_of_study: 49 messages.error( 50 request, 51 _("Du er allerede registrert med denne studieretningen og denne startdatoen.") 52 ) 53 return redirect('profiles_active', active_tab='membership') 54 55 application = MembershipApproval( 56 applicant=request.user, 57 field_of_study=field_of_study, 58 started_date=started_date 59 ) 60 61 length_of_fos = get_length_of_field_of_study(field_of_study) 62 if length_of_fos > 0: 63 application.new_expiry_date = get_expiry_date(started_year, length_of_fos) 64 application.save() 65 66 messages.success(request, _("Søknad om bytte av studieretning er sendt.")) 67 68 return redirect('profiles_active', active_tab='membership') 69 raise Http404 70 71 72 def get_expiry_date(started_year, length_of_fos): 73 today = timezone.now().date() 74 # Expiry dates should be 15th September, so that we have time to get new lists from NTNU 75 new_expiry_date = datetime.date( 76 started_year, 9, 16) + datetime.timedelta(days=365*length_of_fos) 77 # Expiry dates in the past sets the expiry date to next september 78 if new_expiry_date < today: 79 if today < datetime.date(today.year, 9, 15): 80 new_expiry_date = datetime.date(today.year, 9, 15) 81 else: 82 new_expiry_date = datetime.date( 83 today.year, 9, 16) + datetime.timedelta(days=365) 84 return new_expiry_date 85 86 87 @login_required 88 def create_membership_application(request): 89 if request.method == 'POST': 90 if not request.user.has_expiring_membership: 91 messages.error(request, _("Din bruker har ikke et utløpende medlemskap.")) 92 return redirect('profiles_active', active_tab='membership') 93 94 if not request.user.ntnu_username: 95 messages.error(request, _("Du må knytte et NTNU-brukernavn til kontoen din.")) 96 return redirect('profiles_active', active_tab='membership') 97 98 # Extend length of membership by 1 year 99 membership = AllowedUsername.objects.get(username=request.user.ntnu_username) 100 new_expiration_date = datetime.date(membership.expiration_date.year + 1, 9, 16) 101 102 application = MembershipApproval( 103 applicant=request.user, 104 new_expiry_date=new_expiration_date, 105 ) 106 application.save() 107 108 messages.success(request, _("Søknad om ett års forlenget medlemskap er sendt.")) 109 110 return redirect('profiles_active', active_tab='membership') 111 raise Http404 112 113 114 @login_required 115 def cancel_application(request, application_id): 116 app = get_object_or_404(MembershipApproval, pk=application_id) 117 118 if app.applicant != request.user: 119 messages.error(request, _("Bare søkeren selv kan slette en søknad.")) 120 return redirect('profiles_active', active_tab='membership') 121 122 if app.processed: 123 messages.error(request, _("Denne søknaden er behandlet og kan ikke slettes.")) 124 return redirect('profiles_active', active_tab='membership') 125 126 app.delete() 127 128 return redirect('profiles_active', active_tab='membership') 129 [end of apps/approval/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/approval/views.py b/apps/approval/views.py --- a/apps/approval/views.py +++ b/apps/approval/views.py @@ -101,6 +101,7 @@ application = MembershipApproval( applicant=request.user, + field_of_study=request.user.field_of_study, new_expiry_date=new_expiration_date, ) application.save()
{"golden_diff": "diff --git a/apps/approval/views.py b/apps/approval/views.py\n--- a/apps/approval/views.py\n+++ b/apps/approval/views.py\n@@ -101,6 +101,7 @@\n \n application = MembershipApproval(\n applicant=request.user,\n+ field_of_study=request.user.field_of_study,\n new_expiry_date=new_expiration_date,\n )\n application.save()\n", "issue": "Application for extending membership marks field of study as guest\nWhen someone applies to get their membership prolonged their field of study is set to \"guest\" instead of what they previously were registered as. \n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n\nimport datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom apps.approval.forms import FieldOfStudyApplicationForm\nfrom apps.approval.models import MembershipApproval\nfrom apps.authentication.models import AllowedUsername, get_length_of_field_of_study\n\n\n@login_required\ndef create_fos_application(request):\n if request.method == 'POST':\n if not request.user.ntnu_username:\n messages.error(request, _(\"Du m\u00e5 knytte et NTNU-brukernavn til kontoen din.\"))\n return redirect('profiles_active', active_tab='membership')\n\n form = FieldOfStudyApplicationForm(request.POST)\n if form.is_valid():\n cleaned = form.cleaned_data\n\n field_of_study = int(cleaned['field_of_study'])\n\n if field_of_study == 0:\n messages.warning(request, _(\"Denne studieretningen (Gjest) er ikke et gyldig alternativ.\"))\n return redirect('profiles_active', active_tab='membership')\n\n started_day = 1\n started_month = 0\n started_year = int(cleaned['started_year'])\n\n if cleaned['started_semester'] == \"h\":\n started_month = 7\n if cleaned['started_semester'] == \"v\":\n started_month = 1\n\n started_date = datetime.date(started_year, started_month, started_day)\n\n # Does the user already have a field of study and started date?\n if request.user.started_date and request.user.field_of_study:\n # If there is no change from the current settings, ignore the request\n if request.user.started_date == started_date and request.user.field_of_study == field_of_study:\n messages.error(\n request,\n _(\"Du er allerede registrert med denne studieretningen og denne startdatoen.\")\n )\n return redirect('profiles_active', active_tab='membership')\n\n application = MembershipApproval(\n applicant=request.user,\n field_of_study=field_of_study,\n started_date=started_date\n )\n\n length_of_fos = get_length_of_field_of_study(field_of_study)\n if length_of_fos > 0:\n application.new_expiry_date = get_expiry_date(started_year, length_of_fos)\n application.save()\n\n messages.success(request, _(\"S\u00f8knad om bytte av studieretning er sendt.\"))\n\n return redirect('profiles_active', active_tab='membership')\n raise Http404\n\n\ndef get_expiry_date(started_year, length_of_fos):\n today = timezone.now().date()\n # Expiry dates should be 15th September, so that we have time to get new lists from NTNU\n new_expiry_date = datetime.date(\n started_year, 9, 16) + datetime.timedelta(days=365*length_of_fos)\n # Expiry dates in the past sets the expiry date to next september\n if new_expiry_date < today:\n if today < datetime.date(today.year, 9, 15):\n new_expiry_date = datetime.date(today.year, 9, 15)\n else:\n new_expiry_date = datetime.date(\n today.year, 9, 16) + 
datetime.timedelta(days=365)\n return new_expiry_date\n\n\n@login_required\ndef create_membership_application(request):\n if request.method == 'POST':\n if not request.user.has_expiring_membership:\n messages.error(request, _(\"Din bruker har ikke et utl\u00f8pende medlemskap.\"))\n return redirect('profiles_active', active_tab='membership')\n\n if not request.user.ntnu_username:\n messages.error(request, _(\"Du m\u00e5 knytte et NTNU-brukernavn til kontoen din.\"))\n return redirect('profiles_active', active_tab='membership')\n\n # Extend length of membership by 1 year\n membership = AllowedUsername.objects.get(username=request.user.ntnu_username)\n new_expiration_date = datetime.date(membership.expiration_date.year + 1, 9, 16)\n\n application = MembershipApproval(\n applicant=request.user,\n new_expiry_date=new_expiration_date,\n )\n application.save()\n\n messages.success(request, _(\"S\u00f8knad om ett \u00e5rs forlenget medlemskap er sendt.\"))\n\n return redirect('profiles_active', active_tab='membership')\n raise Http404\n\n\n@login_required\ndef cancel_application(request, application_id):\n app = get_object_or_404(MembershipApproval, pk=application_id)\n\n if app.applicant != request.user:\n messages.error(request, _(\"Bare s\u00f8keren selv kan slette en s\u00f8knad.\"))\n return redirect('profiles_active', active_tab='membership')\n\n if app.processed:\n messages.error(request, _(\"Denne s\u00f8knaden er behandlet og kan ikke slettes.\"))\n return redirect('profiles_active', active_tab='membership')\n\n app.delete()\n\n return redirect('profiles_active', active_tab='membership')\n", "path": "apps/approval/views.py"}]}
1,974
89
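The one-line fix above works because `MembershipApproval` was being constructed without a `field_of_study`, so the model default (guest) won. A runnable stand-in with plain dataclasses in place of the Django models; the guest default value of 0 is an assumption drawn from the `field_of_study == 0` ("Gjest") check in `create_fos_application`:

```python
from dataclasses import dataclass

GUEST = 0  # field_of_study 0 is "Gjest" (guest) per create_fos_application

@dataclass
class User:
    field_of_study: int

@dataclass
class MembershipApproval:
    applicant: User
    field_of_study: int = GUEST  # model default wins when the kwarg is omitted

user = User(field_of_study=3)  # e.g. an enrolled student

before_fix = MembershipApproval(applicant=user)
after_fix = MembershipApproval(applicant=user, field_of_study=user.field_of_study)

print(before_fix.field_of_study, after_fix.field_of_study)  # 0 3
```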
gh_patches_debug_28459
rasdani/github-patches
git_diff
saleor__saleor-336
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Removing category redirects to category update view Confirmation modal contains "update" button that redirects to update view, instead of sending a POST request to remove the category. </issue> <code> [start of saleor/dashboard/category/views.py] 1 from django.contrib import messages 2 from django.shortcuts import get_object_or_404, redirect 3 from django.template.response import TemplateResponse 4 from django.utils.translation import ugettext_lazy as _ 5 6 from ...product.models import Category 7 from ..views import staff_member_required 8 from .forms import CategoryForm 9 10 11 @staff_member_required 12 def category_list(request, root_pk=None): 13 root = None 14 path = None 15 categories = Category.tree.root_nodes() 16 if root_pk: 17 root = get_object_or_404(Category, pk=root_pk) 18 path = root.get_ancestors(include_self=True) if root else [] 19 categories = root.get_children() 20 ctx = {'categories': categories, 'path': path, 'root': root} 21 return TemplateResponse(request, 'dashboard/category/list.html', ctx) 22 23 24 @staff_member_required 25 def category_create(request, root_pk=None): 26 category = Category() 27 form = CategoryForm(request.POST or None, parent_pk=root_pk) 28 if form.is_valid(): 29 category = form.save() 30 messages.success(request, _('Added category %s') % category) 31 if root_pk: 32 return redirect('dashboard:category-list', root_pk=root_pk) 33 else: 34 return redirect('dashboard:category-list') 35 ctx = {'category': category, 'form': form} 36 return TemplateResponse(request, 'dashboard/category/detail.html', ctx) 37 38 39 @staff_member_required 40 def category_edit(request, root_pk=None): 41 category = get_object_or_404(Category, pk=root_pk) 42 form = CategoryForm(request.POST or None, instance=category, 43 parent_pk=category.parent_id) 44 status = 200 45 if form.is_valid(): 46 category = form.save() 47 messages.success(request, _('Added category %s') % category) 48 if root_pk: 49 return redirect('dashboard:category-list', root_pk=root_pk) 50 else: 51 return redirect('dashboard:category-list') 52 elif form.errors: 53 status = 400 54 ctx = {'category': category, 'form': form, 'status': status} 55 template = 'dashboard/category/modal_edit.html' 56 return TemplateResponse(request, template, ctx, status=status) 57 58 59 @staff_member_required 60 def category_delete(request, pk): 61 category = get_object_or_404(Category, pk=pk) 62 if request.method == 'POST': 63 category.delete() 64 messages.success(request, _('Deleted category %s') % category) 65 root_pk = None 66 if category.parent: 67 root_pk = category.parent.pk 68 if root_pk: 69 return redirect('dashboard:category-list', root_pk=root_pk) 70 else: 71 return redirect('dashboard:category-list') 72 ctx = {'category': category, 73 'descendants': list(category.get_descendants()), 74 'products_count': len(category.products.all())} 75 return TemplateResponse(request, 76 'dashboard/category/modal_category_confirm_delete.html', 77 ctx) 78 [end of saleor/dashboard/category/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/dashboard/category/views.py b/saleor/dashboard/category/views.py --- a/saleor/dashboard/category/views.py +++ b/saleor/dashboard/category/views.py @@ -1,4 +1,6 @@ from django.contrib import messages +from django.core.urlresolvers import reverse +from django.http import JsonResponse from django.shortcuts import get_object_or_404, redirect from django.template.response import TemplateResponse from django.utils.translation import ugettext_lazy as _ @@ -66,12 +68,19 @@ if category.parent: root_pk = category.parent.pk if root_pk: + if request.is_ajax(): + response = {'redirectUrl': reverse( + 'dashboard:category-list', kwargs={'root_pk': root_pk})} + return JsonResponse(response) return redirect('dashboard:category-list', root_pk=root_pk) else: + if request.is_ajax(): + response = {'redirectUrl': reverse('dashboard:category-list')} + return JsonResponse(response) return redirect('dashboard:category-list') ctx = {'category': category, 'descendants': list(category.get_descendants()), 'products_count': len(category.products.all())} return TemplateResponse(request, - 'dashboard/category/modal_category_confirm_delete.html', + 'dashboard/category/modal_delete.html', ctx)
{"golden_diff": "diff --git a/saleor/dashboard/category/views.py b/saleor/dashboard/category/views.py\n--- a/saleor/dashboard/category/views.py\n+++ b/saleor/dashboard/category/views.py\n@@ -1,4 +1,6 @@\n from django.contrib import messages\n+from django.core.urlresolvers import reverse\n+from django.http import JsonResponse\n from django.shortcuts import get_object_or_404, redirect\n from django.template.response import TemplateResponse\n from django.utils.translation import ugettext_lazy as _\n@@ -66,12 +68,19 @@\n if category.parent:\n root_pk = category.parent.pk\n if root_pk:\n+ if request.is_ajax():\n+ response = {'redirectUrl': reverse(\n+ 'dashboard:category-list', kwargs={'root_pk': root_pk})}\n+ return JsonResponse(response)\n return redirect('dashboard:category-list', root_pk=root_pk)\n else:\n+ if request.is_ajax():\n+ response = {'redirectUrl': reverse('dashboard:category-list')}\n+ return JsonResponse(response)\n return redirect('dashboard:category-list')\n ctx = {'category': category,\n 'descendants': list(category.get_descendants()),\n 'products_count': len(category.products.all())}\n return TemplateResponse(request,\n- 'dashboard/category/modal_category_confirm_delete.html',\n+ 'dashboard/category/modal_delete.html',\n ctx)\n", "issue": "Removing category redirects to category update view \nConfirmation modal contains \"update\" button that redirects to update view, instead of sending a POST request to remove the category.\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ...product.models import Category\nfrom ..views import staff_member_required\nfrom .forms import CategoryForm\n\n\n@staff_member_required\ndef category_list(request, root_pk=None):\n root = None\n path = None\n categories = Category.tree.root_nodes()\n if root_pk:\n root = get_object_or_404(Category, pk=root_pk)\n path = root.get_ancestors(include_self=True) if root else []\n categories = root.get_children()\n ctx = {'categories': categories, 'path': path, 'root': root}\n return TemplateResponse(request, 'dashboard/category/list.html', ctx)\n\n\n@staff_member_required\ndef category_create(request, root_pk=None):\n category = Category()\n form = CategoryForm(request.POST or None, parent_pk=root_pk)\n if form.is_valid():\n category = form.save()\n messages.success(request, _('Added category %s') % category)\n if root_pk:\n return redirect('dashboard:category-list', root_pk=root_pk)\n else:\n return redirect('dashboard:category-list')\n ctx = {'category': category, 'form': form}\n return TemplateResponse(request, 'dashboard/category/detail.html', ctx)\n\n\n@staff_member_required\ndef category_edit(request, root_pk=None):\n category = get_object_or_404(Category, pk=root_pk)\n form = CategoryForm(request.POST or None, instance=category,\n parent_pk=category.parent_id)\n status = 200\n if form.is_valid():\n category = form.save()\n messages.success(request, _('Added category %s') % category)\n if root_pk:\n return redirect('dashboard:category-list', root_pk=root_pk)\n else:\n return redirect('dashboard:category-list')\n elif form.errors:\n status = 400\n ctx = {'category': category, 'form': form, 'status': status}\n template = 'dashboard/category/modal_edit.html'\n return TemplateResponse(request, template, ctx, status=status)\n\n\n@staff_member_required\ndef category_delete(request, pk):\n category = 
get_object_or_404(Category, pk=pk)\n if request.method == 'POST':\n category.delete()\n messages.success(request, _('Deleted category %s') % category)\n root_pk = None\n if category.parent:\n root_pk = category.parent.pk\n if root_pk:\n return redirect('dashboard:category-list', root_pk=root_pk)\n else:\n return redirect('dashboard:category-list')\n ctx = {'category': category,\n 'descendants': list(category.get_descendants()),\n 'products_count': len(category.products.all())}\n return TemplateResponse(request,\n 'dashboard/category/modal_category_confirm_delete.html',\n ctx)\n", "path": "saleor/dashboard/category/views.py"}]}
1,336
290
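The diff above teaches `category_delete` to answer AJAX POSTs with a JSON body (`{"redirectUrl": ...}`) instead of an HTTP 302, which is what lets the confirmation modal navigate after a successful delete rather than following the redirect into the update view. A framework-free sketch of that branching, with literal URL strings standing in for Django's `reverse('dashboard:category-list', ...)` lookups (an assumption; the real view resolves them by name):

```python
def delete_response(is_ajax: bool, root_pk=None):
    # stand-in for reverse('dashboard:category-list'[, kwargs={'root_pk': ...}])
    url = f"/dashboard/categories/{root_pk}/" if root_pk else "/dashboard/categories/"
    if is_ajax:
        return {"redirectUrl": url}   # serialized via JsonResponse in the view
    return ("302-redirect", url)      # plain redirect() for non-AJAX posts

print(delete_response(is_ajax=True, root_pk=7))  # {'redirectUrl': '/dashboard/categories/7/'}
print(delete_response(is_ajax=False))            # ('302-redirect', '/dashboard/categories/')
```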
gh_patches_debug_5289
rasdani/github-patches
git_diff
freedomofpress__securedrop-7074
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release SecureDrop 2.7.0 This is a tracking issue for the release of SecureDrop 2.7.0 Tentatively scheduled as follows: **Pre-release announcement:** ~10-19-2023~10-26-2023 **Release date:** ~10-26-2023~11-07-2023 **Release manager:** @zenmonkeykstop **Deputy release manager:** @cfm **Localization manager:** @cfm **Communications manager:** @nathandyer _SecureDrop maintainers and testers:_ As you QA 2.7.0, please report back your testing results as comments on this ticket. File GitHub issues for any problems found, tag them "QA: Release". Test debian packages will be posted on https://apt-test.freedom.press signed with [the test key](https://gist.githubusercontent.com/conorsch/ec4008b111bc3142fca522693f3cce7e/raw/2968621e8ad92db4505a31fcc5776422d7d26729/apt-test%2520apt%2520pubkey). # [QA Matrix for 2.7.0](https://docs.google.com/spreadsheets/d/1Q_dOST_87ezW3L1LO-u7Nl18h6bQsYUbQHYPGQbG448/edit#gid=361662860) # [Test Plan for 2.7.0](https://github.com/freedomofpress/securedrop/wiki/2.7.0-Test-Plan) # Prepare release candidate (2.7.0~rc1) - [ ] Link to latest version of Tails, including release candidates, to test against during QA - [x] Prepare 2.7.0~rc1 release changelog - [x] Branch off release/2.7.0 from develop - [x] Prepare 2.7.0 - [ ] Build debs, preserving build log, and put up `2.7.0~rc1` on test apt server - [ ] Commit build log. # Prepare release candidate (2.7.0~rc2) - [x] Prepare 2.7.0~rc2 release changelog - [x] Branch off release/2.7.0 from develop - [x] Prepare 2.7.0 - [x] Build debs, preserving build log, and put up `2.7.0~rc2` on test apt server - [x] Commit build log. # Prepare release candidate (2.7.0~rc3) - [x] Prepare 2.7.0~rc3 release changelog - [x] Branch off release/2.7.0 from develop - [x] Prepare 2.7.0 - [x] Build debs, preserving build log, and put up `2.7.0~rc3` on test apt server - [x] Commit build log. # Prepare release candidate (2.7.0~rc4) - [x] Prepare 2.7.0~rc4 release changelog - [x] Branch off release/2.7.0 from develop - [x] Prepare 2.7.0-rc4 - [x] Build debs, preserving build log, and put up `2.7.0~rc4` on test apt server - [x] Commit build log. After each test, please update the QA matrix and post details for Basic Server Testing, Application Acceptance Testing and release-specific testing below in comments to this ticket. 
# Final release - [x] Ensure builder in release branch is updated and/or update builder image - [x] Push signed tag - [x] Pre-Flight: Test updater logic in Tails (apt-qa tracks the `release` branch in the LFS repo) - [x] Build final Debian packages(and preserve build log) - [x] Commit package build log to https://github.com/freedomofpress/build-logs - [x] Pre-Flight: Test that install and upgrade from 2.6.1 to 2.7.0 works w/ prod repo debs (apt-qa.freedom.press polls the `release` branch in the LFS repo for the debs) - [x] Flip apt QA server to prod status (merge to `main` in the LFS repo) - [ ] Merge Docs branch changes to ``main`` and verify new docs build in securedrop-docs repo - [x] Prepare release messaging # Post release - [x] Create GitHub release object - [x] Once release object is created, update versions in `securedrop-docs` and Wagtail - [x] Verify new docs show up on https://docs.securedrop.org - [x] Publish announcements - [ ] Merge changelog back to `develop` - [ ] Update roadmap wiki page: https://github.com/freedomofpress/securedrop/wiki/Development-Roadmap </issue> <code> [start of securedrop/version.py] 1 __version__ = "2.7.0~rc1" 2 [end of securedrop/version.py] [start of securedrop/setup.py] 1 import setuptools 2 3 long_description = "The SecureDrop whistleblower platform." 4 5 setuptools.setup( 6 name="securedrop-app-code", 7 version="2.7.0~rc1", 8 author="Freedom of the Press Foundation", 9 author_email="[email protected]", 10 description="SecureDrop Server", 11 long_description=long_description, 12 long_description_content_type="text/markdown", 13 license="AGPLv3+", 14 python_requires=">=3.8", 15 url="https://github.com/freedomofpress/securedrop", 16 classifiers=[ 17 "Development Status :: 5 - Stable", 18 "Programming Language :: Python :: 3", 19 "Topic :: Software Development :: Libraries :: Python Modules", 20 "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)", 21 "Intended Audience :: Developers", 22 "Operating System :: OS Independent", 23 ], 24 ) 25 [end of securedrop/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/securedrop/setup.py b/securedrop/setup.py --- a/securedrop/setup.py +++ b/securedrop/setup.py @@ -4,7 +4,7 @@ setuptools.setup( name="securedrop-app-code", - version="2.7.0~rc1", + version="2.8.0~rc1", author="Freedom of the Press Foundation", author_email="[email protected]", description="SecureDrop Server", diff --git a/securedrop/version.py b/securedrop/version.py --- a/securedrop/version.py +++ b/securedrop/version.py @@ -1 +1 @@ -__version__ = "2.7.0~rc1" +__version__ = "2.8.0~rc1"
{"golden_diff": "diff --git a/securedrop/setup.py b/securedrop/setup.py\n--- a/securedrop/setup.py\n+++ b/securedrop/setup.py\n@@ -4,7 +4,7 @@\n \n setuptools.setup(\n name=\"securedrop-app-code\",\n- version=\"2.7.0~rc1\",\n+ version=\"2.8.0~rc1\",\n author=\"Freedom of the Press Foundation\",\n author_email=\"[email protected]\",\n description=\"SecureDrop Server\",\ndiff --git a/securedrop/version.py b/securedrop/version.py\n--- a/securedrop/version.py\n+++ b/securedrop/version.py\n@@ -1 +1 @@\n-__version__ = \"2.7.0~rc1\"\n+__version__ = \"2.8.0~rc1\"\n", "issue": "Release SecureDrop 2.7.0\nThis is a tracking issue for the release of SecureDrop 2.7.0\r\n\r\nTentatively scheduled as follows:\r\n\r\n**Pre-release announcement:** ~10-19-2023~10-26-2023\r\n**Release date:** ~10-26-2023~11-07-2023\r\n\r\n**Release manager:** @zenmonkeykstop \r\n**Deputy release manager:** @cfm\r\n**Localization manager:** @cfm\r\n**Communications manager:** @nathandyer \r\n\r\n_SecureDrop maintainers and testers:_ As you QA 2.7.0, please report back your testing results as comments on this ticket. File GitHub issues for any problems found, tag them \"QA: Release\".\r\n\r\nTest debian packages will be posted on https://apt-test.freedom.press signed with [the test key](https://gist.githubusercontent.com/conorsch/ec4008b111bc3142fca522693f3cce7e/raw/2968621e8ad92db4505a31fcc5776422d7d26729/apt-test%2520apt%2520pubkey).\r\n\r\n# [QA Matrix for 2.7.0](https://docs.google.com/spreadsheets/d/1Q_dOST_87ezW3L1LO-u7Nl18h6bQsYUbQHYPGQbG448/edit#gid=361662860)\r\n# [Test Plan for 2.7.0](https://github.com/freedomofpress/securedrop/wiki/2.7.0-Test-Plan)\r\n\r\n# Prepare release candidate (2.7.0~rc1)\r\n- [ ] Link to latest version of Tails, including release candidates, to test against during QA\r\n- [x] Prepare 2.7.0~rc1 release changelog\r\n- [x] Branch off release/2.7.0 from develop\r\n- [x] Prepare 2.7.0\r\n- [ ] Build debs, preserving build log, and put up `2.7.0~rc1` on test apt server\r\n- [ ] Commit build log.\r\n\r\n# Prepare release candidate (2.7.0~rc2)\r\n- [x] Prepare 2.7.0~rc2 release changelog\r\n- [x] Branch off release/2.7.0 from develop\r\n- [x] Prepare 2.7.0\r\n- [x] Build debs, preserving build log, and put up `2.7.0~rc2` on test apt server\r\n- [x] Commit build log.\r\n\r\n\r\n# Prepare release candidate (2.7.0~rc3)\r\n- [x] Prepare 2.7.0~rc3 release changelog\r\n- [x] Branch off release/2.7.0 from develop\r\n- [x] Prepare 2.7.0\r\n- [x] Build debs, preserving build log, and put up `2.7.0~rc3` on test apt server\r\n- [x] Commit build log.\r\n\r\n# Prepare release candidate (2.7.0~rc4)\r\n- [x] Prepare 2.7.0~rc4 release changelog\r\n- [x] Branch off release/2.7.0 from develop\r\n- [x] Prepare 2.7.0-rc4\r\n- [x] Build debs, preserving build log, and put up `2.7.0~rc4` on test apt server\r\n- [x] Commit build log.\r\n\r\n\r\nAfter each test, please update the QA matrix and post details for Basic Server Testing, Application Acceptance Testing and release-specific testing below in comments to this ticket.\r\n\r\n# Final release\r\n- [x] Ensure builder in release branch is updated and/or update builder image\r\n- [x] Push signed tag \r\n- [x] Pre-Flight: Test updater logic in Tails (apt-qa tracks the `release` branch in the LFS repo)\r\n- [x] Build final Debian packages(and preserve build log)\r\n- [x] Commit package build log to https://github.com/freedomofpress/build-logs\r\n- [x] Pre-Flight: Test that install and upgrade from 2.6.1 to 2.7.0 works w/ prod repo debs (apt-qa.freedom.press polls the 
`release` branch in the LFS repo for the debs)\r\n- [x] Flip apt QA server to prod status (merge to `main` in the LFS repo)\r\n- [ ] Merge Docs branch changes to ``main`` and verify new docs build in securedrop-docs repo\r\n- [x] Prepare release messaging\r\n\r\n# Post release\r\n- [x] Create GitHub release object \r\n- [x] Once release object is created, update versions in `securedrop-docs` and Wagtail\r\n- [x] Verify new docs show up on https://docs.securedrop.org\r\n- [x] Publish announcements\r\n- [ ] Merge changelog back to `develop`\r\n- [ ] Update roadmap wiki page: https://github.com/freedomofpress/securedrop/wiki/Development-Roadmap\n", "before_files": [{"content": "__version__ = \"2.7.0~rc1\"\n", "path": "securedrop/version.py"}, {"content": "import setuptools\n\nlong_description = \"The SecureDrop whistleblower platform.\"\n\nsetuptools.setup(\n name=\"securedrop-app-code\",\n version=\"2.7.0~rc1\",\n author=\"Freedom of the Press Foundation\",\n author_email=\"[email protected]\",\n description=\"SecureDrop Server\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"AGPLv3+\",\n python_requires=\">=3.8\",\n url=\"https://github.com/freedomofpress/securedrop\",\n classifiers=[\n \"Development Status :: 5 - Stable\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n ],\n)\n", "path": "securedrop/setup.py"}]}
1,944
176
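The release diff is a pure version bump, `2.7.0~rc1` to `2.8.0~rc1`, applied identically in `securedrop/version.py` and `securedrop/setup.py`. A small sketch that automates the two-file edit from a repository checkout, assuming the old string appears verbatim in both files, as it does in the record (SecureDrop's own release tooling may handle this differently):

```python
from pathlib import Path

OLD, NEW = "2.7.0~rc1", "2.8.0~rc1"

for name in ("securedrop/version.py", "securedrop/setup.py"):
    path = Path(name)
    text = path.read_text()
    if OLD not in text:
        raise SystemExit(f"{name}: expected {OLD!r}, refusing to write")
    path.write_text(text.replace(OLD, NEW))
    print(f"bumped {name}: {OLD} -> {NEW}")
```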
gh_patches_debug_26784
rasdani/github-patches
git_diff
mindee__doctr-1280
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> listing the detection and recognition models not working in pytorch ### Bug description Hi, This is the list of models not working in inference: - detection models: * db_resnet_34 * linknet_resnet18 * linknet_resnet34 * linknet_resnet50 for all these detection models in pytorch, the code inference works but it get to much segmentations/detected boxes. - recognition models: * sar_resnet31 * master * vitstr_small * vitstr_base For all of these models in pytorch, the code inference seems to work, but it gets random character recognition. I think they are not correct loaded or trained. ### Code snippet to reproduce the bug in doctr/demo/backend/pytorch.py file, changing this lines ``` DET_ARCHS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet50_rotation"] RECO_ARCHS = ["crnn_vgg16_bn", "crnn_mobilenet_v3_small", "master", "sar_resnet31"] ``` by this lines ``` #DET_ARCHS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet50_rotation"] DET_ARCHS = [ "db_resnet34", "db_resnet50", "db_mobilenet_v3_large", "linknet_resnet18", "linknet_resnet34", "linknet_resnet50", "db_resnet50_rotation"] #RECO_ARCHS = ["crnn_vgg16_bn", "crnn_mobilenet_v3_small", "master", "sar_resnet31"] RECO_ARCHS=[ "crnn_vgg16_bn", "crnn_mobilenet_v3_small", "crnn_mobilenet_v3_large", "sar_resnet31", "master", "vitstr_small", "vitstr_base"] ``` and running this code to try all the pytorch models ``` USE_TF=0 streamlit run demo/app.py ``` ### Error traceback not errors but bugs on models ### Environment wget https://raw.githubusercontent.com/mindee/doctr/main/scripts/collect_env.py # For security purposes, please check the contents of collect_env.py before running it. python collect_env.py ### Deep Learning backend from doctr.file_utils import is_tf_available, is_torch_available print(f"is_tf_available: {is_tf_available()}") print(f"is_torch_available: {is_torch_available()}") </issue> <code> [start of demo/backend/tensorflow.py] 1 # Copyright (C) 2021-2023, Mindee. 2 3 # This program is licensed under the Apache License 2.0. 4 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details. 5 6 import numpy as np 7 import tensorflow as tf 8 9 from doctr.models import ocr_predictor 10 from doctr.models.predictor import OCRPredictor 11 12 DET_ARCHS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet18_rotation"] 13 RECO_ARCHS = ["crnn_vgg16_bn", "crnn_mobilenet_v3_small", "master", "sar_resnet31"] 14 15 16 def load_predictor(det_arch: str, reco_arch: str, device) -> OCRPredictor: 17 """ 18 Args: 19 device is tf.device 20 """ 21 with device: 22 predictor = ocr_predictor( 23 det_arch, reco_arch, pretrained=True, assume_straight_pages=("rotation" not in det_arch) 24 ) 25 return predictor 26 27 28 def forward_image(predictor: OCRPredictor, image: np.ndarray, device) -> np.ndarray: 29 """ 30 Args: 31 device is tf.device 32 """ 33 with device: 34 processed_batches = predictor.det_predictor.pre_processor([image]) 35 out = predictor.det_predictor.model(processed_batches[0], return_model_output=True) 36 seg_map = out["out_map"] 37 38 with tf.device("/cpu:0"): 39 seg_map = tf.identity(seg_map).numpy() 40 41 return seg_map 42 [end of demo/backend/tensorflow.py] [start of demo/backend/pytorch.py] 1 # Copyright (C) 2021-2023, Mindee. 2 3 # This program is licensed under the Apache License 2.0. 
4 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details. 5 6 import numpy as np 7 import torch 8 9 from doctr.models import ocr_predictor 10 from doctr.models.predictor import OCRPredictor 11 12 DET_ARCHS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet50_rotation"] 13 RECO_ARCHS = ["crnn_vgg16_bn", "crnn_mobilenet_v3_small", "master", "sar_resnet31"] 14 15 16 def load_predictor(det_arch: str, reco_arch: str, device) -> OCRPredictor: 17 """ 18 Args: 19 device is torch.device 20 """ 21 predictor = ocr_predictor( 22 det_arch, reco_arch, pretrained=True, assume_straight_pages=("rotation" not in det_arch) 23 ).to(device) 24 return predictor 25 26 27 def forward_image(predictor: OCRPredictor, image: np.ndarray, device) -> np.ndarray: 28 """ 29 Args: 30 device is torch.device 31 """ 32 with torch.no_grad(): 33 processed_batches = predictor.det_predictor.pre_processor([image]) 34 out = predictor.det_predictor.model(processed_batches[0].to(device), return_model_output=True) 35 seg_map = out["out_map"].to("cpu").numpy() 36 37 return seg_map 38 [end of demo/backend/pytorch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/demo/backend/pytorch.py b/demo/backend/pytorch.py --- a/demo/backend/pytorch.py +++ b/demo/backend/pytorch.py @@ -9,8 +9,25 @@ from doctr.models import ocr_predictor from doctr.models.predictor import OCRPredictor -DET_ARCHS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet50_rotation"] -RECO_ARCHS = ["crnn_vgg16_bn", "crnn_mobilenet_v3_small", "master", "sar_resnet31"] +DET_ARCHS = [ + "db_resnet50", + "db_resnet34", + "db_mobilenet_v3_large", + "db_resnet50_rotation", + "linknet_resnet18", + "linknet_resnet34", + "linknet_resnet50", +] +RECO_ARCHS = [ + "crnn_vgg16_bn", + "crnn_mobilenet_v3_small", + "crnn_mobilenet_v3_large", + "master", + "sar_resnet31", + "vitstr_small", + "vitstr_base", + "parseq", +] def load_predictor(det_arch: str, reco_arch: str, device) -> OCRPredictor: diff --git a/demo/backend/tensorflow.py b/demo/backend/tensorflow.py --- a/demo/backend/tensorflow.py +++ b/demo/backend/tensorflow.py @@ -9,8 +9,24 @@ from doctr.models import ocr_predictor from doctr.models.predictor import OCRPredictor -DET_ARCHS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet18_rotation"] -RECO_ARCHS = ["crnn_vgg16_bn", "crnn_mobilenet_v3_small", "master", "sar_resnet31"] +DET_ARCHS = [ + "db_resnet50", + "db_mobilenet_v3_large", + "linknet_resnet18", + "linknet_resnet18_rotation", + "linknet_resnet34", + "linknet_resnet50", +] +RECO_ARCHS = [ + "crnn_vgg16_bn", + "crnn_mobilenet_v3_small", + "crnn_mobilenet_v3_large", + "master", + "sar_resnet31", + "vitstr_small", + "vitstr_base", + "parseq", +] def load_predictor(det_arch: str, reco_arch: str, device) -> OCRPredictor:
{"golden_diff": "diff --git a/demo/backend/pytorch.py b/demo/backend/pytorch.py\n--- a/demo/backend/pytorch.py\n+++ b/demo/backend/pytorch.py\n@@ -9,8 +9,25 @@\n from doctr.models import ocr_predictor\n from doctr.models.predictor import OCRPredictor\n \n-DET_ARCHS = [\"db_resnet50\", \"db_mobilenet_v3_large\", \"linknet_resnet50_rotation\"]\n-RECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_mobilenet_v3_small\", \"master\", \"sar_resnet31\"]\n+DET_ARCHS = [\n+ \"db_resnet50\",\n+ \"db_resnet34\",\n+ \"db_mobilenet_v3_large\",\n+ \"db_resnet50_rotation\",\n+ \"linknet_resnet18\",\n+ \"linknet_resnet34\",\n+ \"linknet_resnet50\",\n+]\n+RECO_ARCHS = [\n+ \"crnn_vgg16_bn\",\n+ \"crnn_mobilenet_v3_small\",\n+ \"crnn_mobilenet_v3_large\",\n+ \"master\",\n+ \"sar_resnet31\",\n+ \"vitstr_small\",\n+ \"vitstr_base\",\n+ \"parseq\",\n+]\n \n \n def load_predictor(det_arch: str, reco_arch: str, device) -> OCRPredictor:\ndiff --git a/demo/backend/tensorflow.py b/demo/backend/tensorflow.py\n--- a/demo/backend/tensorflow.py\n+++ b/demo/backend/tensorflow.py\n@@ -9,8 +9,24 @@\n from doctr.models import ocr_predictor\n from doctr.models.predictor import OCRPredictor\n \n-DET_ARCHS = [\"db_resnet50\", \"db_mobilenet_v3_large\", \"linknet_resnet18_rotation\"]\n-RECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_mobilenet_v3_small\", \"master\", \"sar_resnet31\"]\n+DET_ARCHS = [\n+ \"db_resnet50\",\n+ \"db_mobilenet_v3_large\",\n+ \"linknet_resnet18\",\n+ \"linknet_resnet18_rotation\",\n+ \"linknet_resnet34\",\n+ \"linknet_resnet50\",\n+]\n+RECO_ARCHS = [\n+ \"crnn_vgg16_bn\",\n+ \"crnn_mobilenet_v3_small\",\n+ \"crnn_mobilenet_v3_large\",\n+ \"master\",\n+ \"sar_resnet31\",\n+ \"vitstr_small\",\n+ \"vitstr_base\",\n+ \"parseq\",\n+]\n \n \n def load_predictor(det_arch: str, reco_arch: str, device) -> OCRPredictor:\n", "issue": "listing the detection and recognition models not working in pytorch\n### Bug description\r\n\r\nHi,\r\n\r\nThis is the list of models not working in inference:\r\n\r\n- detection models:\r\n * db_resnet_34\r\n * linknet_resnet18\r\n * linknet_resnet34\r\n * linknet_resnet50\r\nfor all these detection models in pytorch, the code inference works but it get to much segmentations/detected boxes. \r\n\r\n\r\n- recognition models:\r\n * sar_resnet31\r\n * master\r\n * vitstr_small\r\n * vitstr_base\r\nFor all of these models in pytorch, the code inference seems to work, but it gets random character recognition. 
I think they are not correct loaded or trained.\r\n\r\n\r\n### Code snippet to reproduce the bug\r\n\r\nin doctr/demo/backend/pytorch.py file, changing this lines\r\n```\r\nDET_ARCHS = [\"db_resnet50\", \"db_mobilenet_v3_large\", \"linknet_resnet50_rotation\"]\r\nRECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_mobilenet_v3_small\", \"master\", \"sar_resnet31\"]\r\n```\r\n\r\nby this lines \r\n```\r\n#DET_ARCHS = [\"db_resnet50\", \"db_mobilenet_v3_large\", \"linknet_resnet50_rotation\"]\r\nDET_ARCHS = [ \"db_resnet34\",\r\n \"db_resnet50\",\r\n \"db_mobilenet_v3_large\",\r\n \"linknet_resnet18\",\r\n \"linknet_resnet34\",\r\n \"linknet_resnet50\",\r\n \"db_resnet50_rotation\"]\r\n#RECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_mobilenet_v3_small\", \"master\", \"sar_resnet31\"]\r\nRECO_ARCHS=[ \"crnn_vgg16_bn\",\r\n \"crnn_mobilenet_v3_small\",\r\n \"crnn_mobilenet_v3_large\",\r\n \"sar_resnet31\",\r\n \"master\",\r\n \"vitstr_small\",\r\n \"vitstr_base\"]\r\n```\r\n\r\nand running this code to try all the pytorch models\r\n```\r\nUSE_TF=0 streamlit run demo/app.py\r\n```\r\n\r\n\r\n### Error traceback\r\n\r\nnot errors but bugs on models\r\n\r\n### Environment\r\n\r\nwget https://raw.githubusercontent.com/mindee/doctr/main/scripts/collect_env.py\r\n# For security purposes, please check the contents of collect_env.py before running it.\r\npython collect_env.py\r\n\r\n### Deep Learning backend\r\n\r\n from doctr.file_utils import is_tf_available, is_torch_available\r\n\r\nprint(f\"is_tf_available: {is_tf_available()}\")\r\nprint(f\"is_torch_available: {is_torch_available()}\")\n", "before_files": [{"content": "# Copyright (C) 2021-2023, Mindee.\n\n# This program is licensed under the Apache License 2.0.\n# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom doctr.models import ocr_predictor\nfrom doctr.models.predictor import OCRPredictor\n\nDET_ARCHS = [\"db_resnet50\", \"db_mobilenet_v3_large\", \"linknet_resnet18_rotation\"]\nRECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_mobilenet_v3_small\", \"master\", \"sar_resnet31\"]\n\n\ndef load_predictor(det_arch: str, reco_arch: str, device) -> OCRPredictor:\n \"\"\"\n Args:\n device is tf.device\n \"\"\"\n with device:\n predictor = ocr_predictor(\n det_arch, reco_arch, pretrained=True, assume_straight_pages=(\"rotation\" not in det_arch)\n )\n return predictor\n\n\ndef forward_image(predictor: OCRPredictor, image: np.ndarray, device) -> np.ndarray:\n \"\"\"\n Args:\n device is tf.device\n \"\"\"\n with device:\n processed_batches = predictor.det_predictor.pre_processor([image])\n out = predictor.det_predictor.model(processed_batches[0], return_model_output=True)\n seg_map = out[\"out_map\"]\n\n with tf.device(\"/cpu:0\"):\n seg_map = tf.identity(seg_map).numpy()\n\n return seg_map\n", "path": "demo/backend/tensorflow.py"}, {"content": "# Copyright (C) 2021-2023, Mindee.\n\n# This program is licensed under the Apache License 2.0.\n# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.\n\nimport numpy as np\nimport torch\n\nfrom doctr.models import ocr_predictor\nfrom doctr.models.predictor import OCRPredictor\n\nDET_ARCHS = [\"db_resnet50\", \"db_mobilenet_v3_large\", \"linknet_resnet50_rotation\"]\nRECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_mobilenet_v3_small\", \"master\", \"sar_resnet31\"]\n\n\ndef load_predictor(det_arch: str, reco_arch: str, device) -> OCRPredictor:\n \"\"\"\n Args:\n device is torch.device\n \"\"\"\n predictor = 
ocr_predictor(\n det_arch, reco_arch, pretrained=True, assume_straight_pages=(\"rotation\" not in det_arch)\n ).to(device)\n return predictor\n\n\ndef forward_image(predictor: OCRPredictor, image: np.ndarray, device) -> np.ndarray:\n \"\"\"\n Args:\n device is torch.device\n \"\"\"\n with torch.no_grad():\n processed_batches = predictor.det_predictor.pre_processor([image])\n out = predictor.det_predictor.model(processed_batches[0].to(device), return_model_output=True)\n seg_map = out[\"out_map\"].to(\"cpu\").numpy()\n\n return seg_map\n", "path": "demo/backend/pytorch.py"}]}
1,932
605
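Since the issue is that several architectures load but behave incorrectly, the expanded lists in the diff pair naturally with a quick smoke test. A sketch that instantiates a predictor for each detection architecture under the PyTorch backend, run with `USE_TF=0` as in the issue; it assumes doctr is installed and the pretrained weights can be downloaded, and reuses the `ocr_predictor(...)` call signature from the record:

```python
import os
os.environ["USE_TF"] = "0"  # select the PyTorch backend before doctr is imported

from doctr.models import ocr_predictor

DET_ARCHS = [
    "db_resnet50", "db_resnet34", "db_mobilenet_v3_large", "db_resnet50_rotation",
    "linknet_resnet18", "linknet_resnet34", "linknet_resnet50",
]
RECO = "crnn_vgg16_bn"  # one known-good recogniser keeps the loop short

for det in DET_ARCHS:
    try:
        ocr_predictor(det, RECO, pretrained=True,
                      assume_straight_pages=("rotation" not in det))
        print(f"OK   {det}")
    except Exception as exc:  # report every arch rather than aborting on the first
        print(f"FAIL {det}: {exc}")
```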
gh_patches_debug_26097
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-3709
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> eks update action https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html </issue> <code> [start of c7n/resources/eks.py] 1 # Copyright 2018 Capital One Services, LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from __future__ import absolute_import, division, print_function, unicode_literals 15 16 from c7n.actions import Action 17 from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter, VpcFilter 18 from c7n.manager import resources 19 from c7n.query import QueryResourceManager 20 from c7n.utils import local_session, type_schema 21 22 23 @resources.register('eks') 24 class EKS(QueryResourceManager): 25 26 class resource_type(object): 27 service = 'eks' 28 enum_spec = ('list_clusters', 'clusters', None) 29 arn = 'arn' 30 detail_spec = ('describe_cluster', 'name', None, 'cluster') 31 id = name = 'name' 32 date = 'createdAt' 33 dimension = None 34 filter_name = None 35 36 37 @EKS.filter_registry.register('subnet') 38 class EKSSubnetFilter(SubnetFilter): 39 40 RelatedIdsExpression = "resourcesVpcConfig.subnetIds[]" 41 42 43 @EKS.filter_registry.register('security-group') 44 class EKSSGFilter(SecurityGroupFilter): 45 46 RelatedIdsExpression = "resourcesVpcConfig.securityGroupIds[]" 47 48 49 @EKS.filter_registry.register('vpc') 50 class EKSVpcFilter(VpcFilter): 51 52 RelatedIdsExpression = 'resourcesVpcConfig.vpcId' 53 54 55 @EKS.action_registry.register('delete') 56 class Delete(Action): 57 58 schema = type_schema('delete') 59 permissions = ('eks:DeleteCluster',) 60 61 def process(self, resources): 62 client = local_session(self.manager.session_factory).client('eks') 63 for r in resources: 64 try: 65 client.delete_cluster(name=r['name']) 66 except client.exceptions.ResourceNotFoundException: 67 continue 68 [end of c7n/resources/eks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/c7n/resources/eks.py b/c7n/resources/eks.py --- a/c7n/resources/eks.py +++ b/c7n/resources/eks.py @@ -19,6 +19,8 @@ from c7n.query import QueryResourceManager from c7n.utils import local_session, type_schema +from .aws import shape_validate + @resources.register('eks') class EKS(QueryResourceManager): @@ -52,6 +54,37 @@ RelatedIdsExpression = 'resourcesVpcConfig.vpcId' [email protected]_registry.register('update-config') +class UpdateConfig(Action): + + schema = type_schema( + 'update-config', resourcesVpcConfig={'type': 'object'}, + required=('resourcesVpcConfig',)) + permissions = ('eks:UpdateClusterConfig',) + shape = 'UpdateClusterConfigRequest' + + def validate(self): + cfg = dict(self.data) + cfg['name'] = 'validate' + cfg.pop('type') + return shape_validate( + cfg, self.shape, self.manager.resource_type.service) + + def process(self, resources): + client = local_session(self.manager.session_factory).client('eks') + state_filtered = 0 + for r in resources: + if r['status'] != 'ACTIVE': + state_filtered += 1 + continue + client.update_cluster_config( + name=r['name'], + resourcesVpcConfig=self.data['resourcesVpcConfig']) + if state_filtered: + self.log.warning( + "Filtered %d of %d clusters due to state", state_filtered, len(resources)) + + @EKS.action_registry.register('delete') class Delete(Action):
{"golden_diff": "diff --git a/c7n/resources/eks.py b/c7n/resources/eks.py\n--- a/c7n/resources/eks.py\n+++ b/c7n/resources/eks.py\n@@ -19,6 +19,8 @@\n from c7n.query import QueryResourceManager\n from c7n.utils import local_session, type_schema\n \n+from .aws import shape_validate\n+\n \n @resources.register('eks')\n class EKS(QueryResourceManager):\n@@ -52,6 +54,37 @@\n RelatedIdsExpression = 'resourcesVpcConfig.vpcId'\n \n \[email protected]_registry.register('update-config')\n+class UpdateConfig(Action):\n+\n+ schema = type_schema(\n+ 'update-config', resourcesVpcConfig={'type': 'object'},\n+ required=('resourcesVpcConfig',))\n+ permissions = ('eks:UpdateClusterConfig',)\n+ shape = 'UpdateClusterConfigRequest'\n+\n+ def validate(self):\n+ cfg = dict(self.data)\n+ cfg['name'] = 'validate'\n+ cfg.pop('type')\n+ return shape_validate(\n+ cfg, self.shape, self.manager.resource_type.service)\n+\n+ def process(self, resources):\n+ client = local_session(self.manager.session_factory).client('eks')\n+ state_filtered = 0\n+ for r in resources:\n+ if r['status'] != 'ACTIVE':\n+ state_filtered += 1\n+ continue\n+ client.update_cluster_config(\n+ name=r['name'],\n+ resourcesVpcConfig=self.data['resourcesVpcConfig'])\n+ if state_filtered:\n+ self.log.warning(\n+ \"Filtered %d of %d clusters due to state\", state_filtered, len(resources))\n+\n+\n @EKS.action_registry.register('delete')\n class Delete(Action):\n", "issue": "eks update action\nhttps://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.actions import Action\nfrom c7n.filters.vpc import SecurityGroupFilter, SubnetFilter, VpcFilter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import local_session, type_schema\n\n\[email protected]('eks')\nclass EKS(QueryResourceManager):\n\n class resource_type(object):\n service = 'eks'\n enum_spec = ('list_clusters', 'clusters', None)\n arn = 'arn'\n detail_spec = ('describe_cluster', 'name', None, 'cluster')\n id = name = 'name'\n date = 'createdAt'\n dimension = None\n filter_name = None\n\n\[email protected]_registry.register('subnet')\nclass EKSSubnetFilter(SubnetFilter):\n\n RelatedIdsExpression = \"resourcesVpcConfig.subnetIds[]\"\n\n\[email protected]_registry.register('security-group')\nclass EKSSGFilter(SecurityGroupFilter):\n\n RelatedIdsExpression = \"resourcesVpcConfig.securityGroupIds[]\"\n\n\[email protected]_registry.register('vpc')\nclass EKSVpcFilter(VpcFilter):\n\n RelatedIdsExpression = 'resourcesVpcConfig.vpcId'\n\n\[email protected]_registry.register('delete')\nclass Delete(Action):\n\n schema = type_schema('delete')\n permissions = ('eks:DeleteCluster',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('eks')\n for r in resources:\n try:\n 
client.delete_cluster(name=r['name'])\n except client.exceptions.ResourceNotFoundException:\n continue\n", "path": "c7n/resources/eks.py"}]}
1,179
384
gh_patches_debug_36103
rasdani/github-patches
git_diff
bokeh__bokeh-6504
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add synthetic data mode to spectrogram The pyaudio package is not available on all platforms. An option mode to generate synthetic dat would allow the app to be run everywhere in some capacity. @philippjfr I think you might already have some code that could be re-used? </issue> <code> [start of examples/app/spectrogram/audio.py] 1 from __future__ import print_function 2 3 try: 4 import pyaudio 5 except: 6 print("This demo requires pyaudio installed to function") 7 import sys 8 sys.exit(1) 9 10 import numpy as np 11 import scipy as sp 12 from scipy.integrate import simps 13 14 NUM_SAMPLES = 1024 15 SAMPLING_RATE = 44100 16 MAX_FREQ = SAMPLING_RATE / 2 17 FREQ_SAMPLES = NUM_SAMPLES / 8 18 TIMESLICE = 100 # ms 19 NUM_BINS = 16 20 21 data = {'values': None} 22 23 def _get_audio_data(): 24 pa = pyaudio.PyAudio() 25 stream = pa.open( 26 format=pyaudio.paInt16, 27 channels=1, 28 rate=SAMPLING_RATE, 29 input=True, 30 frames_per_buffer=NUM_SAMPLES 31 ) 32 33 while True: 34 try: 35 raw_data = np.fromstring(stream.read(NUM_SAMPLES), dtype=np.int16) 36 signal = raw_data / 32768.0 37 fft = sp.fft(signal) 38 spectrum = abs(fft)[:NUM_SAMPLES/2] 39 power = spectrum**2 40 bins = simps(np.split(power, NUM_BINS)) 41 data['values'] = signal, spectrum, bins 42 except: 43 continue 44 [end of examples/app/spectrogram/audio.py] [start of examples/app/spectrogram/server_lifecycle.py] 1 from threading import Thread 2 3 import audio 4 5 def on_server_loaded(server_context): 6 t = Thread(target=audio._get_audio_data, args=()) 7 t.setDaemon(True) 8 t.start() 9 [end of examples/app/spectrogram/server_lifecycle.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/app/spectrogram/audio.py b/examples/app/spectrogram/audio.py --- a/examples/app/spectrogram/audio.py +++ b/examples/app/spectrogram/audio.py @@ -1,11 +1,6 @@ from __future__ import print_function -try: - import pyaudio -except: - print("This demo requires pyaudio installed to function") - import sys - sys.exit(1) +from time import sleep import numpy as np import scipy as sp @@ -20,24 +15,51 @@ data = {'values': None} -def _get_audio_data(): - pa = pyaudio.PyAudio() - stream = pa.open( - format=pyaudio.paInt16, - channels=1, - rate=SAMPLING_RATE, - input=True, - frames_per_buffer=NUM_SAMPLES - ) - - while True: - try: - raw_data = np.fromstring(stream.read(NUM_SAMPLES), dtype=np.int16) - signal = raw_data / 32768.0 +try: + import pyaudio + + def update_audio_data(): + pa = pyaudio.PyAudio() + stream = pa.open( + format=pyaudio.paInt16, + channels=1, + rate=SAMPLING_RATE, + input=True, + frames_per_buffer=NUM_SAMPLES + ) + + while True: + try: + raw_data = np.fromstring(stream.read(NUM_SAMPLES), dtype=np.int16) + signal = raw_data / 32768.0 + fft = sp.fft(signal) + spectrum = abs(fft)[:int(NUM_SAMPLES/2)] + power = spectrum**2 + bins = simps(np.split(power, NUM_BINS)) + data['values'] = signal, spectrum, bins + except: + continue + +except: + print() + print(" *** Pyaudio package not installed, using synthesized audio data ***") + print() + + # These are basically picked out of a hat to show something vaguely interesting + _t = np.arange(0, NUM_SAMPLES/SAMPLING_RATE, 1.0/SAMPLING_RATE) + _f = 2000 + 3000*(2+np.sin(4*np.linspace(0, 2*np.pi, 500))) + _i = 0 + + def update_audio_data(): + while True: + global _i + A = 0.3 + 0.05 * np.random.random() + signal = A*np.sin(2*np.pi*_f[_i]*_t + np.sin(2*np.pi*200*_t)) + fft = sp.fft(signal) - spectrum = abs(fft)[:NUM_SAMPLES/2] + spectrum = abs(fft)[:int(NUM_SAMPLES/2)] power = spectrum**2 bins = simps(np.split(power, NUM_BINS)) data['values'] = signal, spectrum, bins - except: - continue + _i = (_i + 1) % len(_f) + sleep(1.0/12) diff --git a/examples/app/spectrogram/server_lifecycle.py b/examples/app/spectrogram/server_lifecycle.py --- a/examples/app/spectrogram/server_lifecycle.py +++ b/examples/app/spectrogram/server_lifecycle.py @@ -3,6 +3,6 @@ import audio def on_server_loaded(server_context): - t = Thread(target=audio._get_audio_data, args=()) + t = Thread(target=audio.update_audio_data, args=()) t.setDaemon(True) t.start()
{"golden_diff": "diff --git a/examples/app/spectrogram/audio.py b/examples/app/spectrogram/audio.py\n--- a/examples/app/spectrogram/audio.py\n+++ b/examples/app/spectrogram/audio.py\n@@ -1,11 +1,6 @@\n from __future__ import print_function\n \n-try:\n- import pyaudio\n-except:\n- print(\"This demo requires pyaudio installed to function\")\n- import sys\n- sys.exit(1)\n+from time import sleep\n \n import numpy as np\n import scipy as sp\n@@ -20,24 +15,51 @@\n \n data = {'values': None}\n \n-def _get_audio_data():\n- pa = pyaudio.PyAudio()\n- stream = pa.open(\n- format=pyaudio.paInt16,\n- channels=1,\n- rate=SAMPLING_RATE,\n- input=True,\n- frames_per_buffer=NUM_SAMPLES\n- )\n-\n- while True:\n- try:\n- raw_data = np.fromstring(stream.read(NUM_SAMPLES), dtype=np.int16)\n- signal = raw_data / 32768.0\n+try:\n+ import pyaudio\n+\n+ def update_audio_data():\n+ pa = pyaudio.PyAudio()\n+ stream = pa.open(\n+ format=pyaudio.paInt16,\n+ channels=1,\n+ rate=SAMPLING_RATE,\n+ input=True,\n+ frames_per_buffer=NUM_SAMPLES\n+ )\n+\n+ while True:\n+ try:\n+ raw_data = np.fromstring(stream.read(NUM_SAMPLES), dtype=np.int16)\n+ signal = raw_data / 32768.0\n+ fft = sp.fft(signal)\n+ spectrum = abs(fft)[:int(NUM_SAMPLES/2)]\n+ power = spectrum**2\n+ bins = simps(np.split(power, NUM_BINS))\n+ data['values'] = signal, spectrum, bins\n+ except:\n+ continue\n+\n+except:\n+ print()\n+ print(\" *** Pyaudio package not installed, using synthesized audio data ***\")\n+ print()\n+\n+ # These are basically picked out of a hat to show something vaguely interesting\n+ _t = np.arange(0, NUM_SAMPLES/SAMPLING_RATE, 1.0/SAMPLING_RATE)\n+ _f = 2000 + 3000*(2+np.sin(4*np.linspace(0, 2*np.pi, 500)))\n+ _i = 0\n+\n+ def update_audio_data():\n+ while True:\n+ global _i\n+ A = 0.3 + 0.05 * np.random.random()\n+ signal = A*np.sin(2*np.pi*_f[_i]*_t + np.sin(2*np.pi*200*_t))\n+\n fft = sp.fft(signal)\n- spectrum = abs(fft)[:NUM_SAMPLES/2]\n+ spectrum = abs(fft)[:int(NUM_SAMPLES/2)]\n power = spectrum**2\n bins = simps(np.split(power, NUM_BINS))\n data['values'] = signal, spectrum, bins\n- except:\n- continue\n+ _i = (_i + 1) % len(_f)\n+ sleep(1.0/12)\ndiff --git a/examples/app/spectrogram/server_lifecycle.py b/examples/app/spectrogram/server_lifecycle.py\n--- a/examples/app/spectrogram/server_lifecycle.py\n+++ b/examples/app/spectrogram/server_lifecycle.py\n@@ -3,6 +3,6 @@\n import audio\n \n def on_server_loaded(server_context):\n- t = Thread(target=audio._get_audio_data, args=())\n+ t = Thread(target=audio.update_audio_data, args=())\n t.setDaemon(True)\n t.start()\n", "issue": "Add synthetic data mode to spectrogram\nThe pyaudio package is not available on all platforms. An option mode to generate synthetic dat would allow the app to be run everywhere in some capacity. 
\r\n\r\n@philippjfr I think you might already have some code that could be re-used?\n", "before_files": [{"content": "from __future__ import print_function\n\ntry:\n import pyaudio\nexcept:\n print(\"This demo requires pyaudio installed to function\")\n import sys\n sys.exit(1)\n\nimport numpy as np\nimport scipy as sp\nfrom scipy.integrate import simps\n\nNUM_SAMPLES = 1024\nSAMPLING_RATE = 44100\nMAX_FREQ = SAMPLING_RATE / 2\nFREQ_SAMPLES = NUM_SAMPLES / 8\nTIMESLICE = 100 # ms\nNUM_BINS = 16\n\ndata = {'values': None}\n\ndef _get_audio_data():\n pa = pyaudio.PyAudio()\n stream = pa.open(\n format=pyaudio.paInt16,\n channels=1,\n rate=SAMPLING_RATE,\n input=True,\n frames_per_buffer=NUM_SAMPLES\n )\n\n while True:\n try:\n raw_data = np.fromstring(stream.read(NUM_SAMPLES), dtype=np.int16)\n signal = raw_data / 32768.0\n fft = sp.fft(signal)\n spectrum = abs(fft)[:NUM_SAMPLES/2]\n power = spectrum**2\n bins = simps(np.split(power, NUM_BINS))\n data['values'] = signal, spectrum, bins\n except:\n continue\n", "path": "examples/app/spectrogram/audio.py"}, {"content": "from threading import Thread\n\nimport audio\n\ndef on_server_loaded(server_context):\n t = Thread(target=audio._get_audio_data, args=())\n t.setDaemon(True)\n t.start()\n", "path": "examples/app/spectrogram/server_lifecycle.py"}]}
1,040
834
gh_patches_debug_17638
rasdani/github-patches
git_diff
joke2k__faker-283
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ReadTheDocs spacing is off for providers
http://fake-factory.readthedocs.org/en/latest/providers/internet.html

The spacing makes this pretty unreadable without constantly scrolling back and forth, which is annoying. 

Since these are the official extended docs linked to in the project ReadMe, it'd certainly be aesthetically nicer if they used native RTD formatting, for example see the formatting for the reference page of the factory boy project: http://factoryboy.readthedocs.org/en/latest/reference.html

I dug around a little trying to make a pull request to fix this, but it looks like Faker's docs are auto-generated somehow and I didn't have time to figure it all out. If you point me in the right direction, I'd be happy to submit a PR. 

</issue> <code> [start of faker/build_docs.py] 1 # coding=utf-8 2 3 from __future__ import print_function 4 from __future__ import unicode_literals 5 6 import os 7 import sys 8 9 10 if sys.version < '3': 11 text_type = unicode 12 binary_type = str 13 else: 14 text_type = str 15 binary_type = bytes 16 17 18 DOCS_ROOT = os.path.abspath(os.path.join('..', 'docs')) 19 20 21 def write(fh, s): 22 return fh.write(s.encode('utf-8')) 23 24 25 def write_provider(fh, doc, provider, formatters, excludes=None): 26 27 if excludes is None: 28 excludes = [] 29 30 write(fh, '\n') 31 title = "``{0}``".format(doc.get_provider_name(provider)) 32 write(fh, '%s\n' % title) 33 write(fh, "-" * len(title)) 34 write(fh, '\n\n::\n') 35 36 for signature, example in formatters.items(): 37 if signature in excludes: 38 continue 39 try: 40 lines = text_type(example).expandtabs().splitlines() 41 except UnicodeEncodeError: 42 msg = 'error on "{0}" with value "{1}"'.format(signature, example) 43 raise Exception(msg) 44 margin = max(30, doc.max_name_len+1) 45 remains = 150 - margin 46 separator = '#' 47 write(fh, '\n') 48 for line in lines: 49 for i in range(0, (len(line) // remains) + 1): 50 write(fh, "\t{fake:<{margin}}{separator} {example}".format( 51 fake=signature, 52 separator=separator, 53 example=line[i*remains:(i+1)*remains], 54 margin=margin 55 )) 56 signature = separator = ' ' 57 write(fh, '\n') 58 59 60 def write_docs(*args, **kwargs): 61 from faker import Faker, documentor 62 from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES 63 64 fake = Faker(locale=DEFAULT_LOCALE) 65 66 from faker.providers import BaseProvider 67 base_provider_formatters = [f for f in dir(BaseProvider)] 68 69 doc = documentor.Documentor(fake) 70 71 formatters = doc.get_formatters(with_args=True, with_defaults=True) 72 73 for provider, fakers in formatters: 74 provider_name = doc.get_provider_name(provider) 75 fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name) 76 with open(fname, 'wb') as fh: 77 write_provider(fh, doc, provider, fakers) 78 79 with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh: 80 write(fh, 'Providers\n') 81 write(fh, '=========\n') 82 write(fh, '.. toctree::\n') 83 write(fh, ' :maxdepth: 2\n\n') 84 [write(fh, ' providers/%s\n' % doc.get_provider_name(provider)) 85 for provider, fakers in formatters] 86 87 AVAILABLE_LOCALES = list(AVAILABLE_LOCALES) 88 AVAILABLE_LOCALES.sort() 89 for lang in AVAILABLE_LOCALES: 90 fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang) 91 with open(fname, 'wb') as fh: 92 write(fh, '\n') 93 title = 'Language {0}\n'.format(lang) 94 write(fh, title) 95 write(fh, '=' * len(title)) 96 write(fh, '\n') 97 fake = Faker(locale=lang) 98 d = documentor.Documentor(fake) 99 100 for p, fs in d.get_formatters(with_args=True, with_defaults=True, 101 locale=lang, 102 excludes=base_provider_formatters): 103 write_provider(fh, d, p, fs) 104 105 with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh: 106 write(fh, 'Locales\n') 107 write(fh, '=======\n') 108 write(fh, '.. toctree::\n') 109 write(fh, ' :maxdepth: 2\n\n') 110 [write(fh, ' locales/%s\n' % lang) for lang in AVAILABLE_LOCALES] 111 112 113 # wrappers for sphinx 114 def _main(app, *args, **kwargs): 115 return write_docs(*args, **kwargs) 116 117 118 def setup(app): 119 app.connect(str('builder-inited'), _main) 120 121 122 if __name__ == "__main__": 123 write_docs(*sys.argv[1:]) 124 [end of faker/build_docs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/build_docs.py b/faker/build_docs.py --- a/faker/build_docs.py +++ b/faker/build_docs.py @@ -41,20 +41,11 @@ except UnicodeEncodeError: msg = 'error on "{0}" with value "{1}"'.format(signature, example) raise Exception(msg) - margin = max(30, doc.max_name_len+1) - remains = 150 - margin - separator = '#' write(fh, '\n') - for line in lines: - for i in range(0, (len(line) // remains) + 1): - write(fh, "\t{fake:<{margin}}{separator} {example}".format( - fake=signature, - separator=separator, - example=line[i*remains:(i+1)*remains], - margin=margin - )) - signature = separator = ' ' - write(fh, '\n') + write(fh, "\t{fake}\n{example}\n".format( + fake=signature, + example='\n'.join(['\t# ' + line for line in lines]), + )) def write_docs(*args, **kwargs):
{"golden_diff": "diff --git a/faker/build_docs.py b/faker/build_docs.py\n--- a/faker/build_docs.py\n+++ b/faker/build_docs.py\n@@ -41,20 +41,11 @@\n except UnicodeEncodeError:\n msg = 'error on \"{0}\" with value \"{1}\"'.format(signature, example)\n raise Exception(msg)\n- margin = max(30, doc.max_name_len+1)\n- remains = 150 - margin\n- separator = '#'\n write(fh, '\\n')\n- for line in lines:\n- for i in range(0, (len(line) // remains) + 1):\n- write(fh, \"\\t{fake:<{margin}}{separator} {example}\".format(\n- fake=signature,\n- separator=separator,\n- example=line[i*remains:(i+1)*remains],\n- margin=margin\n- ))\n- signature = separator = ' '\n- write(fh, '\\n')\n+ write(fh, \"\\t{fake}\\n{example}\\n\".format(\n+ fake=signature,\n+ example='\\n'.join(['\\t# ' + line for line in lines]),\n+ ))\n \n \n def write_docs(*args, **kwargs):\n", "issue": "ReadTheDocs spacing is off for providers\nhttp://fake-factory.readthedocs.org/en/latest/providers/internet.html\n\nThe spacing makes this pretty unreadable without constantly scrolling back and forth, which is annoying. \n\nSince these are the official extended docs linked to in the project ReadMe, it'd certainly be aesthetically nicer if they used native RTD formatting, for example see the formatting for the reference page of the factory boy project: http://factoryboy.readthedocs.org/en/latest/reference.html\n\nI dug around a little trying to make a pull request to fix this, but it looks like Faker's docs are auto-generated somehow and I didn't have time to figure it all out. If you point me in the right direction, I'd be happy to submit a PR. \n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\n\n\nif sys.version < '3':\n text_type = unicode\n binary_type = str\nelse:\n text_type = str\n binary_type = bytes\n\n\nDOCS_ROOT = os.path.abspath(os.path.join('..', 'docs'))\n\n\ndef write(fh, s):\n return fh.write(s.encode('utf-8'))\n\n\ndef write_provider(fh, doc, provider, formatters, excludes=None):\n\n if excludes is None:\n excludes = []\n\n write(fh, '\\n')\n title = \"``{0}``\".format(doc.get_provider_name(provider))\n write(fh, '%s\\n' % title)\n write(fh, \"-\" * len(title))\n write(fh, '\\n\\n::\\n')\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n lines = text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n msg = 'error on \"{0}\" with value \"{1}\"'.format(signature, example)\n raise Exception(msg)\n margin = max(30, doc.max_name_len+1)\n remains = 150 - margin\n separator = '#'\n write(fh, '\\n')\n for line in lines:\n for i in range(0, (len(line) // remains) + 1):\n write(fh, \"\\t{fake:<{margin}}{separator} {example}\".format(\n fake=signature,\n separator=separator,\n example=line[i*remains:(i+1)*remains],\n margin=margin\n ))\n signature = separator = ' '\n write(fh, '\\n')\n\n\ndef write_docs(*args, **kwargs):\n from faker import Faker, documentor\n from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES\n\n fake = Faker(locale=DEFAULT_LOCALE)\n\n from faker.providers import BaseProvider\n base_provider_formatters = [f for f in dir(BaseProvider)]\n\n doc = documentor.Documentor(fake)\n\n formatters = doc.get_formatters(with_args=True, with_defaults=True)\n\n for provider, fakers in formatters:\n provider_name = doc.get_provider_name(provider)\n fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)\n with open(fname, 'wb') as fh:\n 
write_provider(fh, doc, provider, fakers)\n\n with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:\n write(fh, 'Providers\\n')\n write(fh, '=========\\n')\n write(fh, '.. toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' providers/%s\\n' % doc.get_provider_name(provider))\n for provider, fakers in formatters]\n\n AVAILABLE_LOCALES = list(AVAILABLE_LOCALES)\n AVAILABLE_LOCALES.sort()\n for lang in AVAILABLE_LOCALES:\n fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)\n with open(fname, 'wb') as fh:\n write(fh, '\\n')\n title = 'Language {0}\\n'.format(lang)\n write(fh, title)\n write(fh, '=' * len(title))\n write(fh, '\\n')\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True,\n locale=lang,\n excludes=base_provider_formatters):\n write_provider(fh, d, p, fs)\n\n with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:\n write(fh, 'Locales\\n')\n write(fh, '=======\\n')\n write(fh, '.. toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' locales/%s\\n' % lang) for lang in AVAILABLE_LOCALES]\n\n\n# wrappers for sphinx\ndef _main(app, *args, **kwargs):\n return write_docs(*args, **kwargs)\n\n\ndef setup(app):\n app.connect(str('builder-inited'), _main)\n\n\nif __name__ == \"__main__\":\n write_docs(*sys.argv[1:])\n", "path": "faker/build_docs.py"}]}
1,921
275
gh_patches_debug_42117
rasdani/github-patches
git_diff
3cn-ecn__nantralPlatform-23
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Separer les differentes actions d'edition d'un groupe en plusieurs pages
Il faudrait diviser les actions des pages d'edition en plusieurs sous pages

- [ ] Convertir les pages pour utiliser le template update base
- [ ] Mettre les membres dans une nouvelle page
</issue> <code> [start of server/apps/group/views.py] 1 from django.shortcuts import redirect, render 2 from django.views.generic import DetailView, UpdateView, ListView, View 3 from .models import Club, Group, NamedMembership 4 from .forms import NamedMembershipClubFormset 5 from django.contrib import messages 6 from django.contrib.auth.decorators import login_required 7 from django.views.decorators.http import require_http_methods 8 from django.contrib.auth.mixins import UserPassesTestMixin 9 10 from apps.student.models import Student 11 from apps.event.models import Event 12 13 from apps.event.forms import EventGroupFormSet 14 15 class ListClubView(ListView): 16 model = Club 17 template_name = 'group/club_list.html' 18 19 20 class UpdateClubView(UpdateView): 21 model = Club 22 template_name = 'group/club_update.html' 23 fields = ['description', 'admins', 'logo'] 24 def get_context_data(self, **kwargs): 25 context = super().get_context_data(**kwargs) 26 memberships = NamedMembership.objects.filter(group=self.object) 27 membersForm = NamedMembershipClubFormset(queryset=memberships) 28 context['members'] = membersForm 29 return context 30 31 class UpdateClubEventsView(UserPassesTestMixin, View): 32 template_name = 'group/club_events_update.html' 33 def test_func(self): 34 group = Group.get_group_by_slug(self.kwargs['group_slug']) 35 return group.is_admin(self.request.user) 36 def get_context_data(self, **kwargs): 37 context = {} 38 context['object'] = Group.get_group_by_slug(kwargs['group_slug']) 39 context['events'] = Event.objects.filter(group=kwargs['group_slug']) 40 context['form'] = EventGroupFormSet(queryset=context['events']) 41 return context 42 43 def get(self, request, group_slug): 44 return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug)) 45 46 def post(self, request, group_slug): 47 return edit_events(request, group_slug) 48 49 def delete(self, request, group_slug, event_id): 50 print('Hello') 51 event = Event.objects.delete(group=group_slug, id=event_id) 52 return redirect('group:update-events') 53 54 class DetailClubView(DetailView): 55 model = Club 56 template_name = 'group/club_detail.html' 57 def get_context_data(self, **kwargs): 58 context = super().get_context_data(**kwargs) 59 members = NamedMembership.objects.filter(group=self.object) 60 context['members'] = members 61 return context 62 63 @login_required 64 def add_member(request, group_slug, student_id): 65 """Add a user to a club""" 66 group = Group.get_group_by_slug(group_slug) 67 student = Student.objects.get(id=student_id) 68 if isinstance(Club, group): 69 NamedMembership.objects.create(student=student, group=group) 70 71 72 @require_http_methods(['POST']) 73 @login_required 74 def edit_named_memberships(request, pk): 75 club = Club.objects.get(pk=pk) 76 form = NamedMembershipClubFormset(request.POST) 77 if form.is_valid(): 78 members = form.save(commit=False) 79 for member in members: 80 member.group = club 81 member.save() 82 for member in form.deleted_objects: 83 member.delete() 84 messages.success(request, 'Membres modifies') 85 return redirect('group:update', pk) 86 else: 87 messages.warning(request, form.errors) 88 return redirect('group:update', pk) 89 90 91 @login_required 92 def edit_events(request, group_slug): 93 group = Group.get_group_by_slug(group_slug) 94 form = EventGroupFormSet(request.POST) 95 if form.is_valid(): 96 events = form.save(commit=False) 97 # Link each event to the group 98 for event in events: 99 event.group = group.slug 100 event.save() 101 # Delete missing events 102 for event in form.deleted_objects: 103 event.delete() 104 messages.success(request, 'Events modifies') 105 return redirect('group:update-events', group_slug) 106 else: 107 messages.warning(request, form.errors) 108 return redirect('group:update-events', group_slug) 109 [end of server/apps/group/views.py] [start of server/apps/group/urls.py] 1 from django.conf.urls import url 2 from django.urls import path 3 4 from .views import * 5 6 app_name = 'group' 7 8 urlpatterns = [ 9 path('<slug:pk>/', DetailClubView.as_view(), name='detail'), 10 path('<slug:pk>/edit', UpdateClubView.as_view(), name='update'), 11 path('<slug:group_slug>/member/add/<slug:user_id>', add_member, name='add-member'), 12 path('<slug:pk>/members/edit', edit_named_memberships, name='editNamedMemberships'), 13 path('', ListClubView.as_view(), name='list'), 14 path('<slug:group_slug>/events/edit', UpdateClubEventsView.as_view(), name='update-events') 15 ] [end of server/apps/group/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/server/apps/group/urls.py b/server/apps/group/urls.py --- a/server/apps/group/urls.py +++ b/server/apps/group/urls.py @@ -9,7 +9,7 @@ path('<slug:pk>/', DetailClubView.as_view(), name='detail'), path('<slug:pk>/edit', UpdateClubView.as_view(), name='update'), path('<slug:group_slug>/member/add/<slug:user_id>', add_member, name='add-member'), - path('<slug:pk>/members/edit', edit_named_memberships, name='editNamedMemberships'), path('', ListClubView.as_view(), name='list'), - path('<slug:group_slug>/events/edit', UpdateClubEventsView.as_view(), name='update-events') + path('<slug:group_slug>/events/edit', UpdateGroupEventsView.as_view(), name='update-events'), + path('<slug:group_slug>/members/edit', UpdateGroupMembersView.as_view(), name='update-members') ] \ No newline at end of file diff --git a/server/apps/group/views.py b/server/apps/group/views.py --- a/server/apps/group/views.py +++ b/server/apps/group/views.py @@ -21,14 +21,8 @@ model = Club template_name = 'group/club_update.html' fields = ['description', 'admins', 'logo'] - def get_context_data(self, **kwargs): - context = super().get_context_data(**kwargs) - memberships = NamedMembership.objects.filter(group=self.object) - membersForm = NamedMembershipClubFormset(queryset=memberships) - context['members'] = membersForm - return context -class UpdateClubEventsView(UserPassesTestMixin, View): +class UpdateGroupEventsView(UserPassesTestMixin, View): template_name = 'group/club_events_update.html' def test_func(self): group = Group.get_group_by_slug(self.kwargs['group_slug']) @@ -46,10 +40,29 @@ def post(self, request, group_slug): return edit_events(request, group_slug) - def delete(self, request, group_slug, event_id): - print('Hello') - event = Event.objects.delete(group=group_slug, id=event_id) - return redirect('group:update-events') + +class UpdateGroupMembersView(UserPassesTestMixin, View): + template_name = 'group/club_members_update.html' + def test_func(self): + group = Group.get_group_by_slug(self.kwargs['group_slug']) + return group.is_admin(self.request.user) + + def get_context_data(self, **kwargs): + context = {} + context['object'] = Group.get_group_by_slug(kwargs['group_slug']) + if isinstance(context['object'], Club): + memberships = NamedMembership.objects.filter(group=context['object']) + membersForm = NamedMembershipClubFormset(queryset=memberships) + context['members'] = membersForm + return context + + def get(self, request, group_slug): + return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug)) + + def post(self, request, group_slug): + return edit_named_memberships(request, group_slug) + + class DetailClubView(DetailView): model = Club @@ -71,8 +84,8 @@ @require_http_methods(['POST']) @login_required -def edit_named_memberships(request, pk): - club = Club.objects.get(pk=pk) +def edit_named_memberships(request, group_slug): + club = Club.objects.filter(slug=group_slug).first() form = NamedMembershipClubFormset(request.POST) if form.is_valid(): members = form.save(commit=False) @@ -82,10 +95,10 @@ for member in form.deleted_objects: member.delete() messages.success(request, 'Membres modifies') - return redirect('group:update', pk) + return redirect('group:update', club.id) else: messages.warning(request, form.errors) - return redirect('group:update', pk) + return redirect('group:update', club.id) @login_required
{"golden_diff": "diff --git a/server/apps/group/urls.py b/server/apps/group/urls.py\n--- a/server/apps/group/urls.py\n+++ b/server/apps/group/urls.py\n@@ -9,7 +9,7 @@\n path('<slug:pk>/', DetailClubView.as_view(), name='detail'),\n path('<slug:pk>/edit', UpdateClubView.as_view(), name='update'),\n path('<slug:group_slug>/member/add/<slug:user_id>', add_member, name='add-member'),\n- path('<slug:pk>/members/edit', edit_named_memberships, name='editNamedMemberships'),\n path('', ListClubView.as_view(), name='list'),\n- path('<slug:group_slug>/events/edit', UpdateClubEventsView.as_view(), name='update-events')\n+ path('<slug:group_slug>/events/edit', UpdateGroupEventsView.as_view(), name='update-events'),\n+ path('<slug:group_slug>/members/edit', UpdateGroupMembersView.as_view(), name='update-members')\n ]\n\\ No newline at end of file\ndiff --git a/server/apps/group/views.py b/server/apps/group/views.py\n--- a/server/apps/group/views.py\n+++ b/server/apps/group/views.py\n@@ -21,14 +21,8 @@\n model = Club\n template_name = 'group/club_update.html'\n fields = ['description', 'admins', 'logo']\n- def get_context_data(self, **kwargs):\n- context = super().get_context_data(**kwargs)\n- memberships = NamedMembership.objects.filter(group=self.object)\n- membersForm = NamedMembershipClubFormset(queryset=memberships)\n- context['members'] = membersForm\n- return context\n \n-class UpdateClubEventsView(UserPassesTestMixin, View):\n+class UpdateGroupEventsView(UserPassesTestMixin, View):\n template_name = 'group/club_events_update.html'\n def test_func(self):\n group = Group.get_group_by_slug(self.kwargs['group_slug'])\n@@ -46,10 +40,29 @@\n def post(self, request, group_slug):\n return edit_events(request, group_slug)\n \n- def delete(self, request, group_slug, event_id):\n- print('Hello')\n- event = Event.objects.delete(group=group_slug, id=event_id)\n- return redirect('group:update-events')\n+\n+class UpdateGroupMembersView(UserPassesTestMixin, View):\n+ template_name = 'group/club_members_update.html'\n+ def test_func(self):\n+ group = Group.get_group_by_slug(self.kwargs['group_slug'])\n+ return group.is_admin(self.request.user)\n+\n+ def get_context_data(self, **kwargs):\n+ context = {}\n+ context['object'] = Group.get_group_by_slug(kwargs['group_slug'])\n+ if isinstance(context['object'], Club):\n+ memberships = NamedMembership.objects.filter(group=context['object'])\n+ membersForm = NamedMembershipClubFormset(queryset=memberships)\n+ context['members'] = membersForm\n+ return context\n+\n+ def get(self, request, group_slug):\n+ return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))\n+ \n+ def post(self, request, group_slug):\n+ return edit_named_memberships(request, group_slug)\n+\n+\n \n class DetailClubView(DetailView):\n model = Club\n@@ -71,8 +84,8 @@\n \n @require_http_methods(['POST'])\n @login_required\n-def edit_named_memberships(request, pk):\n- club = Club.objects.get(pk=pk)\n+def edit_named_memberships(request, group_slug):\n+ club = Club.objects.filter(slug=group_slug).first()\n form = NamedMembershipClubFormset(request.POST)\n if form.is_valid():\n members = form.save(commit=False)\n@@ -82,10 +95,10 @@\n for member in form.deleted_objects:\n member.delete()\n messages.success(request, 'Membres modifies')\n- return redirect('group:update', pk)\n+ return redirect('group:update', club.id)\n else:\n messages.warning(request, form.errors)\n- return redirect('group:update', pk)\n+ return redirect('group:update', club.id)\n \n \n 
@login_required\n", "issue": "Separer les differentes actions d'edition d'un groupe en plusieurs pages\nIl faudrait diviser les actions des pages d'edition en plusieurs sous pages\r\n\r\n- [ ] Convertir les pages pour utiliser le template update base\r\n- [ ] Mettre les membres dans une nouvelle page\n", "before_files": [{"content": "from django.shortcuts import redirect, render\nfrom django.views.generic import DetailView, UpdateView, ListView, View\nfrom .models import Club, Group, NamedMembership\nfrom .forms import NamedMembershipClubFormset\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import require_http_methods\nfrom django.contrib.auth.mixins import UserPassesTestMixin\n\nfrom apps.student.models import Student\nfrom apps.event.models import Event\n\nfrom apps.event.forms import EventGroupFormSet\n\nclass ListClubView(ListView):\n model = Club\n template_name = 'group/club_list.html'\n\n\nclass UpdateClubView(UpdateView):\n model = Club\n template_name = 'group/club_update.html'\n fields = ['description', 'admins', 'logo']\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n memberships = NamedMembership.objects.filter(group=self.object)\n membersForm = NamedMembershipClubFormset(queryset=memberships)\n context['members'] = membersForm\n return context\n\nclass UpdateClubEventsView(UserPassesTestMixin, View):\n template_name = 'group/club_events_update.html'\n def test_func(self):\n group = Group.get_group_by_slug(self.kwargs['group_slug'])\n return group.is_admin(self.request.user)\n def get_context_data(self, **kwargs):\n context = {}\n context['object'] = Group.get_group_by_slug(kwargs['group_slug'])\n context['events'] = Event.objects.filter(group=kwargs['group_slug'])\n context['form'] = EventGroupFormSet(queryset=context['events'])\n return context\n\n def get(self, request, group_slug):\n return render(request, self.template_name, context=self.get_context_data(group_slug=group_slug))\n\n def post(self, request, group_slug):\n return edit_events(request, group_slug)\n\n def delete(self, request, group_slug, event_id):\n print('Hello')\n event = Event.objects.delete(group=group_slug, id=event_id)\n return redirect('group:update-events')\n\nclass DetailClubView(DetailView):\n model = Club\n template_name = 'group/club_detail.html'\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n members = NamedMembership.objects.filter(group=self.object)\n context['members'] = members\n return context\n\n@login_required\ndef add_member(request, group_slug, student_id):\n \"\"\"Add a user to a club\"\"\"\n group = Group.get_group_by_slug(group_slug)\n student = Student.objects.get(id=student_id)\n if isinstance(Club, group):\n NamedMembership.objects.create(student=student, group=group)\n\n\n@require_http_methods(['POST'])\n@login_required\ndef edit_named_memberships(request, pk):\n club = Club.objects.get(pk=pk)\n form = NamedMembershipClubFormset(request.POST)\n if form.is_valid():\n members = form.save(commit=False)\n for member in members:\n member.group = club\n member.save()\n for member in form.deleted_objects:\n member.delete()\n messages.success(request, 'Membres modifies')\n return redirect('group:update', pk)\n else:\n messages.warning(request, form.errors)\n return redirect('group:update', pk)\n\n\n@login_required\ndef edit_events(request, group_slug):\n group = Group.get_group_by_slug(group_slug)\n form = 
EventGroupFormSet(request.POST)\n if form.is_valid():\n events = form.save(commit=False)\n # Link each event to the group\n for event in events:\n event.group = group.slug\n event.save()\n # Delete missing events\n for event in form.deleted_objects:\n event.delete()\n messages.success(request, 'Events modifies')\n return redirect('group:update-events', group_slug)\n else:\n messages.warning(request, form.errors)\n return redirect('group:update-events', group_slug)\n", "path": "server/apps/group/views.py"}, {"content": "from django.conf.urls import url\nfrom django.urls import path\n\nfrom .views import *\n\napp_name = 'group'\n\nurlpatterns = [\n path('<slug:pk>/', DetailClubView.as_view(), name='detail'),\n path('<slug:pk>/edit', UpdateClubView.as_view(), name='update'),\n path('<slug:group_slug>/member/add/<slug:user_id>', add_member, name='add-member'),\n path('<slug:pk>/members/edit', edit_named_memberships, name='editNamedMemberships'),\n path('', ListClubView.as_view(), name='list'),\n path('<slug:group_slug>/events/edit', UpdateClubEventsView.as_view(), name='update-events')\n]", "path": "server/apps/group/urls.py"}]}
1,844
910
gh_patches_debug_63306
rasdani/github-patches
git_diff
scikit-hep__pyhf-363
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> consolidation: add simplemodls to __all__ # Description It would be nice if the snippet in the README could be shorter: right now this is needed ``` import pyhf import pyhf.simplemodels pdf = pyhf.simplemodels.hepdata_like(signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0]) CLs_obs = pyhf.utils.hypotest(1.0, [51] + pdf.config.auxdata, pdf) ``` whereas if we pre-import `simplemodels` it could be ``` import pyhf pdf = pyhf.simplemodels.hepdata_like(signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0]) CLs_obs = pyhf.utils.hypotest(1.0, [51] + pdf.config.auxdata, pdf) ``` since `simplemodels.py` doesn't add much code, i don't think it would slow down things a lot </issue> <code> [start of pyhf/__init__.py] 1 from .tensor import BackendRetriever as tensor 2 from .optimize import OptimizerRetriever as optimize 3 from .version import __version__ 4 from . import events 5 6 tensorlib = tensor.numpy_backend() 7 default_backend = tensorlib 8 optimizer = optimize.scipy_optimizer() 9 default_optimizer = optimizer 10 11 12 def get_backend(): 13 """ 14 Get the current backend and the associated optimizer 15 16 Example: 17 >>> import pyhf 18 >>> pyhf.get_backend() 19 (<pyhf.tensor.numpy_backend.numpy_backend object at 0x...>, <pyhf.optimize.opt_scipy.scipy_optimizer object at 0x...>) 20 21 Returns: 22 backend, optimizer 23 """ 24 global tensorlib 25 global optimizer 26 return tensorlib, optimizer 27 28 29 @events.register('change_backend') 30 def set_backend(backend, custom_optimizer=None): 31 """ 32 Set the backend and the associated optimizer 33 34 Example: 35 >>> import pyhf 36 >>> import tensorflow as tf 37 >>> pyhf.set_backend(pyhf.tensor.tensorflow_backend(session=tf.Session())) 38 39 Args: 40 backend: One of the supported pyhf backends: NumPy, 41 TensorFlow, PyTorch, and MXNet 42 43 Returns: 44 None 45 """ 46 global tensorlib 47 global optimizer 48 49 # need to determine if the tensorlib changed or the optimizer changed for events 50 tensorlib_changed = bool(backend.name != tensorlib.name) 51 optimizer_changed = False 52 53 if backend.name == 'tensorflow': 54 new_optimizer = ( 55 custom_optimizer if custom_optimizer else optimize.tflow_optimizer(backend) 56 ) 57 if tensorlib.name == 'tensorflow': 58 tensorlib_changed |= bool(backend.session != tensorlib.session) 59 elif backend.name == 'pytorch': 60 new_optimizer = ( 61 custom_optimizer 62 if custom_optimizer 63 else optimize.pytorch_optimizer(tensorlib=backend) 64 ) 65 # TODO: Add support for mxnet_optimizer() 66 # elif tensorlib.name == 'mxnet': 67 # new_optimizer = custom_optimizer if custom_optimizer else mxnet_optimizer() 68 else: 69 new_optimizer = ( 70 custom_optimizer if custom_optimizer else optimize.scipy_optimizer() 71 ) 72 73 optimizer_changed = bool(optimizer != new_optimizer) 74 # set new backend 75 tensorlib = backend 76 optimizer = new_optimizer 77 # trigger events 78 if tensorlib_changed: 79 events.trigger("tensorlib_changed")() 80 if optimizer_changed: 81 events.trigger("optimizer_changed")() 82 83 84 from .pdf import Model 85 86 __all__ = ['Model', 'utils', 'modifiers', '__version__'] 87 [end of pyhf/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyhf/__init__.py b/pyhf/__init__.py --- a/pyhf/__init__.py +++ b/pyhf/__init__.py @@ -82,5 +82,6 @@ from .pdf import Model +from . import simplemodels -__all__ = ['Model', 'utils', 'modifiers', '__version__'] +__all__ = ['Model', 'utils', 'modifiers', 'simplemodels', '__version__']
{"golden_diff": "diff --git a/pyhf/__init__.py b/pyhf/__init__.py\n--- a/pyhf/__init__.py\n+++ b/pyhf/__init__.py\n@@ -82,5 +82,6 @@\n \n \n from .pdf import Model\n+from . import simplemodels\n \n-__all__ = ['Model', 'utils', 'modifiers', '__version__']\n+__all__ = ['Model', 'utils', 'modifiers', 'simplemodels', '__version__']\n", "issue": "consolidation: add simplemodls to __all__\n# Description\r\n\r\nIt would be nice if the snippet in the README could be shorter:\r\n\r\nright now this is needed\r\n```\r\nimport pyhf\r\nimport pyhf.simplemodels\r\npdf = pyhf.simplemodels.hepdata_like(signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0])\r\nCLs_obs = pyhf.utils.hypotest(1.0, [51] + pdf.config.auxdata, pdf)\r\n```\r\n\r\nwhereas if we pre-import `simplemodels` it could be \r\n```\r\nimport pyhf\r\npdf = pyhf.simplemodels.hepdata_like(signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0])\r\nCLs_obs = pyhf.utils.hypotest(1.0, [51] + pdf.config.auxdata, pdf)\r\n```\r\n\r\nsince `simplemodels.py` doesn't add much code, i don't think it would slow down things a lot\n", "before_files": [{"content": "from .tensor import BackendRetriever as tensor\nfrom .optimize import OptimizerRetriever as optimize\nfrom .version import __version__\nfrom . import events\n\ntensorlib = tensor.numpy_backend()\ndefault_backend = tensorlib\noptimizer = optimize.scipy_optimizer()\ndefault_optimizer = optimizer\n\n\ndef get_backend():\n \"\"\"\n Get the current backend and the associated optimizer\n\n Example:\n >>> import pyhf\n >>> pyhf.get_backend()\n (<pyhf.tensor.numpy_backend.numpy_backend object at 0x...>, <pyhf.optimize.opt_scipy.scipy_optimizer object at 0x...>)\n\n Returns:\n backend, optimizer\n \"\"\"\n global tensorlib\n global optimizer\n return tensorlib, optimizer\n\n\[email protected]('change_backend')\ndef set_backend(backend, custom_optimizer=None):\n \"\"\"\n Set the backend and the associated optimizer\n\n Example:\n >>> import pyhf\n >>> import tensorflow as tf\n >>> pyhf.set_backend(pyhf.tensor.tensorflow_backend(session=tf.Session()))\n\n Args:\n backend: One of the supported pyhf backends: NumPy,\n TensorFlow, PyTorch, and MXNet\n\n Returns:\n None\n \"\"\"\n global tensorlib\n global optimizer\n\n # need to determine if the tensorlib changed or the optimizer changed for events\n tensorlib_changed = bool(backend.name != tensorlib.name)\n optimizer_changed = False\n\n if backend.name == 'tensorflow':\n new_optimizer = (\n custom_optimizer if custom_optimizer else optimize.tflow_optimizer(backend)\n )\n if tensorlib.name == 'tensorflow':\n tensorlib_changed |= bool(backend.session != tensorlib.session)\n elif backend.name == 'pytorch':\n new_optimizer = (\n custom_optimizer\n if custom_optimizer\n else optimize.pytorch_optimizer(tensorlib=backend)\n )\n # TODO: Add support for mxnet_optimizer()\n # elif tensorlib.name == 'mxnet':\n # new_optimizer = custom_optimizer if custom_optimizer else mxnet_optimizer()\n else:\n new_optimizer = (\n custom_optimizer if custom_optimizer else optimize.scipy_optimizer()\n )\n\n optimizer_changed = bool(optimizer != new_optimizer)\n # set new backend\n tensorlib = backend\n optimizer = new_optimizer\n # trigger events\n if tensorlib_changed:\n events.trigger(\"tensorlib_changed\")()\n if optimizer_changed:\n events.trigger(\"optimizer_changed\")()\n\n\nfrom .pdf import Model\n\n__all__ = ['Model', 'utils', 'modifiers', '__version__']\n", "path": "pyhf/__init__.py"}]}
1,468
103
gh_patches_debug_3535
rasdani/github-patches
git_diff
scikit-image__scikit-image-1206
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> canny edge detection throws AttributeError exception I was trying out http://scikit-image.org/docs/dev/auto_examples/plot_canny.html And the following lines of code: # Generate noisy image of a square im = np.zeros((128, 128)) im[32:-32, 32:-32] = 1 im = ndimage.rotate(im, 15, mode='constant') im = ndimage.gaussian_filter(im, 4) im += 0.2 \* np.random.random(im.shape) # Compute the Canny filter for two values of sigma edges1 = filter.canny(im) produced this: Traceback (most recent call last): File "<stdin>", line 2, in <module> File "/home/abc/virtualenvs/activepython/lib/python2.7/site-packages/scikit_image-0.11dev-py2.7-linux-x86_64.egg/skimage/_shared/utils.py", line 46, in __call__ msg = 'Call to deprecated function `%s`.' % func.**name** AttributeError: 'numpy.ndarray' object has no attribute '**name**' Any guess? Seems I'm using the latest version of scikit-image. </issue> <code> [start of skimage/filter/__init__.py] 1 from .lpi_filter import inverse, wiener, LPIFilter2D 2 from ._gaussian import gaussian_filter 3 from .edges import (sobel, hsobel, vsobel, scharr, hscharr, vscharr, prewitt, 4 hprewitt, vprewitt, roberts, roberts_positive_diagonal, 5 roberts_negative_diagonal) 6 from ._rank_order import rank_order 7 from ._gabor import gabor_kernel, gabor_filter 8 from .thresholding import (threshold_adaptive, threshold_otsu, threshold_yen, 9 threshold_isodata) 10 from . import rank 11 12 from skimage._shared.utils import deprecated 13 from skimage import restoration 14 denoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\ 15 (restoration.denoise_bilateral) 16 denoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\ 17 (restoration.denoise_tv_bregman) 18 denoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\ 19 (restoration.denoise_tv_chambolle) 20 21 # Backward compatibility v<0.11 22 @deprecated 23 def canny(*args, **kwargs): 24 # Hack to avoid circular import 25 from skimage.feature._canny import canny as canny_ 26 return canny_(*args, **kwargs) 27 28 29 __all__ = ['inverse', 30 'wiener', 31 'LPIFilter2D', 32 'gaussian_filter', 33 'canny', 34 'sobel', 35 'hsobel', 36 'vsobel', 37 'scharr', 38 'hscharr', 39 'vscharr', 40 'prewitt', 41 'hprewitt', 42 'vprewitt', 43 'roberts', 44 'roberts_positive_diagonal', 45 'roberts_negative_diagonal', 46 'denoise_tv_chambolle', 47 'denoise_bilateral', 48 'denoise_tv_bregman', 49 'rank_order', 50 'gabor_kernel', 51 'gabor_filter', 52 'threshold_adaptive', 53 'threshold_otsu', 54 'threshold_yen', 55 'threshold_isodata', 56 'rank'] 57 [end of skimage/filter/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/filter/__init__.py b/skimage/filter/__init__.py --- a/skimage/filter/__init__.py +++ b/skimage/filter/__init__.py @@ -19,7 +19,7 @@ (restoration.denoise_tv_chambolle) # Backward compatibility v<0.11 -@deprecated +@deprecated('skimage.feature.canny') def canny(*args, **kwargs): # Hack to avoid circular import from skimage.feature._canny import canny as canny_
{"golden_diff": "diff --git a/skimage/filter/__init__.py b/skimage/filter/__init__.py\n--- a/skimage/filter/__init__.py\n+++ b/skimage/filter/__init__.py\n@@ -19,7 +19,7 @@\n (restoration.denoise_tv_chambolle)\n \n # Backward compatibility v<0.11\n-@deprecated\n+@deprecated('skimage.feature.canny')\n def canny(*args, **kwargs):\n # Hack to avoid circular import\n from skimage.feature._canny import canny as canny_\n", "issue": "canny edge detection throws AttributeError exception\nI was trying out http://scikit-image.org/docs/dev/auto_examples/plot_canny.html \n\nAnd the following lines of code:\n# Generate noisy image of a square\n\nim = np.zeros((128, 128))\nim[32:-32, 32:-32] = 1\n\nim = ndimage.rotate(im, 15, mode='constant')\nim = ndimage.gaussian_filter(im, 4)\nim += 0.2 \\* np.random.random(im.shape)\n# Compute the Canny filter for two values of sigma\n\nedges1 = filter.canny(im)\n\n produced this:\n\nTraceback (most recent call last):\n File \"<stdin>\", line 2, in <module>\n File \"/home/abc/virtualenvs/activepython/lib/python2.7/site-packages/scikit_image-0.11dev-py2.7-linux-x86_64.egg/skimage/_shared/utils.py\", line 46, in __call__\n msg = 'Call to deprecated function `%s`.' % func.**name**\nAttributeError: 'numpy.ndarray' object has no attribute '**name**'\n\nAny guess? Seems I'm using the latest version of scikit-image. \n\n", "before_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import gaussian_filter\nfrom .edges import (sobel, hsobel, vsobel, scharr, hscharr, vscharr, prewitt,\n hprewitt, vprewitt, roberts, roberts_positive_diagonal,\n roberts_negative_diagonal)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor_filter\nfrom .thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata)\nfrom . import rank\n\nfrom skimage._shared.utils import deprecated\nfrom skimage import restoration\ndenoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\\\n (restoration.denoise_bilateral)\ndenoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\\\n (restoration.denoise_tv_bregman)\ndenoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\\\n (restoration.denoise_tv_chambolle)\n\n# Backward compatibility v<0.11\n@deprecated\ndef canny(*args, **kwargs):\n # Hack to avoid circular import\n from skimage.feature._canny import canny as canny_\n return canny_(*args, **kwargs)\n\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian_filter',\n 'canny',\n 'sobel',\n 'hsobel',\n 'vsobel',\n 'scharr',\n 'hscharr',\n 'vscharr',\n 'prewitt',\n 'hprewitt',\n 'vprewitt',\n 'roberts',\n 'roberts_positive_diagonal',\n 'roberts_negative_diagonal',\n 'denoise_tv_chambolle',\n 'denoise_bilateral',\n 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor_filter',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'rank']\n", "path": "skimage/filter/__init__.py"}]}
1,391
126
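The traceback in the record above is the classic decorator-factory misuse: `deprecated` expects to be called with a replacement name before it ever sees the function. A minimal self-contained sketch of that contract follows; this simplified `deprecated` is an illustration of the pattern, not skimage's actual implementation.

import functools
import warnings

def deprecated(alt_name):
    """Decorator factory: must be called with the replacement name first."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn("Call to deprecated function `%s`; use `%s` instead."
                          % (func.__name__, alt_name), stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator

@deprecated('skimage.feature.canny')   # correct: the factory is called first
def canny(image):
    return image

Applying it bare as `@deprecated` would bind the function itself to `alt_name`, so the first call argument, here an ndarray, later gets treated as the function, which is exactly the `__name__` AttributeError reported in the issue.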
gh_patches_debug_33409
rasdani/github-patches
git_diff
bridgecrewio__checkov-4614
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> False positive for tests in bicep: CKV_AZURE_4, CKV_AZURE_5, CKV_AZURE_7, CKV_AZURE_8 **Describe the issue** false positive for tests CKV_AZURE_4, CKV_AZURE_5, CKV_AZURE_7, CKV_AZURE_8 **Examples** Here is the bicep code Code made shorter. ``` resource aks 'Microsoft.ContainerService/managedClusters@2022-11-01' = { properties: { enableRBAC: true } addonProfiles: { omsAgent: { enabled: true config: { logAnalyticsWorkspaceResourceId: logAnalyticsWorkspaceId } } azurePolicy: { enabled: true config: { version: 'v2' } } kubeDashboard: { enabled: false } } networkProfile: { networkPlugin: 'azure' networkPolicy: 'azure' } } } ``` **Version (please complete the following information):** - checkov v2.1.244 </issue> <code> [start of checkov/arm/checks/resource/AKSLoggingEnabled.py] 1 from checkov.common.models.enums import CheckResult, CheckCategories 2 from checkov.arm.base_resource_check import BaseResourceCheck 3 4 5 class AKSLoggingEnabled(BaseResourceCheck): 6 def __init__(self): 7 # apiVersion 2017-08-03 = Fail - No addonProfiles option to configure 8 name = "Ensure AKS logging to Azure Monitoring is Configured" 9 id = "CKV_AZURE_4" 10 supported_resources = ['Microsoft.ContainerService/managedClusters'] 11 categories = [CheckCategories.KUBERNETES] 12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 13 14 def scan_resource_conf(self, conf): 15 if conf.get("apiVersion"): 16 if conf["apiVersion"] == "2017-08-31": 17 # No addonProfiles option to configure 18 return CheckResult.FAILED 19 20 properties = conf.get("properties") 21 if isinstance(properties, dict): 22 addon_profiles = properties.get("addonProfiles") 23 if isinstance(addon_profiles, dict): 24 omsagent = addon_profiles.get("omsagent") 25 if isinstance(omsagent, dict) and omsagent.get("enabled"): 26 return CheckResult.PASSED 27 28 return CheckResult.FAILED 29 30 31 check = AKSLoggingEnabled() 32 [end of checkov/arm/checks/resource/AKSLoggingEnabled.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/arm/checks/resource/AKSLoggingEnabled.py b/checkov/arm/checks/resource/AKSLoggingEnabled.py --- a/checkov/arm/checks/resource/AKSLoggingEnabled.py +++ b/checkov/arm/checks/resource/AKSLoggingEnabled.py @@ -1,18 +1,22 @@ +from __future__ import annotations + +from typing import Any + from checkov.common.models.enums import CheckResult, CheckCategories from checkov.arm.base_resource_check import BaseResourceCheck class AKSLoggingEnabled(BaseResourceCheck): - def __init__(self): + def __init__(self) -> None: # apiVersion 2017-08-03 = Fail - No addonProfiles option to configure name = "Ensure AKS logging to Azure Monitoring is Configured" id = "CKV_AZURE_4" - supported_resources = ['Microsoft.ContainerService/managedClusters'] - categories = [CheckCategories.KUBERNETES] + supported_resources = ("Microsoft.ContainerService/managedClusters",) + categories = (CheckCategories.KUBERNETES,) super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - def scan_resource_conf(self, conf): - if conf.get("apiVersion"): + def scan_resource_conf(self, conf: dict[str, Any]) -> CheckResult: + if "apiVersion" in conf: if conf["apiVersion"] == "2017-08-31": # No addonProfiles option to configure return CheckResult.FAILED @@ -22,6 +26,10 @@ addon_profiles = properties.get("addonProfiles") if isinstance(addon_profiles, dict): omsagent = addon_profiles.get("omsagent") + if not omsagent: + # it can be written in lowercase or camelCase + omsagent = addon_profiles.get("omsAgent") + if isinstance(omsagent, dict) and omsagent.get("enabled"): return CheckResult.PASSED
{"golden_diff": "diff --git a/checkov/arm/checks/resource/AKSLoggingEnabled.py b/checkov/arm/checks/resource/AKSLoggingEnabled.py\n--- a/checkov/arm/checks/resource/AKSLoggingEnabled.py\n+++ b/checkov/arm/checks/resource/AKSLoggingEnabled.py\n@@ -1,18 +1,22 @@\n+from __future__ import annotations\n+\n+from typing import Any\n+\n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.arm.base_resource_check import BaseResourceCheck\n \n \n class AKSLoggingEnabled(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n # apiVersion 2017-08-03 = Fail - No addonProfiles option to configure\n name = \"Ensure AKS logging to Azure Monitoring is Configured\"\n id = \"CKV_AZURE_4\"\n- supported_resources = ['Microsoft.ContainerService/managedClusters']\n- categories = [CheckCategories.KUBERNETES]\n+ supported_resources = (\"Microsoft.ContainerService/managedClusters\",)\n+ categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n- if conf.get(\"apiVersion\"):\n+ def scan_resource_conf(self, conf: dict[str, Any]) -> CheckResult:\n+ if \"apiVersion\" in conf:\n if conf[\"apiVersion\"] == \"2017-08-31\":\n # No addonProfiles option to configure\n return CheckResult.FAILED\n@@ -22,6 +26,10 @@\n addon_profiles = properties.get(\"addonProfiles\")\n if isinstance(addon_profiles, dict):\n omsagent = addon_profiles.get(\"omsagent\")\n+ if not omsagent:\n+ # it can be written in lowercase or camelCase\n+ omsagent = addon_profiles.get(\"omsAgent\")\n+\n if isinstance(omsagent, dict) and omsagent.get(\"enabled\"):\n return CheckResult.PASSED\n", "issue": "False positive for tests in bicep: CKV_AZURE_4, CKV_AZURE_5, CKV_AZURE_7, CKV_AZURE_8\n**Describe the issue**\r\nfalse positive for tests CKV_AZURE_4, CKV_AZURE_5, CKV_AZURE_7, CKV_AZURE_8\r\n\r\n\r\n**Examples**\r\nHere is the bicep code\r\n Code made shorter.\r\n```\r\nresource aks 'Microsoft.ContainerService/managedClusters@2022-11-01' = {\r\n properties: {\r\n enableRBAC: true\r\n }\r\n addonProfiles: {\r\n omsAgent: {\r\n enabled: true\r\n config: {\r\n logAnalyticsWorkspaceResourceId: logAnalyticsWorkspaceId\r\n }\r\n }\r\n azurePolicy: {\r\n enabled: true\r\n config: {\r\n version: 'v2'\r\n }\r\n }\r\n kubeDashboard: {\r\n enabled: false\r\n }\r\n }\r\n networkProfile: {\r\n networkPlugin: 'azure'\r\n networkPolicy: 'azure'\r\n }\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - checkov v2.1.244\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.arm.base_resource_check import BaseResourceCheck\n\n\nclass AKSLoggingEnabled(BaseResourceCheck):\n def __init__(self):\n # apiVersion 2017-08-03 = Fail - No addonProfiles option to configure\n name = \"Ensure AKS logging to Azure Monitoring is Configured\"\n id = \"CKV_AZURE_4\"\n supported_resources = ['Microsoft.ContainerService/managedClusters']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if conf.get(\"apiVersion\"):\n if conf[\"apiVersion\"] == \"2017-08-31\":\n # No addonProfiles option to configure\n return CheckResult.FAILED\n\n properties = conf.get(\"properties\")\n if isinstance(properties, dict):\n addon_profiles = properties.get(\"addonProfiles\")\n if 
isinstance(addon_profiles, dict):\n omsagent = addon_profiles.get(\"omsagent\")\n if isinstance(omsagent, dict) and omsagent.get(\"enabled\"):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = AKSLoggingEnabled()\n", "path": "checkov/arm/checks/resource/AKSLoggingEnabled.py"}]}
1,148
455
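The checkov fix works by probing two spellings of the same addon key. The same idea as a generic helper; `get_key_case_insensitive` is a hypothetical name for illustration, not part of checkov's API.

def get_key_case_insensitive(mapping, key):
    """Hypothetical helper: fetch `key` from a dict ignoring case."""
    if not isinstance(mapping, dict):
        return None
    lowered = {k.lower(): v for k, v in mapping.items()}
    return lowered.get(key.lower())

addon_profiles = {'omsAgent': {'enabled': True}}
assert get_key_case_insensitive(addon_profiles, 'omsagent') == {'enabled': True}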
gh_patches_debug_18581
rasdani/github-patches
git_diff
scikit-image__scikit-image-219
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> message about unit tests not available If scikits-image is installed without nose, a message is printed about unit tests not being available is printed on import but it is a little bit distracting to the casual end-user. </issue> <code> [start of skimage/__init__.py] 1 """Image Processing SciKit (Toolbox for SciPy) 2 3 ``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image 4 processing and computer vision. 5 6 The main package of ``skimage`` only provides a few utilities for converting 7 between image data types; for most features, you need to import one of the 8 following subpackages: 9 10 Subpackages 11 ----------- 12 color 13 Color space conversion. 14 data 15 Test images and example data. 16 draw 17 Image drawing primitives (lines, text, etc.). 18 exposure 19 Image intensity adjustment (e.g., histogram equalization). 20 feature 21 Feature detection (e.g. texture analysis, corners, etc.). 22 filter 23 Sharpening, edge finding, denoising, etc. 24 graph 25 Graph-theoretic operations, e.g. dynamic programming (shortest paths). 26 io 27 Reading, saving, and displaying images and video. 28 measure 29 Measurement of image properties, e.g., similarity and contours. 30 morphology 31 Morphological operations, e.g. opening or skeletonization. 32 segmentation 33 Splitting an image into self-similar regions. 34 transform 35 Geometric and other transforms, e.g. rotation or the Radon transform. 36 util 37 Generic utilities. 38 39 Utility Functions 40 ----------------- 41 get_log 42 Returns the ``skimage`` log. Use this to print debug output. 43 img_as_float 44 Convert an image to floating point format, with values in [0, 1]. 45 img_as_uint 46 Convert an image to unsigned integer format, with values in [0, 65535]. 47 img_as_int 48 Convert an image to signed integer format, with values in [-32768, 32767]. 49 img_as_ubyte 50 Convert an image to unsigned byte format, with values in [0, 255]. 51 52 """ 53 54 import os.path as _osp 55 56 pkg_dir = _osp.abspath(_osp.dirname(__file__)) 57 data_dir = _osp.join(pkg_dir, 'data') 58 59 try: 60 from .version import version as __version__ 61 except ImportError: 62 __version__ = "unbuilt-dev" 63 64 65 def _setup_test(verbose=False): 66 import functools 67 68 args = ['', '--exe', '-w', pkg_dir] 69 if verbose: 70 args.extend(['-v', '-s']) 71 72 try: 73 import nose as _nose 74 except ImportError: 75 print("Could not load nose. Unit tests not available.") 76 return None 77 else: 78 f = functools.partial(_nose.run, 'skimage', argv=args) 79 f.__doc__ = 'Invoke the skimage test suite.' 80 return f 81 82 test = _setup_test() 83 if test is None: 84 try: 85 del test 86 except NameError: 87 pass 88 89 test_verbose = _setup_test(verbose=True) 90 if test_verbose is None: 91 try: 92 del test 93 except NameError: 94 pass 95 96 97 def get_log(name=None): 98 """Return a console logger. 99 100 Output may be sent to the logger using the `debug`, `info`, `warning`, 101 `error` and `critical` methods. 102 103 Parameters 104 ---------- 105 name : str 106 Name of the log. 107 108 References 109 ---------- 110 .. [1] Logging facility for Python, 111 http://docs.python.org/library/logging.html 112 113 """ 114 import logging 115 116 if name is None: 117 name = 'skimage' 118 else: 119 name = 'skimage.' + name 120 121 log = logging.getLogger(name) 122 return log 123 124 125 def _setup_log(): 126 """Configure root logger. 
127 128 """ 129 import logging 130 import sys 131 132 log = logging.getLogger() 133 134 try: 135 handler = logging.StreamHandler(stream=sys.stdout) 136 except TypeError: 137 handler = logging.StreamHandler(strm=sys.stdout) 138 139 formatter = logging.Formatter( 140 '%(name)s: %(levelname)s: %(message)s' 141 ) 142 handler.setFormatter(formatter) 143 144 log.addHandler(handler) 145 log.setLevel(logging.WARNING) 146 147 _setup_log() 148 149 from .util.dtype import * 150 [end of skimage/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/__init__.py b/skimage/__init__.py --- a/skimage/__init__.py +++ b/skimage/__init__.py @@ -72,26 +72,20 @@ try: import nose as _nose except ImportError: - print("Could not load nose. Unit tests not available.") - return None + def broken_test_func(): + """This would invoke the skimage test suite, but nose couldn't be + imported so the test suite can not run. + """ + raise ImportError("Could not load nose. Unit tests not available.") + return broken_test_func else: f = functools.partial(_nose.run, 'skimage', argv=args) f.__doc__ = 'Invoke the skimage test suite.' return f -test = _setup_test() -if test is None: - try: - del test - except NameError: - pass +test = _setup_test() test_verbose = _setup_test(verbose=True) -if test_verbose is None: - try: - del test - except NameError: - pass def get_log(name=None):
{"golden_diff": "diff --git a/skimage/__init__.py b/skimage/__init__.py\n--- a/skimage/__init__.py\n+++ b/skimage/__init__.py\n@@ -72,26 +72,20 @@\n try:\n import nose as _nose\n except ImportError:\n- print(\"Could not load nose. Unit tests not available.\")\n- return None\n+ def broken_test_func():\n+ \"\"\"This would invoke the skimage test suite, but nose couldn't be\n+ imported so the test suite can not run.\n+ \"\"\"\n+ raise ImportError(\"Could not load nose. Unit tests not available.\")\n+ return broken_test_func\n else:\n f = functools.partial(_nose.run, 'skimage', argv=args)\n f.__doc__ = 'Invoke the skimage test suite.'\n return f\n \n-test = _setup_test()\n-if test is None:\n- try:\n- del test\n- except NameError:\n- pass\n \n+test = _setup_test()\n test_verbose = _setup_test(verbose=True)\n-if test_verbose is None:\n- try:\n- del test\n- except NameError:\n- pass\n \n \n def get_log(name=None):\n", "issue": "message about unit tests not available\nIf scikits-image is installed without nose, a message is printed about unit tests not being available is printed on import but it is a little bit distracting to the casual end-user.\n\n", "before_files": [{"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Image drawing primitives (lines, text, etc.).\nexposure\n Image intensity adjustment (e.g., histogram equalization).\nfeature\n Feature detection (e.g. texture analysis, corners, etc.).\nfilter\n Sharpening, edge finding, denoising, etc.\ngraph\n Graph-theoretic operations, e.g. dynamic programming (shortest paths).\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g. opening or skeletonization.\nsegmentation\n Splitting an image into self-similar regions.\ntransform\n Geometric and other transforms, e.g. rotation or the Radon transform.\nutil\n Generic utilities.\n\nUtility Functions\n-----------------\nget_log\n Returns the ``skimage`` log. Use this to print debug output.\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\n\n\ndef _setup_test(verbose=False):\n import functools\n\n args = ['', '--exe', '-w', pkg_dir]\n if verbose:\n args.extend(['-v', '-s'])\n\n try:\n import nose as _nose\n except ImportError:\n print(\"Could not load nose. 
Unit tests not available.\")\n return None\n else:\n f = functools.partial(_nose.run, 'skimage', argv=args)\n f.__doc__ = 'Invoke the skimage test suite.'\n return f\n\ntest = _setup_test()\nif test is None:\n try:\n del test\n except NameError:\n pass\n\ntest_verbose = _setup_test(verbose=True)\nif test_verbose is None:\n try:\n del test\n except NameError:\n pass\n\n\ndef get_log(name=None):\n \"\"\"Return a console logger.\n\n Output may be sent to the logger using the `debug`, `info`, `warning`,\n `error` and `critical` methods.\n\n Parameters\n ----------\n name : str\n Name of the log.\n\n References\n ----------\n .. [1] Logging facility for Python,\n http://docs.python.org/library/logging.html\n\n \"\"\"\n import logging\n\n if name is None:\n name = 'skimage'\n else:\n name = 'skimage.' + name\n\n log = logging.getLogger(name)\n return log\n\n\ndef _setup_log():\n \"\"\"Configure root logger.\n\n \"\"\"\n import logging\n import sys\n\n log = logging.getLogger()\n\n try:\n handler = logging.StreamHandler(stream=sys.stdout)\n except TypeError:\n handler = logging.StreamHandler(strm=sys.stdout)\n\n formatter = logging.Formatter(\n '%(name)s: %(levelname)s: %(message)s'\n )\n handler.setFormatter(formatter)\n\n log.addHandler(handler)\n log.setLevel(logging.WARNING)\n\n_setup_log()\n\nfrom .util.dtype import *\n", "path": "skimage/__init__.py"}]}
1,792
266
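The skimage patch swaps an import-time print for a stub that fails only when invoked, so casual users importing the package see nothing. A standalone sketch of that deferred-failure pattern, assuming `nose` is merely an optional dependency:

def _setup_test():
    try:
        import nose  # optional dependency; may be absent
    except ImportError:
        def broken_test_func():
            """Fails only when the user explicitly asks to run the tests."""
            raise ImportError("Could not load nose. Unit tests not available.")
        return broken_test_func
    return nose.run  # the real entry point when nose is installed

test = _setup_test()  # importing stays silent whether or not nose exists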
gh_patches_debug_18589
rasdani/github-patches
git_diff
ethereum__web3.py-1763
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Upgrade Parity version in test infrastructure ### What was wrong? Our Parity tests run on version 2.3.5. According to a recent OpenEthereum [blog post](https://medium.com/openethereum/vision-for-openethereum-ex-parity-client-eb7b11f6eef8), latest stable recommendation is 2.5.13. </issue> <code> [start of web3/middleware/validation.py] 1 from typing import ( 2 TYPE_CHECKING, 3 Any, 4 Callable, 5 ) 6 7 from eth_utils.curried import ( 8 apply_formatter_at_index, 9 apply_formatter_if, 10 apply_formatters_to_dict, 11 is_null, 12 ) 13 from eth_utils.toolz import ( 14 complement, 15 compose, 16 curry, 17 dissoc, 18 ) 19 from hexbytes import ( 20 HexBytes, 21 ) 22 23 from web3._utils.rpc_abi import ( 24 RPC, 25 ) 26 from web3.exceptions import ( 27 ExtraDataLengthError, 28 ValidationError, 29 ) 30 from web3.middleware.formatting import ( 31 construct_web3_formatting_middleware, 32 ) 33 from web3.types import ( 34 FormattersDict, 35 TxParams, 36 ) 37 38 if TYPE_CHECKING: 39 from web3 import Web3 # noqa: F401 40 41 MAX_EXTRADATA_LENGTH = 32 42 43 is_not_null = complement(is_null) 44 45 46 @curry 47 def validate_chain_id(web3: "Web3", chain_id: int) -> int: 48 if int(chain_id) == web3.eth.chainId: 49 return chain_id 50 else: 51 raise ValidationError( 52 "The transaction declared chain ID %r, " 53 "but the connected node is on %r" % ( 54 chain_id, 55 web3.eth.chainId, 56 ) 57 ) 58 59 60 def check_extradata_length(val: Any) -> Any: 61 if not isinstance(val, (str, int, bytes)): 62 return val 63 result = HexBytes(val) 64 if len(result) > MAX_EXTRADATA_LENGTH: 65 raise ExtraDataLengthError( 66 "The field extraData is %d bytes, but should be %d. " 67 "It is quite likely that you are connected to a POA chain. " 68 "Refer to " 69 "http://web3py.readthedocs.io/en/stable/middleware.html#geth-style-proof-of-authority " 70 "for more details. 
The full extraData is: %r" % ( 71 len(result), MAX_EXTRADATA_LENGTH, result 72 ) 73 ) 74 return val 75 76 77 def transaction_normalizer(transaction: TxParams) -> TxParams: 78 return dissoc(transaction, 'chainId') 79 80 81 def transaction_param_validator(web3: "Web3") -> Callable[..., Any]: 82 transactions_params_validators = { 83 "chainId": apply_formatter_if( 84 # Bypass `validate_chain_id` if chainId can't be determined 85 lambda _: is_not_null(web3.eth.chainId), 86 validate_chain_id(web3), 87 ), 88 } 89 return apply_formatter_at_index( 90 apply_formatters_to_dict(transactions_params_validators), 91 0 92 ) 93 94 95 BLOCK_VALIDATORS = { 96 'extraData': check_extradata_length, 97 } 98 99 100 block_validator = apply_formatter_if( 101 is_not_null, 102 apply_formatters_to_dict(BLOCK_VALIDATORS) 103 ) 104 105 106 @curry 107 def chain_id_validator(web3: "Web3") -> Callable[..., Any]: 108 return compose( 109 apply_formatter_at_index(transaction_normalizer, 0), 110 transaction_param_validator(web3) 111 ) 112 113 114 def build_validators_with_web3(w3: "Web3") -> FormattersDict: 115 return dict( 116 request_formatters={ 117 RPC.eth_sendTransaction: chain_id_validator(w3), 118 RPC.eth_estimateGas: chain_id_validator(w3), 119 RPC.eth_call: chain_id_validator(w3), 120 }, 121 result_formatters={ 122 RPC.eth_getBlockByHash: block_validator, 123 RPC.eth_getBlockByNumber: block_validator, 124 }, 125 ) 126 127 128 validation_middleware = construct_web3_formatting_middleware(build_validators_with_web3) 129 [end of web3/middleware/validation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/web3/middleware/validation.py b/web3/middleware/validation.py --- a/web3/middleware/validation.py +++ b/web3/middleware/validation.py @@ -9,6 +9,7 @@ apply_formatter_if, apply_formatters_to_dict, is_null, + is_string, ) from eth_utils.toolz import ( complement, @@ -20,6 +21,9 @@ HexBytes, ) +from web3._utils.formatters import ( + hex_to_integer, +) from web3._utils.rpc_abi import ( RPC, ) @@ -42,10 +46,12 @@ is_not_null = complement(is_null) +to_integer_if_hex = apply_formatter_if(is_string, hex_to_integer) + @curry def validate_chain_id(web3: "Web3", chain_id: int) -> int: - if int(chain_id) == web3.eth.chainId: + if to_integer_if_hex(chain_id) == web3.eth.chainId: return chain_id else: raise ValidationError(
{"golden_diff": "diff --git a/web3/middleware/validation.py b/web3/middleware/validation.py\n--- a/web3/middleware/validation.py\n+++ b/web3/middleware/validation.py\n@@ -9,6 +9,7 @@\n apply_formatter_if,\n apply_formatters_to_dict,\n is_null,\n+ is_string,\n )\n from eth_utils.toolz import (\n complement,\n@@ -20,6 +21,9 @@\n HexBytes,\n )\n \n+from web3._utils.formatters import (\n+ hex_to_integer,\n+)\n from web3._utils.rpc_abi import (\n RPC,\n )\n@@ -42,10 +46,12 @@\n \n is_not_null = complement(is_null)\n \n+to_integer_if_hex = apply_formatter_if(is_string, hex_to_integer)\n+\n \n @curry\n def validate_chain_id(web3: \"Web3\", chain_id: int) -> int:\n- if int(chain_id) == web3.eth.chainId:\n+ if to_integer_if_hex(chain_id) == web3.eth.chainId:\n return chain_id\n else:\n raise ValidationError(\n", "issue": "Upgrade Parity version in test infrastructure\n### What was wrong?\r\n\r\nOur Parity tests run on version 2.3.5. According to a recent OpenEthereum [blog post](https://medium.com/openethereum/vision-for-openethereum-ex-parity-client-eb7b11f6eef8), latest stable recommendation is 2.5.13.\n", "before_files": [{"content": "from typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n)\n\nfrom eth_utils.curried import (\n apply_formatter_at_index,\n apply_formatter_if,\n apply_formatters_to_dict,\n is_null,\n)\nfrom eth_utils.toolz import (\n complement,\n compose,\n curry,\n dissoc,\n)\nfrom hexbytes import (\n HexBytes,\n)\n\nfrom web3._utils.rpc_abi import (\n RPC,\n)\nfrom web3.exceptions import (\n ExtraDataLengthError,\n ValidationError,\n)\nfrom web3.middleware.formatting import (\n construct_web3_formatting_middleware,\n)\nfrom web3.types import (\n FormattersDict,\n TxParams,\n)\n\nif TYPE_CHECKING:\n from web3 import Web3 # noqa: F401\n\nMAX_EXTRADATA_LENGTH = 32\n\nis_not_null = complement(is_null)\n\n\n@curry\ndef validate_chain_id(web3: \"Web3\", chain_id: int) -> int:\n if int(chain_id) == web3.eth.chainId:\n return chain_id\n else:\n raise ValidationError(\n \"The transaction declared chain ID %r, \"\n \"but the connected node is on %r\" % (\n chain_id,\n web3.eth.chainId,\n )\n )\n\n\ndef check_extradata_length(val: Any) -> Any:\n if not isinstance(val, (str, int, bytes)):\n return val\n result = HexBytes(val)\n if len(result) > MAX_EXTRADATA_LENGTH:\n raise ExtraDataLengthError(\n \"The field extraData is %d bytes, but should be %d. \"\n \"It is quite likely that you are connected to a POA chain. \"\n \"Refer to \"\n \"http://web3py.readthedocs.io/en/stable/middleware.html#geth-style-proof-of-authority \"\n \"for more details. 
The full extraData is: %r\" % (\n len(result), MAX_EXTRADATA_LENGTH, result\n )\n )\n return val\n\n\ndef transaction_normalizer(transaction: TxParams) -> TxParams:\n return dissoc(transaction, 'chainId')\n\n\ndef transaction_param_validator(web3: \"Web3\") -> Callable[..., Any]:\n transactions_params_validators = {\n \"chainId\": apply_formatter_if(\n # Bypass `validate_chain_id` if chainId can't be determined\n lambda _: is_not_null(web3.eth.chainId),\n validate_chain_id(web3),\n ),\n }\n return apply_formatter_at_index(\n apply_formatters_to_dict(transactions_params_validators),\n 0\n )\n\n\nBLOCK_VALIDATORS = {\n 'extraData': check_extradata_length,\n}\n\n\nblock_validator = apply_formatter_if(\n is_not_null,\n apply_formatters_to_dict(BLOCK_VALIDATORS)\n)\n\n\n@curry\ndef chain_id_validator(web3: \"Web3\") -> Callable[..., Any]:\n return compose(\n apply_formatter_at_index(transaction_normalizer, 0),\n transaction_param_validator(web3)\n )\n\n\ndef build_validators_with_web3(w3: \"Web3\") -> FormattersDict:\n return dict(\n request_formatters={\n RPC.eth_sendTransaction: chain_id_validator(w3),\n RPC.eth_estimateGas: chain_id_validator(w3),\n RPC.eth_call: chain_id_validator(w3),\n },\n result_formatters={\n RPC.eth_getBlockByHash: block_validator,\n RPC.eth_getBlockByNumber: block_validator,\n },\n )\n\n\nvalidation_middleware = construct_web3_formatting_middleware(build_validators_with_web3)\n", "path": "web3/middleware/validation.py"}]}
1,664
237
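The web3 fix exists because `int(chain_id)` cannot parse a hex string such as '0x1' (base-10 `int()` rejects the 0x prefix). A self-contained normalizer in the same spirit; `to_integer_if_hex` here reimplements the idea rather than importing web3's own `hex_to_integer` formatter.

def to_integer_if_hex(value):
    """Accept 1, '1', or '0x1' and always return an int."""
    if isinstance(value, str):
        # plain int('0x1') raises ValueError, which is the original bug
        return int(value, 16) if value.startswith('0x') else int(value)
    return value

assert to_integer_if_hex('0x1') == 1
assert to_integer_if_hex('5') == 5
assert to_integer_if_hex(1) == 1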
gh_patches_debug_18666
rasdani/github-patches
git_diff
mozilla__pontoon-3061
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fluent pretranslations not serialized in canonical form Fluent pretranslations are not stored in a canonical form. For example, `key = {example}` should actually be stored as `key = { example }` (mind the whitespace). In the exported files in the repository, the strings appear as they should, which results in sync trying to import pretranslations, because they aren't the same as in Pontoon DB. That also triggers #2284. In the UI (editor, string list) the strings are also rendered properly, which only makes the problem more difficult to spot. We should make sure that pretranslations are always stored them in the canonical form. </issue> <code> [start of pontoon/pretranslation/pretranslate.py] 1 import logging 2 import operator 3 import re 4 5 from django.db.models import CharField, Value as V 6 from django.db.models.functions import Concat 7 8 from fluent.syntax import FluentParser, FluentSerializer 9 from functools import reduce 10 11 from pontoon.base.models import User, TranslatedResource 12 from pontoon.machinery.utils import ( 13 get_google_translate_data, 14 get_translation_memory_data, 15 ) 16 from pontoon.pretranslation import AUTHORS 17 from .transformer import ApplyPretranslation 18 19 20 log = logging.getLogger(__name__) 21 22 parser = FluentParser() 23 serializer = FluentSerializer() 24 25 26 def get_pretranslations(entity, locale, preserve_placeables=False): 27 """ 28 Get pretranslations for the entity-locale pair using internal translation memory and 29 Google's machine translation. 30 31 For Fluent strings, uplift SelectExpressions, serialize Placeables as TextElements 32 and then only pretranslate TextElements. Set the most frequent TextElement 33 pretranslation author as the author of the entire pretranslation. 
34 35 :arg Entity entity: the Entity object 36 :arg Locale locale: the Locale object 37 :arg boolean preserve_placeables 38 39 :returns: a list of tuples, consisting of: 40 - a pretranslation of the entity 41 - a plural form 42 - a user (representing TM or GT service) 43 """ 44 source = entity.string 45 services = {k: User.objects.get(email=email) for k, email in AUTHORS.items()} 46 47 if entity.resource.format == "ftl": 48 entry = parser.parse_entry(source) 49 pretranslate = ApplyPretranslation( 50 locale, entry, get_pretranslated_data, preserve_placeables 51 ) 52 53 try: 54 pretranslate.visit(entry) 55 except ValueError as e: 56 log.info(f"Fluent pretranslation error: {e}") 57 return [] 58 59 pretranslation = serializer.serialize_entry(entry) 60 61 authors = [services[service] for service in pretranslate.services] 62 author = max(set(authors), key=authors.count) if authors else services["tm"] 63 64 return [(pretranslation, None, author)] 65 66 else: 67 pretranslation, service = get_pretranslated_data( 68 source, locale, preserve_placeables 69 ) 70 71 if pretranslation is None: 72 return [] 73 74 author = services[service] 75 if entity.string_plural == "": 76 return [(pretranslation, None, author)] 77 else: 78 plural_forms = range(0, locale.nplurals or 1) 79 return [ 80 (pretranslation, plural_form, author) for plural_form in plural_forms 81 ] 82 83 84 def get_pretranslated_data(source, locale, preserve_placeables): 85 # Empty strings do not need translation 86 if re.search("^\\s*$", source): 87 return source, "tm" 88 89 # Try to get matches from Translation Memory 90 tm_response = get_translation_memory_data(text=source, locale=locale) 91 tm_perfect = [t for t in tm_response if int(t["quality"]) == 100] 92 if tm_perfect: 93 return tm_perfect[0]["target"], "tm" 94 95 # Fetch from Google Translate 96 elif locale.google_translate_code: 97 gt_response = get_google_translate_data( 98 text=source, locale=locale, preserve_placeables=preserve_placeables 99 ) 100 if gt_response["status"]: 101 return gt_response["translation"], "gt" 102 103 return None, None 104 105 106 def update_changed_instances(tr_filter, tr_dict, translations): 107 """ 108 Update the latest activity and stats for changed Locales, ProjectLocales 109 & TranslatedResources 110 """ 111 tr_filter = tuple(tr_filter) 112 # Combine all generated filters with an OK operator. 113 # `operator.ior` is the '|' Python operator, which turns into a logical OR 114 # when used between django ORM query objects. 115 tr_query = reduce(operator.ior, tr_filter) 116 117 translatedresources = TranslatedResource.objects.filter(tr_query).annotate( 118 locale_resource=Concat( 119 "locale_id", V("-"), "resource_id", output_field=CharField() 120 ) 121 ) 122 123 translatedresources.update_stats() 124 125 for tr in translatedresources: 126 index = tr_dict[tr.locale_resource] 127 translation = translations[index] 128 translation.update_latest_translation() 129 [end of pontoon/pretranslation/pretranslate.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py --- a/pontoon/pretranslation/pretranslate.py +++ b/pontoon/pretranslation/pretranslate.py @@ -58,6 +58,10 @@ pretranslation = serializer.serialize_entry(entry) + # Parse and serialize pretranslation again in order to assure cannonical style + parsed_pretranslation = parser.parse_entry(pretranslation) + pretranslation = serializer.serialize_entry(parsed_pretranslation) + authors = [services[service] for service in pretranslate.services] author = max(set(authors), key=authors.count) if authors else services["tm"] @@ -82,7 +86,7 @@ def get_pretranslated_data(source, locale, preserve_placeables): - # Empty strings do not need translation + # Empty strings and strings containing whitespace only do not need translation if re.search("^\\s*$", source): return source, "tm"
{"golden_diff": "diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py\n--- a/pontoon/pretranslation/pretranslate.py\n+++ b/pontoon/pretranslation/pretranslate.py\n@@ -58,6 +58,10 @@\n \n pretranslation = serializer.serialize_entry(entry)\n \n+ # Parse and serialize pretranslation again in order to assure cannonical style\n+ parsed_pretranslation = parser.parse_entry(pretranslation)\n+ pretranslation = serializer.serialize_entry(parsed_pretranslation)\n+\n authors = [services[service] for service in pretranslate.services]\n author = max(set(authors), key=authors.count) if authors else services[\"tm\"]\n \n@@ -82,7 +86,7 @@\n \n \n def get_pretranslated_data(source, locale, preserve_placeables):\n- # Empty strings do not need translation\n+ # Empty strings and strings containing whitespace only do not need translation\n if re.search(\"^\\\\s*$\", source):\n return source, \"tm\"\n", "issue": "Fluent pretranslations not serialized in canonical form\nFluent pretranslations are not stored in a canonical form. For example, `key = {example}` should actually be stored as `key = { example }` (mind the whitespace).\r\n\r\nIn the exported files in the repository, the strings appear as they should, which results in sync trying to import pretranslations, because they aren't the same as in Pontoon DB. That also triggers #2284. \r\n\r\nIn the UI (editor, string list) the strings are also rendered properly, which only makes the problem more difficult to spot.\r\n\r\nWe should make sure that pretranslations are always stored them in the canonical form.\n", "before_files": [{"content": "import logging\nimport operator\nimport re\n\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\n\nfrom fluent.syntax import FluentParser, FluentSerializer\nfrom functools import reduce\n\nfrom pontoon.base.models import User, TranslatedResource\nfrom pontoon.machinery.utils import (\n get_google_translate_data,\n get_translation_memory_data,\n)\nfrom pontoon.pretranslation import AUTHORS\nfrom .transformer import ApplyPretranslation\n\n\nlog = logging.getLogger(__name__)\n\nparser = FluentParser()\nserializer = FluentSerializer()\n\n\ndef get_pretranslations(entity, locale, preserve_placeables=False):\n \"\"\"\n Get pretranslations for the entity-locale pair using internal translation memory and\n Google's machine translation.\n\n For Fluent strings, uplift SelectExpressions, serialize Placeables as TextElements\n and then only pretranslate TextElements. 
Set the most frequent TextElement\n pretranslation author as the author of the entire pretranslation.\n\n :arg Entity entity: the Entity object\n :arg Locale locale: the Locale object\n :arg boolean preserve_placeables\n\n :returns: a list of tuples, consisting of:\n - a pretranslation of the entity\n - a plural form\n - a user (representing TM or GT service)\n \"\"\"\n source = entity.string\n services = {k: User.objects.get(email=email) for k, email in AUTHORS.items()}\n\n if entity.resource.format == \"ftl\":\n entry = parser.parse_entry(source)\n pretranslate = ApplyPretranslation(\n locale, entry, get_pretranslated_data, preserve_placeables\n )\n\n try:\n pretranslate.visit(entry)\n except ValueError as e:\n log.info(f\"Fluent pretranslation error: {e}\")\n return []\n\n pretranslation = serializer.serialize_entry(entry)\n\n authors = [services[service] for service in pretranslate.services]\n author = max(set(authors), key=authors.count) if authors else services[\"tm\"]\n\n return [(pretranslation, None, author)]\n\n else:\n pretranslation, service = get_pretranslated_data(\n source, locale, preserve_placeables\n )\n\n if pretranslation is None:\n return []\n\n author = services[service]\n if entity.string_plural == \"\":\n return [(pretranslation, None, author)]\n else:\n plural_forms = range(0, locale.nplurals or 1)\n return [\n (pretranslation, plural_form, author) for plural_form in plural_forms\n ]\n\n\ndef get_pretranslated_data(source, locale, preserve_placeables):\n # Empty strings do not need translation\n if re.search(\"^\\\\s*$\", source):\n return source, \"tm\"\n\n # Try to get matches from Translation Memory\n tm_response = get_translation_memory_data(text=source, locale=locale)\n tm_perfect = [t for t in tm_response if int(t[\"quality\"]) == 100]\n if tm_perfect:\n return tm_perfect[0][\"target\"], \"tm\"\n\n # Fetch from Google Translate\n elif locale.google_translate_code:\n gt_response = get_google_translate_data(\n text=source, locale=locale, preserve_placeables=preserve_placeables\n )\n if gt_response[\"status\"]:\n return gt_response[\"translation\"], \"gt\"\n\n return None, None\n\n\ndef update_changed_instances(tr_filter, tr_dict, translations):\n \"\"\"\n Update the latest activity and stats for changed Locales, ProjectLocales\n & TranslatedResources\n \"\"\"\n tr_filter = tuple(tr_filter)\n # Combine all generated filters with an OK operator.\n # `operator.ior` is the '|' Python operator, which turns into a logical OR\n # when used between django ORM query objects.\n tr_query = reduce(operator.ior, tr_filter)\n\n translatedresources = TranslatedResource.objects.filter(tr_query).annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n\n translatedresources.update_stats()\n\n for tr in translatedresources:\n index = tr_dict[tr.locale_resource]\n translation = translations[index]\n translation.update_latest_translation()\n", "path": "pontoon/pretranslation/pretranslate.py"}]}
1,853
216
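The pontoon patch normalizes pretranslations with a parse/serialize round-trip through the same `FluentParser`/`FluentSerializer` pair the module already imports. A minimal sketch of that round-trip; it requires the `fluent.syntax` package, and the sample entry is illustrative.

from fluent.syntax import FluentParser, FluentSerializer

parser = FluentParser()
serializer = FluentSerializer()

def canonicalize(ftl_entry):
    """Re-serialize so whitespace matches what file export produces."""
    return serializer.serialize_entry(parser.parse_entry(ftl_entry))

print(canonicalize("key = {example}"))  # should print the canonical form: key = { example }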
gh_patches_debug_18957
rasdani/github-patches
git_diff
wagtail__wagtail-8171
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tags field needs universal help text to explain need to quote multi-word tags Wagtail's tagging system requires multi-word tags to be quoted, to prevent a two-word tag becoming two separate tags. This appears to catch users out repeatedly and has generated several Issues in recent months. The tag field widget, wherever used (page models, images, documents etc) should have standard help text (appearing beneath it like all help text) explaining the need for quoting. Explaining the need for comma-delimiting might also be worth adding while we're at it. </issue> <code> [start of wagtail/admin/widgets/tags.py] 1 import json 2 3 from django.conf import settings 4 from django.urls import reverse 5 from taggit.forms import TagWidget 6 from taggit.models import Tag 7 8 9 class AdminTagWidget(TagWidget): 10 template_name = "wagtailadmin/widgets/tag_widget.html" 11 12 def __init__(self, *args, **kwargs): 13 self.tag_model = kwargs.pop("tag_model", Tag) 14 # free_tagging = None means defer to the tag model's setting 15 self.free_tagging = kwargs.pop("free_tagging", None) 16 super().__init__(*args, **kwargs) 17 18 def get_context(self, name, value, attrs): 19 context = super().get_context(name, value, attrs) 20 21 if self.tag_model == Tag: 22 autocomplete_url = reverse("wagtailadmin_tag_autocomplete") 23 else: 24 autocomplete_url = reverse( 25 "wagtailadmin_tag_model_autocomplete", 26 args=(self.tag_model._meta.app_label, self.tag_model._meta.model_name), 27 ) 28 29 if self.free_tagging is None: 30 free_tagging = getattr(self.tag_model, "free_tagging", True) 31 else: 32 free_tagging = self.free_tagging 33 34 context["widget"]["autocomplete_url"] = autocomplete_url 35 context["widget"]["options_json"] = json.dumps( 36 { 37 "allowSpaces": getattr(settings, "TAG_SPACES_ALLOWED", True), 38 "tagLimit": getattr(settings, "TAG_LIMIT", None), 39 "autocompleteOnly": not free_tagging, 40 } 41 ) 42 43 return context 44 [end of wagtail/admin/widgets/tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/admin/widgets/tags.py b/wagtail/admin/widgets/tags.py --- a/wagtail/admin/widgets/tags.py +++ b/wagtail/admin/widgets/tags.py @@ -2,6 +2,7 @@ from django.conf import settings from django.urls import reverse +from django.utils.translation import gettext_lazy as _ from taggit.forms import TagWidget from taggit.models import Tag @@ -31,6 +32,15 @@ else: free_tagging = self.free_tagging + tag_spaces_allowed = getattr(settings, "TAG_SPACES_ALLOWED", True) + if tag_spaces_allowed: + help_text = _( + 'Multi-word tags with spaces will automatically be enclosed in double quotes (").' + ) + else: + help_text = _("Tags can only consist of a single word, no spaces allowed.") + + context["widget"]["help_text"] = help_text context["widget"]["autocomplete_url"] = autocomplete_url context["widget"]["options_json"] = json.dumps( {
{"golden_diff": "diff --git a/wagtail/admin/widgets/tags.py b/wagtail/admin/widgets/tags.py\n--- a/wagtail/admin/widgets/tags.py\n+++ b/wagtail/admin/widgets/tags.py\n@@ -2,6 +2,7 @@\n \n from django.conf import settings\n from django.urls import reverse\n+from django.utils.translation import gettext_lazy as _\n from taggit.forms import TagWidget\n from taggit.models import Tag\n \n@@ -31,6 +32,15 @@\n else:\n free_tagging = self.free_tagging\n \n+ tag_spaces_allowed = getattr(settings, \"TAG_SPACES_ALLOWED\", True)\n+ if tag_spaces_allowed:\n+ help_text = _(\n+ 'Multi-word tags with spaces will automatically be enclosed in double quotes (\").'\n+ )\n+ else:\n+ help_text = _(\"Tags can only consist of a single word, no spaces allowed.\")\n+\n+ context[\"widget\"][\"help_text\"] = help_text\n context[\"widget\"][\"autocomplete_url\"] = autocomplete_url\n context[\"widget\"][\"options_json\"] = json.dumps(\n {\n", "issue": "Tags field needs universal help text to explain need to quote multi-word tags\nWagtail's tagging system requires multi-word tags to be quoted, to prevent a two-word tag becoming two separate tags. This appears to catch users out repeatedly and has generated several Issues in recent months.\n\nThe tag field widget, wherever used (page models, images, documents etc) should have standard help text (appearing beneath it like all help text) explaining the need for quoting. Explaining the need for comma-delimiting might also be worth adding while we're at it.\n\n", "before_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom taggit.forms import TagWidget\nfrom taggit.models import Tag\n\n\nclass AdminTagWidget(TagWidget):\n template_name = \"wagtailadmin/widgets/tag_widget.html\"\n\n def __init__(self, *args, **kwargs):\n self.tag_model = kwargs.pop(\"tag_model\", Tag)\n # free_tagging = None means defer to the tag model's setting\n self.free_tagging = kwargs.pop(\"free_tagging\", None)\n super().__init__(*args, **kwargs)\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n\n if self.tag_model == Tag:\n autocomplete_url = reverse(\"wagtailadmin_tag_autocomplete\")\n else:\n autocomplete_url = reverse(\n \"wagtailadmin_tag_model_autocomplete\",\n args=(self.tag_model._meta.app_label, self.tag_model._meta.model_name),\n )\n\n if self.free_tagging is None:\n free_tagging = getattr(self.tag_model, \"free_tagging\", True)\n else:\n free_tagging = self.free_tagging\n\n context[\"widget\"][\"autocomplete_url\"] = autocomplete_url\n context[\"widget\"][\"options_json\"] = json.dumps(\n {\n \"allowSpaces\": getattr(settings, \"TAG_SPACES_ALLOWED\", True),\n \"tagLimit\": getattr(settings, \"TAG_LIMIT\", None),\n \"autocompleteOnly\": not free_tagging,\n }\n )\n\n return context\n", "path": "wagtail/admin/widgets/tags.py"}]}
1,058
230
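The wagtail change is a settings-driven help text. Stripped of Django, the branch reduces to the sketch below, where the boolean argument stands in for the real `TAG_SPACES_ALLOWED` settings lookup.

def tag_help_text(tag_spaces_allowed=True):
    # stands in for getattr(settings, 'TAG_SPACES_ALLOWED', True)
    if tag_spaces_allowed:
        return ('Multi-word tags with spaces will automatically be '
                'enclosed in double quotes (").')
    return 'Tags can only consist of a single word, no spaces allowed.'

assert 'double quotes' in tag_help_text(True)
assert 'single word' in tag_help_text(False)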
gh_patches_debug_18003
rasdani/github-patches
git_diff
rasterio__rasterio-223
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> guard_transform prevents translation of unprojected files Where by translation I mean the pattern ``` python with rasterio.open(file1.tif) as src: with rasterio.open(file2.tif, 'w', **src.meta) as dst: ... ``` Traceback snippet: ``` Traceback (most recent call last): ... File "copier.py", line 15, in to_png with rio.open(dst_path, "w", **meta) as dst: File "xxx/anaconda/lib/python2.7/site-packages/rasterio/__init__.py", line 91, in open transform = guard_transform(transform) File "xxx/anaconda/lib/python2.7/site-packages/rasterio/transform.py", line 27, in guard_transform transform.a, transform.e)) ValueError: Transform has invalid coefficients a, e: (0.000000, 0.000000) ``` </issue> <code> [start of rasterio/transform.py] 1 2 import warnings 3 4 from affine import Affine 5 6 IDENTITY = Affine.identity() 7 8 def tastes_like_gdal(t): 9 return t[2] == t[4] == 0.0 and t[1] > 0 and t[5] < 0 10 11 def guard_transform(transform): 12 """Return an Affine transformation instance""" 13 if not isinstance(transform, Affine): 14 if tastes_like_gdal(transform): 15 warnings.warn( 16 "GDAL-style transforms are deprecated and will not " 17 "be supported in Rasterio 1.0.", 18 FutureWarning, 19 stacklevel=2) 20 transform = Affine.from_gdal(*transform) 21 else: 22 transform = Affine(*transform) 23 a, e = transform.a, transform.e 24 if a == 0.0 or e == 0.0: 25 raise ValueError( 26 "Transform has invalid coefficients a, e: (%f, %f)" % ( 27 transform.a, transform.e)) 28 return transform 29 30 [end of rasterio/transform.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rasterio/transform.py b/rasterio/transform.py --- a/rasterio/transform.py +++ b/rasterio/transform.py @@ -1,12 +1,14 @@ - import warnings from affine import Affine IDENTITY = Affine.identity() -def tastes_like_gdal(t): - return t[2] == t[4] == 0.0 and t[1] > 0 and t[5] < 0 + +def tastes_like_gdal(seq): + """Return True if `seq` matches the GDAL geotransform pattern.""" + return seq[2] == seq[4] == 0.0 and seq[1] > 0 and seq[5] < 0 + def guard_transform(transform): """Return an Affine transformation instance""" @@ -20,10 +22,4 @@ transform = Affine.from_gdal(*transform) else: transform = Affine(*transform) - a, e = transform.a, transform.e - if a == 0.0 or e == 0.0: - raise ValueError( - "Transform has invalid coefficients a, e: (%f, %f)" % ( - transform.a, transform.e)) return transform -
{"golden_diff": "diff --git a/rasterio/transform.py b/rasterio/transform.py\n--- a/rasterio/transform.py\n+++ b/rasterio/transform.py\n@@ -1,12 +1,14 @@\n-\n import warnings\n \n from affine import Affine\n \n IDENTITY = Affine.identity()\n \n-def tastes_like_gdal(t):\n- return t[2] == t[4] == 0.0 and t[1] > 0 and t[5] < 0\n+\n+def tastes_like_gdal(seq):\n+ \"\"\"Return True if `seq` matches the GDAL geotransform pattern.\"\"\"\n+ return seq[2] == seq[4] == 0.0 and seq[1] > 0 and seq[5] < 0\n+\n \n def guard_transform(transform):\n \"\"\"Return an Affine transformation instance\"\"\"\n@@ -20,10 +22,4 @@\n transform = Affine.from_gdal(*transform)\n else:\n transform = Affine(*transform)\n- a, e = transform.a, transform.e\n- if a == 0.0 or e == 0.0:\n- raise ValueError(\n- \"Transform has invalid coefficients a, e: (%f, %f)\" % (\n- transform.a, transform.e))\n return transform\n-\n", "issue": "guard_transform prevents translation of unprojected files\nWhere by translation I mean the pattern\n\n``` python\nwith rasterio.open(file1.tif) as src:\n with rasterio.open(file2.tif, 'w', **src.meta) as dst:\n ...\n```\n\nTraceback snippet:\n\n```\nTraceback (most recent call last):\n ...\n File \"copier.py\", line 15, in to_png\n with rio.open(dst_path, \"w\", **meta) as dst:\n File \"xxx/anaconda/lib/python2.7/site-packages/rasterio/__init__.py\", line 91, in open\n transform = guard_transform(transform)\n File \"xxx/anaconda/lib/python2.7/site-packages/rasterio/transform.py\", line 27, in guard_transform\n transform.a, transform.e))\nValueError: Transform has invalid coefficients a, e: (0.000000, 0.000000)\n```\n\n", "before_files": [{"content": "\nimport warnings\n\nfrom affine import Affine\n\nIDENTITY = Affine.identity()\n\ndef tastes_like_gdal(t):\n return t[2] == t[4] == 0.0 and t[1] > 0 and t[5] < 0\n\ndef guard_transform(transform):\n \"\"\"Return an Affine transformation instance\"\"\"\n if not isinstance(transform, Affine):\n if tastes_like_gdal(transform):\n warnings.warn(\n \"GDAL-style transforms are deprecated and will not \"\n \"be supported in Rasterio 1.0.\",\n FutureWarning,\n stacklevel=2)\n transform = Affine.from_gdal(*transform)\n else:\n transform = Affine(*transform)\n a, e = transform.a, transform.e\n if a == 0.0 or e == 0.0:\n raise ValueError(\n \"Transform has invalid coefficients a, e: (%f, %f)\" % (\n transform.a, transform.e))\n return transform\n\n", "path": "rasterio/transform.py"}]}
1,011
287
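The rasterio record revolves around the src-to-dst metadata copy idiom that `guard_transform` was breaking for unprojected files. The idiom itself, written against the modern rasterio API with hypothetical file names ('file1.tif' must exist for this to run):

import rasterio

with rasterio.open('file1.tif') as src:          # hypothetical source file
    with rasterio.open('file2.tif', 'w', **src.meta) as dst:
        for band_index in range(1, src.count + 1):
            dst.write(src.read(band_index), band_index)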
gh_patches_debug_9370
rasdani/github-patches
git_diff
liqd__a4-product-360
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [f2f module] wording Here are the wordings: 1 Edit face-to-face participation information Informationen zur Vor-Ort-Beteiligung bearbeiten 2 Title Titel 3 Highlighted Info Hervorgehobene Information 3a (Hilfetext) Highlight important information like the time or location of your face-to-face event Zur Hervorhebung von wichtigen Informationen wie Ort oder Zeitraum der Vor-Ort-Beteiligung 4 Description Beschreibung 5 Face-to-Face Information Informationen Vor-Ort-Beteiligung 6 Face-to-Face Participation Vor-Ort-Beteiligung 7 With this module you can provide information about events or phases for face-to-face participation. No online participation is possible in this module. Mit diesem Modul können Informationen über Veranstaltungen und Phasen zur Vor-Ort-Beteiligung bereitgestellt werden. In diesem Modul ist keine Online-Beteiligung möglich. 8 Phase 1: Provide information about face-to-face participation events Phase 1: Informationen zur Vor-Ort-Beteiligung bereitstellen ![f2f-screenshot_1](https://user-images.githubusercontent.com/15341015/39699926-9f0fe034-51fb-11e8-951f-9c8cdbae013f.png) ![f2f-screenshot_2](https://user-images.githubusercontent.com/15341015/39699932-a4223c98-51fb-11e8-9642-11eac8aee6ea.png) </issue> <code> [start of liqd_product/apps/dashboard/blueprints.py] 1 from django.utils.translation import ugettext_lazy as _ 2 3 from adhocracy4.dashboard.blueprints import ProjectBlueprint 4 from meinberlin.apps.activities import phases as activities_phases 5 from meinberlin.apps.budgeting import phases as budgeting_phases 6 from meinberlin.apps.documents import phases as documents_phases 7 from meinberlin.apps.ideas import phases as ideas_phases 8 from meinberlin.apps.mapideas import phases as mapideas_phases 9 from meinberlin.apps.polls import phases as poll_phases 10 11 blueprints = [ 12 ('brainstorming', 13 ProjectBlueprint( 14 title=_('Brainstorming'), 15 description=_( 16 'Collect first ideas for a specific topic and comment on them.' 17 ), 18 content=[ 19 ideas_phases.CollectPhase(), 20 ], 21 image='images/brainstorming.svg', 22 settings_model=None, 23 )), 24 ('map-brainstorming', 25 ProjectBlueprint( 26 title=_('Spatial Brainstorming'), 27 description=_( 28 'Collect location specific ideas for a topic and comment on them.' 29 ), 30 content=[ 31 mapideas_phases.CollectPhase(), 32 ], 33 image='images/map-brainstorming.svg', 34 settings_model=('a4maps', 'AreaSettings'), 35 )), 36 ('map-idea-collection', 37 ProjectBlueprint( 38 title=_('Spatial Idea Collection'), 39 description=_( 40 'Collect location specific ideas that can be rated and commented.' 41 ), 42 content=[ 43 mapideas_phases.CollectPhase(), 44 mapideas_phases.RatingPhase() 45 ], 46 image='images/map-idea-collection.svg', 47 settings_model=('a4maps', 'AreaSettings'), 48 )), 49 ('agenda-setting', 50 ProjectBlueprint( 51 title=_('Agenda Setting'), 52 description=_( 53 'With Agenda-Setting it’s possible to identify topics and to ' 54 'define mission statements. Afterwards anyone can comment and ' 55 'rate on different topics.' 56 ), 57 content=[ 58 ideas_phases.CollectPhase(), 59 ideas_phases.RatingPhase(), 60 ], 61 image='images/agenda-setting.svg', 62 settings_model=None, 63 )), 64 ('text-review', 65 ProjectBlueprint( 66 title=_('Text Review'), 67 description=_( 68 'In the text-review it’s possible to structure draft texts ' 69 'that can be commented.' 
70 ), 71 content=[ 72 documents_phases.CommentPhase(), 73 ], 74 image='images/text-review.svg', 75 settings_model=None, 76 )), 77 ('poll', 78 ProjectBlueprint( 79 title=_('Poll'), 80 description=_( 81 'Create a poll with multiple questions and possible answers. ' 82 'Anyone can cast votes and comment on the poll.' 83 ), 84 content=[ 85 poll_phases.VotingPhase(), 86 ], 87 image='images/poll.svg', 88 settings_model=None, 89 )), 90 ('participatory-budgeting', 91 ProjectBlueprint( 92 title=_('Participatory budgeting'), 93 description=_( 94 'With participatory-budgeting it’s possible to make proposals ' 95 'with budget specifications and locate them. Anyone can comment ' 96 'and rate on different proposals.'), 97 content=[ 98 budgeting_phases.RequestPhase()], 99 image='images/participatory-budgeting.svg', 100 settings_model=('a4maps', 'AreaSettings'), 101 )), 102 ('facetoface', 103 ProjectBlueprint( 104 title=_('Face to Face Participation'), 105 description=_( 106 'Share info about a face to face participation event.' 107 ), 108 content=[ 109 activities_phases.FaceToFacePhase(), 110 ], 111 image='images/facetoface.svg', 112 settings_model=None, 113 )), 114 ] 115 [end of liqd_product/apps/dashboard/blueprints.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/liqd_product/apps/dashboard/blueprints.py b/liqd_product/apps/dashboard/blueprints.py --- a/liqd_product/apps/dashboard/blueprints.py +++ b/liqd_product/apps/dashboard/blueprints.py @@ -101,9 +101,11 @@ )), ('facetoface', ProjectBlueprint( - title=_('Face to Face Participation'), + title=_('Face-to-Face Participation'), description=_( - 'Share info about a face to face participation event.' + 'With this module you can provide information about events or ' + 'phases for face-to-face participation. No online participation ' + 'is possible in this module.' ), content=[ activities_phases.FaceToFacePhase(),
{"golden_diff": "diff --git a/liqd_product/apps/dashboard/blueprints.py b/liqd_product/apps/dashboard/blueprints.py\n--- a/liqd_product/apps/dashboard/blueprints.py\n+++ b/liqd_product/apps/dashboard/blueprints.py\n@@ -101,9 +101,11 @@\n )),\n ('facetoface',\n ProjectBlueprint(\n- title=_('Face to Face Participation'),\n+ title=_('Face-to-Face Participation'),\n description=_(\n- 'Share info about a face to face participation event.'\n+ 'With this module you can provide information about events or '\n+ 'phases for face-to-face participation. No online participation '\n+ 'is possible in this module.'\n ),\n content=[\n activities_phases.FaceToFacePhase(),\n", "issue": "[f2f module] wording\nHere are the wordings:\r\n\r\n1 \r\nEdit face-to-face participation information\r\nInformationen zur Vor-Ort-Beteiligung bearbeiten\r\n\r\n2\r\nTitle\r\nTitel\r\n\r\n3\r\nHighlighted Info\r\nHervorgehobene Information\r\n\r\n3a (Hilfetext)\r\nHighlight important information like the time or location of your face-to-face event\r\nZur Hervorhebung von wichtigen Informationen wie Ort oder Zeitraum der Vor-Ort-Beteiligung\r\n\r\n4\r\nDescription\r\nBeschreibung\r\n\r\n5\r\nFace-to-Face Information\r\nInformationen Vor-Ort-Beteiligung\r\n\r\n6\r\nFace-to-Face Participation\r\nVor-Ort-Beteiligung\r\n\r\n7\r\nWith this module you can provide information about events or phases for face-to-face participation. No online participation is possible in this module.\r\nMit diesem Modul k\u00f6nnen Informationen \u00fcber Veranstaltungen und Phasen zur Vor-Ort-Beteiligung bereitgestellt werden. In diesem Modul ist keine Online-Beteiligung m\u00f6glich.\r\n\r\n8\r\nPhase 1: Provide information about face-to-face participation events\r\nPhase 1: Informationen zur Vor-Ort-Beteiligung bereitstellen\r\n\r\n![f2f-screenshot_1](https://user-images.githubusercontent.com/15341015/39699926-9f0fe034-51fb-11e8-951f-9c8cdbae013f.png)\r\n![f2f-screenshot_2](https://user-images.githubusercontent.com/15341015/39699932-a4223c98-51fb-11e8-9642-11eac8aee6ea.png)\r\n\r\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom meinberlin.apps.activities import phases as activities_phases\nfrom meinberlin.apps.budgeting import phases as budgeting_phases\nfrom meinberlin.apps.documents import phases as documents_phases\nfrom meinberlin.apps.ideas import phases as ideas_phases\nfrom meinberlin.apps.mapideas import phases as mapideas_phases\nfrom meinberlin.apps.polls import phases as poll_phases\n\nblueprints = [\n ('brainstorming',\n ProjectBlueprint(\n title=_('Brainstorming'),\n description=_(\n 'Collect first ideas for a specific topic and comment on them.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.svg',\n settings_model=None,\n )),\n ('map-brainstorming',\n ProjectBlueprint(\n title=_('Spatial Brainstorming'),\n description=_(\n 'Collect location specific ideas for a topic and comment on them.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n ],\n image='images/map-brainstorming.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('map-idea-collection',\n ProjectBlueprint(\n title=_('Spatial Idea Collection'),\n description=_(\n 'Collect location specific ideas that can be rated and commented.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n mapideas_phases.RatingPhase()\n ],\n image='images/map-idea-collection.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n 
('agenda-setting',\n ProjectBlueprint(\n title=_('Agenda Setting'),\n description=_(\n 'With Agenda-Setting it\u2019s possible to identify topics and to '\n 'define mission statements. Afterwards anyone can comment and '\n 'rate on different topics.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ideas_phases.RatingPhase(),\n ],\n image='images/agenda-setting.svg',\n settings_model=None,\n )),\n ('text-review',\n ProjectBlueprint(\n title=_('Text Review'),\n description=_(\n 'In the text-review it\u2019s possible to structure draft texts '\n 'that can be commented.'\n ),\n content=[\n documents_phases.CommentPhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n ('poll',\n ProjectBlueprint(\n title=_('Poll'),\n description=_(\n 'Create a poll with multiple questions and possible answers. '\n 'Anyone can cast votes and comment on the poll.'\n ),\n content=[\n poll_phases.VotingPhase(),\n ],\n image='images/poll.svg',\n settings_model=None,\n )),\n ('participatory-budgeting',\n ProjectBlueprint(\n title=_('Participatory budgeting'),\n description=_(\n 'With participatory-budgeting it\u2019s possible to make proposals '\n 'with budget specifications and locate them. Anyone can comment '\n 'and rate on different proposals.'),\n content=[\n budgeting_phases.RequestPhase()],\n image='images/participatory-budgeting.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('facetoface',\n ProjectBlueprint(\n title=_('Face to Face Participation'),\n description=_(\n 'Share info about a face to face participation event.'\n ),\n content=[\n activities_phases.FaceToFacePhase(),\n ],\n image='images/facetoface.svg',\n settings_model=None,\n )),\n]\n", "path": "liqd_product/apps/dashboard/blueprints.py"}]}
1,911
160
gh_patches_debug_28372
rasdani/github-patches
git_diff
pypa__setuptools-4283
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Deduplicate testing dependencies by referencing `[testing-integration]` <!-- First time contributors: Take a moment to review https://setuptools.pypa.io/en/latest/development/developer-guide.html! --> <!-- Remove sections if not applicable --> ## Summary of changes This was my original idea, but in https://github.com/pypa/setuptools/pull/4257#discussion_r1514754970, @abravalheri mentioned that: > [...] I think we just remove the `testing-integration` and use `testing` everywhere... > > Although it might add a bit of overhead for the integration tests, it will simplify and streamline the setup... This way we reduce the divergence with `skeleton`. The additional overhead in the integration tests should not be too much, and the integration tests just run before the releases anyway. But I'm still opening this PR as an option / for comparison. Accepting this closes #4282 ### Pull Request Checklist - [x] Changes have tests (these are test changes) - [x] News fragment added in [`newsfragments/`]. (no user facing changes) _(See [documentation][PR docs] for details)_ [`newsfragments/`]: https://github.com/pypa/setuptools/tree/master/newsfragments [PR docs]: https://setuptools.pypa.io/en/latest/development/developer-guide.html#making-a-pull-request </issue> <code> [start of conftest.py] 1 import sys 2 3 import pytest 4 5 6 pytest_plugins = 'setuptools.tests.fixtures' 7 8 9 def pytest_addoption(parser): 10 parser.addoption( 11 "--package_name", 12 action="append", 13 default=[], 14 help="list of package_name to pass to test functions", 15 ) 16 parser.addoption( 17 "--integration", 18 action="store_true", 19 default=False, 20 help="run integration tests (only)", 21 ) 22 23 24 def pytest_configure(config): 25 config.addinivalue_line("markers", "integration: integration tests") 26 config.addinivalue_line("markers", "uses_network: tests may try to download files") 27 28 29 collect_ignore = [ 30 'tests/manual_test.py', 31 'setuptools/tests/mod_with_constant.py', 32 'setuptools/_distutils', 33 '_distutils_hack', 34 'setuptools/extern', 35 'pkg_resources/extern', 36 'pkg_resources/tests/data', 37 'setuptools/_vendor', 38 'pkg_resources/_vendor', 39 'setuptools/config/_validate_pyproject', 40 'setuptools/modified.py', 41 ] 42 43 44 if sys.version_info < (3, 9) or sys.platform == 'cygwin': 45 collect_ignore.append('tools/finalize.py') 46 47 48 @pytest.fixture(autouse=True) 49 def _skip_integration(request): 50 running_integration_tests = request.config.getoption("--integration") 51 is_integration_test = request.node.get_closest_marker("integration") 52 if running_integration_tests and not is_integration_test: 53 pytest.skip("running integration tests only") 54 if not running_integration_tests and is_integration_test: 55 pytest.skip("skipping integration tests") 56 [end of conftest.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conftest.py b/conftest.py --- a/conftest.py +++ b/conftest.py @@ -24,6 +24,7 @@ def pytest_configure(config): config.addinivalue_line("markers", "integration: integration tests") config.addinivalue_line("markers", "uses_network: tests may try to download files") + _IntegrationTestSpeedups.disable_plugins_already_run(config) collect_ignore = [ @@ -47,9 +48,25 @@ @pytest.fixture(autouse=True) def _skip_integration(request): - running_integration_tests = request.config.getoption("--integration") - is_integration_test = request.node.get_closest_marker("integration") - if running_integration_tests and not is_integration_test: - pytest.skip("running integration tests only") - if not running_integration_tests and is_integration_test: - pytest.skip("skipping integration tests") + _IntegrationTestSpeedups.conditional_skip(request) + + +class _IntegrationTestSpeedups: + """Speed-up integration tests by only running what does not run in other tests.""" + + RUNS_ON_NORMAL_TESTS = ("checkdocks", "cov", "mypy", "perf", "ruff") + + @classmethod + def disable_plugins_already_run(cls, config): + if config.getoption("--integration"): + for plugin in cls.RUNS_ON_NORMAL_TESTS: # no need to run again + config.pluginmanager.set_blocked(plugin) + + @staticmethod + def conditional_skip(request): + running_integration_tests = request.config.getoption("--integration") + is_integration_test = request.node.get_closest_marker("integration") + if running_integration_tests and not is_integration_test: + pytest.skip("running integration tests only") + if not running_integration_tests and is_integration_test: + pytest.skip("skipping integration tests")
{"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -24,6 +24,7 @@\n def pytest_configure(config):\n config.addinivalue_line(\"markers\", \"integration: integration tests\")\n config.addinivalue_line(\"markers\", \"uses_network: tests may try to download files\")\n+ _IntegrationTestSpeedups.disable_plugins_already_run(config)\n \n \n collect_ignore = [\n@@ -47,9 +48,25 @@\n \n @pytest.fixture(autouse=True)\n def _skip_integration(request):\n- running_integration_tests = request.config.getoption(\"--integration\")\n- is_integration_test = request.node.get_closest_marker(\"integration\")\n- if running_integration_tests and not is_integration_test:\n- pytest.skip(\"running integration tests only\")\n- if not running_integration_tests and is_integration_test:\n- pytest.skip(\"skipping integration tests\")\n+ _IntegrationTestSpeedups.conditional_skip(request)\n+\n+\n+class _IntegrationTestSpeedups:\n+ \"\"\"Speed-up integration tests by only running what does not run in other tests.\"\"\"\n+\n+ RUNS_ON_NORMAL_TESTS = (\"checkdocks\", \"cov\", \"mypy\", \"perf\", \"ruff\")\n+\n+ @classmethod\n+ def disable_plugins_already_run(cls, config):\n+ if config.getoption(\"--integration\"):\n+ for plugin in cls.RUNS_ON_NORMAL_TESTS: # no need to run again\n+ config.pluginmanager.set_blocked(plugin)\n+\n+ @staticmethod\n+ def conditional_skip(request):\n+ running_integration_tests = request.config.getoption(\"--integration\")\n+ is_integration_test = request.node.get_closest_marker(\"integration\")\n+ if running_integration_tests and not is_integration_test:\n+ pytest.skip(\"running integration tests only\")\n+ if not running_integration_tests and is_integration_test:\n+ pytest.skip(\"skipping integration tests\")\n", "issue": "Deduplicate testing dependencies by referencing `[testing-integration]`\n<!-- First time contributors: Take a moment to review https://setuptools.pypa.io/en/latest/development/developer-guide.html! -->\r\n<!-- Remove sections if not applicable -->\r\n\r\n## Summary of changes\r\n\r\nThis was my original idea, but in https://github.com/pypa/setuptools/pull/4257#discussion_r1514754970, @abravalheri mentioned that:\r\n> [...] I think we just remove the `testing-integration` and use `testing` everywhere...\r\n> \r\n> Although it might add a bit of overhead for the integration tests, it will simplify and streamline the setup... This way we reduce the divergence with `skeleton`. The additional overhead in the integration tests should not be too much, and the integration tests just run before the releases anyway.\r\n\r\nBut I'm still opening this PR as an option / for comparison.\r\n\r\nAccepting this closes #4282\r\n\r\n### Pull Request Checklist\r\n- [x] Changes have tests (these are test changes)\r\n- [x] News fragment added in [`newsfragments/`]. 
(no user facing changes)\r\n _(See [documentation][PR docs] for details)_\r\n\r\n\r\n[`newsfragments/`]: https://github.com/pypa/setuptools/tree/master/newsfragments\r\n[PR docs]:\r\nhttps://setuptools.pypa.io/en/latest/development/developer-guide.html#making-a-pull-request\r\n\n", "before_files": [{"content": "import sys\n\nimport pytest\n\n\npytest_plugins = 'setuptools.tests.fixtures'\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--package_name\",\n action=\"append\",\n default=[],\n help=\"list of package_name to pass to test functions\",\n )\n parser.addoption(\n \"--integration\",\n action=\"store_true\",\n default=False,\n help=\"run integration tests (only)\",\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\"markers\", \"integration: integration tests\")\n config.addinivalue_line(\"markers\", \"uses_network: tests may try to download files\")\n\n\ncollect_ignore = [\n 'tests/manual_test.py',\n 'setuptools/tests/mod_with_constant.py',\n 'setuptools/_distutils',\n '_distutils_hack',\n 'setuptools/extern',\n 'pkg_resources/extern',\n 'pkg_resources/tests/data',\n 'setuptools/_vendor',\n 'pkg_resources/_vendor',\n 'setuptools/config/_validate_pyproject',\n 'setuptools/modified.py',\n]\n\n\nif sys.version_info < (3, 9) or sys.platform == 'cygwin':\n collect_ignore.append('tools/finalize.py')\n\n\[email protected](autouse=True)\ndef _skip_integration(request):\n running_integration_tests = request.config.getoption(\"--integration\")\n is_integration_test = request.node.get_closest_marker(\"integration\")\n if running_integration_tests and not is_integration_test:\n pytest.skip(\"running integration tests only\")\n if not running_integration_tests and is_integration_test:\n pytest.skip(\"skipping integration tests\")\n", "path": "conftest.py"}]}
1,283
409
gh_patches_debug_63356
rasdani/github-patches
git_diff
getredash__redash-716
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Deleting and re-adding widgets to a dashboard breaks it There's a specific set of steps that has messed up some of our users' dashboards: 1. Create a new dashboard 2. Add multiple widgets to it. 3. Remove all those widgets from the dashboard 4. Re-add some widgets 5. Click the "Edit Dashboard (Name/Layout)" button 6. Click Save without changing anything. 7. Refresh the page This makes none of the widgets appear and causes the "Layout" array in the admin panel to contain one or more "null" values (depending on how many widgets you added/deleted): ![image](https://cloud.githubusercontent.com/assets/844493/11826038/76c749fa-a348-11e5-9f46-bbae43c4b6f6.png) The only way to recover from this state is to manually delete the "null" values through the admin interface. This is on re:dash version 0.8.2 </issue> <code> [start of redash/handlers/widgets.py] 1 import json 2 3 from flask import request 4 5 from redash import models 6 from redash.wsgi import api 7 from redash.permissions import require_permission 8 from redash.handlers.base import BaseResource 9 10 11 class WidgetListAPI(BaseResource): 12 @require_permission('edit_dashboard') 13 def post(self): 14 widget_properties = request.get_json(force=True) 15 widget_properties['options'] = json.dumps(widget_properties['options']) 16 widget_properties.pop('id', None) 17 widget_properties['dashboard'] = widget_properties.pop('dashboard_id') 18 widget_properties['visualization'] = widget_properties.pop('visualization_id') 19 widget = models.Widget(**widget_properties) 20 widget.save() 21 22 layout = json.loads(widget.dashboard.layout) 23 new_row = True 24 25 if len(layout) == 0 or widget.width == 2: 26 layout.append([widget.id]) 27 elif len(layout[-1]) == 1: 28 neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0]) 29 if neighbour_widget.width == 1: 30 layout[-1].append(widget.id) 31 new_row = False 32 else: 33 layout.append([widget.id]) 34 else: 35 layout.append([widget.id]) 36 37 widget.dashboard.layout = json.dumps(layout) 38 widget.dashboard.save() 39 40 return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row} 41 42 43 class WidgetAPI(BaseResource): 44 @require_permission('edit_dashboard') 45 def delete(self, widget_id): 46 widget = models.Widget.get(models.Widget.id == widget_id) 47 widget.delete_instance() 48 49 api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets') 50 api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget') 51 [end of redash/handlers/widgets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py --- a/redash/handlers/widgets.py +++ b/redash/handlers/widgets.py @@ -46,5 +46,7 @@ widget = models.Widget.get(models.Widget.id == widget_id) widget.delete_instance() + return {'layout': widget.dashboard.layout } + api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets') api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')
{"golden_diff": "diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py\n--- a/redash/handlers/widgets.py\n+++ b/redash/handlers/widgets.py\n@@ -46,5 +46,7 @@\n widget = models.Widget.get(models.Widget.id == widget_id)\n widget.delete_instance()\n \n+ return {'layout': widget.dashboard.layout }\n+\n api.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')\n api.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')\n", "issue": "Deleting and re-adding widgets to a dashboard breaks it\nThere's a specific set of steps that has messed up some of our users' dashboards:\n1. Create a new dashboard\n2. Add multiple widgets to it.\n3. Remove all those widgets from the dashboard\n4. Re-add some widgets\n5. Click the \"Edit Dashboard (Name/Layout)\" button\n6. Click Save without changing anything.\n7. Refresh the page\n\nThis makes none of the widgets appear and causes the \"Layout\" array in the admin panel to contain one or more \"null\" values (depending on how many widgets you added/deleted):\n![image](https://cloud.githubusercontent.com/assets/844493/11826038/76c749fa-a348-11e5-9f46-bbae43c4b6f6.png)\n\nThe only way to recover from this state is to manually delete the \"null\" values through the admin interface.\n\nThis is on re:dash version 0.8.2\n\n", "before_files": [{"content": "import json\n\nfrom flask import request\n\nfrom redash import models\nfrom redash.wsgi import api\nfrom redash.permissions import require_permission\nfrom redash.handlers.base import BaseResource\n\n\nclass WidgetListAPI(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n widget_properties = request.get_json(force=True)\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = widget_properties.pop('dashboard_id')\n widget_properties['visualization'] = widget_properties.pop('visualization_id')\n widget = models.Widget(**widget_properties)\n widget.save()\n\n layout = json.loads(widget.dashboard.layout)\n new_row = True\n\n if len(layout) == 0 or widget.width == 2:\n layout.append([widget.id])\n elif len(layout[-1]) == 1:\n neighbour_widget = models.Widget.get(models.Widget.id == layout[-1][0])\n if neighbour_widget.width == 1:\n layout[-1].append(widget.id)\n new_row = False\n else:\n layout.append([widget.id])\n else:\n layout.append([widget.id])\n\n widget.dashboard.layout = json.dumps(layout)\n widget.dashboard.save()\n\n return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row}\n\n\nclass WidgetAPI(BaseResource):\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n widget = models.Widget.get(models.Widget.id == widget_id)\n widget.delete_instance()\n\napi.add_resource(WidgetListAPI, '/api/widgets', endpoint='widgets')\napi.add_resource(WidgetAPI, '/api/widgets/<int:widget_id>', endpoint='widget')\n", "path": "redash/handlers/widgets.py"}]}
1,221
119
gh_patches_debug_1763
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-802
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve push notification form ### Motivation <!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. --> ### Proposed Solution <!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. --> - [x] Make input field to text area - [x] Show how many characters are remaining - [x] Rename "Push-Notifications" to "News" / "Nachrichten" - [x] Change not sent symbol "x" to text "Message not sent yet" ### Additional Context ![Bildschirmfoto vom 2021-04-13 16-42-11](https://user-images.githubusercontent.com/15608678/114572956-3be7fd00-9c78-11eb-928e-b156602af7ac.png) Files that need some editing for that issue are: - src/cms/templates/push_notifications/push_notification_form.html (for everything related to the HTML-Input Field) - src/cms/locale/de/LC_MESSAGES/django.po (for everything that needs some new translation. Search for "Push Notification" and change the german translation to "Nachricht/Nachrichten" </issue> <code> [start of src/cms/models/push_notifications/push_notification_translation.py] 1 from django.db import models 2 from django.utils import timezone 3 from django.utils.translation import ugettext_lazy as _ 4 5 from ..languages.language import Language 6 from .push_notification import PushNotification 7 8 9 class PushNotificationTranslation(models.Model): 10 """ 11 Data model representing a push notification translation 12 """ 13 14 title = models.CharField( 15 max_length=250, 16 blank=True, 17 verbose_name=_("title"), 18 ) 19 text = models.CharField( 20 max_length=250, 21 blank=True, 22 verbose_name=_("content"), 23 ) 24 language = models.ForeignKey( 25 Language, 26 on_delete=models.CASCADE, 27 related_name="push_notification_translations", 28 verbose_name=_("language"), 29 ) 30 push_notification = models.ForeignKey( 31 PushNotification, 32 on_delete=models.CASCADE, 33 related_name="translations", 34 verbose_name=_("push notification"), 35 ) 36 created_date = models.DateTimeField( 37 default=timezone.now, 38 verbose_name=_("creation date"), 39 ) 40 last_updated = models.DateTimeField( 41 auto_now=True, 42 verbose_name=_("modification date"), 43 ) 44 45 def __str__(self): 46 """ 47 This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``PushNotificationTranslation object (id)``. 48 It is used in the Django admin backend and as label for ModelChoiceFields. 49 50 :return: A readable string representation of the event 51 :rtype: str 52 """ 53 return self.title 54 55 def __repr__(self): 56 """ 57 This overwrites the default Django ``__repr__()`` method which would return ``<PushNotificationTranslation: PushNotificationTranslation object (id)>``. 58 It is used for logging. 
59 60 :return: The canonical string representation of the event 61 :rtype: str 62 """ 63 return f"<PushNotificationTranslation (id: {self.id}, push_notification_id: {self.push_notification.id}, title: {self.title})>" 64 65 class Meta: 66 #: The verbose name of the model 67 verbose_name = _("push notification translation") 68 #: The plural verbose name of the model 69 verbose_name_plural = _("push notification translations") 70 #: The default permissions for this model 71 default_permissions = () 72 #: Sets of field names that, taken together, must be unique 73 unique_together = ["push_notification", "language"] 74 [end of src/cms/models/push_notifications/push_notification_translation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cms/models/push_notifications/push_notification_translation.py b/src/cms/models/push_notifications/push_notification_translation.py --- a/src/cms/models/push_notifications/push_notification_translation.py +++ b/src/cms/models/push_notifications/push_notification_translation.py @@ -16,7 +16,7 @@ blank=True, verbose_name=_("title"), ) - text = models.CharField( + text = models.TextField( max_length=250, blank=True, verbose_name=_("content"),
{"golden_diff": "diff --git a/src/cms/models/push_notifications/push_notification_translation.py b/src/cms/models/push_notifications/push_notification_translation.py\n--- a/src/cms/models/push_notifications/push_notification_translation.py\n+++ b/src/cms/models/push_notifications/push_notification_translation.py\n@@ -16,7 +16,7 @@\n blank=True,\n verbose_name=_(\"title\"),\n )\n- text = models.CharField(\n+ text = models.TextField(\n max_length=250,\n blank=True,\n verbose_name=_(\"content\"),\n", "issue": "Improve push notification form\n### Motivation\r\n<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->\r\n\r\n\r\n### Proposed Solution\r\n<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->\r\n- [x] Make input field to text area\r\n- [x] Show how many characters are remaining\r\n- [x] Rename \"Push-Notifications\" to \"News\" / \"Nachrichten\"\r\n- [x] Change not sent symbol \"x\" to text \"Message not sent yet\"\r\n\r\n\r\n### Additional Context\r\n![Bildschirmfoto vom 2021-04-13 16-42-11](https://user-images.githubusercontent.com/15608678/114572956-3be7fd00-9c78-11eb-928e-b156602af7ac.png)\r\n\r\nFiles that need some editing for that issue are:\r\n\r\n- src/cms/templates/push_notifications/push_notification_form.html (for everything related to the HTML-Input Field)\r\n- src/cms/locale/de/LC_MESSAGES/django.po (for everything that needs some new translation. Search for \"Push Notification\" and change the german translation to \"Nachricht/Nachrichten\"\r\n\r\n\r\n\n", "before_files": [{"content": "from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..languages.language import Language\nfrom .push_notification import PushNotification\n\n\nclass PushNotificationTranslation(models.Model):\n \"\"\"\n Data model representing a push notification translation\n \"\"\"\n\n title = models.CharField(\n max_length=250,\n blank=True,\n verbose_name=_(\"title\"),\n )\n text = models.CharField(\n max_length=250,\n blank=True,\n verbose_name=_(\"content\"),\n )\n language = models.ForeignKey(\n Language,\n on_delete=models.CASCADE,\n related_name=\"push_notification_translations\",\n verbose_name=_(\"language\"),\n )\n push_notification = models.ForeignKey(\n PushNotification,\n on_delete=models.CASCADE,\n related_name=\"translations\",\n verbose_name=_(\"push notification\"),\n )\n created_date = models.DateTimeField(\n default=timezone.now,\n verbose_name=_(\"creation date\"),\n )\n last_updated = models.DateTimeField(\n auto_now=True,\n verbose_name=_(\"modification date\"),\n )\n\n def __str__(self):\n \"\"\"\n This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``PushNotificationTranslation object (id)``.\n It is used in the Django admin backend and as label for ModelChoiceFields.\n\n :return: A readable string representation of the event\n :rtype: str\n \"\"\"\n return self.title\n\n def __repr__(self):\n \"\"\"\n This overwrites the default Django ``__repr__()`` method which would return ``<PushNotificationTranslation: PushNotificationTranslation object (id)>``.\n It is used for logging.\n\n :return: The canonical string representation of the event\n :rtype: str\n \"\"\"\n return f\"<PushNotificationTranslation (id: {self.id}, push_notification_id: {self.push_notification.id}, title: {self.title})>\"\n\n class Meta:\n #: The verbose 
name of the model\n verbose_name = _(\"push notification translation\")\n #: The plural verbose name of the model\n verbose_name_plural = _(\"push notification translations\")\n #: The default permissions for this model\n default_permissions = ()\n #: Sets of field names that, taken together, must be unique\n unique_together = [\"push_notification\", \"language\"]\n", "path": "src/cms/models/push_notifications/push_notification_translation.py"}]}
1,465
113
gh_patches_debug_3531
rasdani/github-patches
git_diff
web2py__web2py-928
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Typo in models/db.py for mail server Hi, I found a small typo, which prohibits sending mails in models/db.py line 65: mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender') should be (smtp.server instead of smtp.sender): mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server') thanks </issue> <code> [start of applications/welcome/models/db.py] 1 # -*- coding: utf-8 -*- 2 3 ######################################################################### 4 ## This scaffolding model makes your app work on Google App Engine too 5 ## File is released under public domain and you can use without limitations 6 ######################################################################### 7 8 ## if SSL/HTTPS is properly configured and you want all HTTP requests to 9 ## be redirected to HTTPS, uncomment the line below: 10 # request.requires_https() 11 12 ## app configuration made easy. Look inside private/appconfig.ini 13 from gluon.contrib.appconfig import AppConfig 14 ## once in production, remove reload=True to gain full speed 15 myconf = AppConfig(reload=True) 16 17 18 if not request.env.web2py_runtime_gae: 19 ## if NOT running on Google App Engine use SQLite or other DB 20 db = DAL(myconf.take('db.uri'), pool_size=myconf.take('db.pool_size', cast=int), check_reserved=['all']) 21 else: 22 ## connect to Google BigTable (optional 'google:datastore://namespace') 23 db = DAL('google:datastore+ndb') 24 ## store sessions and tickets there 25 session.connect(request, response, db=db) 26 ## or store session in Memcache, Redis, etc. 27 ## from gluon.contrib.memdb import MEMDB 28 ## from google.appengine.api.memcache import Client 29 ## session.connect(request, response, db = MEMDB(Client())) 30 31 ## by default give a view/generic.extension to all actions from localhost 32 ## none otherwise. a pattern can be 'controller/function.extension' 33 response.generic_patterns = ['*'] if request.is_local else [] 34 ## choose a style for forms 35 response.formstyle = myconf.take('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other 36 response.form_label_separator = myconf.take('forms.separator') 37 38 39 ## (optional) optimize handling of static files 40 # response.optimize_css = 'concat,minify,inline' 41 # response.optimize_js = 'concat,minify,inline' 42 ## (optional) static assets folder versioning 43 # response.static_version = '0.0.0' 44 ######################################################################### 45 ## Here is sample code if you need for 46 ## - email capabilities 47 ## - authentication (registration, login, logout, ... 
) 48 ## - authorization (role based authorization) 49 ## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss) 50 ## - old style crud actions 51 ## (more options discussed in gluon/tools.py) 52 ######################################################################### 53 54 from gluon.tools import Auth, Service, PluginManager 55 56 auth = Auth(db) 57 service = Service() 58 plugins = PluginManager() 59 60 ## create all tables needed by auth if not custom tables 61 auth.define_tables(username=False, signature=False) 62 63 ## configure email 64 mail = auth.settings.mailer 65 mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender') 66 mail.settings.sender = myconf.take('smtp.sender') 67 mail.settings.login = myconf.take('smtp.login') 68 69 ## configure auth policy 70 auth.settings.registration_requires_verification = False 71 auth.settings.registration_requires_approval = False 72 auth.settings.reset_password_requires_verification = True 73 74 ######################################################################### 75 ## Define your tables below (or better in another model file) for example 76 ## 77 ## >>> db.define_table('mytable',Field('myfield','string')) 78 ## 79 ## Fields can be 'string','text','password','integer','double','boolean' 80 ## 'date','time','datetime','blob','upload', 'reference TABLENAME' 81 ## There is an implicit 'id integer autoincrement' field 82 ## Consult manual for more options, validators, etc. 83 ## 84 ## More API examples for controllers: 85 ## 86 ## >>> db.mytable.insert(myfield='value') 87 ## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL) 88 ## >>> for row in rows: print row.id, row.myfield 89 ######################################################################### 90 91 ## after defining tables, uncomment below to enable auditing 92 # auth.enable_record_versioning(db) 93 [end of applications/welcome/models/db.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/applications/welcome/models/db.py b/applications/welcome/models/db.py --- a/applications/welcome/models/db.py +++ b/applications/welcome/models/db.py @@ -62,7 +62,7 @@ ## configure email mail = auth.settings.mailer -mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender') +mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server') mail.settings.sender = myconf.take('smtp.sender') mail.settings.login = myconf.take('smtp.login')
{"golden_diff": "diff --git a/applications/welcome/models/db.py b/applications/welcome/models/db.py\n--- a/applications/welcome/models/db.py\n+++ b/applications/welcome/models/db.py\n@@ -62,7 +62,7 @@\n \n ## configure email\n mail = auth.settings.mailer\n-mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender')\n+mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server')\n mail.settings.sender = myconf.take('smtp.sender')\n mail.settings.login = myconf.take('smtp.login')\n", "issue": "Typo in models/db.py for mail server\nHi, \n\nI found a small typo, which prohibits sending mails in models/db.py line 65:\nmail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender')\n\nshould be (smtp.server instead of smtp.sender):\nmail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server') \n\nthanks\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n#########################################################################\n## This scaffolding model makes your app work on Google App Engine too\n## File is released under public domain and you can use without limitations\n#########################################################################\n\n## if SSL/HTTPS is properly configured and you want all HTTP requests to\n## be redirected to HTTPS, uncomment the line below:\n# request.requires_https()\n\n## app configuration made easy. Look inside private/appconfig.ini\nfrom gluon.contrib.appconfig import AppConfig\n## once in production, remove reload=True to gain full speed\nmyconf = AppConfig(reload=True)\n\n\nif not request.env.web2py_runtime_gae:\n ## if NOT running on Google App Engine use SQLite or other DB\n db = DAL(myconf.take('db.uri'), pool_size=myconf.take('db.pool_size', cast=int), check_reserved=['all'])\nelse:\n ## connect to Google BigTable (optional 'google:datastore://namespace')\n db = DAL('google:datastore+ndb')\n ## store sessions and tickets there\n session.connect(request, response, db=db)\n ## or store session in Memcache, Redis, etc.\n ## from gluon.contrib.memdb import MEMDB\n ## from google.appengine.api.memcache import Client\n ## session.connect(request, response, db = MEMDB(Client()))\n\n## by default give a view/generic.extension to all actions from localhost\n## none otherwise. a pattern can be 'controller/function.extension'\nresponse.generic_patterns = ['*'] if request.is_local else []\n## choose a style for forms\nresponse.formstyle = myconf.take('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other\nresponse.form_label_separator = myconf.take('forms.separator')\n\n\n## (optional) optimize handling of static files\n# response.optimize_css = 'concat,minify,inline'\n# response.optimize_js = 'concat,minify,inline'\n## (optional) static assets folder versioning\n# response.static_version = '0.0.0'\n#########################################################################\n## Here is sample code if you need for\n## - email capabilities\n## - authentication (registration, login, logout, ... 
)\n## - authorization (role based authorization)\n## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)\n## - old style crud actions\n## (more options discussed in gluon/tools.py)\n#########################################################################\n\nfrom gluon.tools import Auth, Service, PluginManager\n\nauth = Auth(db)\nservice = Service()\nplugins = PluginManager()\n\n## create all tables needed by auth if not custom tables\nauth.define_tables(username=False, signature=False)\n\n## configure email\nmail = auth.settings.mailer\nmail.settings.server = 'logging' if request.is_local else myconf.take('smtp.sender')\nmail.settings.sender = myconf.take('smtp.sender')\nmail.settings.login = myconf.take('smtp.login')\n\n## configure auth policy\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = False\nauth.settings.reset_password_requires_verification = True\n\n#########################################################################\n## Define your tables below (or better in another model file) for example\n##\n## >>> db.define_table('mytable',Field('myfield','string'))\n##\n## Fields can be 'string','text','password','integer','double','boolean'\n## 'date','time','datetime','blob','upload', 'reference TABLENAME'\n## There is an implicit 'id integer autoincrement' field\n## Consult manual for more options, validators, etc.\n##\n## More API examples for controllers:\n##\n## >>> db.mytable.insert(myfield='value')\n## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)\n## >>> for row in rows: print row.id, row.myfield\n#########################################################################\n\n## after defining tables, uncomment below to enable auditing\n# auth.enable_record_versioning(db)\n", "path": "applications/welcome/models/db.py"}]}
1,602
122
gh_patches_debug_13859
rasdani/github-patches
git_diff
bridgecrewio__checkov-5413
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AWS_299 Fails for Values That Can't be Set
**Describe the issue**
Failing on - Check: CKV_AWS_299: "Ensure DMS S3 defines in-transit encryption"

When the endpoint of a DMS Migration Task has an Endpoint that is S3, there is no option in AWS to specify ssl_mode. AWS does not have an ssl_mode for DMS Endpoints of type S3. Setting this value on extra connections does nothing. Also, setting this in Terraform does nothing. I may be crazy, but I think DMS transfers to S3 use SSL by default.

**Examples**
```

from __future__ import annotations

from typing import Any

from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories


class DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):
    def __init__(self) -> None:
        name = "Ensure DMS S3 defines in-transit encryption"
        id = "CKV_AWS_299"
        supported_resources = ("aws_dms_s3_endpoint",)
        categories = (CheckCategories.ENCRYPTION,)
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)

    def get_inspected_key(self) -> str:
        return 'ssl_mode'

    def get_expected_values(self) -> list[Any]:
        return ["require", "verify-ca", "verify-full"]


check = DMSS3DefinesIntransitEncryption()

```

**Version (please complete the following information):**
 - Checkov Version 2.3.343

**Additional context**

This property does nothing in Terraform when the aws_dms_endpoint is s3, and I filed an issue over there; however, it was closed with no fix.
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py]
1 from __future__ import annotations
2 
3 from typing import Any
4 
5 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
6 from checkov.common.models.enums import CheckCategories
7 
8 
9 class DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):
10     def __init__(self) -> None:
11         name = "Ensure DMS S3 defines in-transit encryption"
12         id = "CKV_AWS_299"
13         supported_resources = ("aws_dms_s3_endpoint",)
14         categories = (CheckCategories.ENCRYPTION,)
15         super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
16 
17     def get_inspected_key(self) -> str:
18         return 'ssl_mode'
19 
20     def get_expected_values(self) -> list[Any]:
21         return ["require", "verify-ca", "verify-full"]
22 
23 
24 check = DMSS3DefinesIntransitEncryption()
25 [end of checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py b/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py deleted file mode 100644 --- a/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck -from checkov.common.models.enums import CheckCategories - - -class DMSS3DefinesIntransitEncryption(BaseResourceValueCheck): - def __init__(self) -> None: - name = "Ensure DMS S3 defines in-transit encryption" - id = "CKV_AWS_299" - supported_resources = ("aws_dms_s3_endpoint",) - categories = (CheckCategories.ENCRYPTION,) - super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - - def get_inspected_key(self) -> str: - return 'ssl_mode' - - def get_expected_values(self) -> list[Any]: - return ["require", "verify-ca", "verify-full"] - - -check = DMSS3DefinesIntransitEncryption()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py b/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-from __future__ import annotations\n-\n-from typing import Any\n-\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n-\n-\n-class DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):\n-    def __init__(self) -> None:\n-        name = \"Ensure DMS S3 defines in-transit encryption\"\n-        id = \"CKV_AWS_299\"\n-        supported_resources = (\"aws_dms_s3_endpoint\",)\n-        categories = (CheckCategories.ENCRYPTION,)\n-        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n-    def get_inspected_key(self) -> str:\n-        return 'ssl_mode'\n-\n-    def get_expected_values(self) -> list[Any]:\n-        return [\"require\", \"verify-ca\", \"verify-full\"]\n-\n-\n-check = DMSS3DefinesIntransitEncryption()\n", "issue": "CKV_AWS_299 Fails for Values That Can't be Set\n**Describe the issue**\r\nFailing on - Check: CKV_AWS_299: \"Ensure DMS S3 defines in-transit encryption\"\r\n\r\nWhen the endpoint of a DMS Migration Task has an Endpoint that is S3, there is no option in AWS to specify ssl_mode. AWS does not have an ssl_mode for DMS Endpoints of type S3. Setting this value on extra connections does nothing. Also, setting this in Terraform does nothing. I may be crazy, but I think DMS transfers to S3 use SSL by default.\r\n\r\n**Examples**\r\n```\r\n\r\nfrom __future__ import annotations\r\n\r\nfrom typing import Any\r\n\r\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\r\nfrom checkov.common.models.enums import CheckCategories\r\n\r\n\r\nclass DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):\r\n    def __init__(self) -> None:\r\n        name = \"Ensure DMS S3 defines in-transit encryption\"\r\n        id = \"CKV_AWS_299\"\r\n        supported_resources = (\"aws_dms_s3_endpoint\",)\r\n        categories = (CheckCategories.ENCRYPTION,)\r\n        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\r\n\r\n    def get_inspected_key(self) -> str:\r\n        return 'ssl_mode'\r\n\r\n    def get_expected_values(self) -> list[Any]:\r\n        return [\"require\", \"verify-ca\", \"verify-full\"]\r\n\r\n\r\ncheck = DMSS3DefinesIntransitEncryption()\r\n\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.343\r\n\r\n**Additional context**\r\n\r\nThis property does nothing in Terraform when the aws_dms_endpoint is s3, and I filed an issue over there; however, it was closed with no fix.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):\n    def __init__(self) -> None:\n        name = \"Ensure DMS S3 defines in-transit encryption\"\n        id = \"CKV_AWS_299\"\n        supported_resources = (\"aws_dms_s3_endpoint\",)\n        categories = (CheckCategories.ENCRYPTION,)\n        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n    def get_inspected_key(self) -> str:\n        return 'ssl_mode'\n\n    def get_expected_values(self) ->
list[Any]:\n return [\"require\", \"verify-ca\", \"verify-full\"]\n\n\ncheck = DMSS3DefinesIntransitEncryption()\n", "path": "checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py"}]}
1,192
297
gh_patches_debug_27488
rasdani/github-patches
git_diff
coala__coala-2732
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> WIP: JuliaRequirement: Add installation methods </issue> <code> [start of coalib/bears/requirements/JuliaRequirement.py] 1 from coalib.bears.requirements.PackageRequirement import PackageRequirement 2 3 4 class JuliaRequirement(PackageRequirement): 5 """ 6 This class is a subclass of ``PackageRequirement``, and helps specifying 7 requirements from ``julia``, without using the manager name. 8 """ 9 10 def __init__(self, package, version=""): 11 """ 12 Constructs a new ``JuliaRequirement``, using the ``PackageRequirement`` 13 constructor. 14 15 >>> pr = JuliaRequirement('Lint', '19.2') 16 >>> pr.manager 17 'julia' 18 >>> pr.package 19 'Lint' 20 >>> pr.version 21 '19.2' 22 23 :param package: A string with the name of the package to be installed. 24 :param version: A version string. Leave empty to specify latest version. 25 """ 26 PackageRequirement.__init__(self, 'julia', package, version) 27 [end of coalib/bears/requirements/JuliaRequirement.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/coalib/bears/requirements/JuliaRequirement.py b/coalib/bears/requirements/JuliaRequirement.py --- a/coalib/bears/requirements/JuliaRequirement.py +++ b/coalib/bears/requirements/JuliaRequirement.py @@ -1,4 +1,9 @@ +import shlex + from coalib.bears.requirements.PackageRequirement import PackageRequirement +from coalib.misc.Shell import call_without_output + +from coala_utils.string_processing import escape class JuliaRequirement(PackageRequirement): @@ -24,3 +29,31 @@ :param version: A version string. Leave empty to specify latest version. """ PackageRequirement.__init__(self, 'julia', package, version) + + def install_command(self): + """ + Creates the installation command for the instance of the class. + + >>> JuliaRequirement('Lint').install_command() + 'julia -e \\'Pkg.add("Lint")\\'' + + :return: A string with the installation command. + """ + code = 'Pkg.add("{}")'.format(escape(self.package, '\\"')) + args = ('julia', '-e', shlex.quote(code)) + return ' '.join(args) + + def is_installed(self): + """ + Checks if the dependency is installed. + + :return: ``True`` if dependency is installed, ``False`` otherwise. + """ + # We need to check explicitly if `nothing` is returned, as this happens + # when the package is *registered*, but *not installed*. If it's not + # even registered, julia will throw an exception which lets julia exit + # with an error code different from 0. + code = 'Pkg.installed("{}")==nothing?exit(1):exit(0)'.format( + escape(self.package, '\\"')) + args = ('julia', '-e', code) + return not call_without_output(args)
{"golden_diff": "diff --git a/coalib/bears/requirements/JuliaRequirement.py b/coalib/bears/requirements/JuliaRequirement.py\n--- a/coalib/bears/requirements/JuliaRequirement.py\n+++ b/coalib/bears/requirements/JuliaRequirement.py\n@@ -1,4 +1,9 @@\n+import shlex\n+\n from coalib.bears.requirements.PackageRequirement import PackageRequirement\n+from coalib.misc.Shell import call_without_output\n+\n+from coala_utils.string_processing import escape\n \n \n class JuliaRequirement(PackageRequirement):\n@@ -24,3 +29,31 @@\n :param version: A version string. Leave empty to specify latest version.\n \"\"\"\n PackageRequirement.__init__(self, 'julia', package, version)\n+\n+ def install_command(self):\n+ \"\"\"\n+ Creates the installation command for the instance of the class.\n+\n+ >>> JuliaRequirement('Lint').install_command()\n+ 'julia -e \\\\'Pkg.add(\"Lint\")\\\\''\n+\n+ :return: A string with the installation command.\n+ \"\"\"\n+ code = 'Pkg.add(\"{}\")'.format(escape(self.package, '\\\\\"'))\n+ args = ('julia', '-e', shlex.quote(code))\n+ return ' '.join(args)\n+\n+ def is_installed(self):\n+ \"\"\"\n+ Checks if the dependency is installed.\n+\n+ :return: ``True`` if dependency is installed, ``False`` otherwise.\n+ \"\"\"\n+ # We need to check explicitly if `nothing` is returned, as this happens\n+ # when the package is *registered*, but *not installed*. If it's not\n+ # even registered, julia will throw an exception which lets julia exit\n+ # with an error code different from 0.\n+ code = 'Pkg.installed(\"{}\")==nothing?exit(1):exit(0)'.format(\n+ escape(self.package, '\\\\\"'))\n+ args = ('julia', '-e', code)\n+ return not call_without_output(args)\n", "issue": "WIP: JuliaRequirement: Add installation methods\n\n", "before_files": [{"content": "from coalib.bears.requirements.PackageRequirement import PackageRequirement\n\n\nclass JuliaRequirement(PackageRequirement):\n \"\"\"\n This class is a subclass of ``PackageRequirement``, and helps specifying\n requirements from ``julia``, without using the manager name.\n \"\"\"\n\n def __init__(self, package, version=\"\"):\n \"\"\"\n Constructs a new ``JuliaRequirement``, using the ``PackageRequirement``\n constructor.\n\n >>> pr = JuliaRequirement('Lint', '19.2')\n >>> pr.manager\n 'julia'\n >>> pr.package\n 'Lint'\n >>> pr.version\n '19.2'\n\n :param package: A string with the name of the package to be installed.\n :param version: A version string. Leave empty to specify latest version.\n \"\"\"\n PackageRequirement.__init__(self, 'julia', package, version)\n", "path": "coalib/bears/requirements/JuliaRequirement.py"}]}
789
447
gh_patches_debug_8769
rasdani/github-patches
git_diff
translate__translate-3429
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Multistring doesnt work as a python2 string Previously you were able to so ~~`str(multistring("foo"))`~~ but it seems this no longer works. EDIT: apologies that was a bad example it should have read something like `str(multistring("é"))` Im wondering if we should add py2 compat similar to the one in django (https://docs.djangoproject.com/en/1.9/_modules/django/utils/six/#python_2_unicode_compatible) @claudep any thoughts on this? </issue> <code> [start of translate/misc/multistring.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2006 Zuza Software Foundation 4 # 5 # This file is part of translate. 6 # 7 # translate is free software; you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation; either version 2 of the License, or 10 # (at your option) any later version. 11 # 12 # translate is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with this program; if not, see <http://www.gnu.org/licenses/>. 19 20 """Supports a hybrid Unicode string that can also have a list of alternate 21 strings in the strings attribute 22 """ 23 24 import six 25 26 27 class multistring(six.text_type): 28 29 def __new__(newtype, string=u"", *args, **kwargs): 30 if isinstance(string, list): 31 if not string: 32 raise ValueError("multistring must contain at least one string") 33 mainstring = string[0] 34 newstring = multistring.__new__(newtype, string[0]) 35 newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]] 36 else: 37 newstring = six.text_type.__new__(newtype, string) 38 newstring.strings = [newstring] 39 return newstring 40 41 def __init__(self, *args, **kwargs): 42 super(multistring, self).__init__() 43 if not hasattr(self, "strings"): 44 self.strings = [] 45 46 def __cmp__(self, otherstring): 47 def cmp_compat(s1, s2): 48 # Python 3 compatible cmp() equivalent 49 return (s1 > s2) - (s1 < s2) 50 if isinstance(otherstring, multistring): 51 parentcompare = cmp_compat(six.text_type(self), otherstring) 52 if parentcompare: 53 return parentcompare 54 else: 55 return cmp_compat(self.strings[1:], otherstring.strings[1:]) 56 elif isinstance(otherstring, six.text_type): 57 return cmp_compat(six.text_type(self), otherstring) 58 elif isinstance(otherstring, bytes): 59 return cmp_compat(self.encode('utf-8'), otherstring) 60 elif isinstance(otherstring, list) and otherstring: 61 return cmp_compat(self, multistring(otherstring)) 62 else: 63 return cmp_compat(str(type(self)), str(type(otherstring))) 64 65 def __hash__(self): 66 return hash(''.join(self.strings)) 67 68 def __ne__(self, otherstring): 69 return self.__cmp__(otherstring) != 0 70 71 def __eq__(self, otherstring): 72 return self.__cmp__(otherstring) == 0 73 74 def __repr__(self): 75 _repr = "multistring([" + u",".join(self.strings) + "])" 76 return _repr.encode('utf-8') if six.PY2 else _repr 77 78 def replace(self, old, new, count=None): 79 if count is None: 80 newstr = multistring(super(multistring, self).replace(old, new)) 81 else: 82 newstr = multistring(super(multistring, self).replace(old, new, count)) 83 for s in self.strings[1:]: 84 if count is 
None: 85 newstr.strings.append(s.replace(old, new)) 86 else: 87 newstr.strings.append(s.replace(old, new, count)) 88 return newstr 89 [end of translate/misc/multistring.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py --- a/translate/misc/multistring.py +++ b/translate/misc/multistring.py @@ -75,6 +75,11 @@ _repr = "multistring([" + u",".join(self.strings) + "])" return _repr.encode('utf-8') if six.PY2 else _repr + def __str__(self): + if six.PY2: + return self.encode('utf-8') + return super(multistring, self).__str__() + def replace(self, old, new, count=None): if count is None: newstr = multistring(super(multistring, self).replace(old, new))
{"golden_diff": "diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py\n--- a/translate/misc/multistring.py\n+++ b/translate/misc/multistring.py\n@@ -75,6 +75,11 @@\n _repr = \"multistring([\" + u\",\".join(self.strings) + \"])\"\n return _repr.encode('utf-8') if six.PY2 else _repr\n \n+ def __str__(self):\n+ if six.PY2:\n+ return self.encode('utf-8')\n+ return super(multistring, self).__str__()\n+\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super(multistring, self).replace(old, new))\n", "issue": "Multistring doesnt work as a python2 string\nPreviously you were able to so ~~`str(multistring(\"foo\"))`~~ but it seems this no longer works.\n\nEDIT: apologies that was a bad example it should have read something like `str(multistring(\"\u00e9\"))`\n\nIm wondering if we should add py2 compat similar to the one in django (https://docs.djangoproject.com/en/1.9/_modules/django/utils/six/#python_2_unicode_compatible)\n\n@claudep any thoughts on this?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\n\"\"\"\n\nimport six\n\n\nclass multistring(six.text_type):\n\n def __new__(newtype, string=u\"\", *args, **kwargs):\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n mainstring = string[0]\n newstring = multistring.__new__(newtype, string[0])\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = six.text_type.__new__(newtype, string)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super(multistring, self).__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(six.text_type(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, six.text_type):\n return cmp_compat(six.text_type(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(''.join(self.strings))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n 
_repr = \"multistring([\" + u\",\".join(self.strings) + \"])\"\n return _repr.encode('utf-8') if six.PY2 else _repr\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super(multistring, self).replace(old, new))\n else:\n newstr = multistring(super(multistring, self).replace(old, new, count))\n for s in self.strings[1:]:\n if count is None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}]}
1592
168
gh_patches_debug_15897
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-563
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve help access and prompts from the CLI A new user might enter any of the following and expect to be guided to the Help information about using the CLI: - `cookiecutter` - `cookiecutter -h` - `cookiecutter --h` - `cookiecutter help` It would be helpful to improve the CLI messages or handle some of the additional cases for a new user. @audreyr Thanks for the handy tool :bouquet: <img width="773" alt="screen shot 2015-08-07 at 7 54 57 am" src="https://cloud.githubusercontent.com/assets/2680980/9138629/e64cb42a-3cd9-11e5-8c9c-cb9d726bd1eb.png"> </issue> <code> [start of cookiecutter/cli.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 cookiecutter.cli 6 ----------------- 7 8 Main `cookiecutter` CLI. 9 """ 10 11 import os 12 import sys 13 import logging 14 15 import click 16 17 from cookiecutter import __version__ 18 from cookiecutter.main import cookiecutter 19 from cookiecutter.exceptions import ( 20 OutputDirExistsException, InvalidModeException 21 ) 22 23 logger = logging.getLogger(__name__) 24 25 26 def version_msg(): 27 python_version = sys.version[:3] 28 location = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 29 message = u'Cookiecutter %(version)s from {} (Python {})' 30 return message.format(location, python_version) 31 32 33 @click.command() 34 @click.version_option(__version__, u'-V', u'--version', message=version_msg()) 35 @click.argument(u'template') 36 @click.option( 37 u'--no-input', is_flag=True, 38 help=u'Do not prompt for parameters and only use cookiecutter.json ' 39 u'file content', 40 ) 41 @click.option( 42 u'-c', u'--checkout', 43 help=u'branch, tag or commit to checkout after git clone', 44 ) 45 @click.option( 46 '-v', '--verbose', 47 is_flag=True, help='Print debug information', default=False 48 ) 49 @click.option( 50 u'--replay', is_flag=True, 51 help=u'Do not prompt for parameters and only use information entered ' 52 u'previously', 53 ) 54 @click.option( 55 u'-f', u'--overwrite-if-exists', is_flag=True, 56 help=u'Overwrite the contents of the output directory if it already exists' 57 ) 58 @click.option( 59 u'-o', u'--output-dir', default='.', type=click.Path(), 60 help=u'Where to output the generated project dir into' 61 ) 62 def main(template, no_input, checkout, verbose, replay, overwrite_if_exists, 63 output_dir): 64 """Create a project from a Cookiecutter project template (TEMPLATE).""" 65 if verbose: 66 logging.basicConfig( 67 format=u'%(levelname)s %(filename)s: %(message)s', 68 level=logging.DEBUG 69 ) 70 else: 71 # Log info and above to console 72 logging.basicConfig( 73 format=u'%(levelname)s: %(message)s', 74 level=logging.INFO 75 ) 76 77 try: 78 cookiecutter( 79 template, checkout, no_input, 80 replay=replay, 81 overwrite_if_exists=overwrite_if_exists, 82 output_dir=output_dir 83 ) 84 except (OutputDirExistsException, InvalidModeException) as e: 85 click.echo(e) 86 sys.exit(1) 87 88 if __name__ == "__main__": 89 main() 90 [end of cookiecutter/cli.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cookiecutter/cli.py b/cookiecutter/cli.py --- a/cookiecutter/cli.py +++ b/cookiecutter/cli.py @@ -30,7 +30,7 @@ return message.format(location, python_version) [email protected]() [email protected](context_settings=dict(help_option_names=[u'-h', u'--help'])) @click.version_option(__version__, u'-V', u'--version', message=version_msg()) @click.argument(u'template') @click.option( @@ -75,6 +75,13 @@ ) try: + + # If you _need_ to support a local template in a directory + # called 'help', use a qualified path to the directory. + if template == u'help': + click.echo(click.get_current_context().get_help()) + sys.exit(0) + cookiecutter( template, checkout, no_input, replay=replay,
{"golden_diff": "diff --git a/cookiecutter/cli.py b/cookiecutter/cli.py\n--- a/cookiecutter/cli.py\n+++ b/cookiecutter/cli.py\n@@ -30,7 +30,7 @@\n return message.format(location, python_version)\n \n \[email protected]()\[email protected](context_settings=dict(help_option_names=[u'-h', u'--help']))\n @click.version_option(__version__, u'-V', u'--version', message=version_msg())\n @click.argument(u'template')\n @click.option(\n@@ -75,6 +75,13 @@\n )\n \n try:\n+\n+ # If you _need_ to support a local template in a directory\n+ # called 'help', use a qualified path to the directory.\n+ if template == u'help':\n+ click.echo(click.get_current_context().get_help())\n+ sys.exit(0)\n+\n cookiecutter(\n template, checkout, no_input,\n replay=replay,\n", "issue": "Improve help access and prompts from the CLI\nA new user might enter any of the following and expect to be guided to the Help information about using the CLI:\n- `cookiecutter`\n- `cookiecutter -h`\n- `cookiecutter --h`\n- `cookiecutter help`\n\nIt would be helpful to improve the CLI messages or handle some of the additional cases for a new user.\n\n@audreyr Thanks for the handy tool :bouquet: \n\n<img width=\"773\" alt=\"screen shot 2015-08-07 at 7 54 57 am\" src=\"https://cloud.githubusercontent.com/assets/2680980/9138629/e64cb42a-3cd9-11e5-8c9c-cb9d726bd1eb.png\">\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.cli\n-----------------\n\nMain `cookiecutter` CLI.\n\"\"\"\n\nimport os\nimport sys\nimport logging\n\nimport click\n\nfrom cookiecutter import __version__\nfrom cookiecutter.main import cookiecutter\nfrom cookiecutter.exceptions import (\n OutputDirExistsException, InvalidModeException\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef version_msg():\n python_version = sys.version[:3]\n location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n message = u'Cookiecutter %(version)s from {} (Python {})'\n return message.format(location, python_version)\n\n\[email protected]()\[email protected]_option(__version__, u'-V', u'--version', message=version_msg())\[email protected](u'template')\[email protected](\n u'--no-input', is_flag=True,\n help=u'Do not prompt for parameters and only use cookiecutter.json '\n u'file content',\n)\[email protected](\n u'-c', u'--checkout',\n help=u'branch, tag or commit to checkout after git clone',\n)\[email protected](\n '-v', '--verbose',\n is_flag=True, help='Print debug information', default=False\n)\[email protected](\n u'--replay', is_flag=True,\n help=u'Do not prompt for parameters and only use information entered '\n u'previously',\n)\[email protected](\n u'-f', u'--overwrite-if-exists', is_flag=True,\n help=u'Overwrite the contents of the output directory if it already exists'\n)\[email protected](\n u'-o', u'--output-dir', default='.', type=click.Path(),\n help=u'Where to output the generated project dir into'\n)\ndef main(template, no_input, checkout, verbose, replay, overwrite_if_exists,\n output_dir):\n \"\"\"Create a project from a Cookiecutter project template (TEMPLATE).\"\"\"\n if verbose:\n logging.basicConfig(\n format=u'%(levelname)s %(filename)s: %(message)s',\n level=logging.DEBUG\n )\n else:\n # Log info and above to console\n logging.basicConfig(\n format=u'%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n try:\n cookiecutter(\n template, checkout, no_input,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n except (OutputDirExistsException, 
InvalidModeException) as e:\n click.echo(e)\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n", "path": "cookiecutter/cli.py"}]}
1464
215
gh_patches_debug_9774
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-1214
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> NoEcho password warning on a Lambda Function named AccountPassword *cfn-lint version: 0.25.1* *Description of issue.* I'm getting this warning: ``` W2501 Inappropriate map found for password on Resources/AccountPassword ``` on this Resource in my SAM template: ```yaml AccountPassword: Type: AWS::Serverless::Function Properties: FunctionName: !Sub ${RepositoryName}-account-password-${StageName} Runtime: nodejs10.x Layers: - !Ref LambdaDependencyLayer CodeUri: dist/account Handler: password.handler Description: Reset User Password Timeout: 20 Events: GET: Type: Api Properties: RestApiId: Ref: RestApi Path: /account/password Method: get Role: !GetAtt LambdaExecutionRole.Arn DeploymentPreference: Alarms: - !Ref AccountPasswordLatestVersionErrorMetricGreaterThanZeroAlarm ``` I'm guessing the resolution would be to make sure this check is only done on Parameters and not Resouces. </issue> <code> [start of src/cfnlint/rules/resources/properties/Password.py] 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 import re 6 import six 7 from cfnlint.rules import CloudFormationLintRule 8 from cfnlint.rules import RuleMatch 9 from cfnlint.helpers import REGEX_DYN_REF_SSM, REGEX_DYN_REF 10 11 12 class Password(CloudFormationLintRule): 13 """Check if Password Properties are properly configured""" 14 id = 'W2501' 15 shortdesc = 'Check if Password Properties are correctly configured' 16 description = 'Password properties should be strings and if parameter using NoEcho' 17 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/best-practices.html#creds' 18 tags = ['parameters', 'passwords', 'security'] 19 20 def match(self, cfn): 21 """Check CloudFormation Password Parameters""" 22 23 matches = [] 24 password_properties = ['AccountPassword', 'AdminPassword', 'ADDomainJoinPassword', 'CrossRealmTrustPrincipalPassword', 25 'KdcAdminPassword', 'Password', 'DbPassword', 'MasterUserPassword', 'PasswordParam'] 26 27 parameters = cfn.get_parameter_names() 28 fix_params = [] 29 for password_property in password_properties: 30 # Build the list of refs 31 refs = cfn.search_deep_keys(password_property) 32 trees = [tree for tree in refs if tree[0] == 'Resources'] 33 34 for tree in trees: 35 obj = tree[-1] 36 if isinstance(obj, (six.string_types)): 37 if re.match(REGEX_DYN_REF, obj): 38 if re.match(REGEX_DYN_REF_SSM, obj): 39 message = 'Password should use a secure dynamic reference for %s' % ( 40 '/'.join(map(str, tree[:-1]))) 41 matches.append(RuleMatch(tree[:-1], message)) 42 else: 43 message = 'Password shouldn\'t be hardcoded for %s' % ( 44 '/'.join(map(str, tree[:-1]))) 45 matches.append(RuleMatch(tree[:-1], message)) 46 elif isinstance(obj, dict): 47 if len(obj) == 1: 48 for key, value in obj.items(): 49 if key == 'Ref': 50 if value in parameters: 51 param = cfn.template['Parameters'][value] 52 if 'NoEcho' in param: 53 if not param['NoEcho']: 54 fix_params.append( 55 {'Name': value, 'Use': password_property}) 56 else: 57 fix_params.append({'Name': value, 'Use': password_property}) 58 else: 59 message = 'Inappropriate map found for password on %s' % ( 60 '/'.join(map(str, tree[:-1]))) 61 matches.append(RuleMatch(tree[:-1], message)) 62 63 for paramname in fix_params: 64 message = 'Parameter {} used as {}, therefore NoEcho should be True'.format( 65 paramname['Name'], paramname['Use']) 66 tree = ['Parameters', 
paramname['Name']] 67 matches.append(RuleMatch(tree, message)) 68 return matches 69 [end of src/cfnlint/rules/resources/properties/Password.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/properties/Password.py b/src/cfnlint/rules/resources/properties/Password.py --- a/src/cfnlint/rules/resources/properties/Password.py +++ b/src/cfnlint/rules/resources/properties/Password.py @@ -29,7 +29,11 @@ for password_property in password_properties: # Build the list of refs refs = cfn.search_deep_keys(password_property) - trees = [tree for tree in refs if tree[0] == 'Resources'] + trees = [] + for tree in refs: + if len(tree) > 2: + if tree[0] == 'Resources' and tree[2] == 'Properties': + trees.append(tree) for tree in trees: obj = tree[-1]
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/Password.py b/src/cfnlint/rules/resources/properties/Password.py\n--- a/src/cfnlint/rules/resources/properties/Password.py\n+++ b/src/cfnlint/rules/resources/properties/Password.py\n@@ -29,7 +29,11 @@\n for password_property in password_properties:\n # Build the list of refs\n refs = cfn.search_deep_keys(password_property)\n- trees = [tree for tree in refs if tree[0] == 'Resources']\n+ trees = []\n+ for tree in refs:\n+ if len(tree) > 2:\n+ if tree[0] == 'Resources' and tree[2] == 'Properties':\n+ trees.append(tree)\n \n for tree in trees:\n obj = tree[-1]\n", "issue": "NoEcho password warning on a Lambda Function named AccountPassword\n*cfn-lint version: 0.25.1*\r\n\r\n*Description of issue.*\r\n\r\nI'm getting this warning:\r\n\r\n```\r\nW2501 Inappropriate map found for password on Resources/AccountPassword\r\n```\r\n\r\non this Resource in my SAM template:\r\n\r\n```yaml\r\n AccountPassword:\r\n Type: AWS::Serverless::Function\r\n Properties:\r\n FunctionName: !Sub ${RepositoryName}-account-password-${StageName}\r\n Runtime: nodejs10.x\r\n Layers:\r\n - !Ref LambdaDependencyLayer\r\n CodeUri: dist/account\r\n Handler: password.handler\r\n Description: Reset User Password\r\n Timeout: 20\r\n Events:\r\n GET:\r\n Type: Api\r\n Properties:\r\n RestApiId:\r\n Ref: RestApi\r\n Path: /account/password\r\n Method: get\r\n Role: !GetAtt LambdaExecutionRole.Arn\r\n DeploymentPreference:\r\n Alarms:\r\n - !Ref AccountPasswordLatestVersionErrorMetricGreaterThanZeroAlarm\r\n```\r\n\r\nI'm guessing the resolution would be to make sure this check is only done on Parameters and not Resouces.\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import REGEX_DYN_REF_SSM, REGEX_DYN_REF\n\n\nclass Password(CloudFormationLintRule):\n \"\"\"Check if Password Properties are properly configured\"\"\"\n id = 'W2501'\n shortdesc = 'Check if Password Properties are correctly configured'\n description = 'Password properties should be strings and if parameter using NoEcho'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/best-practices.html#creds'\n tags = ['parameters', 'passwords', 'security']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Password Parameters\"\"\"\n\n matches = []\n password_properties = ['AccountPassword', 'AdminPassword', 'ADDomainJoinPassword', 'CrossRealmTrustPrincipalPassword',\n 'KdcAdminPassword', 'Password', 'DbPassword', 'MasterUserPassword', 'PasswordParam']\n\n parameters = cfn.get_parameter_names()\n fix_params = []\n for password_property in password_properties:\n # Build the list of refs\n refs = cfn.search_deep_keys(password_property)\n trees = [tree for tree in refs if tree[0] == 'Resources']\n\n for tree in trees:\n obj = tree[-1]\n if isinstance(obj, (six.string_types)):\n if re.match(REGEX_DYN_REF, obj):\n if re.match(REGEX_DYN_REF_SSM, obj):\n message = 'Password should use a secure dynamic reference for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n message = 'Password shouldn\\'t be hardcoded for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n elif isinstance(obj, dict):\n if len(obj) == 1:\n for key, value in obj.items():\n if key == 'Ref':\n 
if value in parameters:\n param = cfn.template['Parameters'][value]\n if 'NoEcho' in param:\n if not param['NoEcho']:\n fix_params.append(\n {'Name': value, 'Use': password_property})\n else:\n fix_params.append({'Name': value, 'Use': password_property})\n else:\n message = 'Inappropriate map found for password on %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n for paramname in fix_params:\n message = 'Parameter {} used as {}, therefore NoEcho should be True'.format(\n paramname['Name'], paramname['Use'])\n tree = ['Parameters', paramname['Name']]\n matches.append(RuleMatch(tree, message))\n return matches\n", "path": "src/cfnlint/rules/resources/properties/Password.py"}]}
1565
178
gh_patches_debug_29381
rasdani/github-patches
git_diff
opensearch-project__opensearch-build-193
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bundle system - copy opensearch-tar-install script into bundle The [opensearch-tar-install](https://github.com/opensearch-project/opensearch-build/blob/main/release/tar/linux/opensearch-tar-install.sh) script needs to be included with assembled bundles similar to [this](https://github.com/opensearch-project/opensearch-build/blob/main/release/tar/linux/opensearch-tar-build.sh#L156) shell script. </issue> <code> [start of bundle-workflow/python/assemble.py] 1 #!/usr/bin/env python 2 3 # Copyright OpenSearch Contributors. 4 # SPDX-License-Identifier: Apache-2.0 5 6 import os 7 import tempfile 8 import argparse 9 from assemble_workflow.bundle import Bundle 10 from assemble_workflow.bundle_recorder import BundleRecorder 11 from manifests.build_manifest import BuildManifest 12 13 parser = argparse.ArgumentParser(description = "Assemble an OpenSearch Bundle") 14 parser.add_argument('manifest', type = argparse.FileType('r'), help="Manifest file.") 15 args = parser.parse_args() 16 17 build_manifest = BuildManifest.from_file(args.manifest) 18 build = build_manifest.build 19 artifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name)) 20 output_dir = os.path.join(os.getcwd(), 'bundle') 21 os.makedirs(output_dir, exist_ok=True) 22 23 with tempfile.TemporaryDirectory() as work_dir: 24 print(f'Bundling {build.name} ({build.architecture}) into {output_dir} ...') 25 26 os.chdir(work_dir) 27 28 bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir) 29 bundle = Bundle(build_manifest, artifacts_dir, bundle_recorder) 30 31 bundle.install_plugins() 32 print(f'Installed plugins: {bundle.installed_plugins}') 33 34 # Save a copy of the manifest inside of the tar 35 bundle_recorder.write_manifest(bundle.archive_path) 36 bundle.build_tar(output_dir) 37 38 bundle_recorder.write_manifest(output_dir) 39 40 print(f'Done.') 41 [end of bundle-workflow/python/assemble.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bundle-workflow/python/assemble.py b/bundle-workflow/python/assemble.py --- a/bundle-workflow/python/assemble.py +++ b/bundle-workflow/python/assemble.py @@ -6,6 +6,7 @@ import os import tempfile import argparse +import shutil from assemble_workflow.bundle import Bundle from assemble_workflow.bundle_recorder import BundleRecorder from manifests.build_manifest import BuildManifest @@ -14,6 +15,12 @@ parser.add_argument('manifest', type = argparse.FileType('r'), help="Manifest file.") args = parser.parse_args() +tarball_installation_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../release/tar/linux/opensearch-tar-install.sh') + +if not os.path.isfile(tarball_installation_script): + print(f'No installation script found at path: {tarball_installation_script}') + exit(1) + build_manifest = BuildManifest.from_file(args.manifest) build = build_manifest.build artifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name)) @@ -31,6 +38,9 @@ bundle.install_plugins() print(f'Installed plugins: {bundle.installed_plugins}') + # Copy the tar installation script into the bundle + shutil.copyfile(tarball_installation_script, os.path.join(bundle.archive_path, os.path.basename(tarball_installation_script))) + # Save a copy of the manifest inside of the tar bundle_recorder.write_manifest(bundle.archive_path) bundle.build_tar(output_dir)
{"golden_diff": "diff --git a/bundle-workflow/python/assemble.py b/bundle-workflow/python/assemble.py\n--- a/bundle-workflow/python/assemble.py\n+++ b/bundle-workflow/python/assemble.py\n@@ -6,6 +6,7 @@\n import os\n import tempfile\n import argparse\n+import shutil\n from assemble_workflow.bundle import Bundle\n from assemble_workflow.bundle_recorder import BundleRecorder\n from manifests.build_manifest import BuildManifest\n@@ -14,6 +15,12 @@\n parser.add_argument('manifest', type = argparse.FileType('r'), help=\"Manifest file.\")\n args = parser.parse_args()\n \n+tarball_installation_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../release/tar/linux/opensearch-tar-install.sh')\n+\n+if not os.path.isfile(tarball_installation_script):\n+ print(f'No installation script found at path: {tarball_installation_script}')\n+ exit(1)\n+\n build_manifest = BuildManifest.from_file(args.manifest)\n build = build_manifest.build\n artifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))\n@@ -31,6 +38,9 @@\n bundle.install_plugins()\n print(f'Installed plugins: {bundle.installed_plugins}')\n \n+ # Copy the tar installation script into the bundle\n+ shutil.copyfile(tarball_installation_script, os.path.join(bundle.archive_path, os.path.basename(tarball_installation_script)))\n+\n # Save a copy of the manifest inside of the tar\n bundle_recorder.write_manifest(bundle.archive_path)\n bundle.build_tar(output_dir)\n", "issue": "Bundle system - copy opensearch-tar-install script into bundle\nThe [opensearch-tar-install](https://github.com/opensearch-project/opensearch-build/blob/main/release/tar/linux/opensearch-tar-install.sh) script needs to be included with assembled bundles similar to [this](https://github.com/opensearch-project/opensearch-build/blob/main/release/tar/linux/opensearch-tar-build.sh#L156) shell script.\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright OpenSearch Contributors.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport tempfile\nimport argparse\nfrom assemble_workflow.bundle import Bundle\nfrom assemble_workflow.bundle_recorder import BundleRecorder\nfrom manifests.build_manifest import BuildManifest\n\nparser = argparse.ArgumentParser(description = \"Assemble an OpenSearch Bundle\")\nparser.add_argument('manifest', type = argparse.FileType('r'), help=\"Manifest file.\")\nargs = parser.parse_args()\n\nbuild_manifest = BuildManifest.from_file(args.manifest)\nbuild = build_manifest.build\nartifacts_dir = os.path.dirname(os.path.realpath(args.manifest.name))\noutput_dir = os.path.join(os.getcwd(), 'bundle')\nos.makedirs(output_dir, exist_ok=True)\n\nwith tempfile.TemporaryDirectory() as work_dir:\n print(f'Bundling {build.name} ({build.architecture}) into {output_dir} ...')\n\n os.chdir(work_dir)\n\n bundle_recorder = BundleRecorder(build, output_dir, artifacts_dir)\n bundle = Bundle(build_manifest, artifacts_dir, bundle_recorder)\n\n bundle.install_plugins()\n print(f'Installed plugins: {bundle.installed_plugins}')\n\n # Save a copy of the manifest inside of the tar\n bundle_recorder.write_manifest(bundle.archive_path)\n bundle.build_tar(output_dir)\n\n bundle_recorder.write_manifest(output_dir)\n\nprint(f'Done.')\n", "path": "bundle-workflow/python/assemble.py"}]}
1001
340
gh_patches_debug_5960
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-402
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ${aws:username} now fails *cfn-lint version: 0.8 E1029 Found an embedded parameter outside of an "Fn::Sub" at Resources/ForceMFAPolicy/Properties/PolicyDocument/Statement/3/Resource/0/arn:aws:iam::*:mfa/${aws:username} It looks like the IAM variables are now being detected as embedded parameters in cfn-lint. Effect: "Allow" Action: - "iam:UploadSSHPublicKey" Resource: "arn:aws:iam::*:user/${aws:username}" </issue> <code> [start of src/cfnlint/rules/functions/SubNeeded.py] 1 """ 2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 """ 17 import re 18 from cfnlint import CloudFormationLintRule 19 from cfnlint import RuleMatch 20 21 class SubNeeded(CloudFormationLintRule): 22 """Check if a substitution string exists without a substitution function""" 23 id = 'E1029' 24 shortdesc = 'Sub is required if a variable is used in a string' 25 description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.' 26 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html' 27 tags = ['functions', 'sub'] 28 29 # Free-form text properties to exclude from this rule 30 excludes = ['UserData', 'ZipFile'] 31 32 def _match_values(self, searchRegex, cfnelem, path): 33 """Recursively search for values matching the searchRegex""" 34 values = [] 35 if isinstance(cfnelem, dict): 36 for key in cfnelem: 37 pathprop = path[:] 38 pathprop.append(key) 39 values.extend(self._match_values(searchRegex, cfnelem[key], pathprop)) 40 elif isinstance(cfnelem, list): 41 for index, item in enumerate(cfnelem): 42 pathprop = path[:] 43 pathprop.append(index) 44 values.extend(self._match_values(searchRegex, item, pathprop)) 45 else: 46 # Leaf node 47 if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem): 48 values.append(path + [cfnelem]) 49 50 return values 51 52 def match_values(self, searchRegex, cfn): 53 """ 54 Search for values in all parts of the templates that match the searchRegex 55 """ 56 results = [] 57 results.extend(self._match_values(searchRegex, cfn.template, [])) 58 # Globals are removed during a transform. 
They need to be checked manually 59 results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), [])) 60 return results 61 62 def match(self, cfn): 63 """Basic Rule Matching""" 64 65 matches = [] 66 67 # Generic regex to match a string containing at least one ${parameter} 68 parameter_search = re.compile(r'^(.*\$\{.*\}.*(\$\{.*\}.*)*)$') 69 70 # Get a list of paths to every leaf node string containing at least one ${parameter} 71 parameter_string_paths = self.match_values(parameter_search, cfn) 72 73 # We want to search all of the paths to check if each one contains an 'Fn::Sub' 74 for parameter_string_path in parameter_string_paths: 75 found_sub = False 76 77 # Does the path contain an 'Fn::Sub'? 78 for step in parameter_string_path: 79 if step == 'Fn::Sub' or step in self.excludes: 80 found_sub = True 81 82 # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly 83 if not found_sub: 84 message = 'Found an embedded parameter outside of an "Fn::Sub" at {}'.format('/'.join(map(str, parameter_string_path))) 85 matches.append(RuleMatch(parameter_string_path, message)) 86 87 return matches 88 [end of src/cfnlint/rules/functions/SubNeeded.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py --- a/src/cfnlint/rules/functions/SubNeeded.py +++ b/src/cfnlint/rules/functions/SubNeeded.py @@ -27,7 +27,7 @@ tags = ['functions', 'sub'] # Free-form text properties to exclude from this rule - excludes = ['UserData', 'ZipFile'] + excludes = ['UserData', 'ZipFile', 'Resource', 'Condition'] def _match_values(self, searchRegex, cfnelem, path): """Recursively search for values matching the searchRegex"""
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py\n--- a/src/cfnlint/rules/functions/SubNeeded.py\n+++ b/src/cfnlint/rules/functions/SubNeeded.py\n@@ -27,7 +27,7 @@\n tags = ['functions', 'sub']\n \n # Free-form text properties to exclude from this rule\n- excludes = ['UserData', 'ZipFile']\n+ excludes = ['UserData', 'ZipFile', 'Resource', 'Condition']\n \n def _match_values(self, searchRegex, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n", "issue": "${aws:username} now fails\n*cfn-lint version: 0.8\r\n\r\nE1029 Found an embedded parameter outside of an \"Fn::Sub\" at Resources/ForceMFAPolicy/Properties/PolicyDocument/Statement/3/Resource/0/arn:aws:iam::*:mfa/${aws:username}\r\n\r\nIt looks like the IAM variables are now being detected as embedded parameters in cfn-lint.\r\n\r\n Effect: \"Allow\"\r\n Action:\r\n - \"iam:UploadSSHPublicKey\"\r\n Resource: \"arn:aws:iam::*:user/${aws:username}\"\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n excludes = ['UserData', 'ZipFile']\n\n def _match_values(self, searchRegex, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(searchRegex, cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(searchRegex, item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, str) and re.match(searchRegex, cfnelem):\n values.append(path + [cfnelem])\n\n return values\n\n def match_values(self, searchRegex, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(searchRegex, cfn.template, 
[]))\n # Globals are removed during a transform. They need to be checked manually\n results.extend(self._match_values(searchRegex, cfn.template.get('Globals', {}), []))\n return results\n\n def match(self, cfn):\n \"\"\"Basic Rule Matching\"\"\"\n\n matches = []\n\n # Generic regex to match a string containing at least one ${parameter}\n parameter_search = re.compile(r'^(.*\\$\\{.*\\}.*(\\$\\{.*\\}.*)*)$')\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(parameter_search, cfn)\n\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n found_sub = False\n\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n message = 'Found an embedded parameter outside of an \"Fn::Sub\" at {}'.format('/'.join(map(str, parameter_string_path)))\n matches.append(RuleMatch(parameter_string_path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}]}
1726
140
gh_patches_debug_15881
rasdani/github-patches
git_diff
svthalia__concrexit-2496
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add album to event foreign key to api Feature request (I'm making this via the Github app so the template doesn't work) See title, Julian wants this :) </issue> <code> [start of website/photos/api/v2/serializers/album.py] 1 from rest_framework import serializers 2 3 from photos import services 4 from photos.api.v2.serializers.photo import PhotoSerializer, PhotoListSerializer 5 from photos.models import Album 6 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import ( 7 CleanedModelSerializer, 8 ) 9 10 11 class AlbumSerializer(CleanedModelSerializer): 12 """API serializer for albums.""" 13 14 class Meta: 15 """Meta class for the serializer.""" 16 17 model = Album 18 fields = ("slug", "title", "accessible", "shareable", "cover", "photos") 19 20 cover = PhotoSerializer() 21 accessible = serializers.SerializerMethodField("_accessible") 22 photos = PhotoListSerializer(source="photo_set", many=True) 23 24 def _accessible(self, obj): 25 return services.is_album_accessible(self.context["request"], obj) 26 27 28 class AlbumListSerializer(AlbumSerializer): 29 class Meta: 30 """Meta class for the serializer.""" 31 32 model = Album 33 fields = ("slug", "title", "accessible", "shareable", "cover") 34 [end of website/photos/api/v2/serializers/album.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/photos/api/v2/serializers/album.py b/website/photos/api/v2/serializers/album.py --- a/website/photos/api/v2/serializers/album.py +++ b/website/photos/api/v2/serializers/album.py @@ -15,7 +15,15 @@ """Meta class for the serializer.""" model = Album - fields = ("slug", "title", "accessible", "shareable", "cover", "photos") + fields = ( + "slug", + "title", + "accessible", + "shareable", + "cover", + "photos", + "event", + ) cover = PhotoSerializer() accessible = serializers.SerializerMethodField("_accessible") @@ -30,4 +38,4 @@ """Meta class for the serializer.""" model = Album - fields = ("slug", "title", "accessible", "shareable", "cover") + fields = ("slug", "title", "accessible", "shareable", "cover", "event")
{"golden_diff": "diff --git a/website/photos/api/v2/serializers/album.py b/website/photos/api/v2/serializers/album.py\n--- a/website/photos/api/v2/serializers/album.py\n+++ b/website/photos/api/v2/serializers/album.py\n@@ -15,7 +15,15 @@\n \"\"\"Meta class for the serializer.\"\"\"\n \n model = Album\n- fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\", \"photos\")\n+ fields = (\n+ \"slug\",\n+ \"title\",\n+ \"accessible\",\n+ \"shareable\",\n+ \"cover\",\n+ \"photos\",\n+ \"event\",\n+ )\n \n cover = PhotoSerializer()\n accessible = serializers.SerializerMethodField(\"_accessible\")\n@@ -30,4 +38,4 @@\n \"\"\"Meta class for the serializer.\"\"\"\n \n model = Album\n- fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\")\n+ fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\", \"event\")\n", "issue": "Add album to event foreign key to api\nFeature request (I'm making this via the Github app so the template doesn't work) \n\n\nSee title, Julian wants this :)\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom photos import services\nfrom photos.api.v2.serializers.photo import PhotoSerializer, PhotoListSerializer\nfrom photos.models import Album\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\n\n\nclass AlbumSerializer(CleanedModelSerializer):\n \"\"\"API serializer for albums.\"\"\"\n\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = Album\n fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\", \"photos\")\n\n cover = PhotoSerializer()\n accessible = serializers.SerializerMethodField(\"_accessible\")\n photos = PhotoListSerializer(source=\"photo_set\", many=True)\n\n def _accessible(self, obj):\n return services.is_album_accessible(self.context[\"request\"], obj)\n\n\nclass AlbumListSerializer(AlbumSerializer):\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = Album\n fields = (\"slug\", \"title\", \"accessible\", \"shareable\", \"cover\")\n", "path": "website/photos/api/v2/serializers/album.py"}]}
857
241
gh_patches_debug_35760
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-1406
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [PS-1] RPC service `push_gradient` async-SGD implementation Async apply gradients to non-embedding parameters. Updating embedding parameters will be supported in issue #1306 . </issue> <code> [start of elasticdl/python/ps/parameters.py] 1 import tensorflow as tf 2 3 from elasticdl.python.common.tensor import tensor_pb_to_ndarray 4 from elasticdl.python.ps.embedding_table import create_embedding_table 5 6 7 class Parameters(object): 8 """ 9 There are two kinds of parameters: 10 11 1. non-embedding parameters, or dense tensor parameters. We save it 12 in a hashmap `non-embedding_params`, the key is the parameter name, 13 the value is a tf.Variable` object. 14 2. embedding parameters, or row-sparse parameters. We save it in a 15 hashmap `embedding_params`, the key is the embedding layer name, 16 the value is an `EmbeddingTable` object. 17 18 """ 19 20 def __init__(self): 21 self.version = 0 22 self.init_status = False 23 self.non_embedding_params = {} 24 self.embedding_params = {} 25 26 def get_embedding_param(self, name, indices): 27 if name not in self.embedding_params: 28 raise ValueError( 29 "Please initialize embedding param %s first!", name 30 ) 31 return self.embedding_params[name].get(indices) 32 33 def set_embedding_param(self, name, indices, values): 34 if name not in self.embedding_params: 35 raise ValueError( 36 "Please initialize embedding param %s first!", name 37 ) 38 self.embedding_params[name].set(indices, values) 39 40 def init_from_model_pb(self, model_pb): 41 if not self.init_status: 42 tensors_pb = model_pb.param 43 embeddings_pb = model_pb.embedding_table_info 44 self._init_non_embedding_params(tensors_pb) 45 self._init_embedding_params(embeddings_pb) 46 self.version = model_pb.version 47 self.init_status = True 48 49 def _init_non_embedding_params(self, tensors_pb): 50 for pb in tensors_pb: 51 name = pb.name 52 arr = tensor_pb_to_ndarray(pb) 53 var = tf.Variable(name=name, initial_value=arr, trainable=True) 54 self.non_embedding_params[name] = var 55 56 def _init_embedding_params(self, embeddings_pb): 57 for pb in embeddings_pb: 58 self.embedding_params[pb.name] = create_embedding_table(pb) 59 [end of elasticdl/python/ps/parameters.py] [start of elasticdl/python/ps/servicer.py] 1 import threading 2 3 from google.protobuf import empty_pb2 4 5 from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc 6 from elasticdl.python.common.dtypes import dtype_numpy_to_tensor 7 from elasticdl.python.common.tensor import Tensor, serialize_tensor 8 9 10 class PserverServicer(elasticdl_pb2_grpc.PserverServicer): 11 """PS service implementation""" 12 13 def __init__( 14 self, 15 parameters, 16 grads_to_wait, 17 optimizer, 18 lr_staleness_modulation=False, 19 use_async=False, 20 ): 21 self._parameters = parameters 22 self._grads_to_wait = grads_to_wait 23 self._optimizer = optimizer 24 self._lr_staleness_modulation = lr_staleness_modulation 25 self._use_async = use_async 26 self._version = 0 27 self._lock = threading.Lock() 28 29 def pull_variable(self, request, _): 30 """ 31 Response with all non-embedding parameters if initialized. 
32 """ 33 res = elasticdl_pb2.PullVariableResponse() 34 if not self._parameters.init_status: 35 res.model_init_status = False 36 return res 37 38 # Only sync-SGD needs lock 39 # TODO: use a read-write lock to support multiple concurrent reads 40 if not self._use_async: 41 self._lock.acquire() 42 res.model.version = self._parameters.version 43 for name, var in self._parameters.non_embedding_params.items(): 44 tensor = res.model.param.add() 45 tensor.name = name 46 tensor.dim.extend(var.shape.as_list()) 47 var_values = var.numpy() 48 tensor.content = var_values.tobytes() 49 tensor.dtype = dtype_numpy_to_tensor(var_values.dtype) 50 if not self._use_async: 51 self._lock.release() 52 res.model_init_status = True 53 return res 54 55 def pull_embedding_vector(self, request, _): 56 ret = elasticdl_pb2.Tensor() 57 if not request.ids: 58 return ret 59 embedding_vectors = self._parameters.get_embedding_param( 60 request.name, request.ids 61 ) 62 tensor = Tensor(values=embedding_vectors) 63 serialize_tensor(tensor, ret) 64 return ret 65 66 def push_model(self, request, _): 67 with self._lock: 68 self._parameters.init_from_model_pb(request) 69 return empty_pb2.Empty() 70 71 def push_gradient(self, request, _): 72 # TODO: implement this RPC service 73 return elasticdl_pb2.PushGradientResponse() 74 [end of elasticdl/python/ps/servicer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/elasticdl/python/ps/parameters.py b/elasticdl/python/ps/parameters.py --- a/elasticdl/python/ps/parameters.py +++ b/elasticdl/python/ps/parameters.py @@ -23,6 +23,9 @@ self.non_embedding_params = {} self.embedding_params = {} + def get_non_embedding_param(self, name, default_value=None): + return self.non_embedding_params.get(name, default_value) + def get_embedding_param(self, name, indices): if name not in self.embedding_params: raise ValueError( diff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py --- a/elasticdl/python/ps/servicer.py +++ b/elasticdl/python/ps/servicer.py @@ -4,6 +4,7 @@ from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc from elasticdl.python.common.dtypes import dtype_numpy_to_tensor +from elasticdl.python.common.log_utils import default_logger as logger from elasticdl.python.common.tensor import Tensor, serialize_tensor @@ -23,7 +24,7 @@ self._optimizer = optimizer self._lr_staleness_modulation = lr_staleness_modulation self._use_async = use_async - self._version = 0 + self._version_lock = threading.Lock() self._lock = threading.Lock() def pull_variable(self, request, _): @@ -69,5 +70,29 @@ return empty_pb2.Empty() def push_gradient(self, request, _): - # TODO: implement this RPC service + if self._use_async: + grad_vars = [] + for pb in request.gradients: + tensor = Tensor.from_tensor_pb(pb) + var = self._parameters.get_non_embedding_param(tensor.name) + if var is None: + logger.warning( + "Gradients with invalid name %s" % tensor.name + ) + continue + grad = tensor.to_tf_tensor() + grad_vars.append((grad, var)) + + self._optimizer.apply_gradients(grad_vars) + with self._version_lock: + self._parameters.version += 1 + + res = elasticdl_pb2.PushGradientResponse() + res.accepted = True + res.model_version = self._parameters.version + return res + + raise NotImplementedError( + "Updating parameters synchronously is not implemented." + ) return elasticdl_pb2.PushGradientResponse()
{"golden_diff": "diff --git a/elasticdl/python/ps/parameters.py b/elasticdl/python/ps/parameters.py\n--- a/elasticdl/python/ps/parameters.py\n+++ b/elasticdl/python/ps/parameters.py\n@@ -23,6 +23,9 @@\n self.non_embedding_params = {}\n self.embedding_params = {}\n \n+ def get_non_embedding_param(self, name, default_value=None):\n+ return self.non_embedding_params.get(name, default_value)\n+\n def get_embedding_param(self, name, indices):\n if name not in self.embedding_params:\n raise ValueError(\ndiff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py\n--- a/elasticdl/python/ps/servicer.py\n+++ b/elasticdl/python/ps/servicer.py\n@@ -4,6 +4,7 @@\n \n from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n from elasticdl.python.common.dtypes import dtype_numpy_to_tensor\n+from elasticdl.python.common.log_utils import default_logger as logger\n from elasticdl.python.common.tensor import Tensor, serialize_tensor\n \n \n@@ -23,7 +24,7 @@\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n- self._version = 0\n+ self._version_lock = threading.Lock()\n self._lock = threading.Lock()\n \n def pull_variable(self, request, _):\n@@ -69,5 +70,29 @@\n return empty_pb2.Empty()\n \n def push_gradient(self, request, _):\n- # TODO: implement this RPC service\n+ if self._use_async:\n+ grad_vars = []\n+ for pb in request.gradients:\n+ tensor = Tensor.from_tensor_pb(pb)\n+ var = self._parameters.get_non_embedding_param(tensor.name)\n+ if var is None:\n+ logger.warning(\n+ \"Gradients with invalid name %s\" % tensor.name\n+ )\n+ continue\n+ grad = tensor.to_tf_tensor()\n+ grad_vars.append((grad, var))\n+\n+ self._optimizer.apply_gradients(grad_vars)\n+ with self._version_lock:\n+ self._parameters.version += 1\n+\n+ res = elasticdl_pb2.PushGradientResponse()\n+ res.accepted = True\n+ res.model_version = self._parameters.version\n+ return res\n+\n+ raise NotImplementedError(\n+ \"Updating parameters synchronously is not implemented.\"\n+ )\n return elasticdl_pb2.PushGradientResponse()\n", "issue": "[PS-1] RPC service `push_gradient` async-SGD implementation\nAsync apply gradients to non-embedding parameters. Updating embedding parameters will be supported in issue #1306 .\n", "before_files": [{"content": "import tensorflow as tf\n\nfrom elasticdl.python.common.tensor import tensor_pb_to_ndarray\nfrom elasticdl.python.ps.embedding_table import create_embedding_table\n\n\nclass Parameters(object):\n \"\"\"\n There are two kinds of parameters:\n\n 1. non-embedding parameters, or dense tensor parameters. We save it\n in a hashmap `non-embedding_params`, the key is the parameter name,\n the value is a tf.Variable` object.\n 2. embedding parameters, or row-sparse parameters. 
We save it in a\n hashmap `embedding_params`, the key is the embedding layer name,\n the value is an `EmbeddingTable` object.\n\n \"\"\"\n\n def __init__(self):\n self.version = 0\n self.init_status = False\n self.non_embedding_params = {}\n self.embedding_params = {}\n\n def get_embedding_param(self, name, indices):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n return self.embedding_params[name].get(indices)\n\n def set_embedding_param(self, name, indices, values):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n self.embedding_params[name].set(indices, values)\n\n def init_from_model_pb(self, model_pb):\n if not self.init_status:\n tensors_pb = model_pb.param\n embeddings_pb = model_pb.embedding_table_info\n self._init_non_embedding_params(tensors_pb)\n self._init_embedding_params(embeddings_pb)\n self.version = model_pb.version\n self.init_status = True\n\n def _init_non_embedding_params(self, tensors_pb):\n for pb in tensors_pb:\n name = pb.name\n arr = tensor_pb_to_ndarray(pb)\n var = tf.Variable(name=name, initial_value=arr, trainable=True)\n self.non_embedding_params[name] = var\n\n def _init_embedding_params(self, embeddings_pb):\n for pb in embeddings_pb:\n self.embedding_params[pb.name] = create_embedding_table(pb)\n", "path": "elasticdl/python/ps/parameters.py"}, {"content": "import threading\n\nfrom google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\nfrom elasticdl.python.common.dtypes import dtype_numpy_to_tensor\nfrom elasticdl.python.common.tensor import Tensor, serialize_tensor\n\n\nclass PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n \"\"\"PS service implementation\"\"\"\n\n def __init__(\n self,\n parameters,\n grads_to_wait,\n optimizer,\n lr_staleness_modulation=False,\n use_async=False,\n ):\n self._parameters = parameters\n self._grads_to_wait = grads_to_wait\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n self._lock = threading.Lock()\n\n def pull_variable(self, request, _):\n \"\"\"\n Response with all non-embedding parameters if initialized.\n \"\"\"\n res = elasticdl_pb2.PullVariableResponse()\n if not self._parameters.init_status:\n res.model_init_status = False\n return res\n\n # Only sync-SGD needs lock\n # TODO: use a read-write lock to support multiple concurrent reads\n if not self._use_async:\n self._lock.acquire()\n res.model.version = self._parameters.version\n for name, var in self._parameters.non_embedding_params.items():\n tensor = res.model.param.add()\n tensor.name = name\n tensor.dim.extend(var.shape.as_list())\n var_values = var.numpy()\n tensor.content = var_values.tobytes()\n tensor.dtype = dtype_numpy_to_tensor(var_values.dtype)\n if not self._use_async:\n self._lock.release()\n res.model_init_status = True\n return res\n\n def pull_embedding_vector(self, request, _):\n ret = elasticdl_pb2.Tensor()\n if not request.ids:\n return ret\n embedding_vectors = self._parameters.get_embedding_param(\n request.name, request.ids\n )\n tensor = Tensor(values=embedding_vectors)\n serialize_tensor(tensor, ret)\n return ret\n\n def push_model(self, request, _):\n with self._lock:\n self._parameters.init_from_model_pb(request)\n return empty_pb2.Empty()\n\n def push_gradient(self, request, _):\n # TODO: implement this RPC service\n return 
elasticdl_pb2.PushGradientResponse()\n", "path": "elasticdl/python/ps/servicer.py"}]}
1817
568
gh_patches_debug_6233
rasdani/github-patches
git_diff
WeblateOrg__weblate-6217
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Auth provider title and image override do not work for auth providers with '-' in it **Describe the issue** The new feature to override the name and image for an auth provider (introduced in 159ae591e91e7c2065ede9c39f14d743f5ab07e5) does not work for auth providers with '-' in them. <!-- A clear and concise description of the problem you are facing. --> **I already tried** Describe the steps you tried to solve the problem yourself. - [x] I've read and searched [the docs](https://docs.weblate.org/) and did not find the answer there. If you didn’t try already, try to search there what you wrote above. **To Reproduce the issue** Steps to reproduce the behavior: 1. Add an auth to your settings - e.g. `SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY` 2. Set the title and image override (`SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TITLE` and `SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_IMAGE`) 3. Go to the login page 4. The title and image are not overriden **Expected behavior** The title and image should be overridden. </issue> <code> [start of weblate/accounts/templatetags/authnames.py] 1 # 2 # Copyright © 2012 - 2021 Michal Čihař <[email protected]> 3 # 4 # This file is part of Weblate <https://weblate.org/> 5 # 6 # This program is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # This program is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with this program. If not, see <https://www.gnu.org/licenses/>. 
18 # 19 """Provide user friendly names for social authentication methods.""" 20 21 from django import template 22 from django.conf import settings 23 from django.contrib.staticfiles.storage import staticfiles_storage 24 from django.utils.safestring import mark_safe 25 from django.utils.translation import gettext_lazy 26 27 register = template.Library() 28 29 SOCIALS = { 30 "auth0": {"name": "Auth0", "image": "auth0.svg"}, 31 "saml": {"name": "SAML", "image": "saml.svg"}, 32 "google": {"name": "Google", "image": "google.svg"}, 33 "google-oauth2": {"name": "Google", "image": "google.svg"}, 34 "google-plus": {"name": "Google+", "image": "google.svg"}, 35 "password": {"name": gettext_lazy("Password"), "image": "password.svg"}, 36 "email": {"name": gettext_lazy("E-mail"), "image": "email.svg"}, 37 "ubuntu": {"name": "Ubuntu", "image": "ubuntu.svg"}, 38 "opensuse": {"name": "openSUSE", "image": "opensuse.svg"}, 39 "fedora": {"name": "Fedora", "image": "fedora.svg"}, 40 "facebook": {"name": "Facebook", "image": "facebook.svg"}, 41 "github": {"name": "GitHub", "image": "github.svg"}, 42 "github-enterprise": {"name": "GitHub Enterprise", "image": "github.svg"}, 43 "bitbucket": {"name": "Bitbucket", "image": "bitbucket.svg"}, 44 "bitbucket-oauth2": {"name": "Bitbucket", "image": "bitbucket.svg"}, 45 "azuread-oauth2": {"name": "Azure", "image": "azure.svg"}, 46 "azuread-tenant-oauth2": {"name": "Azure", "image": "azure.svg"}, 47 "gitlab": {"name": "GitLab", "image": "gitlab.svg"}, 48 "amazon": {"name": "Amazon", "image": "amazon.svg"}, 49 "twitter": {"name": "Twitter", "image": "twitter.svg"}, 50 "stackoverflow": {"name": "Stack Overflow", "image": "stackoverflow.svg"}, 51 } 52 53 IMAGE_SOCIAL_TEMPLATE = """ 54 <img class="auth-image" src="{image}" /> 55 """ 56 57 SOCIAL_TEMPLATE = """ 58 {icon} 59 {separator} 60 {name} 61 """ 62 63 64 def get_auth_params(auth: str): 65 """Returns authentication parameters.""" 66 # Fallback values 67 params = {"name": auth, "image": "password.svg"} 68 69 # Hardcoded names 70 if auth in SOCIALS: 71 params.update(SOCIALS[auth]) 72 73 # Settings override 74 settings_params = { 75 "name": f"SOCIAL_AUTH_{auth.upper()}_TITLE", 76 "image": f"SOCIAL_AUTH_{auth.upper()}_IMAGE", 77 } 78 for target, source in settings_params.items(): 79 value = getattr(settings, source, None) 80 if value: 81 params[target] = value 82 83 return params 84 85 86 @register.simple_tag 87 def auth_name(auth: str, separator: str = "<br />"): 88 """Create HTML markup for social authentication method.""" 89 params = get_auth_params(auth) 90 91 if not params["image"].startswith("http"): 92 params["image"] = staticfiles_storage.url("auth/" + params["image"]) 93 params["icon"] = IMAGE_SOCIAL_TEMPLATE.format(separator=separator, **params) 94 95 return mark_safe(SOCIAL_TEMPLATE.format(separator=separator, **params)) 96 97 98 def get_auth_name(auth: str): 99 """Get nice name for authentication backend.""" 100 return get_auth_params(auth)["name"] 101 [end of weblate/accounts/templatetags/authnames.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/weblate/accounts/templatetags/authnames.py b/weblate/accounts/templatetags/authnames.py --- a/weblate/accounts/templatetags/authnames.py +++ b/weblate/accounts/templatetags/authnames.py @@ -72,8 +72,8 @@ # Settings override settings_params = { - "name": f"SOCIAL_AUTH_{auth.upper()}_TITLE", - "image": f"SOCIAL_AUTH_{auth.upper()}_IMAGE", + "name": f"SOCIAL_AUTH_{auth.upper().replace('-','_')}_TITLE", + "image": f"SOCIAL_AUTH_{auth.upper().replace('-','_')}_IMAGE", } for target, source in settings_params.items(): value = getattr(settings, source, None)
{"golden_diff": "diff --git a/weblate/accounts/templatetags/authnames.py b/weblate/accounts/templatetags/authnames.py\n--- a/weblate/accounts/templatetags/authnames.py\n+++ b/weblate/accounts/templatetags/authnames.py\n@@ -72,8 +72,8 @@\n \n # Settings override\n settings_params = {\n- \"name\": f\"SOCIAL_AUTH_{auth.upper()}_TITLE\",\n- \"image\": f\"SOCIAL_AUTH_{auth.upper()}_IMAGE\",\n+ \"name\": f\"SOCIAL_AUTH_{auth.upper().replace('-','_')}_TITLE\",\n+ \"image\": f\"SOCIAL_AUTH_{auth.upper().replace('-','_')}_IMAGE\",\n }\n for target, source in settings_params.items():\n value = getattr(settings, source, None)\n", "issue": "Auth provider title and image override do not work for auth providers with '-' in it\n**Describe the issue**\r\n\r\nThe new feature to override the name and image for an auth provider (introduced in 159ae591e91e7c2065ede9c39f14d743f5ab07e5) does not work for auth providers with '-' in them. \r\n\r\n<!--\r\nA clear and concise description of the problem you are facing.\r\n-->\r\n\r\n**I already tried**\r\n\r\nDescribe the steps you tried to solve the problem yourself.\r\n\r\n- [x] I've read and searched [the docs](https://docs.weblate.org/) and did not find the answer there.\r\n If you didn\u2019t try already, try to search there what you wrote above.\r\n\r\n**To Reproduce the issue**\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Add an auth to your settings - e.g. `SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY`\r\n2. Set the title and image override (`SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TITLE` and `SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_IMAGE`)\r\n3. Go to the login page\r\n4. The title and image are not overriden\r\n\r\n**Expected behavior**\r\n\r\nThe title and image should be overridden. \r\n\n", "before_files": [{"content": "#\n# Copyright \u00a9 2012 - 2021 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\"\"\"Provide user friendly names for social authentication methods.\"\"\"\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy\n\nregister = template.Library()\n\nSOCIALS = {\n \"auth0\": {\"name\": \"Auth0\", \"image\": \"auth0.svg\"},\n \"saml\": {\"name\": \"SAML\", \"image\": \"saml.svg\"},\n \"google\": {\"name\": \"Google\", \"image\": \"google.svg\"},\n \"google-oauth2\": {\"name\": \"Google\", \"image\": \"google.svg\"},\n \"google-plus\": {\"name\": \"Google+\", \"image\": \"google.svg\"},\n \"password\": {\"name\": gettext_lazy(\"Password\"), \"image\": \"password.svg\"},\n \"email\": {\"name\": gettext_lazy(\"E-mail\"), \"image\": \"email.svg\"},\n \"ubuntu\": {\"name\": \"Ubuntu\", \"image\": \"ubuntu.svg\"},\n \"opensuse\": {\"name\": \"openSUSE\", \"image\": \"opensuse.svg\"},\n \"fedora\": {\"name\": \"Fedora\", \"image\": \"fedora.svg\"},\n \"facebook\": {\"name\": \"Facebook\", \"image\": \"facebook.svg\"},\n \"github\": {\"name\": \"GitHub\", \"image\": \"github.svg\"},\n \"github-enterprise\": {\"name\": \"GitHub Enterprise\", \"image\": \"github.svg\"},\n \"bitbucket\": {\"name\": \"Bitbucket\", \"image\": \"bitbucket.svg\"},\n \"bitbucket-oauth2\": {\"name\": \"Bitbucket\", \"image\": \"bitbucket.svg\"},\n \"azuread-oauth2\": {\"name\": \"Azure\", \"image\": \"azure.svg\"},\n \"azuread-tenant-oauth2\": {\"name\": \"Azure\", \"image\": \"azure.svg\"},\n \"gitlab\": {\"name\": \"GitLab\", \"image\": \"gitlab.svg\"},\n \"amazon\": {\"name\": \"Amazon\", \"image\": \"amazon.svg\"},\n \"twitter\": {\"name\": \"Twitter\", \"image\": \"twitter.svg\"},\n \"stackoverflow\": {\"name\": \"Stack Overflow\", \"image\": \"stackoverflow.svg\"},\n}\n\nIMAGE_SOCIAL_TEMPLATE = \"\"\"\n<img class=\"auth-image\" src=\"{image}\" />\n\"\"\"\n\nSOCIAL_TEMPLATE = \"\"\"\n{icon}\n{separator}\n{name}\n\"\"\"\n\n\ndef get_auth_params(auth: str):\n \"\"\"Returns authentication parameters.\"\"\"\n # Fallback values\n params = {\"name\": auth, \"image\": \"password.svg\"}\n\n # Hardcoded names\n if auth in SOCIALS:\n params.update(SOCIALS[auth])\n\n # Settings override\n settings_params = {\n \"name\": f\"SOCIAL_AUTH_{auth.upper()}_TITLE\",\n \"image\": f\"SOCIAL_AUTH_{auth.upper()}_IMAGE\",\n }\n for target, source in settings_params.items():\n value = getattr(settings, source, None)\n if value:\n params[target] = value\n\n return params\n\n\[email protected]_tag\ndef auth_name(auth: str, separator: str = \"<br />\"):\n \"\"\"Create HTML markup for social authentication method.\"\"\"\n params = get_auth_params(auth)\n\n if not params[\"image\"].startswith(\"http\"):\n params[\"image\"] = staticfiles_storage.url(\"auth/\" + params[\"image\"])\n params[\"icon\"] = IMAGE_SOCIAL_TEMPLATE.format(separator=separator, **params)\n\n return mark_safe(SOCIAL_TEMPLATE.format(separator=separator, **params))\n\n\ndef get_auth_name(auth: str):\n \"\"\"Get nice name for authentication backend.\"\"\"\n return get_auth_params(auth)[\"name\"]\n", "path": "weblate/accounts/templatetags/authnames.py"}]}
2003
184
gh_patches_debug_696
rasdani/github-patches
git_diff
boto__botocore-658
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Pin jmespatch dependency version Can this library pin its jmespath dependency to a specific version? Currently, it depends on the development branch of the jmespath GitHub repo - which is not stable nor deterministic. Currently, this project's setup.py requires version 0.7.1 but the upstream GitHub repo/branch does not deliver that version - so this project's dependency graph is disconnected. This can result in runtime errors for downstream consumers - like my organization did today. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import botocore 3 import sys 4 5 from setuptools import setup, find_packages 6 7 8 requires = ['jmespath==0.7.1', 9 'python-dateutil>=2.1,<3.0.0', 10 'docutils>=0.10'] 11 12 13 if sys.version_info[:2] == (2, 6): 14 # For python2.6 we have a few other dependencies. 15 # First we need an ordered dictionary so we use the 16 # 2.6 backport. 17 requires.append('ordereddict==1.1') 18 # Then we need simplejson. This is because we need 19 # a json version that allows us to specify we want to 20 # use an ordereddict instead of a normal dict for the 21 # JSON objects. The 2.7 json module has this. For 2.6 22 # we need simplejson. 23 requires.append('simplejson==3.3.0') 24 25 26 setup( 27 name='botocore', 28 version=botocore.__version__, 29 description='Low-level, data-driven core of boto 3.', 30 long_description=open('README.rst').read(), 31 author='Amazon Web Services', 32 url='https://github.com/boto/botocore', 33 scripts=[], 34 packages=find_packages(exclude=['tests*']), 35 package_data={'botocore': ['data/*.json', 'data/*/*.json'], 36 'botocore.vendored.requests': ['*.pem']}, 37 include_package_data=True, 38 install_requires=requires, 39 extras_require={ 40 ':python_version=="2.6"': [ 41 'ordereddict==1.1', 42 'simplejson==3.3.0', 43 ] 44 }, 45 license="Apache License 2.0", 46 classifiers=( 47 'Development Status :: 5 - Production/Stable', 48 'Intended Audience :: Developers', 49 'Intended Audience :: System Administrators', 50 'Natural Language :: English', 51 'License :: OSI Approved :: Apache Software License', 52 'Programming Language :: Python', 53 'Programming Language :: Python :: 2.6', 54 'Programming Language :: Python :: 2.7', 55 'Programming Language :: Python :: 3', 56 'Programming Language :: Python :: 3.3', 57 'Programming Language :: Python :: 3.4', 58 ), 59 ) 60 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ from setuptools import setup, find_packages -requires = ['jmespath==0.7.1', +requires = ['jmespath>=0.7.1,<1.0.0', 'python-dateutil>=2.1,<3.0.0', 'docutils>=0.10']
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,7 +5,7 @@\n from setuptools import setup, find_packages\n \n \n-requires = ['jmespath==0.7.1',\n+requires = ['jmespath>=0.7.1,<1.0.0',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n", "issue": "Pin jmespatch dependency version\nCan this library pin its jmespath dependency to a specific version? Currently, it depends on the development branch of the jmespath GitHub repo - which is not stable nor deterministic.\n\nCurrently, this project's setup.py requires version 0.7.1 but the upstream GitHub repo/branch does not deliver that version - so this project's dependency graph is disconnected.\n\nThis can result in runtime errors for downstream consumers - like my organization did today.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport botocore\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nrequires = ['jmespath==0.7.1',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have a few other dependencies.\n # First we need an ordered dictionary so we use the\n # 2.6 backport.\n requires.append('ordereddict==1.1')\n # Then we need simplejson. This is because we need\n # a json version that allows us to specify we want to\n # use an ordereddict instead of a normal dict for the\n # JSON objects. The 2.7 json module has this. For 2.6\n # we need simplejson.\n requires.append('simplejson==3.3.0')\n\n\nsetup(\n name='botocore',\n version=botocore.__version__,\n description='Low-level, data-driven core of boto 3.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='https://github.com/boto/botocore',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n package_data={'botocore': ['data/*.json', 'data/*/*.json'],\n 'botocore.vendored.requests': ['*.pem']},\n include_package_data=True,\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'ordereddict==1.1',\n 'simplejson==3.3.0',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ),\n)\n", "path": "setup.py"}]}
1247
100
gh_patches_debug_21576
rasdani/github-patches
git_diff
shuup__shuup-1707
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Admin: Product delete returns 404 </issue> <code> [start of shuup/admin/modules/products/views/delete.py] 1 # -*- coding: utf-8 -*- 2 # This file is part of Shuup. 3 # 4 # Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved. 5 # 6 # This source code is licensed under the OSL-3.0 license found in the 7 # LICENSE file in the root directory of this source tree. 8 9 from __future__ import unicode_literals 10 11 from django.contrib import messages 12 from django.core.urlresolvers import reverse 13 from django.http.response import HttpResponseRedirect 14 from django.utils.translation import ugettext as _ 15 from django.views.generic import DetailView 16 17 from shuup.admin.utils.urls import get_model_url 18 from shuup.core.models import Product 19 20 21 class ProductDeleteView(DetailView): 22 model = Product 23 context_object_name = "product" 24 25 def get(self, request, *args, **kwargs): 26 return HttpResponseRedirect(get_model_url(self.get_object(), shop=self.request.shop)) 27 28 def post(self, request, *args, **kwargs): 29 product = self.get_object() 30 product.soft_delete(user=request.user) 31 messages.success(request, _(u"%s has been marked deleted.") % product) 32 return HttpResponseRedirect(reverse("shuup_admin:shop_product.list")) 33 [end of shuup/admin/modules/products/views/delete.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shuup/admin/modules/products/views/delete.py b/shuup/admin/modules/products/views/delete.py --- a/shuup/admin/modules/products/views/delete.py +++ b/shuup/admin/modules/products/views/delete.py @@ -15,18 +15,19 @@ from django.views.generic import DetailView from shuup.admin.utils.urls import get_model_url -from shuup.core.models import Product +from shuup.core.models import ShopProduct class ProductDeleteView(DetailView): - model = Product + model = ShopProduct context_object_name = "product" def get(self, request, *args, **kwargs): - return HttpResponseRedirect(get_model_url(self.get_object(), shop=self.request.shop)) + product = self.get_object().product + return HttpResponseRedirect(get_model_url(product, shop=self.request.shop)) def post(self, request, *args, **kwargs): - product = self.get_object() + product = self.get_object().product product.soft_delete(user=request.user) messages.success(request, _(u"%s has been marked deleted.") % product) return HttpResponseRedirect(reverse("shuup_admin:shop_product.list"))
{"golden_diff": "diff --git a/shuup/admin/modules/products/views/delete.py b/shuup/admin/modules/products/views/delete.py\n--- a/shuup/admin/modules/products/views/delete.py\n+++ b/shuup/admin/modules/products/views/delete.py\n@@ -15,18 +15,19 @@\n from django.views.generic import DetailView\n \n from shuup.admin.utils.urls import get_model_url\n-from shuup.core.models import Product\n+from shuup.core.models import ShopProduct\n \n \n class ProductDeleteView(DetailView):\n- model = Product\n+ model = ShopProduct\n context_object_name = \"product\"\n \n def get(self, request, *args, **kwargs):\n- return HttpResponseRedirect(get_model_url(self.get_object(), shop=self.request.shop))\n+ product = self.get_object().product\n+ return HttpResponseRedirect(get_model_url(product, shop=self.request.shop))\n \n def post(self, request, *args, **kwargs):\n- product = self.get_object()\n+ product = self.get_object().product\n product.soft_delete(user=request.user)\n messages.success(request, _(u\"%s has been marked deleted.\") % product)\n return HttpResponseRedirect(reverse(\"shuup_admin:shop_product.list\"))\n", "issue": "Admin: Product delete returns 404\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom __future__ import unicode_literals\n\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.http.response import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import DetailView\n\nfrom shuup.admin.utils.urls import get_model_url\nfrom shuup.core.models import Product\n\n\nclass ProductDeleteView(DetailView):\n model = Product\n context_object_name = \"product\"\n\n def get(self, request, *args, **kwargs):\n return HttpResponseRedirect(get_model_url(self.get_object(), shop=self.request.shop))\n\n def post(self, request, *args, **kwargs):\n product = self.get_object()\n product.soft_delete(user=request.user)\n messages.success(request, _(u\"%s has been marked deleted.\") % product)\n return HttpResponseRedirect(reverse(\"shuup_admin:shop_product.list\"))\n", "path": "shuup/admin/modules/products/views/delete.py"}]}
870
258
gh_patches_debug_9949
rasdani/github-patches
git_diff
inducer__relate-607
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update pysaml2 to 4.6.0 (or higher) to fix CVE-2017-1000246 * https://nvd.nist.gov/vuln/detail/CVE-2017-1000246 * https://github.com/IdentityPython/pysaml2/issues/417 Currently blocked on https://github.com/knaperek/djangosaml2/issues/129. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 from setuptools import setup, find_packages 5 6 # This script (for now) is only intended to install the 'relate' content helper 7 # script. relate-validate. Its use is not needed for (and unrelated to) 8 # deploying RELATE as a web service. 9 10 # Use 'pip install -r requirements.txt' first to install prerequisites. 11 12 setup(name="relate-courseware", 13 version="2016.1", 14 description="RELATE courseware", 15 long_description=open("README.rst", "rt").read(), 16 17 scripts=["bin/relate"], 18 author="Andreas Kloeckner", 19 url="https://github.com/inducer/relate", 20 author_email="[email protected]", 21 license="MIT", 22 packages=find_packages(exclude=['tests']), 23 install_requires=[ 24 "django>=1.10,<2.1", 25 "django-crispy-forms>=1.5.1", 26 "colorama", 27 "markdown", 28 "dulwich", 29 "pyyaml", 30 "nbconvert>=5.2.1", 31 "pymbolic", 32 "sympy", 33 ], 34 package_data={ 35 "relate": [ 36 "templates/*.html", 37 ], 38 "course": [ 39 "templates/course/*.html", 40 "templates/course/jinja2/*.tpl", 41 ], 42 }, 43 ) 44 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -4,10 +4,11 @@ from setuptools import setup, find_packages # This script (for now) is only intended to install the 'relate' content helper -# script. relate-validate. Its use is not needed for (and unrelated to) -# deploying RELATE as a web service. +# script. Its use is not needed for (and unrelated to) deploying RELATE as a +# web service. -# Use 'pip install -r requirements.txt' first to install prerequisites. +# Use 'pip install -r requirements.txt' to install prerequisites for RELATE as +# a web service. setup(name="relate-courseware", version="2016.1",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,10 +4,11 @@\n from setuptools import setup, find_packages\n \n # This script (for now) is only intended to install the 'relate' content helper\n-# script. relate-validate. Its use is not needed for (and unrelated to)\n-# deploying RELATE as a web service.\n+# script. Its use is not needed for (and unrelated to) deploying RELATE as a\n+# web service.\n \n-# Use 'pip install -r requirements.txt' first to install prerequisites.\n+# Use 'pip install -r requirements.txt' to install prerequisites for RELATE as\n+# a web service.\n \n setup(name=\"relate-courseware\",\n version=\"2016.1\",\n", "issue": "Update pysaml2 to 4.6.0 (or higher) to fix CVE-2017-1000246\n* https://nvd.nist.gov/vuln/detail/CVE-2017-1000246\r\n* https://github.com/IdentityPython/pysaml2/issues/417\r\n\r\nCurrently blocked on https://github.com/knaperek/djangosaml2/issues/129.\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n# This script (for now) is only intended to install the 'relate' content helper\n# script. relate-validate. Its use is not needed for (and unrelated to)\n# deploying RELATE as a web service.\n\n# Use 'pip install -r requirements.txt' first to install prerequisites.\n\nsetup(name=\"relate-courseware\",\n version=\"2016.1\",\n description=\"RELATE courseware\",\n long_description=open(\"README.rst\", \"rt\").read(),\n\n scripts=[\"bin/relate\"],\n author=\"Andreas Kloeckner\",\n url=\"https://github.com/inducer/relate\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n packages=find_packages(exclude=['tests']),\n install_requires=[\n \"django>=1.10,<2.1\",\n \"django-crispy-forms>=1.5.1\",\n \"colorama\",\n \"markdown\",\n \"dulwich\",\n \"pyyaml\",\n \"nbconvert>=5.2.1\",\n \"pymbolic\",\n \"sympy\",\n ],\n package_data={\n \"relate\": [\n \"templates/*.html\",\n ],\n \"course\": [\n \"templates/course/*.html\",\n \"templates/course/jinja2/*.tpl\",\n ],\n },\n )\n", "path": "setup.py"}]}
1019
173
gh_patches_debug_34167
rasdani/github-patches
git_diff
PyGithub__PyGithub-1810
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Attributes "maintain" and "triage" missing in class "Permissions" Current Github API returns a permission values for `admin`, `push`, `maintain`, `triage` and `pull` (see <https://docs.github.com/en/free-pro-team@latest/rest/reference/teams#check-team-permissions-for-a-repository>). PyGithub so far only supports `admin`, `push` and `pull`. This is a feature request to add the two missing attributes to class `github.Permissions.Permissions`. https://github.com/PyGithub/PyGithub/blob/34d097ce473601624722b90fc5d0396011dd3acb/github/Permissions.py#L34 </issue> <code> [start of github/Permissions.py] 1 ############################ Copyrights and license ############################ 2 # # 3 # Copyright 2012 Vincent Jacques <[email protected]> # 4 # Copyright 2012 Zearin <[email protected]> # 5 # Copyright 2013 AKFish <[email protected]> # 6 # Copyright 2013 Vincent Jacques <[email protected]> # 7 # Copyright 2014 Vincent Jacques <[email protected]> # 8 # Copyright 2016 Jannis Gebauer <[email protected]> # 9 # Copyright 2016 Peter Buckley <[email protected]> # 10 # Copyright 2018 Wan Liuyang <[email protected]> # 11 # Copyright 2018 sfdye <[email protected]> # 12 # # 13 # This file is part of PyGithub. # 14 # http://pygithub.readthedocs.io/ # 15 # # 16 # PyGithub is free software: you can redistribute it and/or modify it under # 17 # the terms of the GNU Lesser General Public License as published by the Free # 18 # Software Foundation, either version 3 of the License, or (at your option) # 19 # any later version. # 20 # # 21 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # 22 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # 23 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # 24 # details. # 25 # # 26 # You should have received a copy of the GNU Lesser General Public License # 27 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # 28 # # 29 ################################################################################ 30 31 import github.GithubObject 32 33 34 class Permissions(github.GithubObject.NonCompletableGithubObject): 35 """ 36 This class represents Permissions 37 """ 38 39 def __repr__(self): 40 return self.get__repr__( 41 { 42 "admin": self._admin.value, 43 "pull": self._pull.value, 44 "push": self._push.value, 45 } 46 ) 47 48 @property 49 def admin(self): 50 """ 51 :type: bool 52 """ 53 return self._admin.value 54 55 @property 56 def pull(self): 57 """ 58 :type: bool 59 """ 60 return self._pull.value 61 62 @property 63 def push(self): 64 """ 65 :type: bool 66 """ 67 return self._push.value 68 69 def _initAttributes(self): 70 self._admin = github.GithubObject.NotSet 71 self._pull = github.GithubObject.NotSet 72 self._push = github.GithubObject.NotSet 73 74 def _useAttributes(self, attributes): 75 if "admin" in attributes: # pragma no branch 76 self._admin = self._makeBoolAttribute(attributes["admin"]) 77 if "pull" in attributes: # pragma no branch 78 self._pull = self._makeBoolAttribute(attributes["pull"]) 79 if "push" in attributes: # pragma no branch 80 self._push = self._makeBoolAttribute(attributes["push"]) 81 [end of github/Permissions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/github/Permissions.py b/github/Permissions.py --- a/github/Permissions.py +++ b/github/Permissions.py @@ -40,8 +40,10 @@ return self.get__repr__( { "admin": self._admin.value, + "maintain": self._maintain.value, "pull": self._pull.value, "push": self._push.value, + "triage": self._triage.value, } ) @@ -52,6 +54,13 @@ """ return self._admin.value + @property + def maintain(self): + """ + :type: bool + """ + return self._maintain.value + @property def pull(self): """ @@ -66,15 +75,28 @@ """ return self._push.value + @property + def triage(self): + """ + :type: bool + """ + return self._triage.value + def _initAttributes(self): self._admin = github.GithubObject.NotSet + self._maintain = github.GithubObject.NotSet self._pull = github.GithubObject.NotSet self._push = github.GithubObject.NotSet + self._triage = github.GithubObject.NotSet def _useAttributes(self, attributes): if "admin" in attributes: # pragma no branch self._admin = self._makeBoolAttribute(attributes["admin"]) + if "maintain" in attributes: # pragma no branch + self._maintain = self._makeBoolAttribute(attributes["maintain"]) if "pull" in attributes: # pragma no branch self._pull = self._makeBoolAttribute(attributes["pull"]) if "push" in attributes: # pragma no branch self._push = self._makeBoolAttribute(attributes["push"]) + if "triage" in attributes: # pragma no branch + self._triage = self._makeBoolAttribute(attributes["triage"])
{"golden_diff": "diff --git a/github/Permissions.py b/github/Permissions.py\n--- a/github/Permissions.py\n+++ b/github/Permissions.py\n@@ -40,8 +40,10 @@\n return self.get__repr__(\n {\n \"admin\": self._admin.value,\n+ \"maintain\": self._maintain.value,\n \"pull\": self._pull.value,\n \"push\": self._push.value,\n+ \"triage\": self._triage.value,\n }\n )\n \n@@ -52,6 +54,13 @@\n \"\"\"\n return self._admin.value\n \n+ @property\n+ def maintain(self):\n+ \"\"\"\n+ :type: bool\n+ \"\"\"\n+ return self._maintain.value\n+\n @property\n def pull(self):\n \"\"\"\n@@ -66,15 +75,28 @@\n \"\"\"\n return self._push.value\n \n+ @property\n+ def triage(self):\n+ \"\"\"\n+ :type: bool\n+ \"\"\"\n+ return self._triage.value\n+\n def _initAttributes(self):\n self._admin = github.GithubObject.NotSet\n+ self._maintain = github.GithubObject.NotSet\n self._pull = github.GithubObject.NotSet\n self._push = github.GithubObject.NotSet\n+ self._triage = github.GithubObject.NotSet\n \n def _useAttributes(self, attributes):\n if \"admin\" in attributes: # pragma no branch\n self._admin = self._makeBoolAttribute(attributes[\"admin\"])\n+ if \"maintain\" in attributes: # pragma no branch\n+ self._maintain = self._makeBoolAttribute(attributes[\"maintain\"])\n if \"pull\" in attributes: # pragma no branch\n self._pull = self._makeBoolAttribute(attributes[\"pull\"])\n if \"push\" in attributes: # pragma no branch\n self._push = self._makeBoolAttribute(attributes[\"push\"])\n+ if \"triage\" in attributes: # pragma no branch\n+ self._triage = self._makeBoolAttribute(attributes[\"triage\"])\n", "issue": "Attributes \"maintain\" and \"triage\" missing in class \"Permissions\"\nCurrent Github API returns a permission values for `admin`, `push`, `maintain`, `triage` and `pull` (see <https://docs.github.com/en/free-pro-team@latest/rest/reference/teams#check-team-permissions-for-a-repository>).\r\nPyGithub so far only supports `admin`, `push` and `pull`.\r\n\r\nThis is a feature request to add the two missing attributes to class `github.Permissions.Permissions`.\r\nhttps://github.com/PyGithub/PyGithub/blob/34d097ce473601624722b90fc5d0396011dd3acb/github/Permissions.py#L34\r\n\n", "before_files": [{"content": "############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nimport github.GithubObject\n\n\nclass Permissions(github.GithubObject.NonCompletableGithubObject):\n \"\"\"\n This class represents Permissions\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__(\n {\n \"admin\": self._admin.value,\n \"pull\": self._pull.value,\n \"push\": self._push.value,\n }\n )\n\n @property\n def admin(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._admin.value\n\n @property\n def pull(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._pull.value\n\n @property\n def push(self):\n \"\"\"\n :type: bool\n \"\"\"\n return self._push.value\n\n def _initAttributes(self):\n self._admin = github.GithubObject.NotSet\n self._pull = github.GithubObject.NotSet\n self._push = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"admin\" in attributes: # pragma no branch\n self._admin = self._makeBoolAttribute(attributes[\"admin\"])\n if \"pull\" in attributes: # pragma no branch\n self._pull = self._makeBoolAttribute(attributes[\"pull\"])\n if \"push\" in attributes: # pragma no branch\n self._push = self._makeBoolAttribute(attributes[\"push\"])\n", "path": "github/Permissions.py"}]}
1546
462
gh_patches_debug_7969
rasdani/github-patches
git_diff
urllib3__urllib3-3034
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add OpenGraph information to our documentation Would be good to fill in some open graph fields for our documentation so when we link to things they show up with rich output / images rather than as a standard link. Really works nicely for Social Media, Slack, etc. More information: https://ogp.me/ </issue> <code> [start of docs/conf.py] 1 from __future__ import annotations 2 3 import os 4 import sys 5 from datetime import date 6 7 # If extensions (or modules to document with autodoc) are in another directory, 8 # add these directories to sys.path here. If the directory is relative to the 9 # documentation root, use os.path.abspath to make it absolute, like shown here. 10 11 root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) 12 sys.path.insert(0, root_path) 13 14 # https://docs.readthedocs.io/en/stable/builds.html#build-environment 15 if "READTHEDOCS" in os.environ: 16 import glob 17 18 if glob.glob("../changelog/*.*.rst"): 19 print("-- Found changes; running towncrier --", flush=True) 20 import subprocess 21 22 subprocess.run( 23 ["towncrier", "--yes", "--date", "not released yet"], cwd="..", check=True 24 ) 25 26 import urllib3 27 28 # -- General configuration ----------------------------------------------------- 29 30 31 # Add any Sphinx extension module names here, as strings. They can be extensions 32 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 33 extensions = [ 34 "sphinx.ext.autodoc", 35 "sphinx_copybutton", 36 "sphinx.ext.doctest", 37 "sphinx.ext.intersphinx", 38 ] 39 40 # Test code blocks only when explicitly specified 41 doctest_test_doctest_blocks = "" 42 43 # Add any paths that contain templates here, relative to this directory. 44 templates_path = ["_templates"] 45 46 # The suffix of source filenames. 47 source_suffix = ".rst" 48 49 # The master toctree document. 50 master_doc = "index" 51 52 # General information about the project. 53 project = "urllib3" 54 copyright = f"{date.today().year}, Andrey Petrov" 55 56 # The short X.Y version. 57 version = urllib3.__version__ 58 # The full version, including alpha/beta/rc tags. 59 release = version 60 61 # List of patterns, relative to source directory, that match files and 62 # directories to ignore when looking for source files. 63 exclude_patterns = ["_build"] 64 65 # The name of the Pygments (syntax highlighting) style to use. 66 pygments_style = "friendly" 67 68 # The theme to use for HTML and HTML Help pages. See the documentation for 69 # a list of builtin themes. 
70 html_theme = "furo" 71 html_favicon = "images/favicon.png" 72 73 html_static_path = ["_static"] 74 html_theme_options = { 75 "announcement": """ 76 <a style=\"text-decoration: none; color: white;\" 77 href=\"https://github.com/sponsors/urllib3\"> 78 <img src=\"/en/latest/_static/favicon.png\"/> Support urllib3 on GitHub Sponsors 79 </a> 80 """, 81 "sidebar_hide_name": True, 82 "light_logo": "banner.svg", 83 "dark_logo": "dark-logo.svg", 84 } 85 86 intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} 87 88 # Show typehints as content of the function or method 89 autodoc_typehints = "description" 90 91 # Warn about all references to unknown targets 92 nitpicky = True 93 # Except for these ones, which we expect to point to unknown targets: 94 nitpick_ignore = [ 95 ("py:class", "_TYPE_SOCKS_OPTIONS"), 96 ("py:class", "_TYPE_SOCKET_OPTIONS"), 97 ("py:class", "_TYPE_TIMEOUT"), 98 ("py:class", "_TYPE_FIELD_VALUE"), 99 ("py:class", "_TYPE_BODY"), 100 ("py:class", "_HttplibHTTPResponse"), 101 ("py:class", "_HttplibHTTPMessage"), 102 ("py:class", "TracebackType"), 103 ("py:class", "Literal"), 104 ("py:class", "email.errors.MessageDefect"), 105 ("py:class", "MessageDefect"), 106 ("py:class", "http.client.HTTPMessage"), 107 ("py:class", "RequestHistory"), 108 ("py:class", "SSLTransportType"), 109 ("py:class", "VerifyMode"), 110 ("py:class", "_ssl._SSLContext"), 111 ("py:class", "urllib3._collections.HTTPHeaderDict"), 112 ("py:class", "urllib3._collections.RecentlyUsedContainer"), 113 ("py:class", "urllib3._request_methods.RequestMethods"), 114 ("py:class", "urllib3.contrib.socks._TYPE_SOCKS_OPTIONS"), 115 ("py:class", "urllib3.util.timeout._TYPE_DEFAULT"), 116 ("py:class", "BaseHTTPConnection"), 117 ] 118 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -35,8 +35,16 @@ "sphinx_copybutton", "sphinx.ext.doctest", "sphinx.ext.intersphinx", + "sphinxext.opengraph", ] +# Open Graph metadata +ogp_title = "urllib3 documentation" +ogp_site_url = "https://urllib3.readthedocs.io" +ogp_type = "website" +ogp_image = "https://github.com/urllib3/urllib3/raw/main/docs/_static/banner_github.svg" +ogp_description = "urllib3 is a user-friendly HTTP client library for Python." + # Test code blocks only when explicitly specified doctest_test_doctest_blocks = ""
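For readers checking this record locally: the patch enables the sphinxext-opengraph extension and sets the ogp_* options shown in the diff. A minimal, stdlib-only smoke test is sketched below; the docs/_build/html/index.html path is an assumption about where the Sphinx build lands, not something stated in the record.

```python
# Hypothetical smoke test for the patch above: collect og:* meta tags from a
# built page. The docs/_build/html/index.html path is an assumed build output.
from html.parser import HTMLParser


class OGTagCollector(HTMLParser):
    def __init__(self):
        super().__init__()
        self.tags = {}

    def handle_starttag(self, tag, attrs):
        if tag != "meta":
            return
        attrs = dict(attrs)
        prop = attrs.get("property", "")
        if prop.startswith("og:"):
            self.tags[prop] = attrs.get("content")


with open("docs/_build/html/index.html", encoding="utf-8") as fh:
    collector = OGTagCollector()
    collector.feed(fh.read())

for prop in ("og:title", "og:type", "og:url", "og:image", "og:description"):
    print(prop, "->", collector.tags.get(prop))
```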
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -35,8 +35,16 @@\n \"sphinx_copybutton\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n+ \"sphinxext.opengraph\",\n ]\n \n+# Open Graph metadata\n+ogp_title = \"urllib3 documentation\"\n+ogp_site_url = \"https://urllib3.readthedocs.io\"\n+ogp_type = \"website\"\n+ogp_image = \"https://github.com/urllib3/urllib3/raw/main/docs/_static/banner_github.svg\"\n+ogp_description = \"urllib3 is a user-friendly HTTP client library for Python.\"\n+\n # Test code blocks only when explicitly specified\n doctest_test_doctest_blocks = \"\"\n", "issue": "Add OpenGraph information to our documentation\nWould be good to fill in some open graph fields for our documentation so when we link to things they show up with rich output / images rather than as a standard link. Really works nicely for Social Media, Slack, etc.\r\n\r\nMore information: https://ogp.me/\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom datetime import date\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nroot_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nsys.path.insert(0, root_path)\n\n# https://docs.readthedocs.io/en/stable/builds.html#build-environment\nif \"READTHEDOCS\" in os.environ:\n import glob\n\n if glob.glob(\"../changelog/*.*.rst\"):\n print(\"-- Found changes; running towncrier --\", flush=True)\n import subprocess\n\n subprocess.run(\n [\"towncrier\", \"--yes\", \"--date\", \"not released yet\"], cwd=\"..\", check=True\n )\n\nimport urllib3\n\n# -- General configuration -----------------------------------------------------\n\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx_copybutton\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n]\n\n# Test code blocks only when explicitly specified\ndoctest_test_doctest_blocks = \"\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"urllib3\"\ncopyright = f\"{date.today().year}, Andrey Petrov\"\n\n# The short X.Y version.\nversion = urllib3.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"friendly\"\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"furo\"\nhtml_favicon = \"images/favicon.png\"\n\nhtml_static_path = [\"_static\"]\nhtml_theme_options = {\n \"announcement\": \"\"\"\n <a style=\\\"text-decoration: none; color: white;\\\" \n href=\\\"https://github.com/sponsors/urllib3\\\">\n <img src=\\\"/en/latest/_static/favicon.png\\\"/> Support urllib3 on GitHub Sponsors\n </a>\n \"\"\",\n \"sidebar_hide_name\": True,\n \"light_logo\": \"banner.svg\",\n \"dark_logo\": \"dark-logo.svg\",\n}\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n\n# Show typehints as content of the function or method\nautodoc_typehints = \"description\"\n\n# Warn about all references to unknown targets\nnitpicky = True\n# Except for these ones, which we expect to point to unknown targets:\nnitpick_ignore = [\n (\"py:class\", \"_TYPE_SOCKS_OPTIONS\"),\n (\"py:class\", \"_TYPE_SOCKET_OPTIONS\"),\n (\"py:class\", \"_TYPE_TIMEOUT\"),\n (\"py:class\", \"_TYPE_FIELD_VALUE\"),\n (\"py:class\", \"_TYPE_BODY\"),\n (\"py:class\", \"_HttplibHTTPResponse\"),\n (\"py:class\", \"_HttplibHTTPMessage\"),\n (\"py:class\", \"TracebackType\"),\n (\"py:class\", \"Literal\"),\n (\"py:class\", \"email.errors.MessageDefect\"),\n (\"py:class\", \"MessageDefect\"),\n (\"py:class\", \"http.client.HTTPMessage\"),\n (\"py:class\", \"RequestHistory\"),\n (\"py:class\", \"SSLTransportType\"),\n (\"py:class\", \"VerifyMode\"),\n (\"py:class\", \"_ssl._SSLContext\"),\n (\"py:class\", \"urllib3._collections.HTTPHeaderDict\"),\n (\"py:class\", \"urllib3._collections.RecentlyUsedContainer\"),\n (\"py:class\", \"urllib3._request_methods.RequestMethods\"),\n (\"py:class\", \"urllib3.contrib.socks._TYPE_SOCKS_OPTIONS\"),\n (\"py:class\", \"urllib3.util.timeout._TYPE_DEFAULT\"),\n (\"py:class\", \"BaseHTTPConnection\"),\n]\n", "path": "docs/conf.py"}]}
1,788
179
gh_patches_debug_12165
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-4599
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cannot download Daum tvpot url cannot download video from Daum tvpot which is Korean internet portal service http://tvpot.daum.net/v/vfb45BtrWBWWHKEkOwXEghB </issue> <code> [start of youtube_dl/extractor/daum.py] 1 # encoding: utf-8 2 3 from __future__ import unicode_literals 4 5 import re 6 7 from .common import InfoExtractor 8 from ..compat import ( 9 compat_urllib_parse, 10 ) 11 12 13 class DaumIE(InfoExtractor): 14 _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)' 15 IE_NAME = 'daum.net' 16 17 _TESTS = [{ 18 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690', 19 'info_dict': { 20 'id': '52554690', 21 'ext': 'mp4', 22 'title': 'DOTA 2GETHER 시즌2 6회 - 2부', 23 'description': 'DOTA 2GETHER 시즌2 6회 - 2부', 24 'upload_date': '20130831', 25 'duration': 3868, 26 }, 27 }, { 28 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz', 29 'only_matching': True, 30 }, { 31 'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24', 32 'only_matching': True, 33 }] 34 35 def _real_extract(self, url): 36 mobj = re.match(self._VALID_URL, url) 37 video_id = mobj.group('id') 38 canonical_url = 'http://tvpot.daum.net/v/%s' % video_id 39 webpage = self._download_webpage(canonical_url, video_id) 40 full_id = self._search_regex( 41 r'<iframe src="http://videofarm.daum.net/controller/video/viewer/Video.html\?.*?vid=(.+?)[&"]', 42 webpage, 'full id') 43 query = compat_urllib_parse.urlencode({'vid': full_id}) 44 info = self._download_xml( 45 'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id, 46 'Downloading video info') 47 urls = self._download_xml( 48 'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query, 49 video_id, 'Downloading video formats info') 50 51 formats = [] 52 for format_el in urls.findall('result/output_list/output_list'): 53 profile = format_el.attrib['profile'] 54 format_query = compat_urllib_parse.urlencode({ 55 'vid': full_id, 56 'profile': profile, 57 }) 58 url_doc = self._download_xml( 59 'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query, 60 video_id, note='Downloading video data for %s format' % profile) 61 format_url = url_doc.find('result/url').text 62 formats.append({ 63 'url': format_url, 64 'format_id': profile, 65 }) 66 67 return { 68 'id': video_id, 69 'title': info.find('TITLE').text, 70 'formats': formats, 71 'thumbnail': self._og_search_thumbnail(webpage), 72 'description': info.find('CONTENTS').text, 73 'duration': int(info.find('DURATION').text), 74 'upload_date': info.find('REGDTTM').text[:8], 75 } 76 [end of youtube_dl/extractor/daum.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/youtube_dl/extractor/daum.py b/youtube_dl/extractor/daum.py --- a/youtube_dl/extractor/daum.py +++ b/youtube_dl/extractor/daum.py @@ -38,7 +38,7 @@ canonical_url = 'http://tvpot.daum.net/v/%s' % video_id webpage = self._download_webpage(canonical_url, video_id) full_id = self._search_regex( - r'<iframe src="http://videofarm.daum.net/controller/video/viewer/Video.html\?.*?vid=(.+?)[&"]', + r'src=["\']http://videofarm.daum.net/controller/video/viewer/Video.html\?.*?vid=(.+?)[&"\']', webpage, 'full id') query = compat_urllib_parse.urlencode({'vid': full_id}) info = self._download_xml(
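The essence of this fix is the loosened regular expression: it now accepts either quote style around the src attribute and no longer requires the literal `<iframe` prefix. A quick self-contained check of that pattern, with invented sample HTML, might look like this:

```python
# Self-contained check of the loosened pattern from the patch above; the
# sample HTML snippets are invented for illustration.
import re

PATTERN = (r'src=["\']http://videofarm.daum.net/controller/video/viewer/'
           r'Video.html\?.*?vid=(.+?)[&"\']')

samples = [
    '<iframe src="http://videofarm.daum.net/controller/video/viewer/Video.html?vid=abc123&ref=x">',
    "<iframe src='http://videofarm.daum.net/controller/video/viewer/Video.html?vid=xyz789'>",
]
for html in samples:
    match = re.search(PATTERN, html)
    print(match.group(1) if match else "no match")  # abc123, then xyz789
```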
{"golden_diff": "diff --git a/youtube_dl/extractor/daum.py b/youtube_dl/extractor/daum.py\n--- a/youtube_dl/extractor/daum.py\n+++ b/youtube_dl/extractor/daum.py\n@@ -38,7 +38,7 @@\n canonical_url = 'http://tvpot.daum.net/v/%s' % video_id\n webpage = self._download_webpage(canonical_url, video_id)\n full_id = self._search_regex(\n- r'<iframe src=\"http://videofarm.daum.net/controller/video/viewer/Video.html\\?.*?vid=(.+?)[&\"]',\n+ r'src=[\"\\']http://videofarm.daum.net/controller/video/viewer/Video.html\\?.*?vid=(.+?)[&\"\\']',\n webpage, 'full id')\n query = compat_urllib_parse.urlencode({'vid': full_id})\n info = self._download_xml(\n", "issue": "cannot download Daum tvpot url\ncannot download video from Daum tvpot which is Korean internet portal service\nhttp://tvpot.daum.net/v/vfb45BtrWBWWHKEkOwXEghB\n\n", "before_files": [{"content": "# encoding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_urllib_parse,\n)\n\n\nclass DaumIE(InfoExtractor):\n _VALID_URL = r'https?://(?:m\\.)?tvpot\\.daum\\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'\n IE_NAME = 'daum.net'\n\n _TESTS = [{\n 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',\n 'info_dict': {\n 'id': '52554690',\n 'ext': 'mp4',\n 'title': 'DOTA 2GETHER \uc2dc\uc98c2 6\ud68c - 2\ubd80',\n 'description': 'DOTA 2GETHER \uc2dc\uc98c2 6\ud68c - 2\ubd80',\n 'upload_date': '20130831',\n 'duration': 3868,\n },\n }, {\n 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',\n 'only_matching': True,\n }, {\n 'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n canonical_url = 'http://tvpot.daum.net/v/%s' % video_id\n webpage = self._download_webpage(canonical_url, video_id)\n full_id = self._search_regex(\n r'<iframe src=\"http://videofarm.daum.net/controller/video/viewer/Video.html\\?.*?vid=(.+?)[&\"]',\n webpage, 'full id')\n query = compat_urllib_parse.urlencode({'vid': full_id})\n info = self._download_xml(\n 'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,\n 'Downloading video info')\n urls = self._download_xml(\n 'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,\n video_id, 'Downloading video formats info')\n\n formats = []\n for format_el in urls.findall('result/output_list/output_list'):\n profile = format_el.attrib['profile']\n format_query = compat_urllib_parse.urlencode({\n 'vid': full_id,\n 'profile': profile,\n })\n url_doc = self._download_xml(\n 'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,\n video_id, note='Downloading video data for %s format' % profile)\n format_url = url_doc.find('result/url').text\n formats.append({\n 'url': format_url,\n 'format_id': profile,\n })\n\n return {\n 'id': video_id,\n 'title': info.find('TITLE').text,\n 'formats': formats,\n 'thumbnail': self._og_search_thumbnail(webpage),\n 'description': info.find('CONTENTS').text,\n 'duration': int(info.find('DURATION').text),\n 'upload_date': info.find('REGDTTM').text[:8],\n }\n", "path": "youtube_dl/extractor/daum.py"}]}
1,498
210
gh_patches_debug_7980
rasdani/github-patches
git_diff
opensearch-project__opensearch-build-508
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add bwctest.sh in OpenSearch and one plugin - [x] OpenSearch Core - https://github.com/opensearch-project/OpenSearch/pull/1150 - [x] Anomaly Detection Plugin - https://github.com/opensearch-project/anomaly-detection/pull/189 </issue> <code> [start of bundle-workflow/src/paths/script_finder.py] 1 # SPDX-License-Identifier: Apache-2.0 2 # 3 # The OpenSearch Contributors require contributions made to 4 # this file be licensed under the Apache-2.0 license or a 5 # compatible open source license. 6 7 import os 8 9 10 class ScriptFinder: 11 class ScriptNotFoundError(Exception): 12 def __init__(self, kind, paths): 13 self.kind = kind 14 self.paths = paths 15 super().__init__(f"Could not find {kind} script. Looked in {paths}.") 16 17 component_scripts_path = os.path.realpath( 18 os.path.join( 19 os.path.dirname(os.path.abspath(__file__)), "../../scripts/components" 20 ) 21 ) 22 23 default_scripts_path = os.path.realpath( 24 os.path.join( 25 os.path.dirname(os.path.abspath(__file__)), "../../scripts/default" 26 ) 27 ) 28 29 """ 30 ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts. 31 32 For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository, 33 it will look in the following locations, in order: 34 * Root of the Git repository 35 * /scripts/<script-name> in the Git repository 36 * <component_scripts_path>/<component_name>/<script-name> 37 * <default_scripts_path>/<script-name> 38 39 For install.sh scripts, given a component name, it will look in the following locations, in order: 40 * <component_scripts_path>/<component_name>/<script-name> 41 * <default_scripts_path>/<script-name> 42 """ 43 44 @classmethod 45 def __find_script(cls, name, paths): 46 script = next(filter(lambda path: os.path.exists(path), paths), None) 47 if script is None: 48 raise ScriptFinder.ScriptNotFoundError(name, paths) 49 return script 50 51 @classmethod 52 def find_build_script(cls, component_name, git_dir): 53 paths = [ 54 os.path.realpath(os.path.join(git_dir, "build.sh")), 55 os.path.realpath(os.path.join(git_dir, "scripts/build.sh")), 56 os.path.realpath( 57 os.path.join(cls.component_scripts_path, component_name, "build.sh") 58 ), 59 os.path.realpath(os.path.join(cls.default_scripts_path, "build.sh")), 60 ] 61 62 return cls.__find_script("build.sh", paths) 63 64 @classmethod 65 def find_integ_test_script(cls, component_name, git_dir): 66 paths = [ 67 # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497 68 # os.path.realpath(os.path.join(git_dir, "integtest.sh")), 69 # os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")), 70 os.path.realpath( 71 os.path.join(cls.component_scripts_path, component_name, "integtest.sh") 72 ), 73 os.path.realpath(os.path.join(cls.default_scripts_path, "integtest.sh")), 74 ] 75 76 return cls.__find_script("integtest.sh", paths) 77 78 @classmethod 79 def find_install_script(cls, component_name): 80 paths = [ 81 os.path.realpath( 82 os.path.join(cls.component_scripts_path, component_name, "install.sh") 83 ), 84 os.path.realpath(os.path.join(cls.default_scripts_path, "install.sh")), 85 ] 86 87 return cls.__find_script("install.sh", paths) 88 [end of bundle-workflow/src/paths/script_finder.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py --- a/bundle-workflow/src/paths/script_finder.py +++ b/bundle-workflow/src/paths/script_finder.py @@ -85,3 +85,16 @@ ] return cls.__find_script("install.sh", paths) + + @classmethod + def find_bwc_test_script(cls, component_name, git_dir): + paths = [ + os.path.realpath(os.path.join(git_dir, "bwctest.sh")), + os.path.realpath(os.path.join(git_dir, "scripts/bwctest.sh")), + os.path.realpath( + os.path.join(cls.component_scripts_path, component_name, "bwctest.sh") + ), + os.path.realpath(os.path.join(cls.default_scripts_path, "bwctest.sh")), + ] + + return cls.__find_script("bwctest.sh", paths)
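The patch simply mirrors find_integ_test_script for bwctest.sh, so the new method searches the checkout root, its scripts/ directory, the per-component override directory, and finally the defaults. A hypothetical call site is sketched below; the import assumes bundle-workflow/src is the working directory, and the component name and checkout path are placeholders.

```python
# Illustrative call site for the new lookup added by the patch above. The
# import path, component name, and checkout directory are all assumptions.
from paths.script_finder import ScriptFinder

try:
    script = ScriptFinder.find_bwc_test_script("OpenSearch", "/tmp/OpenSearch")
    print("would run BWC tests via", script)
except ScriptFinder.ScriptNotFoundError as error:
    print(error)  # the exception message lists every location that was searched
```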
{"golden_diff": "diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py\n--- a/bundle-workflow/src/paths/script_finder.py\n+++ b/bundle-workflow/src/paths/script_finder.py\n@@ -85,3 +85,16 @@\n ]\n \n return cls.__find_script(\"install.sh\", paths)\n+\n+ @classmethod\n+ def find_bwc_test_script(cls, component_name, git_dir):\n+ paths = [\n+ os.path.realpath(os.path.join(git_dir, \"bwctest.sh\")),\n+ os.path.realpath(os.path.join(git_dir, \"scripts/bwctest.sh\")),\n+ os.path.realpath(\n+ os.path.join(cls.component_scripts_path, component_name, \"bwctest.sh\")\n+ ),\n+ os.path.realpath(os.path.join(cls.default_scripts_path, \"bwctest.sh\")),\n+ ]\n+\n+ return cls.__find_script(\"bwctest.sh\", paths)\n", "issue": "Add bwctest.sh in OpenSearch and one plugin\n- [x] OpenSearch Core - https://github.com/opensearch-project/OpenSearch/pull/1150\r\n- [x] Anomaly Detection Plugin - https://github.com/opensearch-project/anomaly-detection/pull/189\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\n\nclass ScriptFinder:\n class ScriptNotFoundError(Exception):\n def __init__(self, kind, paths):\n self.kind = kind\n self.paths = paths\n super().__init__(f\"Could not find {kind} script. Looked in {paths}.\")\n\n component_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/components\"\n )\n )\n\n default_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/default\"\n )\n )\n\n \"\"\"\n ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.\n\n For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,\n it will look in the following locations, in order:\n * Root of the Git repository\n * /scripts/<script-name> in the Git repository\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n\n For install.sh scripts, given a component name, it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n \"\"\"\n\n @classmethod\n def __find_script(cls, name, paths):\n script = next(filter(lambda path: os.path.exists(path), paths), None)\n if script is None:\n raise ScriptFinder.ScriptNotFoundError(name, paths)\n return script\n\n @classmethod\n def find_build_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/build.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"build.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"build.sh\")),\n ]\n\n return cls.__find_script(\"build.sh\", paths)\n\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. 
See issue #497\n # os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n # os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"integtest.sh\")),\n ]\n\n return cls.__find_script(\"integtest.sh\", paths)\n\n @classmethod\n def find_install_script(cls, component_name):\n paths = [\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"install.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"install.sh\")),\n ]\n\n return cls.__find_script(\"install.sh\", paths)\n", "path": "bundle-workflow/src/paths/script_finder.py"}]}
1,481
207
gh_patches_debug_645
rasdani/github-patches
git_diff
zulip__zulip-12366
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Scrollbar drag can result in unintended click actions Split off from #11792: > * on the settings pages, if you click on the scrollbar, drag it down, and then release your click when the mouse is outside the settings modal (e.g. below it or to the right), it closes the settings modal. I don't know if this is an existing thing or a regression, but I ran into it a bunch of times when testing even after knowing the behavior. This was not a regression from perfect-scrollbar, but I fixed it in Grsmto/simplebar#312 and Grsmto/simplebar#317. Just waiting for the fixes to be included in a new upstream release. </issue> <code> [start of version.py] 1 ZULIP_VERSION = "2.0.3+git" 2 LATEST_MAJOR_VERSION = "2.0" 3 LATEST_RELEASE_VERSION = "2.0.3" 4 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.org/2019/03/01/zulip-2-0-released/" 5 6 # Bump the minor PROVISION_VERSION to indicate that folks should provision 7 # only when going from an old version of the code to a newer version. Bump 8 # the major version to indicate that folks should provision in both 9 # directions. 10 11 # Typically, adding a dependency only requires a minor version bump, and 12 # removing a dependency requires a major version bump. 13 14 PROVISION_VERSION = '32.0' 15 [end of version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/version.py b/version.py --- a/version.py +++ b/version.py @@ -11,4 +11,4 @@ # Typically, adding a dependency only requires a minor version bump, and # removing a dependency requires a major version bump. -PROVISION_VERSION = '32.0' +PROVISION_VERSION = '32.1'
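The one-line fix bumps PROVISION_VERSION so that existing checkouts re-provision and pick up the patched scrollbar dependency. The comment block in version.py describes the minor-versus-major rule; a toy restatement of those semantics (not Zulip's actual implementation) is:

```python
# Toy restatement of the provisioning rule documented in version.py above;
# this is not Zulip's actual implementation, just the described semantics.
def needs_provision(checked_out: str, target: str) -> bool:
    old_major = checked_out.split(".")[0]
    new_major = target.split(".")[0]
    if old_major != new_major:
        return True  # major bump: provision in both directions
    # minor bump: provision only when moving from older code to newer code
    return [int(p) for p in checked_out.split(".")] < [int(p) for p in target.split(".")]

print(needs_provision("32.0", "32.1"))  # True  - old checkout updating forward
print(needs_provision("32.1", "32.0"))  # False - minor-only downgrade
```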
{"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -11,4 +11,4 @@\n # Typically, adding a dependency only requires a minor version bump, and\n # removing a dependency requires a major version bump.\n \n-PROVISION_VERSION = '32.0'\n+PROVISION_VERSION = '32.1'\n", "issue": "Scrollbar drag can result in unintended click actions\nSplit off from #11792:\r\n\r\n> * on the settings pages, if you click on the scrollbar, drag it down, and then release your click when the mouse is outside the settings modal (e.g. below it or to the right), it closes the settings modal. I don't know if this is an existing thing or a regression, but I ran into it a bunch of times when testing even after knowing the behavior.\r\n\r\nThis was not a regression from perfect-scrollbar, but I fixed it in Grsmto/simplebar#312 and Grsmto/simplebar#317. Just waiting for the fixes to be included in a new upstream release.\n", "before_files": [{"content": "ZULIP_VERSION = \"2.0.3+git\"\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.3\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically, adding a dependency only requires a minor version bump, and\n# removing a dependency requires a major version bump.\n\nPROVISION_VERSION = '32.0'\n", "path": "version.py"}]}
858
81
gh_patches_debug_10296
rasdani/github-patches
git_diff
getredash__redash-3634
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Queries search is not working on release/7.0.x ### Issue Summary I try building docker image with the redash dockerfile and start by docker-compose. But queries search is not working ### Steps to Reproduce ![Untitled](https://user-images.githubusercontent.com/6704470/54521288-93d2ad80-49a5-11e9-9794-94572b6698d0.gif) Check out the queries table and find that search_vector is null. ![1](https://user-images.githubusercontent.com/6704470/54521423-de542a00-49a5-11e9-9f37-5b5923042973.png) ### Technical details: * Redash Version: release/7.0.x * Browser/OS: Chrome Mac * How did you install Redash: redash setup </issue> <code> [start of redash/cli/database.py] 1 import time 2 3 from flask.cli import AppGroup 4 from flask_migrate import stamp 5 from sqlalchemy.exc import DatabaseError 6 7 manager = AppGroup(help="Manage the database (create/drop tables).") 8 9 10 def _wait_for_db_connection(db): 11 retried = False 12 while not retried: 13 try: 14 db.engine.execute('SELECT 1;') 15 return 16 except DatabaseError: 17 time.sleep(30) 18 19 retried = True 20 21 22 @manager.command() 23 def create_tables(): 24 """Create the database tables.""" 25 from redash.models import db 26 27 _wait_for_db_connection(db) 28 db.create_all() 29 30 # Need to mark current DB as up to date 31 stamp() 32 33 34 @manager.command() 35 def drop_tables(): 36 """Drop the database tables.""" 37 from redash.models import db 38 39 _wait_for_db_connection(db) 40 db.drop_all() 41 [end of redash/cli/database.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redash/cli/database.py b/redash/cli/database.py --- a/redash/cli/database.py +++ b/redash/cli/database.py @@ -1,5 +1,6 @@ import time +import sqlalchemy from flask.cli import AppGroup from flask_migrate import stamp from sqlalchemy.exc import DatabaseError @@ -25,6 +26,8 @@ from redash.models import db _wait_for_db_connection(db) + # To create triggers for searchable models, we need to call configure_mappers(). + sqlalchemy.orm.configure_mappers() db.create_all() # Need to mark current DB as up to date
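The reason this works: sqlalchemy-searchable registers its search-vector trigger DDL while mappers are being configured, and db.create_all() on its own does not guarantee that configuration has happened first, which is why search_vector stayed NULL. A standalone, SQLAlchemy 1.x-style illustration of that event mechanism (the model is invented for the example) is:

```python
# Standalone, SQLAlchemy 1.x-style illustration of the mechanism behind the
# fix above: extensions such as sqlalchemy-searchable hook the
# "after_configured" mapper event, so configure_mappers() has to run before
# the DDL pass. The model here is invented for the example.
import sqlalchemy as sa
from sqlalchemy import event
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import configure_mappers, mapper

Base = declarative_base()


class Query(Base):
    __tablename__ = "queries"
    id = sa.Column(sa.Integer, primary_key=True)


@event.listens_for(mapper, "after_configured")
def register_search_triggers():
    print("after_configured fired: trigger DDL would be attached here")


configure_mappers()  # explicit call, mirroring the patched create_tables()
```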
{"golden_diff": "diff --git a/redash/cli/database.py b/redash/cli/database.py\n--- a/redash/cli/database.py\n+++ b/redash/cli/database.py\n@@ -1,5 +1,6 @@\n import time\n \n+import sqlalchemy\n from flask.cli import AppGroup\n from flask_migrate import stamp\n from sqlalchemy.exc import DatabaseError\n@@ -25,6 +26,8 @@\n from redash.models import db\n \n _wait_for_db_connection(db)\n+ # To create triggers for searchable models, we need to call configure_mappers().\n+ sqlalchemy.orm.configure_mappers()\n db.create_all()\n \n # Need to mark current DB as up to date\n", "issue": "Queries search is not working on release/7.0.x\n### Issue Summary\r\n\r\nI try building docker image with the redash dockerfile and start by docker-compose. But queries search is not working\r\n\r\n### Steps to Reproduce\r\n\r\n![Untitled](https://user-images.githubusercontent.com/6704470/54521288-93d2ad80-49a5-11e9-9794-94572b6698d0.gif)\r\n\r\nCheck out the queries table and find that search_vector is null.\r\n![1](https://user-images.githubusercontent.com/6704470/54521423-de542a00-49a5-11e9-9f37-5b5923042973.png)\r\n\r\n### Technical details:\r\n\r\n* Redash Version: release/7.0.x\r\n* Browser/OS: Chrome Mac\r\n* How did you install Redash: redash setup\r\n\n", "before_files": [{"content": "import time\n\nfrom flask.cli import AppGroup\nfrom flask_migrate import stamp\nfrom sqlalchemy.exc import DatabaseError\n\nmanager = AppGroup(help=\"Manage the database (create/drop tables).\")\n\n\ndef _wait_for_db_connection(db):\n retried = False\n while not retried:\n try:\n db.engine.execute('SELECT 1;')\n return\n except DatabaseError:\n time.sleep(30)\n\n retried = True\n\n\[email protected]()\ndef create_tables():\n \"\"\"Create the database tables.\"\"\"\n from redash.models import db\n\n _wait_for_db_connection(db)\n db.create_all()\n\n # Need to mark current DB as up to date\n stamp()\n\n\[email protected]()\ndef drop_tables():\n \"\"\"Drop the database tables.\"\"\"\n from redash.models import db\n\n _wait_for_db_connection(db)\n db.drop_all()\n", "path": "redash/cli/database.py"}]}
1,026
143
gh_patches_debug_27768
rasdani/github-patches
git_diff
svthalia__concrexit-2021
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Registration message in calendar wrong ### Describe the bug In the calendar, when you have to register for an event, it says in the block; You can register on ....... However, this seems to be the date of when the event will take place (or the registration deadline, can't tell for sure). See screenshots for an example. ### How to reproduce Steps to reproduce the behaviour: 1. Go to the calendar and look for an event with a registration. 2. Look at the message in the calendar view 3. Compare with the registration date in the event itself. ### Expected behaviour The message in the calendar view matches the date of when the registrations will open. ### Screenshots ![image](https://user-images.githubusercontent.com/12615607/139580635-4f56710d-23ae-4680-a45d-ace2376cfeb7.png) ![image](https://user-images.githubusercontent.com/12615607/139580729-88d92dcd-e60d-4d13-8505-107fc2b69b14.png) </issue> <code> [start of website/events/api/calendarjs/serializers.py] 1 from datetime import timedelta 2 3 from django.contrib.humanize.templatetags.humanize import naturaltime 4 from django.template.defaultfilters import date 5 from django.utils import timezone 6 from django.utils.translation import gettext_lazy as _ 7 from rest_framework.reverse import reverse 8 9 from events import services 10 from events.models import Event 11 from thaliawebsite.api.calendarjs.serializers import CalenderJSSerializer 12 13 14 class EventsCalenderJSSerializer(CalenderJSSerializer): 15 class Meta(CalenderJSSerializer.Meta): 16 model = Event 17 18 def _url(self, instance): 19 return reverse("events:event", kwargs={"pk": instance.id}) 20 21 def _class_names(self, instance): 22 if self.context["member"] and services.is_user_registered( 23 self.context["member"], instance 24 ): 25 if services.user_registration_pending(self.context["member"], instance): 26 return ["regular-event-pending-registration"] 27 else: 28 return ["regular-event-has-registration"] 29 elif (not instance.registration_required) or instance.registration_allowed: 30 return ["regular-event-registration-open"] 31 else: 32 # I think this handles the case that registration is needed, but not yet possible 33 return ["regular-event-registration-closed"] 34 35 def _registration_info(self, instance: Event): 36 # If registered in some way 37 if self.context["member"] and services.is_user_registered( 38 self.context["member"], instance 39 ): 40 queue_pos = services.user_registration_pending( 41 self.context["member"], instance 42 ) 43 # In waiting list 44 if type(queue_pos) is int: 45 return _("In waiting list at position {queue_pos}").format( 46 queue_pos=queue_pos 47 ) 48 # Actually registered 49 else: 50 return _("You are registered for this event") 51 # Optional registration possible 52 elif instance.optional_registration_allowed: 53 return _("You can optionally register for this event") 54 # No places left 55 elif instance.reached_participants_limit(): 56 return _("You can put yourself on the waiting list for this event") 57 # Registration still possible 58 elif instance.registration_allowed: 59 return _("You can register for this event") 60 # Not registration time yet 61 elif instance.registration_end: 62 now = timezone.now() 63 if instance.registration_end < now: 64 return _("Registrations have been closed") 65 elif instance.registration_end <= now + timedelta(days=2): 66 return _("You can register {at_time}").format( 67 
67
at_time=naturaltime(instance.registration_end) 68 ) 69 else: 70 return _("You can register on {date}").format( 71 date=date(instance.registration_end) 72 ) 73 74 75 class UnpublishedEventsCalenderJSSerializer(CalenderJSSerializer): 76 """See CalenderJSSerializer, customised classes.""" 77 78 class Meta(CalenderJSSerializer.Meta): 79 model = Event 80 81 def _class_names(self, instance): 82 return ["unpublished-event"] 83 84 def _url(self, instance): 85 return reverse("admin:events_event_details", kwargs={"pk": instance.id}) 86 87 def _registration_info(self, instance): 88 return "Unpublished event" 89 [end of website/events/api/calendarjs/serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/events/api/calendarjs/serializers.py b/website/events/api/calendarjs/serializers.py --- a/website/events/api/calendarjs/serializers.py +++ b/website/events/api/calendarjs/serializers.py @@ -50,7 +50,7 @@ return _("You are registered for this event") # Optional registration possible elif instance.optional_registration_allowed: - return _("You can optionally register for this event") + return _("Registering for this event is optional") # No places left elif instance.reached_participants_limit(): return _("You can put yourself on the waiting list for this event") @@ -62,13 +62,13 @@ now = timezone.now() if instance.registration_end < now: return _("Registrations have been closed") - elif instance.registration_end <= now + timedelta(days=2): - return _("You can register {at_time}").format( - at_time=naturaltime(instance.registration_end) + elif instance.registration_start <= now + timedelta(days=2): + return _("Registrations open {at_time}").format( + at_time=naturaltime(instance.registration_start) ) else: - return _("You can register on {date}").format( - date=date(instance.registration_end) + return _("Registrations open {date}").format( + date=date(instance.registration_start) )
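The fix swaps registration_end for registration_start in the two "you can register ..." branches (and rewords them), so the calendar now reports when registrations open rather than when they close. A framework-free restatement of the corrected branch logic, with invented dates and approximate wording in place of Django's naturaltime and date filters:

```python
# Framework-free restatement of the corrected branch logic above; message
# wording and date formats only approximate the Django template filters.
from datetime import datetime, timedelta

def registration_info(now, registration_start, registration_end):
    if registration_end < now:
        return "Registrations have been closed"
    if registration_start <= now + timedelta(days=2):
        return f"Registrations open {registration_start:%H:%M on %d %b}"
    return f"Registrations open {registration_start:%d %B %Y}"

now = datetime(2021, 11, 1, 12, 0)
print(registration_info(now, datetime(2021, 11, 2, 20, 0), datetime(2021, 11, 9)))
print(registration_info(now, datetime(2021, 11, 20, 20, 0), datetime(2021, 11, 25)))
```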
{"golden_diff": "diff --git a/website/events/api/calendarjs/serializers.py b/website/events/api/calendarjs/serializers.py\n--- a/website/events/api/calendarjs/serializers.py\n+++ b/website/events/api/calendarjs/serializers.py\n@@ -50,7 +50,7 @@\n return _(\"You are registered for this event\")\n # Optional registration possible\n elif instance.optional_registration_allowed:\n- return _(\"You can optionally register for this event\")\n+ return _(\"Registering for this event is optional\")\n # No places left\n elif instance.reached_participants_limit():\n return _(\"You can put yourself on the waiting list for this event\")\n@@ -62,13 +62,13 @@\n now = timezone.now()\n if instance.registration_end < now:\n return _(\"Registrations have been closed\")\n- elif instance.registration_end <= now + timedelta(days=2):\n- return _(\"You can register {at_time}\").format(\n- at_time=naturaltime(instance.registration_end)\n+ elif instance.registration_start <= now + timedelta(days=2):\n+ return _(\"Registrations open {at_time}\").format(\n+ at_time=naturaltime(instance.registration_start)\n )\n else:\n- return _(\"You can register on {date}\").format(\n- date=date(instance.registration_end)\n+ return _(\"Registrations open {date}\").format(\n+ date=date(instance.registration_start)\n )\n", "issue": "Registration message in calendar wrong\n### Describe the bug\r\nIn the calendar, when you have to register for an event, it says in the block; You can register on .......\r\nHowever, this seems to be the date of when the event will take place (or the registration deadline, can't tell for sure).\r\nSee screenshots for an example.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to the calendar and look for an event with a registration.\r\n2. Look at the message in the calendar view\r\n3. 
Compare with the registration date in the event itself.\r\n\r\n### Expected behaviour\r\nThe message in the calendar view matches the date of when the registrations will open.\r\n\r\n### Screenshots\r\n![image](https://user-images.githubusercontent.com/12615607/139580635-4f56710d-23ae-4680-a45d-ace2376cfeb7.png)\r\n![image](https://user-images.githubusercontent.com/12615607/139580729-88d92dcd-e60d-4d13-8505-107fc2b69b14.png)\r\n\n", "before_files": [{"content": "from datetime import timedelta\n\nfrom django.contrib.humanize.templatetags.humanize import naturaltime\nfrom django.template.defaultfilters import date\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.reverse import reverse\n\nfrom events import services\nfrom events.models import Event\nfrom thaliawebsite.api.calendarjs.serializers import CalenderJSSerializer\n\n\nclass EventsCalenderJSSerializer(CalenderJSSerializer):\n class Meta(CalenderJSSerializer.Meta):\n model = Event\n\n def _url(self, instance):\n return reverse(\"events:event\", kwargs={\"pk\": instance.id})\n\n def _class_names(self, instance):\n if self.context[\"member\"] and services.is_user_registered(\n self.context[\"member\"], instance\n ):\n if services.user_registration_pending(self.context[\"member\"], instance):\n return [\"regular-event-pending-registration\"]\n else:\n return [\"regular-event-has-registration\"]\n elif (not instance.registration_required) or instance.registration_allowed:\n return [\"regular-event-registration-open\"]\n else:\n # I think this handles the case that registration is needed, but not yet possible\n return [\"regular-event-registration-closed\"]\n\n def _registration_info(self, instance: Event):\n # If registered in some way\n if self.context[\"member\"] and services.is_user_registered(\n self.context[\"member\"], instance\n ):\n queue_pos = services.user_registration_pending(\n self.context[\"member\"], instance\n )\n # In waiting list\n if type(queue_pos) is int:\n return _(\"In waiting list at position {queue_pos}\").format(\n queue_pos=queue_pos\n )\n # Actually registered\n else:\n return _(\"You are registered for this event\")\n # Optional registration possible\n elif instance.optional_registration_allowed:\n return _(\"You can optionally register for this event\")\n # No places left\n elif instance.reached_participants_limit():\n return _(\"You can put yourself on the waiting list for this event\")\n # Registration still possible\n elif instance.registration_allowed:\n return _(\"You can register for this event\")\n # Not registration time yet\n elif instance.registration_end:\n now = timezone.now()\n if instance.registration_end < now:\n return _(\"Registrations have been closed\")\n elif instance.registration_end <= now + timedelta(days=2):\n return _(\"You can register {at_time}\").format(\n at_time=naturaltime(instance.registration_end)\n )\n else:\n return _(\"You can register on {date}\").format(\n date=date(instance.registration_end)\n )\n\n\nclass UnpublishedEventsCalenderJSSerializer(CalenderJSSerializer):\n \"\"\"See CalenderJSSerializer, customised classes.\"\"\"\n\n class Meta(CalenderJSSerializer.Meta):\n model = Event\n\n def _class_names(self, instance):\n return [\"unpublished-event\"]\n\n def _url(self, instance):\n return reverse(\"admin:events_event_details\", kwargs={\"pk\": instance.id})\n\n def _registration_info(self, instance):\n return \"Unpublished event\"\n", "path": "website/events/api/calendarjs/serializers.py"}]}
1,639
307
gh_patches_debug_43117
rasdani/github-patches
git_diff
spyder-ide__spyder-7902
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 4.0beta1 is offered as an update when using a development version and a virtualenv We need to avoid showing beta versions when checking for updates </issue> <code> [start of spyder/workers/updates.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright © Spyder Project Contributors 4 # Licensed under the terms of the MIT License 5 # (see spyder/__init__.py for details) 6 7 # Standard library imports 8 import json 9 import os 10 import ssl 11 import sys 12 13 # Third party imports 14 from qtpy.QtCore import QObject, Signal 15 16 # Local imports 17 from spyder import __version__ 18 from spyder.config.base import _ 19 from spyder.py3compat import PY3 20 from spyder.config.utils import is_anaconda 21 from spyder.utils.programs import check_version, is_stable_version 22 23 24 if PY3: 25 from urllib.request import urlopen 26 from urllib.error import URLError, HTTPError 27 else: 28 from urllib2 import urlopen, URLError, HTTPError 29 30 31 class WorkerUpdates(QObject): 32 """ 33 Worker that checks for releases using either the Anaconda 34 default channels or the Github Releases page without 35 blocking the Spyder user interface, in case of connection 36 issues. 37 """ 38 sig_ready = Signal() 39 40 def __init__(self, parent, startup, version=""): 41 QObject.__init__(self) 42 self._parent = parent 43 self.error = None 44 self.latest_release = None 45 self.startup = startup 46 self.version = version 47 48 def check_update_available(self, version, releases, github=False): 49 """Checks if there is an update available. 50 51 It takes as parameters the current version of Spyder and a list of 52 valid cleaned releases in chronological order. 53 Example: ['2.3.2', '2.3.3' ...] or with github ['2.3.4', '2.3.3' ...] 
54 """ 55 if is_stable_version(version): 56 # Remove non stable versions from the list 57 releases = [r for r in releases if is_stable_version(r)] 58 59 if github: 60 latest_release = releases[0] 61 else: 62 latest_release = releases[-1] 63 64 if version.endswith('dev'): 65 return (False, latest_release) 66 67 return (check_version(version, latest_release, '<'), latest_release) 68 69 def start(self): 70 """Main method of the WorkerUpdates worker""" 71 if is_anaconda(): 72 self.url = 'https://repo.anaconda.com/pkgs/main' 73 if os.name == 'nt': 74 self.url += '/win-64/repodata.json' 75 elif sys.platform == 'darwin': 76 self.url += '/osx-64/repodata.json' 77 else: 78 self.url += '/linux-64/repodata.json' 79 else: 80 self.url = ('https://api.github.com/repos/' 81 'spyder-ide/spyder/releases') 82 self.update_available = False 83 self.latest_release = __version__ 84 85 error_msg = None 86 87 try: 88 if hasattr(ssl, '_create_unverified_context'): 89 # Fix for issue # 2685 [Works only with Python >=2.7.9] 90 # More info: https://www.python.org/dev/peps/pep-0476/#opting-out 91 context = ssl._create_unverified_context() 92 page = urlopen(self.url, context=context) 93 else: 94 page = urlopen(self.url) 95 try: 96 data = page.read() 97 98 # Needed step for python3 compatibility 99 if not isinstance(data, str): 100 data = data.decode() 101 102 data = json.loads(data) 103 if not self.version: 104 self.version = __version__ 105 106 if is_anaconda(): 107 releases = [] 108 for item in data['packages']: 109 if 'spyder' in item and 'spyder-kernels' not in item: 110 releases.append(item.split('-')[1]) 111 result = self.check_update_available(self.version, 112 releases) 113 else: 114 releases = [item['tag_name'].replace('v', '') 115 for item in data] 116 result = self.check_update_available(self.version, 117 releases, 118 github=True) 119 self.update_available, self.latest_release = result 120 except Exception: 121 error_msg = _('Unable to retrieve information.') 122 except HTTPError: 123 error_msg = _('Unable to retrieve information.') 124 except URLError: 125 error_msg = _('Unable to connect to the internet. <br><br>Make ' 126 'sure the connection is working properly.') 127 except Exception: 128 error_msg = _('Unable to check for updates.') 129 130 # Don't show dialog when starting up spyder and an error occur 131 if not (self.startup and error_msg is not None): 132 self.error = error_msg 133 self.sig_ready.emit() 134 [end of spyder/workers/updates.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/spyder/workers/updates.py b/spyder/workers/updates.py --- a/spyder/workers/updates.py +++ b/spyder/workers/updates.py @@ -7,6 +7,7 @@ # Standard library imports import json import os +import re import ssl import sys @@ -37,34 +38,41 @@ """ sig_ready = Signal() - def __init__(self, parent, startup, version=""): + def __init__(self, parent, startup, version="", releases=None): QObject.__init__(self) self._parent = parent self.error = None self.latest_release = None self.startup = startup - self.version = version + self.releases = releases - def check_update_available(self, version, releases, github=False): + if not version: + self.version = __version__ + else: + self.version = version + + def check_update_available(self): """Checks if there is an update available. It takes as parameters the current version of Spyder and a list of valid cleaned releases in chronological order. Example: ['2.3.2', '2.3.3' ...] or with github ['2.3.4', '2.3.3' ...] """ - if is_stable_version(version): - # Remove non stable versions from the list - releases = [r for r in releases if is_stable_version(r)] + # Don't perform any check for development versions + if 'dev' in self.version: + return (False, latest_release) - if github: - latest_release = releases[0] + # Filter releases + if is_stable_version(self.version): + releases = [r for r in self.releases if is_stable_version(r)] else: - latest_release = releases[-1] + releases = [r for r in self.releases + if not is_stable_version(r) or r in self.version] - if version.endswith('dev'): - return (False, latest_release) + latest_release = releases[-1] - return (check_version(version, latest_release, '<'), latest_release) + return (check_version(self.version, latest_release, '<'), + latest_release) def start(self): """Main method of the WorkerUpdates worker""" @@ -98,24 +106,23 @@ # Needed step for python3 compatibility if not isinstance(data, str): data = data.decode() - data = json.loads(data) - if not self.version: - self.version = __version__ if is_anaconda(): - releases = [] - for item in data['packages']: - if 'spyder' in item and 'spyder-kernels' not in item: - releases.append(item.split('-')[1]) - result = self.check_update_available(self.version, - releases) + if self.releases is None: + self.releases = [] + for item in data['packages']: + if ('spyder' in item and + not re.search(r'spyder-[a-zA-Z]', item)): + self.releases.append(item.split('-')[1]) + result = self.check_update_available() else: - releases = [item['tag_name'].replace('v', '') - for item in data] - result = self.check_update_available(self.version, - releases, - github=True) + if self.releases is None: + self.releases = [item['tag_name'].replace('v', '') + for item in data] + self.releases = list(reversed(self.releases)) + + result = self.check_update_available() self.update_available, self.latest_release = result except Exception: error_msg = _('Unable to retrieve information.')
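Two things worth flagging in this diff. First, the filtering now drops pre-releases for users on a stable version, which is the actual fix for 4.0beta1 being offered. Second, the early return for development versions references latest_release before that name is assigned, so as written it would raise NameError rather than silently skip the check. A standalone sketch of the intended logic (using distutils' LooseVersion as a stand-in for Spyder's check_version helper; distutils is deprecated on recent Pythons):

```python
# Standalone sketch of the intended filtering, not Spyder's exact code. Unlike
# the patch above, the dev-version early return here avoids the unassigned
# latest_release by returning the current version instead.
import re
from distutils.version import LooseVersion


def is_stable_version(version):
    # stable means the last dot component carries no letters, e.g. no "b1"/"rc2"
    return not re.search(r"[a-zA-Z]", version.split(".")[-1])


def check_update(current, releases):
    if "dev" in current:
        return False, current  # never offer updates to development builds
    if is_stable_version(current):
        releases = [r for r in releases if is_stable_version(r)]
    else:
        releases = [r for r in releases if not is_stable_version(r) or r in current]
    latest = releases[-1]
    return LooseVersion(current) < LooseVersion(latest), latest


print(check_update("3.3.1", ["3.3.0", "3.3.1", "3.3.2", "4.0.0b1"]))
# -> (True, '3.3.2'): stable users are no longer offered 4.0.0b1
```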
{"golden_diff": "diff --git a/spyder/workers/updates.py b/spyder/workers/updates.py\n--- a/spyder/workers/updates.py\n+++ b/spyder/workers/updates.py\n@@ -7,6 +7,7 @@\n # Standard library imports\n import json\n import os\n+import re\n import ssl\n import sys\n \n@@ -37,34 +38,41 @@\n \"\"\"\n sig_ready = Signal()\n \n- def __init__(self, parent, startup, version=\"\"):\n+ def __init__(self, parent, startup, version=\"\", releases=None):\n QObject.__init__(self)\n self._parent = parent\n self.error = None\n self.latest_release = None\n self.startup = startup\n- self.version = version\n+ self.releases = releases\n \n- def check_update_available(self, version, releases, github=False):\n+ if not version:\n+ self.version = __version__\n+ else:\n+ self.version = version\n+\n+ def check_update_available(self):\n \"\"\"Checks if there is an update available.\n \n It takes as parameters the current version of Spyder and a list of\n valid cleaned releases in chronological order.\n Example: ['2.3.2', '2.3.3' ...] or with github ['2.3.4', '2.3.3' ...]\n \"\"\"\n- if is_stable_version(version):\n- # Remove non stable versions from the list\n- releases = [r for r in releases if is_stable_version(r)]\n+ # Don't perform any check for development versions\n+ if 'dev' in self.version:\n+ return (False, latest_release)\n \n- if github:\n- latest_release = releases[0]\n+ # Filter releases\n+ if is_stable_version(self.version):\n+ releases = [r for r in self.releases if is_stable_version(r)]\n else:\n- latest_release = releases[-1]\n+ releases = [r for r in self.releases\n+ if not is_stable_version(r) or r in self.version]\n \n- if version.endswith('dev'):\n- return (False, latest_release)\n+ latest_release = releases[-1]\n \n- return (check_version(version, latest_release, '<'), latest_release)\n+ return (check_version(self.version, latest_release, '<'),\n+ latest_release)\n \n def start(self):\n \"\"\"Main method of the WorkerUpdates worker\"\"\"\n@@ -98,24 +106,23 @@\n # Needed step for python3 compatibility\n if not isinstance(data, str):\n data = data.decode()\n-\n data = json.loads(data)\n- if not self.version:\n- self.version = __version__\n \n if is_anaconda():\n- releases = []\n- for item in data['packages']:\n- if 'spyder' in item and 'spyder-kernels' not in item:\n- releases.append(item.split('-')[1])\n- result = self.check_update_available(self.version,\n- releases)\n+ if self.releases is None:\n+ self.releases = []\n+ for item in data['packages']:\n+ if ('spyder' in item and\n+ not re.search(r'spyder-[a-zA-Z]', item)):\n+ self.releases.append(item.split('-')[1])\n+ result = self.check_update_available()\n else:\n- releases = [item['tag_name'].replace('v', '')\n- for item in data]\n- result = self.check_update_available(self.version,\n- releases,\n- github=True)\n+ if self.releases is None:\n+ self.releases = [item['tag_name'].replace('v', '')\n+ for item in data]\n+ self.releases = list(reversed(self.releases))\n+\n+ result = self.check_update_available()\n self.update_available, self.latest_release = result\n except Exception:\n error_msg = _('Unable to retrieve information.')\n", "issue": "4.0beta1 is offered as an update when using a development version and a virtualenv\nWe need to avoid showing beta versions when checking for updates\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n# Standard library imports\nimport json\nimport os\nimport 
ssl\nimport sys\n\n# Third party imports\nfrom qtpy.QtCore import QObject, Signal\n\n# Local imports\nfrom spyder import __version__\nfrom spyder.config.base import _\nfrom spyder.py3compat import PY3\nfrom spyder.config.utils import is_anaconda\nfrom spyder.utils.programs import check_version, is_stable_version\n\n\nif PY3:\n from urllib.request import urlopen\n from urllib.error import URLError, HTTPError\nelse:\n from urllib2 import urlopen, URLError, HTTPError\n\n\nclass WorkerUpdates(QObject):\n \"\"\"\n Worker that checks for releases using either the Anaconda\n default channels or the Github Releases page without\n blocking the Spyder user interface, in case of connection\n issues.\n \"\"\"\n sig_ready = Signal()\n\n def __init__(self, parent, startup, version=\"\"):\n QObject.__init__(self)\n self._parent = parent\n self.error = None\n self.latest_release = None\n self.startup = startup\n self.version = version\n\n def check_update_available(self, version, releases, github=False):\n \"\"\"Checks if there is an update available.\n\n It takes as parameters the current version of Spyder and a list of\n valid cleaned releases in chronological order.\n Example: ['2.3.2', '2.3.3' ...] or with github ['2.3.4', '2.3.3' ...]\n \"\"\"\n if is_stable_version(version):\n # Remove non stable versions from the list\n releases = [r for r in releases if is_stable_version(r)]\n\n if github:\n latest_release = releases[0]\n else:\n latest_release = releases[-1]\n\n if version.endswith('dev'):\n return (False, latest_release)\n\n return (check_version(version, latest_release, '<'), latest_release)\n\n def start(self):\n \"\"\"Main method of the WorkerUpdates worker\"\"\"\n if is_anaconda():\n self.url = 'https://repo.anaconda.com/pkgs/main'\n if os.name == 'nt':\n self.url += '/win-64/repodata.json'\n elif sys.platform == 'darwin':\n self.url += '/osx-64/repodata.json'\n else:\n self.url += '/linux-64/repodata.json'\n else:\n self.url = ('https://api.github.com/repos/'\n 'spyder-ide/spyder/releases')\n self.update_available = False\n self.latest_release = __version__\n\n error_msg = None\n\n try:\n if hasattr(ssl, '_create_unverified_context'):\n # Fix for issue # 2685 [Works only with Python >=2.7.9]\n # More info: https://www.python.org/dev/peps/pep-0476/#opting-out\n context = ssl._create_unverified_context()\n page = urlopen(self.url, context=context)\n else:\n page = urlopen(self.url)\n try:\n data = page.read()\n\n # Needed step for python3 compatibility\n if not isinstance(data, str):\n data = data.decode()\n\n data = json.loads(data)\n if not self.version:\n self.version = __version__\n\n if is_anaconda():\n releases = []\n for item in data['packages']:\n if 'spyder' in item and 'spyder-kernels' not in item:\n releases.append(item.split('-')[1])\n result = self.check_update_available(self.version,\n releases)\n else:\n releases = [item['tag_name'].replace('v', '')\n for item in data]\n result = self.check_update_available(self.version,\n releases,\n github=True)\n self.update_available, self.latest_release = result\n except Exception:\n error_msg = _('Unable to retrieve information.')\n except HTTPError:\n error_msg = _('Unable to retrieve information.')\n except URLError:\n error_msg = _('Unable to connect to the internet. 
<br><br>Make '\n 'sure the connection is working properly.')\n except Exception:\n error_msg = _('Unable to check for updates.')\n\n # Don't show dialog when starting up spyder and an error occur\n if not (self.startup and error_msg is not None):\n self.error = error_msg\n self.sig_ready.emit()\n", "path": "spyder/workers/updates.py"}]}
1,861
869
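For context on the spyder row above: the heart of the patch is the release filter, where dev builds get no update prompt, stable builds compare only against stable releases, and prerelease builds also see prereleases plus any release string contained in their own version. A self-contained sketch of that filtering step follows; the `is_stable_version` helper here is a simplified stand-in for `spyder.utils.programs.is_stable_version`, not the real implementation.

```python
import re

def is_stable_version(v):
    # Simplified stand-in: stable versions are digits and dots only,
    # so "3.3.6" counts as stable while "4.0.0b1" does not.
    return re.fullmatch(r"\d+(\.\d+)*", v) is not None

def filter_releases(current, releases):
    """Mirror the filtering step of the patched check_update_available."""
    if "dev" in current:
        return []  # development builds are never offered updates
    if is_stable_version(current):
        return [r for r in releases if is_stable_version(r)]
    # prerelease users: keep prereleases, plus any release string
    # contained in the current version (e.g. "4.0.0" in "4.0.0b1")
    return [r for r in releases if not is_stable_version(r) or r in current]

print(filter_releases("3.3.6", ["3.3.5", "3.3.6", "4.0.0b1"]))  # ['3.3.5', '3.3.6']
print(filter_releases("4.0.0dev0", ["4.0.0b1"]))                # []
```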
gh_patches_debug_28586
rasdani/github-patches
git_diff
uccser__cs-unplugged-764
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Provide clearer entry points for new visitors on website Landing page interface redesign Current: https://cs-unplugged-dev.appspot.com/en/ Proposed new design: ![a thing](https://user-images.githubusercontent.com/20089275/33110427-b2db51ec-cfac-11e7-845b-de9ad6a4c304.png) **Redesign based on older current landing page. The main changes are the jumbotron banner, nav bar and footer ( issue #695 ) </issue> <code> [start of csunplugged/general/urls.py] 1 """URL routing for the general application.""" 2 3 from django.conf.urls import url 4 5 from . import views 6 7 urlpatterns = [ 8 url( 9 r"^$", 10 views.GeneralIndexView.as_view(), 11 name="home" 12 ), 13 url( 14 r"^about/$", 15 views.GeneralAboutView.as_view(), 16 name="about" 17 ), 18 url( 19 r"^computational-thinking/$", 20 views.ComputationalThinkingView.as_view(), 21 name="computational_thinking" 22 ), 23 url( 24 r"^contact/$", 25 views.GeneralContactView.as_view(), 26 name="contact" 27 ), 28 url( 29 r"^people/$", 30 views.GeneralPeopleView.as_view(), 31 name="people" 32 ), 33 url( 34 r"^principles/$", 35 views.GeneralPrinciplesView.as_view(), 36 name="principles" 37 ), 38 ] 39 [end of csunplugged/general/urls.py] [start of csunplugged/general/views.py] 1 """Views for the general application.""" 2 3 from django.views.generic import TemplateView 4 from django.http import HttpResponse 5 6 7 class GeneralIndexView(TemplateView): 8 """View for the homepage that renders from a template.""" 9 10 template_name = "general/index.html" 11 12 13 class GeneralAboutView(TemplateView): 14 """View for the about page that renders from a template.""" 15 16 template_name = "general/about.html" 17 18 19 class GeneralContactView(TemplateView): 20 """View for the contact page that renders from a template.""" 21 22 template_name = "general/contact.html" 23 24 25 class GeneralPeopleView(TemplateView): 26 """View for the people page that renders from a template.""" 27 28 template_name = "general/people.html" 29 30 31 class GeneralPrinciplesView(TemplateView): 32 """View for the princples page that renders from a template.""" 33 34 template_name = "general/principles.html" 35 36 37 class ComputationalThinkingView(TemplateView): 38 """View for the Computational Thinking page that renders from a template.""" 39 40 template_name = "general/computational-thinking.html" 41 42 43 def health_check(request): 44 """Return heath check response for Google App Engine. 45 46 Returns a 200 HTTP response for Google App Engine to detect the system 47 is running. 48 """ 49 return HttpResponse(status=200) 50 [end of csunplugged/general/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py --- a/csunplugged/general/urls.py +++ b/csunplugged/general/urls.py @@ -15,11 +15,21 @@ views.GeneralAboutView.as_view(), name="about" ), + url( + r"^what-is-computer-science/$", + views.WhatIsCSView.as_view(), + name="what_is_cs" + ), url( r"^computational-thinking/$", views.ComputationalThinkingView.as_view(), name="computational_thinking" ), + url( + r"^how-do-i-teach-cs-unplugged/$", + views.HowDoITeachCSUnpluggedView.as_view(), + name="how_do_i_teach_cs_unplugged" + ), url( r"^contact/$", views.GeneralContactView.as_view(), diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py --- a/csunplugged/general/views.py +++ b/csunplugged/general/views.py @@ -34,12 +34,24 @@ template_name = "general/principles.html" +class WhatIsCSView(TemplateView): + """View for the 'What is Computer Science?' page that renders from a template.""" + + template_name = "general/what-is-computer-science.html" + + class ComputationalThinkingView(TemplateView): """View for the Computational Thinking page that renders from a template.""" template_name = "general/computational-thinking.html" +class HowDoITeachCSUnpluggedView(TemplateView): + """View for the 'How do I teach CS Unplugged?' page that renders from a template.""" + + template_name = "general/how-do-i-teach-cs-unplugged.html" + + def health_check(request): """Return heath check response for Google App Engine.
{"golden_diff": "diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py\n--- a/csunplugged/general/urls.py\n+++ b/csunplugged/general/urls.py\n@@ -15,11 +15,21 @@\n views.GeneralAboutView.as_view(),\n name=\"about\"\n ),\n+ url(\n+ r\"^what-is-computer-science/$\",\n+ views.WhatIsCSView.as_view(),\n+ name=\"what_is_cs\"\n+ ),\n url(\n r\"^computational-thinking/$\",\n views.ComputationalThinkingView.as_view(),\n name=\"computational_thinking\"\n ),\n+ url(\n+ r\"^how-do-i-teach-cs-unplugged/$\",\n+ views.HowDoITeachCSUnpluggedView.as_view(),\n+ name=\"how_do_i_teach_cs_unplugged\"\n+ ),\n url(\n r\"^contact/$\",\n views.GeneralContactView.as_view(),\ndiff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -34,12 +34,24 @@\n template_name = \"general/principles.html\"\n \n \n+class WhatIsCSView(TemplateView):\n+ \"\"\"View for the 'What is Computer Science?' page that renders from a template.\"\"\"\n+\n+ template_name = \"general/what-is-computer-science.html\"\n+\n+\n class ComputationalThinkingView(TemplateView):\n \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n \n template_name = \"general/computational-thinking.html\"\n \n \n+class HowDoITeachCSUnpluggedView(TemplateView):\n+ \"\"\"View for the 'How do I teach CS Unplugged?' page that renders from a template.\"\"\"\n+\n+ template_name = \"general/how-do-i-teach-cs-unplugged.html\"\n+\n+\n def health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n", "issue": "Provide clearer entry points for new visitors on website\n\nLanding page interface redesign\nCurrent:\r\nhttps://cs-unplugged-dev.appspot.com/en/\r\n\r\nProposed new design:\r\n![a thing](https://user-images.githubusercontent.com/20089275/33110427-b2db51ec-cfac-11e7-845b-de9ad6a4c304.png)\r\n\r\n**Redesign based on older current landing page.\r\n\r\nThe main changes are the jumbotron banner, nav bar and footer ( issue #695 )\n", "before_files": [{"content": "\"\"\"URL routing for the general application.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(\n r\"^$\",\n views.GeneralIndexView.as_view(),\n name=\"home\"\n ),\n url(\n r\"^about/$\",\n views.GeneralAboutView.as_view(),\n name=\"about\"\n ),\n url(\n r\"^computational-thinking/$\",\n views.ComputationalThinkingView.as_view(),\n name=\"computational_thinking\"\n ),\n url(\n r\"^contact/$\",\n views.GeneralContactView.as_view(),\n name=\"contact\"\n ),\n url(\n r\"^people/$\",\n views.GeneralPeopleView.as_view(),\n name=\"people\"\n ),\n url(\n r\"^principles/$\",\n views.GeneralPrinciplesView.as_view(),\n name=\"principles\"\n ),\n]\n", "path": "csunplugged/general/urls.py"}, {"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = \"general/index.html\"\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = \"general/about.html\"\n\n\nclass GeneralContactView(TemplateView):\n \"\"\"View for the contact page that renders from a template.\"\"\"\n\n template_name = \"general/contact.html\"\n\n\nclass GeneralPeopleView(TemplateView):\n \"\"\"View for the people page that renders from a template.\"\"\"\n\n template_name = \"general/people.html\"\n\n\nclass GeneralPrinciplesView(TemplateView):\n \"\"\"View for the princples page that renders from a template.\"\"\"\n\n template_name = \"general/principles.html\"\n\n\nclass ComputationalThinkingView(TemplateView):\n \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n\n template_name = \"general/computational-thinking.html\"\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}]}
1,310
438
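A hypothetical smoke test for the two pages this patch introduces; the URL names come from the golden diff, while the `general:` namespace and the existence of the two new templates are assumptions, not facts from the row.

```python
from django.test import Client
from django.urls import reverse

def test_new_general_pages():
    client = Client()
    for name in ("general:what_is_cs", "general:how_do_i_teach_cs_unplugged"):
        # Each new TemplateView should render its template with HTTP 200.
        assert client.get(reverse(name)).status_code == 200
```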
gh_patches_debug_16392
rasdani/github-patches
git_diff
cal-itp__benefits-864
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve checks in `oauth` `authorize` view We ran into a case while deploying #862 where our `auth_provider` model is configured with a `scope`, but the response from the Auth server doesn't actually contain the scope (probably still being set up). The `oauth` `authorize` function doesn't handle this case and raises an `AttributeError` ``` File "/home/calitp/app/benefits/oauth/views.py", line 70, in authorize claim_flag = (userinfo.get(verifier_claim) if userinfo else "false").lower() == "true" AttributeError: 'NoneType' object has no attribute 'lower' ``` Let's add a check and tests for this so that the logs for this are more helpful. </issue> <code> [start of benefits/oauth/views.py] 1 import logging 2 3 from django.shortcuts import redirect 4 from django.urls import reverse 5 from django.utils.decorators import decorator_from_middleware 6 7 from benefits.core import session 8 from benefits.core.middleware import VerifierSessionRequired 9 from . import analytics, redirects 10 from .client import oauth 11 12 13 logger = logging.getLogger(__name__) 14 15 16 ROUTE_AUTH = "oauth:authorize" 17 ROUTE_START = "eligibility:start" 18 ROUTE_CONFIRM = "eligibility:confirm" 19 ROUTE_UNVERIFIED = "eligibility:unverified" 20 ROUTE_POST_LOGOUT = "oauth:post_logout" 21 22 23 @decorator_from_middleware(VerifierSessionRequired) 24 def login(request): 25 """View implementing OIDC authorize_redirect.""" 26 verifier = session.verifier(request) 27 oauth_client = oauth.create_client(verifier.auth_provider.client_name) 28 29 if not oauth_client: 30 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}") 31 32 route = reverse(ROUTE_AUTH) 33 redirect_uri = redirects.generate_redirect_uri(request, route) 34 35 logger.debug(f"OAuth authorize_redirect with redirect_uri: {redirect_uri}") 36 37 analytics.started_sign_in(request) 38 39 return oauth_client.authorize_redirect(request, redirect_uri) 40 41 42 @decorator_from_middleware(VerifierSessionRequired) 43 def authorize(request): 44 """View implementing OIDC token authorization.""" 45 verifier = session.verifier(request) 46 oauth_client = oauth.create_client(verifier.auth_provider.client_name) 47 48 if not oauth_client: 49 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}") 50 51 logger.debug("Attempting to authorize OAuth access token") 52 token = oauth_client.authorize_access_token(request) 53 54 if token is None: 55 logger.warning("Could not authorize OAuth access token") 56 return redirect(ROUTE_START) 57 58 logger.debug("OAuth access token authorized") 59 60 # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out. 61 id_token = token["id_token"] 62 63 # We store the returned claim in case it can be used later in eligibility verification. 
64 verifier_claim = verifier.auth_provider.claim 65 stored_claim = None 66 67 if verifier_claim: 68 userinfo = token.get("userinfo") 69 # the claim comes back in userinfo like { "claim": "True" | "False" } 70 claim_flag = (userinfo.get(verifier_claim) if userinfo else "false").lower() == "true" 71 # if userinfo contains our claim and the flag is true, store the *claim* 72 stored_claim = verifier_claim if claim_flag else None 73 74 session.update(request, oauth_token=id_token, oauth_claim=stored_claim) 75 76 analytics.finished_sign_in(request) 77 78 return redirect(ROUTE_CONFIRM) 79 80 81 def cancel(request): 82 """View implementing cancellation of OIDC authorization.""" 83 84 analytics.canceled_sign_in(request) 85 86 return redirect(ROUTE_UNVERIFIED) 87 88 89 @decorator_from_middleware(VerifierSessionRequired) 90 def logout(request): 91 """View implementing OIDC and application sign out.""" 92 verifier = session.verifier(request) 93 oauth_client = oauth.create_client(verifier.auth_provider.client_name) 94 95 if not oauth_client: 96 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}") 97 98 analytics.started_sign_out(request) 99 100 # overwrite the oauth session token, the user is signed out of the app 101 token = session.oauth_token(request) 102 session.logout(request) 103 104 route = reverse(ROUTE_POST_LOGOUT) 105 redirect_uri = redirects.generate_redirect_uri(request, route) 106 107 logger.debug(f"OAuth end_session_endpoint with redirect_uri: {redirect_uri}") 108 109 # send the user through the end_session_endpoint, redirecting back to 110 # the post_logout route 111 return redirects.deauthorize_redirect(oauth_client, token, redirect_uri) 112 113 114 def post_logout(request): 115 """View routes the user to their origin after sign out.""" 116 117 analytics.finished_sign_out(request) 118 119 origin = session.origin(request) 120 return redirect(origin) 121 [end of benefits/oauth/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py --- a/benefits/oauth/views.py +++ b/benefits/oauth/views.py @@ -66,10 +66,15 @@ if verifier_claim: userinfo = token.get("userinfo") - # the claim comes back in userinfo like { "claim": "True" | "False" } - claim_flag = (userinfo.get(verifier_claim) if userinfo else "false").lower() == "true" - # if userinfo contains our claim and the flag is true, store the *claim* - stored_claim = verifier_claim if claim_flag else None + + if userinfo: + claim_value = userinfo.get(verifier_claim) + # the claim comes back in userinfo like { "claim": "True" | "False" } + if claim_value is None: + logger.warning(f"userinfo did not contain: {verifier_claim}") + elif claim_value.lower() == "true": + # if userinfo contains our claim and the flag is true, store the *claim* + stored_claim = verifier_claim session.update(request, oauth_token=id_token, oauth_claim=stored_claim)
{"golden_diff": "diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py\n--- a/benefits/oauth/views.py\n+++ b/benefits/oauth/views.py\n@@ -66,10 +66,15 @@\n \n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n- # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n- claim_flag = (userinfo.get(verifier_claim) if userinfo else \"false\").lower() == \"true\"\n- # if userinfo contains our claim and the flag is true, store the *claim*\n- stored_claim = verifier_claim if claim_flag else None\n+\n+ if userinfo:\n+ claim_value = userinfo.get(verifier_claim)\n+ # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n+ if claim_value is None:\n+ logger.warning(f\"userinfo did not contain: {verifier_claim}\")\n+ elif claim_value.lower() == \"true\":\n+ # if userinfo contains our claim and the flag is true, store the *claim*\n+ stored_claim = verifier_claim\n \n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n", "issue": "Improve checks in `oauth` `authorize` view\nWe ran into a case while deploying #862 where our `auth_provider` model is configured with a `scope`, but the response from the Auth server doesn't actually contain the scope (probably still being set up).\r\n\r\nThe `oauth` `authorize` function doesn't handle this case and raises an `AttributeError`\r\n```\r\n File \"/home/calitp/app/benefits/oauth/views.py\", line 70, in authorize\r\n claim_flag = (userinfo.get(verifier_claim) if userinfo else \"false\").lower() == \"true\"\r\nAttributeError: 'NoneType' object has no attribute 'lower'\r\n```\r\n\r\nLet's add a check and tests for this so that the logs for this are more helpful.\n", "before_files": [{"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\n\nfrom benefits.core import session\nfrom benefits.core.middleware import VerifierSessionRequired\nfrom . 
import analytics, redirects\nfrom .client import oauth\n\n\nlogger = logging.getLogger(__name__)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\nROUTE_UNVERIFIED = \"eligibility:unverified\"\nROUTE_POST_LOGOUT = \"oauth:post_logout\"\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef login(request):\n \"\"\"View implementing OIDC authorize_redirect.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n analytics.started_sign_in(request)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef authorize(request):\n \"\"\"View implementing OIDC token authorization.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n\n logger.debug(\"OAuth access token authorized\")\n\n # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out.\n id_token = token[\"id_token\"]\n\n # We store the returned claim in case it can be used later in eligibility verification.\n verifier_claim = verifier.auth_provider.claim\n stored_claim = None\n\n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n claim_flag = (userinfo.get(verifier_claim) if userinfo else \"false\").lower() == \"true\"\n # if userinfo contains our claim and the flag is true, store the *claim*\n stored_claim = verifier_claim if claim_flag else None\n\n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n\n analytics.finished_sign_in(request)\n\n return redirect(ROUTE_CONFIRM)\n\n\ndef cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n\n analytics.canceled_sign_in(request)\n\n return redirect(ROUTE_UNVERIFIED)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef logout(request):\n \"\"\"View implementing OIDC and application sign out.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n analytics.started_sign_out(request)\n\n # overwrite the oauth session token, the user is signed out of the app\n token = session.oauth_token(request)\n session.logout(request)\n\n route = reverse(ROUTE_POST_LOGOUT)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth end_session_endpoint with redirect_uri: {redirect_uri}\")\n\n # send the user through the end_session_endpoint, redirecting back to\n # the post_logout route\n return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)\n\n\ndef post_logout(request):\n 
\"\"\"View routes the user to their origin after sign out.\"\"\"\n\n analytics.finished_sign_out(request)\n\n origin = session.origin(request)\n return redirect(origin)\n", "path": "benefits/oauth/views.py"}]}
1,797
260
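The heart of this fix is replacing a one-liner that assumed `userinfo` and the claim are always present with explicit branches for each missing piece. Extracted into a standalone function for illustration; the function name and test values are assumptions, the branch logic mirrors the golden diff.

```python
import logging

logger = logging.getLogger(__name__)

def extract_claim(token, verifier_claim):
    """Return the claim name only when userinfo carries a true flag for it."""
    userinfo = token.get("userinfo")
    if not userinfo:
        return None
    claim_value = userinfo.get(verifier_claim)
    if claim_value is None:
        # This is the case the issue hit: scope configured locally but
        # the Auth server response did not include the claim yet.
        logger.warning("userinfo did not contain: %s", verifier_claim)
        return None
    return verifier_claim if claim_value.lower() == "true" else None

assert extract_claim({"userinfo": {"senior": "True"}}, "senior") == "senior"
assert extract_claim({"userinfo": {}}, "senior") is None   # no AttributeError
assert extract_claim({}, "senior") is None                 # userinfo missing
```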
gh_patches_debug_32597
rasdani/github-patches
git_diff
RedHatInsights__insights-core-2401
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add version command/flag to the Insights CLI Add `version` command and `--version` flag/arg to print version info from `insights.get_nvr()`. </issue> <code> [start of insights/command_parser.py] 1 #!/usr/bin/env python 2 """ 3 Command Parser module 4 --------------------- 5 Implements the ``insights`` command line. Each function is the first 6 argument followed by the function specific arguments. See USAGE text 7 below. 8 """ 9 from __future__ import print_function 10 11 import argparse 12 import sys 13 14 USAGE = """insights <command> [<args>] 15 Available commands: 16 cat Execute a spec and show the output 17 collect Collect all specs against the client and create an Insights archive. 18 inspect Execute component and shell out to ipython for evaluation. 19 info View info and docs for Insights Core components. 20 ocpshell Interactive evaluation of archives, directories, or individual yaml files. 21 run Run insights-core against host or an archive. 22 """ 23 24 25 class InsightsCli(object): 26 """ 27 Class to implement the cli module. 28 Each command is called as a method of this class and all 29 arg parsing is performed in the separate module that 30 actually implements the command. the args "insights command" 31 are not passed to the submodule. 32 """ 33 34 def __init__(self): 35 parser = argparse.ArgumentParser( 36 description="Insights Core command line execution", 37 usage=USAGE) 38 parser.add_argument('command', help='Insights Core command to run') 39 args = parser.parse_args(sys.argv[1:2]) 40 if not hasattr(self, args.command): 41 print('Unrecognized command') 42 parser.print_help() 43 sys.exit(1) 44 # remove the sub-command arg from sys.argv 45 sys.argv.pop(1) 46 # Use dispatch pattern to execute command 47 getattr(self, args.command)() 48 49 def cat(self): 50 from .tools.cat import main as cat_main 51 cat_main() 52 53 def collect(self): 54 from .collect import main as collect_main 55 collect_main() 56 57 def info(self): 58 from .tools.query import main as query_main 59 query_main() 60 61 def inspect(self): 62 from .tools.insights_inspect import main as inspect_main 63 inspect_main() 64 65 def ocpshell(self): 66 from .ocpshell import main as ocpshell_main 67 ocpshell_main() 68 69 def run(self): 70 from insights import run 71 if "" not in sys.path: 72 sys.path.insert(0, "") 73 run(print_summary=True) 74 75 76 def fix_arg_dashes(): 77 78 en_dash = '\u2013' 79 em_dash = '\u2014' 80 81 # replace unicode (en dash and em dash) dashes from argument definitions that may have been copy 82 # and pasted from another source 83 i = 1 84 for a in sys.argv[1:]: 85 first = list(a) 86 first[0] = first[0].replace(em_dash, "--").replace(en_dash, "-") 87 sys.argv[i] = "".join(first) 88 i += 1 89 90 91 def main(): 92 fix_arg_dashes() 93 try: 94 InsightsCli() 95 except SystemExit: 96 raise 97 except BaseException as ex: 98 print(ex) 99 100 101 if __name__ == "__main__": 102 main() 103 [end of insights/command_parser.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/insights/command_parser.py b/insights/command_parser.py --- a/insights/command_parser.py +++ b/insights/command_parser.py @@ -19,6 +19,7 @@ info View info and docs for Insights Core components. ocpshell Interactive evaluation of archives, directories, or individual yaml files. run Run insights-core against host or an archive. + version Show Insights Core version information and exit. """ @@ -36,6 +37,9 @@ description="Insights Core command line execution", usage=USAGE) parser.add_argument('command', help='Insights Core command to run') + parser.add_argument('--version', action='store_true', help='show Insights Core version information and exit') + if self._parse_version_arg(): + self.version() args = parser.parse_args(sys.argv[1:2]) if not hasattr(self, args.command): print('Unrecognized command') @@ -46,6 +50,12 @@ # Use dispatch pattern to execute command getattr(self, args.command)() + def _parse_version_arg(self): + """ + Manually check for version argument/flag in cases when command is not provided. + """ + return '--version' in sys.argv[1:3] + def cat(self): from .tools.cat import main as cat_main cat_main() @@ -72,6 +82,14 @@ sys.path.insert(0, "") run(print_summary=True) + def version(self): + """ + Print version information (NVR) and exit. + """ + from insights import get_nvr + print(get_nvr()) + sys.exit() + def fix_arg_dashes():
{"golden_diff": "diff --git a/insights/command_parser.py b/insights/command_parser.py\n--- a/insights/command_parser.py\n+++ b/insights/command_parser.py\n@@ -19,6 +19,7 @@\n info View info and docs for Insights Core components.\n ocpshell Interactive evaluation of archives, directories, or individual yaml files.\n run Run insights-core against host or an archive.\n+ version Show Insights Core version information and exit.\n \"\"\"\n \n \n@@ -36,6 +37,9 @@\n description=\"Insights Core command line execution\",\n usage=USAGE)\n parser.add_argument('command', help='Insights Core command to run')\n+ parser.add_argument('--version', action='store_true', help='show Insights Core version information and exit')\n+ if self._parse_version_arg():\n+ self.version()\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n@@ -46,6 +50,12 @@\n # Use dispatch pattern to execute command\n getattr(self, args.command)()\n \n+ def _parse_version_arg(self):\n+ \"\"\"\n+ Manually check for version argument/flag in cases when command is not provided.\n+ \"\"\"\n+ return '--version' in sys.argv[1:3]\n+\n def cat(self):\n from .tools.cat import main as cat_main\n cat_main()\n@@ -72,6 +82,14 @@\n sys.path.insert(0, \"\")\n run(print_summary=True)\n \n+ def version(self):\n+ \"\"\"\n+ Print version information (NVR) and exit.\n+ \"\"\"\n+ from insights import get_nvr\n+ print(get_nvr())\n+ sys.exit()\n+\n \n def fix_arg_dashes():\n", "issue": "Add version command/flag to the Insights CLI \nAdd `version` command and `--version` flag/arg to print version info from `insights.get_nvr()`. \n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nCommand Parser module\n---------------------\nImplements the ``insights`` command line. Each function is the first\nargument followed by the function specific arguments. See USAGE text\nbelow.\n\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nUSAGE = \"\"\"insights <command> [<args>]\nAvailable commands:\n cat Execute a spec and show the output\n collect Collect all specs against the client and create an Insights archive.\n inspect Execute component and shell out to ipython for evaluation.\n info View info and docs for Insights Core components.\n ocpshell Interactive evaluation of archives, directories, or individual yaml files.\n run Run insights-core against host or an archive.\n\"\"\"\n\n\nclass InsightsCli(object):\n \"\"\"\n Class to implement the cli module.\n Each command is called as a method of this class and all\n arg parsing is performed in the separate module that\n actually implements the command. 
the args \"insights command\"\n are not passed to the submodule.\n \"\"\"\n\n def __init__(self):\n parser = argparse.ArgumentParser(\n description=\"Insights Core command line execution\",\n usage=USAGE)\n parser.add_argument('command', help='Insights Core command to run')\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n sys.exit(1)\n # remove the sub-command arg from sys.argv\n sys.argv.pop(1)\n # Use dispatch pattern to execute command\n getattr(self, args.command)()\n\n def cat(self):\n from .tools.cat import main as cat_main\n cat_main()\n\n def collect(self):\n from .collect import main as collect_main\n collect_main()\n\n def info(self):\n from .tools.query import main as query_main\n query_main()\n\n def inspect(self):\n from .tools.insights_inspect import main as inspect_main\n inspect_main()\n\n def ocpshell(self):\n from .ocpshell import main as ocpshell_main\n ocpshell_main()\n\n def run(self):\n from insights import run\n if \"\" not in sys.path:\n sys.path.insert(0, \"\")\n run(print_summary=True)\n\n\ndef fix_arg_dashes():\n\n en_dash = '\\u2013'\n em_dash = '\\u2014'\n\n # replace unicode (en dash and em dash) dashes from argument definitions that may have been copy\n # and pasted from another source\n i = 1\n for a in sys.argv[1:]:\n first = list(a)\n first[0] = first[0].replace(em_dash, \"--\").replace(en_dash, \"-\")\n sys.argv[i] = \"\".join(first)\n i += 1\n\n\ndef main():\n fix_arg_dashes()\n try:\n InsightsCli()\n except SystemExit:\n raise\n except BaseException as ex:\n print(ex)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "insights/command_parser.py"}]}
1,436
393
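The patch works around argparse's required positional by peeking at `sys.argv` before parsing. A minimal reproduction of that dispatch trick; the printed NVR string is a stand-in for `insights.get_nvr()`.

```python
import sys

def main(argv=None):
    argv = sys.argv if argv is None else argv
    # Peek before argparse consumes the required positional command,
    # so `insights --version` works without a sub-command; the real
    # code prints insights.get_nvr() and calls sys.exit().
    if "--version" in argv[1:3]:
        print("insights-core-3.0.8-1")  # stand-in NVR string
        return
    # ... normal sub-command dispatch would continue here ...

main(["insights", "--version"])
```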
gh_patches_debug_865
rasdani/github-patches
git_diff
kubeflow__pipelines-1666
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `pip install kfp` does not install CLI **What happened:** ``` $ virtualenv .venv ... $ pip install kfp==0.1.23 ... $ kfp Traceback (most recent call last): File "/private/tmp/.venv/bin/kfp", line 6, in <module> from kfp.__main__ import main File "/private/tmp/.venv/lib/python3.7/site-packages/kfp/__main__.py", line 15, in <module> from .cli.cli import main ModuleNotFoundError: No module named 'kfp.cli' ``` **What did you expect to happen:** To run the CLI. **Anything else you would like to add:** I could be confused about what is expected to be available after installing the kfp package from pip - setup.py mentions an entrypoint named kfp in https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/setup.py#L74 but main.py imports a `kfp.cli` package https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/kfp/__main__.py#L15 which is not included in the distribution https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/setup.py#L46-L54 </issue> <code> [start of sdk/python/setup.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from setuptools import setup 16 17 NAME = 'kfp' 18 VERSION = '0.1.24' 19 20 REQUIRES = [ 21 'urllib3>=1.15,<1.25', #Fixing the version conflict with the "requests" package 22 'six >= 1.10', 23 'certifi', 24 'python-dateutil', 25 'PyYAML', 26 'google-cloud-storage>=1.13.0', 27 'kubernetes>=8.0.0, <=9.0.0', 28 'PyJWT>=1.6.4', 29 'cryptography>=2.4.2', 30 'google-auth>=1.6.1', 31 'requests_toolbelt>=0.8.0', 32 'cloudpickle', 33 'kfp-server-api >= 0.1.18, < 0.1.19', #Update the upper version whenever a new version of the kfp-server-api package is released. Update the lower version when there is a breaking change in kfp-server-api. 
34 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1 35 'jsonschema >= 3.0.1', 36 'tabulate == 0.8.3', 37 'click == 7.0' 38 ] 39 40 setup( 41 name=NAME, 42 version=VERSION, 43 description='KubeFlow Pipelines SDK', 44 author='google', 45 install_requires=REQUIRES, 46 packages=[ 47 'kfp', 48 'kfp.compiler', 49 'kfp.components', 50 'kfp.components.structures', 51 'kfp.components.structures.kubernetes', 52 'kfp.dsl', 53 'kfp.notebook', 54 ], 55 classifiers=[ 56 'Intended Audience :: Developers', 57 'Intended Audience :: Education', 58 'Intended Audience :: Science/Research', 59 'License :: OSI Approved :: Apache Software License', 60 'Programming Language :: Python :: 3', 61 'Programming Language :: Python :: 3.5', 62 'Programming Language :: Python :: 3.6', 63 'Programming Language :: Python :: 3.7', 64 'Topic :: Scientific/Engineering', 65 'Topic :: Scientific/Engineering :: Artificial Intelligence', 66 'Topic :: Software Development', 67 'Topic :: Software Development :: Libraries', 68 'Topic :: Software Development :: Libraries :: Python Modules', 69 ], 70 python_requires='>=3.5.3', 71 include_package_data=True, 72 entry_points={'console_scripts': [ 73 'dsl-compile = kfp.compiler.main:main', 74 'kfp=kfp.__main__:main']}) 75 [end of sdk/python/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sdk/python/setup.py b/sdk/python/setup.py --- a/sdk/python/setup.py +++ b/sdk/python/setup.py @@ -45,6 +45,7 @@ install_requires=REQUIRES, packages=[ 'kfp', + 'kfp.cli', 'kfp.compiler', 'kfp.components', 'kfp.components.structures',
{"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -45,6 +45,7 @@\n install_requires=REQUIRES,\n packages=[\n 'kfp',\n+ 'kfp.cli',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n", "issue": "`pip install kfp` does not install CLI\n**What happened:**\r\n\r\n```\r\n$ virtualenv .venv\r\n...\r\n$ pip install kfp==0.1.23\r\n...\r\n$ kfp\r\nTraceback (most recent call last):\r\n File \"/private/tmp/.venv/bin/kfp\", line 6, in <module>\r\n from kfp.__main__ import main\r\n File \"/private/tmp/.venv/lib/python3.7/site-packages/kfp/__main__.py\", line 15, in <module>\r\n from .cli.cli import main\r\nModuleNotFoundError: No module named 'kfp.cli'\r\n```\r\n\r\n**What did you expect to happen:**\r\nTo run the CLI.\r\n\r\n**Anything else you would like to add:**\r\nI could be confused about what is expected to be available after installing the kfp package from pip - setup.py mentions an entrypoint named kfp in \r\n\r\nhttps://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/setup.py#L74\r\n\r\nbut main.py imports a `kfp.cli` package https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/kfp/__main__.py#L15\r\n\r\nwhich is not included in the distribution https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/setup.py#L46-L54\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup\n\nNAME = 'kfp'\nVERSION = '0.1.24'\n\nREQUIRES = [\n 'urllib3>=1.15,<1.25', #Fixing the version conflict with the \"requests\" package\n 'six >= 1.10',\n 'certifi',\n 'python-dateutil',\n 'PyYAML',\n 'google-cloud-storage>=1.13.0',\n 'kubernetes>=8.0.0, <=9.0.0',\n 'PyJWT>=1.6.4',\n 'cryptography>=2.4.2',\n 'google-auth>=1.6.1',\n 'requests_toolbelt>=0.8.0',\n 'cloudpickle',\n 'kfp-server-api >= 0.1.18, < 0.1.19', #Update the upper version whenever a new version of the kfp-server-api package is released. 
Update the lower version when there is a breaking change in kfp-server-api.\n 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1\n 'jsonschema >= 3.0.1',\n 'tabulate == 0.8.3',\n 'click == 7.0'\n]\n\nsetup(\n name=NAME,\n version=VERSION,\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n packages=[\n 'kfp',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.components.structures.kubernetes',\n 'kfp.dsl',\n 'kfp.notebook',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.5.3',\n include_package_data=True,\n entry_points={'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'kfp=kfp.__main__:main']})\n", "path": "sdk/python/setup.py"}]}
1,750
83
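The missing `kfp.cli` entry is the classic hazard of hand-listing packages in `setup.py`. As a design note, the usual safeguard looks like the fragment below; this is a sketch, not the real kfp `setup.py`, which per the golden diff instead adds `'kfp.cli'` to the explicit list.

```python
from setuptools import setup, find_packages

setup(
    name="kfp",
    # find_packages() discovers kfp.cli (and any future sub-package)
    # automatically, so the console_scripts entry point cannot drift
    # out of sync with a hand-maintained package list.
    packages=find_packages(exclude=["tests", "tests.*"]),
)
```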
gh_patches_debug_3094
rasdani/github-patches
git_diff
streamlit__streamlit-2611
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> st.expander cuts off sliders # Summary Expander in sidebar cuts off sliders and potentially other stuff # Steps to reproduce Run the following code ``` import streamlit as st sidebar_expander = st.sidebar.beta_expander("Bad sliders underneath!") with sidebar_expander: st.slider("Bad layout slider 1", 0, 100, value=0) st.slider("Bad layout slider 2", 0, 100, value=(0,100)) st.sidebar.slider("Good layout slider") ``` ## Expected behavior: Sliders appear nicely under the expander ## Actual behavior: Sliders ends are cut off under the expander (see picture) ![image](https://user-images.githubusercontent.com/62385461/96776145-e4638580-13e8-11eb-9eaf-692c45541cf1.png) ## Is this a regression? No # Debug info - Streamlit version: 0.69.1 - Python version: 3.8.2 - Using PipEnv - OS version: Windows 10 - Browser version: Firefox and Chrome, recent versions # Additional information May also happen in containers outside of sidebar? Have not checked it though.. </issue> <code> [start of e2e/scripts/st_expander.py] 1 # Copyright 2018-2020 Streamlit Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import streamlit as st 16 17 container = st.beta_container() 18 container.write("I cannot collapse") 19 20 expander = st.beta_expander("Collapse me!", expanded=True) 21 expander.write("I can collapse") 22 23 collapsed = st.beta_expander("Expand me!") 24 collapsed.write("I am already collapsed") 25 26 sidebar = st.sidebar.beta_expander("Expand me!") 27 sidebar.write("I am in the sidebar") 28 [end of e2e/scripts/st_expander.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/e2e/scripts/st_expander.py b/e2e/scripts/st_expander.py --- a/e2e/scripts/st_expander.py +++ b/e2e/scripts/st_expander.py @@ -19,6 +19,8 @@ expander = st.beta_expander("Collapse me!", expanded=True) expander.write("I can collapse") +expander.slider("I don't get cut off") +expander.button("I'm also not cut off (while focused)") collapsed = st.beta_expander("Expand me!") collapsed.write("I am already collapsed")
{"golden_diff": "diff --git a/e2e/scripts/st_expander.py b/e2e/scripts/st_expander.py\n--- a/e2e/scripts/st_expander.py\n+++ b/e2e/scripts/st_expander.py\n@@ -19,6 +19,8 @@\n \n expander = st.beta_expander(\"Collapse me!\", expanded=True)\n expander.write(\"I can collapse\")\n+expander.slider(\"I don't get cut off\")\n+expander.button(\"I'm also not cut off (while focused)\")\n \n collapsed = st.beta_expander(\"Expand me!\")\n collapsed.write(\"I am already collapsed\")\n", "issue": "st.expander cuts off sliders\n# Summary\r\n\r\nExpander in sidebar cuts off sliders and potentially other stuff\r\n\r\n# Steps to reproduce\r\nRun the following code\r\n```\r\nimport streamlit as st\r\n\r\nsidebar_expander = st.sidebar.beta_expander(\"Bad sliders underneath!\")\r\nwith sidebar_expander:\r\n st.slider(\"Bad layout slider 1\", 0, 100, value=0)\r\n st.slider(\"Bad layout slider 2\", 0, 100, value=(0,100))\r\n\r\nst.sidebar.slider(\"Good layout slider\")\r\n```\r\n## Expected behavior:\r\n\r\nSliders appear nicely under the expander\r\n\r\n## Actual behavior:\r\n\r\nSliders ends are cut off under the expander (see picture)\r\n\r\n![image](https://user-images.githubusercontent.com/62385461/96776145-e4638580-13e8-11eb-9eaf-692c45541cf1.png)\r\n\r\n\r\n## Is this a regression?\r\n\r\nNo\r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.69.1\r\n- Python version: 3.8.2\r\n- Using PipEnv\r\n- OS version: Windows 10\r\n- Browser version: Firefox and Chrome, recent versions\r\n\r\n# Additional information\r\n\r\nMay also happen in containers outside of sidebar? Have not checked it though..\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\ncontainer = st.beta_container()\ncontainer.write(\"I cannot collapse\")\n\nexpander = st.beta_expander(\"Collapse me!\", expanded=True)\nexpander.write(\"I can collapse\")\n\ncollapsed = st.beta_expander(\"Expand me!\")\ncollapsed.write(\"I am already collapsed\")\n\nsidebar = st.sidebar.beta_expander(\"Expand me!\")\nsidebar.write(\"I am in the sidebar\")\n", "path": "e2e/scripts/st_expander.py"}]}
1,101
126
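A small point worth flagging on this row: the issue's repro attaches widgets with the `with` context manager, while the e2e fix calls them as methods on the expander. In the beta container API of that release both forms are assumed to target the same container; sketch only.

```python
import streamlit as st

exp = st.beta_expander("Collapse me!", expanded=True)
exp.slider("method form", 0, 100, value=(0, 100))  # style used in the fix
with exp:                                          # style used in the repro
    st.slider("with form", 0, 100, value=50)
```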
gh_patches_debug_57166
rasdani/github-patches
git_diff
unionai-oss__pandera-69
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> update readme on release of next version suggest replacing ```**Supports:** python 2.7, 3.5, 3.6``` with: ```[![PyPI pyversions](https://img.shields.io/pypi/pyversions/pandera.svg)](https://pypi.python.org/pypi/pandera/)``` when the new metadata is available on pypi. It will be 'missing' before pypi is updated: [![PyPI pyversions](https://img.shields.io/pypi/pyversions/pandera.svg)](https://pypi.python.org/pypi/pandera/) </issue> <code> [start of setup.py] 1 from setuptools import setup 2 3 with open('README.md') as f: 4 long_description = f.read() 5 6 setup( 7 name="pandera", 8 version="0.1.3", 9 author="Niels Bantilan", 10 author_email="[email protected]", 11 description='A light-weight and flexible validation package for pandas data structures.', 12 long_description=long_description, 13 long_description_content_type='text/markdown', 14 url="https://github.com/cosmicBboy/pandera", 15 project_urls={ 16 'Documentation': 'https://pandera.readthedocs.io', 17 'Issue Tracker': 'https://github.com/cosmicBboy/pandera/issues' 18 }, 19 keywords=["pandas", "validation", "data-structures"], 20 license="MIT", 21 packages=[ 22 "pandera", 23 ], 24 install_requires=[ 25 "enum34 ; python_version<'3.4'", 26 "numpy >= 1.9.0", 27 "pandas >= 0.23.0", 28 "wrapt", 29 "scipy ; python_version<'2.7'", 30 ], 31 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', 32 platforms='any', 33 classifiers=[ 34 'Development Status :: 5 - Production/Stable', 35 'Operating System :: OS Independent', 36 'Intended Audience :: Science/Research', 37 'Programming Language :: Python', 38 'Programming Language :: Python :: 3', 39 'Programming Language :: Python :: 3.5', 40 'Programming Language :: Python :: 3.6', 41 'Programming Language :: Python :: 3.7', 42 'Topic :: Scientific/Engineering' 43 ], 44 45 ) 46 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -35,6 +35,7 @@ 'Operating System :: OS Independent', 'Intended Audience :: Science/Research', 'Programming Language :: Python', + 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,6 +35,7 @@\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n+ 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n", "issue": "update readme on release of next version\nsuggest replacing\r\n\r\n```**Supports:** python 2.7, 3.5, 3.6```\r\n\r\nwith:\r\n\r\n```[![PyPI pyversions](https://img.shields.io/pypi/pyversions/pandera.svg)](https://pypi.python.org/pypi/pandera/)```\r\n\r\nwhen the new metadata is available on pypi.\r\n\r\nIt will be 'missing' before pypi is updated:\r\n[![PyPI pyversions](https://img.shields.io/pypi/pyversions/pandera.svg)](https://pypi.python.org/pypi/pandera/)\n", "before_files": [{"content": "from setuptools import setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nsetup(\n name=\"pandera\",\n version=\"0.1.3\",\n author=\"Niels Bantilan\",\n author_email=\"[email protected]\",\n description='A light-weight and flexible validation package for pandas data structures.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url=\"https://github.com/cosmicBboy/pandera\",\n project_urls={\n 'Documentation': 'https://pandera.readthedocs.io',\n 'Issue Tracker': 'https://github.com/cosmicBboy/pandera/issues'\n },\n keywords=[\"pandas\", \"validation\", \"data-structures\"],\n license=\"MIT\",\n packages=[\n \"pandera\",\n ],\n install_requires=[\n \"enum34 ; python_version<'3.4'\",\n \"numpy >= 1.9.0\",\n \"pandas >= 0.23.0\",\n \"wrapt\",\n \"scipy ; python_version<'2.7'\",\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering'\n ],\n\n)\n", "path": "setup.py"}]}
1,118
105
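The badge in the issue is driven entirely by the trove classifiers published with the release. Once the new version is on PyPI, the installed metadata can be spot-checked with something like the snippet below; it assumes pandera is installed and Python 3.8+ for `importlib.metadata`.

```python
from importlib.metadata import metadata

# Print the Python-version classifiers that feed the pyversions badge.
print([c for c in metadata("pandera").get_all("Classifier")
       if c.startswith("Programming Language :: Python ::")])
```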
gh_patches_debug_3801
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-167
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Host docs, build on release Decide where to host generated docs, configure CI to generate (and possibly upload) new docs on each release. See #6 for CI bootstrapping. </issue> <code> [start of docs/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # http://www.sphinx-doc.org/en/master/config 6 7 # -- Path setup -------------------------------------------------------------- 8 9 # If extensions (or modules to document with autodoc) are in another directory, 10 # add these directories to sys.path here. If the directory is relative to the 11 # documentation root, use os.path.abspath to make it absolute, like shown here. 12 13 import os 14 import sys 15 16 sys.path.insert(0, os.path.abspath("../opentelemetry-api/src/")) 17 18 19 # -- Project information ----------------------------------------------------- 20 21 project = "OpenTelemetry" 22 copyright = "2019, OpenTelemetry Authors" 23 author = "OpenTelemetry Authors" 24 25 26 # -- General configuration --------------------------------------------------- 27 28 # Easy automatic cross-references for `code in backticks` 29 default_role = "any" 30 31 # Add any Sphinx extension module names here, as strings. They can be 32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 # ones. 34 extensions = [ 35 # API doc generation 36 "sphinx.ext.autodoc", 37 # Support for google-style docstrings 38 "sphinx.ext.napoleon", 39 # Infer types from hints instead of docstrings 40 "sphinx_autodoc_typehints", 41 # Add links to source from generated docs 42 "sphinx.ext.viewcode", 43 # Link to other sphinx docs 44 "sphinx.ext.intersphinx", 45 ] 46 47 intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)} 48 49 # http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky 50 # Sphinx will warn about all references where the target cannot be found. 51 nitpicky = True 52 nitpick_ignore = [] 53 54 # Add any paths that contain templates here, relative to this directory. 55 templates_path = ["_templates"] 56 57 # List of patterns, relative to source directory, that match files and 58 # directories to ignore when looking for source files. 59 # This pattern also affects html_static_path and html_extra_path. 60 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 61 62 autodoc_default_options = { 63 "members": True, 64 "undoc-members": True, 65 "show-inheritance": True, 66 "member-order": "bysource", 67 } 68 69 # -- Options for HTML output ------------------------------------------------- 70 71 # The theme to use for HTML and HTML Help pages. See the documentation for 72 # a list of builtin themes. 73 # 74 html_theme = "sphinx_rtd_theme" 75 76 # Add any paths that contain custom static files (such as style sheets) here, 77 # relative to this directory. They are copied after the builtin static files, 78 # so a file named "default.css" will overwrite the builtin "default.css". 79 html_static_path = [] 80 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -42,6 +42,9 @@ "sphinx.ext.viewcode", # Link to other sphinx docs "sphinx.ext.intersphinx", + # Add a .nojekyll file to the generated HTML docs + # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing + "sphinx.ext.githubpages", ] intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -42,6 +42,9 @@\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n+ # Add a .nojekyll file to the generated HTML docs\n+ # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing\n+ \"sphinx.ext.githubpages\",\n ]\n \n intersphinx_mapping = {\"python\": (\"https://docs.python.org/3/\", None)}\n", "issue": "Host docs, build on release\nDecide where to host generated docs, configure CI to generate (and possibly upload) new docs on each release.\r\n\r\nSee #6 for CI bootstrapping.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../opentelemetry-api/src/\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"OpenTelemetry\"\ncopyright = \"2019, OpenTelemetry Authors\"\nauthor = \"OpenTelemetry Authors\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Easy automatic cross-references for `code in backticks`\ndefault_role = \"any\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # API doc generation\n \"sphinx.ext.autodoc\",\n # Support for google-style docstrings\n \"sphinx.ext.napoleon\",\n # Infer types from hints instead of docstrings\n \"sphinx_autodoc_typehints\",\n # Add links to source from generated docs\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n]\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3/\", None)}\n\n# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky\n# Sphinx will warn about all references where the target cannot be found.\nnitpicky = True\nnitpick_ignore = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\nautodoc_default_options = {\n \"members\": True,\n \"undoc-members\": True,\n \"show-inheritance\": True,\n \"member-order\": \"bysource\",\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n", "path": "docs/conf.py"}]}
1,332
130
gh_patches_debug_641
rasdani/github-patches
git_diff
pex-tool__pex-2219
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.143 On the docket: + [x] pex fails to build pycryptodome due to filename too long #2087 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.142" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.142" +__version__ = "2.1.143"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.142\"\n+__version__ = \"2.1.143\"\n", "issue": "Release 2.1.143\nOn the docket:\r\n+ [x] pex fails to build pycryptodome due to filename too long #2087 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.142\"\n", "path": "pex/version.py"}]}
624
99
gh_patches_debug_21265
rasdani/github-patches
git_diff
scikit-hep__pyhf-436
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Test Deploy feature is broken # Description See https://github.com/FaradayRF/faradayio/issues/35 for a related issue - I think. Failing job on master here: https://travis-ci.org/diana-hep/pyhf/builds/517678508?utm_source=github_status&utm_medium=notification # Expected Behavior Expect it to pass. # Actual Behavior Observe it failing. # Checklist - [x] Run `git fetch` to get the most up to date version of `master` - [x] Searched through existing Issues to confirm this is not a duplicate issue - [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 from os import path 5 import sys 6 7 this_directory = path.abspath(path.dirname(__file__)) 8 if sys.version_info.major < 3: 9 from io import open 10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md: 11 long_description = readme_md.read() 12 13 extras_require = { 14 'tensorflow': [ 15 'tensorflow~=1.13', 16 'tensorflow-probability~=0.5', 17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass 18 'setuptools<=39.1.0', 19 ], 20 'torch': ['torch~=1.0'], 21 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'], 22 # 'dask': [ 23 # 'dask[array]' 24 # ], 25 'xmlio': ['uproot'], 26 'minuit': ['iminuit'], 27 'develop': [ 28 'pyflakes', 29 'pytest~=3.5', 30 'pytest-cov>=2.5.1', 31 'pytest-mock', 32 'pytest-benchmark[histogram]', 33 'pytest-console-scripts', 34 'python-coveralls', 35 'coverage>=4.0', # coveralls 36 'matplotlib', 37 'jupyter', 38 'nbdime', 39 'uproot~=3.3', 40 'papermill~=0.16', 41 'graphviz', 42 'bumpversion', 43 'sphinx', 44 'sphinxcontrib-bibtex', 45 'sphinxcontrib-napoleon', 46 'sphinx_rtd_theme', 47 'nbsphinx', 48 'sphinx-issues', 49 'm2r', 50 'jsonpatch', 51 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now 52 'pre-commit', 53 'black;python_version>="3.6"', # Black is Python3 only 54 'twine', 55 ], 56 } 57 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 58 59 setup( 60 name='pyhf', 61 version='0.0.16', 62 description='(partial) pure python histfactory implementation', 63 long_description=long_description, 64 long_description_content_type='text/markdown', 65 url='https://github.com/diana-hep/pyhf', 66 author='Lukas Heinrich', 67 author_email='[email protected]', 68 license='Apache', 69 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask', 70 classifiers=[ 71 "Programming Language :: Python :: 2", 72 "Programming Language :: Python :: 2.7", 73 "Programming Language :: Python :: 3", 74 "Programming Language :: Python :: 3.6", 75 "Programming Language :: Python :: 3.7", 76 ], 77 packages=find_packages(), 78 include_package_data=True, 79 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*", 80 install_requires=[ 81 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet 82 'click>=6.0', # for console scripts, 83 'tqdm', # for readxml 84 'six', # for modifiers 85 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6 86 'jsonpatch', 87 ], 88 extras_require=extras_require, 89 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']}, 90 dependency_links=[], 91 ) 92 [end of 
setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -56,6 +56,29 @@ } extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) + +def _is_test_pypi(): + """ + Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and + set to true (c.f. .travis.yml) + + The use_scm_version kwarg accepts a callable for the local_scheme + configuration parameter with argument "version". This can be replaced + with a lambda as the desired version structure is {next_version}.dev{distance} + c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy + + As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version + controlled through bumpversion is used. + """ + from os import getenv + + return ( + {'local_scheme': lambda version: ''} + if getenv('TESTPYPI_UPLOAD') == 'true' + else False + ) + + setup( name='pyhf', version='0.0.16', @@ -88,4 +111,5 @@ extras_require=extras_require, entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']}, dependency_links=[], + use_scm_version=_is_test_pypi(), )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,6 +56,29 @@\n }\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n \n+\n+def _is_test_pypi():\n+ \"\"\"\n+ Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n+ set to true (c.f. .travis.yml)\n+\n+ The use_scm_version kwarg accepts a callable for the local_scheme\n+ configuration parameter with argument \"version\". This can be replaced\n+ with a lambda as the desired version structure is {next_version}.dev{distance}\n+ c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n+\n+ As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n+ controlled through bumpversion is used.\n+ \"\"\"\n+ from os import getenv\n+\n+ return (\n+ {'local_scheme': lambda version: ''}\n+ if getenv('TESTPYPI_UPLOAD') == 'true'\n+ else False\n+ )\n+\n+\n setup(\n name='pyhf',\n version='0.0.16',\n@@ -88,4 +111,5 @@\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n+ use_scm_version=_is_test_pypi(),\n )\n", "issue": "Test Deploy feature is broken\n# Description\r\n\r\nSee https://github.com/FaradayRF/faradayio/issues/35 for a related issue - I think. Failing job on master here: https://travis-ci.org/diana-hep/pyhf/builds/517678508?utm_source=github_status&utm_medium=notification\r\n\r\n# Expected Behavior\r\n\r\nExpect it to pass.\r\n\r\n# Actual Behavior\r\n\r\nObserve it failing.\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=0.16',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.16',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n 
long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]}
1,726
327
gh_patches_debug_3357
rasdani/github-patches
git_diff
nipy__nipype-1574
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Build documentation fails if dipy is not installed </issue> <code> [start of nipype/interfaces/dipy/base.py] 1 # -*- coding: utf-8 -*- 2 """ Base interfaces for dipy """ 3 import os.path as op 4 import numpy as np 5 from nipype.interfaces.base import (traits, File, isdefined, 6 BaseInterface, BaseInterfaceInputSpec) 7 from ... import logging 8 9 IFLOGGER = logging.getLogger('interface') 10 11 HAVE_DIPY = True 12 try: 13 import dipy 14 except ImportError: 15 HAVE_DIPY = False 16 17 18 def no_dipy(): 19 """ Check if dipy is available """ 20 global HAVE_DIPY 21 return not HAVE_DIPY 22 23 24 def dipy_version(): 25 """ Check dipy version """ 26 if no_dipy(): 27 return None 28 29 return dipy.__version__ 30 31 32 class DipyBaseInterface(BaseInterface): 33 34 """ 35 A base interface for py:mod:`dipy` computations 36 """ 37 def __init__(self, **inputs): 38 if no_dipy(): 39 IFLOGGER.error('dipy was not found') 40 # raise ImportError('dipy was not found') 41 super(DipyBaseInterface, self).__init__(**inputs) 42 43 44 class DipyBaseInterfaceInputSpec(BaseInterfaceInputSpec): 45 in_file = File(exists=True, mandatory=True, desc=('input diffusion data')) 46 in_bval = File(exists=True, mandatory=True, desc=('input b-values table')) 47 in_bvec = File(exists=True, mandatory=True, desc=('input b-vectors table')) 48 b0_thres = traits.Int(700, usedefault=True, desc=('b0 threshold')) 49 out_prefix = traits.Str(desc=('output prefix for file names')) 50 51 52 class DipyDiffusionInterface(DipyBaseInterface): 53 54 """ 55 A base interface for py:mod:`dipy` computations 56 """ 57 input_spec = DipyBaseInterfaceInputSpec 58 59 def _get_gradient_table(self): 60 bval = np.loadtxt(self.inputs.in_bval) 61 bvec = np.loadtxt(self.inputs.in_bvec).T 62 from dipy.core.gradients import gradient_table 63 gtab = gradient_table(bval, bvec) 64 65 gtab.b0_threshold = self.inputs.b0_thres 66 return gtab 67 68 def _gen_filename(self, name, ext=None): 69 fname, fext = op.splitext(op.basename(self.inputs.in_file)) 70 if fext == '.gz': 71 fname, fext2 = op.splitext(fname) 72 fext = fext2 + fext 73 74 if not isdefined(self.inputs.out_prefix): 75 out_prefix = op.abspath(fname) 76 else: 77 out_prefix = self.inputs.out_prefix 78 79 if ext is None: 80 ext = fext 81 82 return out_prefix + '_' + name + ext 83 [end of nipype/interfaces/dipy/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py --- a/nipype/interfaces/dipy/base.py +++ b/nipype/interfaces/dipy/base.py @@ -36,7 +36,7 @@ """ def __init__(self, **inputs): if no_dipy(): - IFLOGGER.error('dipy was not found') + IFLOGGER.warn('dipy was not found') # raise ImportError('dipy was not found') super(DipyBaseInterface, self).__init__(**inputs)
{"golden_diff": "diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py\n--- a/nipype/interfaces/dipy/base.py\n+++ b/nipype/interfaces/dipy/base.py\n@@ -36,7 +36,7 @@\n \"\"\"\n def __init__(self, **inputs):\n if no_dipy():\n- IFLOGGER.error('dipy was not found')\n+ IFLOGGER.warn('dipy was not found')\n # raise ImportError('dipy was not found')\n super(DipyBaseInterface, self).__init__(**inputs)\n", "issue": "[BUG] Build documentation fails if dipy is not installed\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\" Base interfaces for dipy \"\"\"\nimport os.path as op\nimport numpy as np\nfrom nipype.interfaces.base import (traits, File, isdefined,\n BaseInterface, BaseInterfaceInputSpec)\nfrom ... import logging\n\nIFLOGGER = logging.getLogger('interface')\n\nHAVE_DIPY = True\ntry:\n import dipy\nexcept ImportError:\n HAVE_DIPY = False\n\n\ndef no_dipy():\n \"\"\" Check if dipy is available \"\"\"\n global HAVE_DIPY\n return not HAVE_DIPY\n\n\ndef dipy_version():\n \"\"\" Check dipy version \"\"\"\n if no_dipy():\n return None\n\n return dipy.__version__\n\n\nclass DipyBaseInterface(BaseInterface):\n\n \"\"\"\n A base interface for py:mod:`dipy` computations\n \"\"\"\n def __init__(self, **inputs):\n if no_dipy():\n IFLOGGER.error('dipy was not found')\n # raise ImportError('dipy was not found')\n super(DipyBaseInterface, self).__init__(**inputs)\n\n\nclass DipyBaseInterfaceInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=('input diffusion data'))\n in_bval = File(exists=True, mandatory=True, desc=('input b-values table'))\n in_bvec = File(exists=True, mandatory=True, desc=('input b-vectors table'))\n b0_thres = traits.Int(700, usedefault=True, desc=('b0 threshold'))\n out_prefix = traits.Str(desc=('output prefix for file names'))\n\n\nclass DipyDiffusionInterface(DipyBaseInterface):\n\n \"\"\"\n A base interface for py:mod:`dipy` computations\n \"\"\"\n input_spec = DipyBaseInterfaceInputSpec\n\n def _get_gradient_table(self):\n bval = np.loadtxt(self.inputs.in_bval)\n bvec = np.loadtxt(self.inputs.in_bvec).T\n from dipy.core.gradients import gradient_table\n gtab = gradient_table(bval, bvec)\n\n gtab.b0_threshold = self.inputs.b0_thres\n return gtab\n\n def _gen_filename(self, name, ext=None):\n fname, fext = op.splitext(op.basename(self.inputs.in_file))\n if fext == '.gz':\n fname, fext2 = op.splitext(fname)\n fext = fext2 + fext\n\n if not isdefined(self.inputs.out_prefix):\n out_prefix = op.abspath(fname)\n else:\n out_prefix = self.inputs.out_prefix\n\n if ext is None:\n ext = fext\n\n return out_prefix + '_' + name + ext\n", "path": "nipype/interfaces/dipy/base.py"}]}
1,284
124
gh_patches_debug_60523
rasdani/github-patches
git_diff
streamlit__streamlit-2811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Sliders should show current value [regression] [Baseweb] # Summary (via @tvst: ) Our sliders right now require you to hover in order to see the selected value. This makes it really hard to understand what the user selected. I reported this before, but I just spent some time debugging my app thinking it was broken because I was reading the wrong slider value. Frustrating. I understand this is the new behavior of sliders in Base Web, but we have alternatives: **1. Roll back Base Web to a previous version** This is the preferable solution in order to get this fix out ASAP. Even if we decide it's only a temporary solution. 2. Try to find a solution using the latest Base Web 3. Copy/paste the old Baseweb slider into our own repo and modify it there. Their slider is based on another library, btw (I forget which), so maybe we should just use that library directly instead? ## Is this a regression? yes # Debug info - Streamlit version: 0.75-special Allow hiding tracebacks Currently, when a Streamlit app throws an exception, we print the traceback to the browser. This isn't necessarily the right thing to do for all apps; we should allow this to be configurable. Maybe a `[client] showTracebacks = false` option? And presumably, if tracebacks are disabled, we should filter them at the server level, so that the client never even receives the string, in case the user is worried about leaking internal app details. (Related discussion here: https://discuss.streamlit.io/t/dont-show-users-tracebacks/1746) </issue> <code> [start of e2e/scripts/st_columns.py] 1 # Copyright 2018-2021 Streamlit Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import streamlit as st 16 17 CAT_IMAGE = "https://images.unsplash.com/photo-1552933529-e359b2477252?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=950&q=80" 18 19 if st.button("Layout should not shift when this is pressed"): 20 st.write("Pressed!") 21 22 # Same-width columns 23 c1, c2, c3 = st.beta_columns(3) 24 c1.image(CAT_IMAGE, use_column_width=True) 25 c2.image(CAT_IMAGE, use_column_width=True) 26 c3.image(CAT_IMAGE, use_column_width=True) 27 28 29 # Variable-width columns 30 for c in st.beta_columns((1, 2, 4, 8)): 31 c.image(CAT_IMAGE, use_column_width=True) 32 [end of e2e/scripts/st_columns.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/e2e/scripts/st_columns.py b/e2e/scripts/st_columns.py --- a/e2e/scripts/st_columns.py +++ b/e2e/scripts/st_columns.py @@ -27,5 +27,5 @@ # Variable-width columns -for c in st.beta_columns((1, 2, 4, 8)): +for c in st.beta_columns((1, 2, 3, 4)): c.image(CAT_IMAGE, use_column_width=True)
{"golden_diff": "diff --git a/e2e/scripts/st_columns.py b/e2e/scripts/st_columns.py\n--- a/e2e/scripts/st_columns.py\n+++ b/e2e/scripts/st_columns.py\n@@ -27,5 +27,5 @@\n \n \n # Variable-width columns\n-for c in st.beta_columns((1, 2, 4, 8)):\n+for c in st.beta_columns((1, 2, 3, 4)):\n c.image(CAT_IMAGE, use_column_width=True)\n", "issue": "Sliders should show current value [regression] [Baseweb]\n# Summary\r\n\r\n(via @tvst: )\r\n\r\nOur sliders right now require you to hover in order to see the selected value. This makes it really hard to understand what the user selected. I reported this before, but I just spent some time debugging my app thinking it was broken because I was reading the wrong slider value. Frustrating.\r\n\r\nI understand this is the new behavior of sliders in Base Web, but we have alternatives:\r\n\r\n**1. Roll back Base Web to a previous version**\r\n\r\n This is the preferable solution in order to get this fix out ASAP. Even if we decide it's only a temporary solution.\r\n\r\n2. Try to find a solution using the latest Base Web\r\n\r\n3. Copy/paste the old Baseweb slider into our own repo and modify it there. Their slider is based on another library, btw (I forget which), so maybe we should just use that library directly instead?\r\n\r\n\r\n## Is this a regression?\r\n\r\nyes \r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.75-special\nAllow hiding tracebacks\nCurrently, when a Streamlit app throws an exception, we print the traceback to the browser. This isn't necessarily the right thing to do for all apps; we should allow this to be configurable.\r\n\r\nMaybe a `[client] showTracebacks = false` option? And presumably, if tracebacks are disabled, we should filter them at the server level, so that the client never even receives the string, in case the user is worried about leaking internal app details.\r\n\r\n(Related discussion here: https://discuss.streamlit.io/t/dont-show-users-tracebacks/1746)\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nCAT_IMAGE = \"https://images.unsplash.com/photo-1552933529-e359b2477252?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=950&q=80\"\n\nif st.button(\"Layout should not shift when this is pressed\"):\n st.write(\"Pressed!\")\n\n# Same-width columns\nc1, c2, c3 = st.beta_columns(3)\nc1.image(CAT_IMAGE, use_column_width=True)\nc2.image(CAT_IMAGE, use_column_width=True)\nc3.image(CAT_IMAGE, use_column_width=True)\n\n\n# Variable-width columns\nfor c in st.beta_columns((1, 2, 4, 8)):\n c.image(CAT_IMAGE, use_column_width=True)\n", "path": "e2e/scripts/st_columns.py"}]}
1,284
108
gh_patches_debug_32004
rasdani/github-patches
git_diff
OCA__bank-payment-18
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> many transactions not regonized After importing a dutch bank statements file from the ING, I got the following screen: ![import_log](https://cloud.githubusercontent.com/assets/890128/3571378/ca510fa0-0b5c-11e4-9d32-3ee373c491e4.png) Many transactions are not processed because of not recognizing the correct bankaccount number. The system is complaining about "Statements found for unknown account 68xxx" and "Statements found for account 06xxxxxx, but no default journal was defined. This is a little strange, because I have setup "Default Import Settings for Bank Accounts" in the settings of the accounting section. I also put a default journal in there. My information entered there is the IBAN number of my account, together with the national account (with leading zero). I also tried without the leading zero in the national account, but this doesn't make any difference at all. Why is openerp mixing up the number of the bankaccount. (with/without zero) and not matching the national number. </issue> <code> [start of account_banking/res_partner_bank.py] 1 # -*- coding: utf-8 -*- 2 ############################################################################## 3 # 4 # Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>). 5 # (C) 2011 - 2014 Therp BV (<http://therp.nl>). 6 # 7 # This program is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU Affero General Public License as 9 # published by the Free Software Foundation, either version 3 of the 10 # License, or (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU Affero General Public License for more details. 16 # 17 # You should have received a copy of the GNU Affero General Public License 18 # along with this program. If not, see <http://www.gnu.org/licenses/>. 19 # 20 ############################################################################## 21 from openerp.osv import orm 22 from openerp.addons.account_banking import sepa 23 24 25 class ResPartnerBank(orm.Model): 26 _inherit = 'res.partner.bank' 27 28 def online_account_info( 29 self, cr, uid, country_code, acc_number, context=None): 30 """ 31 API hook for legacy online lookup of account info, 32 to be removed in OpenERP 8.0. 33 """ 34 return False 35 36 def search(self, cr, uid, args, *rest, **kwargs): 37 """ 38 When a complete IBAN is searched, also search for its BBAN 39 if we have the domestic column. Disregard spaces 40 when comparing IBANs. 41 """ 42 43 def is_term(arg): 44 '''Flag an arg as term or otherwise''' 45 return isinstance(arg, (list, tuple)) and len(arg) == 3 46 47 def extended_filter_term(term): 48 ''' 49 Extend the search criteria in term when appropriate. 
50 ''' 51 result = [term] 52 extra_terms = [] 53 if term[0].lower() == 'acc_number' and term[1] in ('=', '=='): 54 iban = sepa.IBAN(term[2]) 55 if iban.valid: 56 # Disregard spaces when comparing IBANs 57 cr.execute( 58 """ 59 SELECT id FROM res_partner_bank 60 WHERE replace(acc_number, ' ', '') = %s 61 """, (term[2].replace(' ', ''),)) 62 ids = [row[0] for row in cr.fetchall()] 63 result = [('id', 'in', ids)] 64 65 if 'acc_number_domestic' in self._columns: 66 bban = iban.localized_BBAN 67 # Prevent empty search filters 68 if bban: 69 extra_terms.append( 70 ('acc_number_domestic', term[1], bban)) 71 for extra_term in extra_terms: 72 result = ['|'] + result + [extra_term] 73 return result 74 75 def extended_search_expression(args): 76 ''' 77 Extend the search expression in args when appropriate. 78 The expression itself is in reverse polish notation, so recursion 79 is not needed. 80 ''' 81 if not args: 82 return [] 83 84 result = [] 85 if is_term(args[0]) and len(args) > 1: 86 # Classic filter, implicit '&' 87 result += ['&'] 88 89 for arg in args: 90 if is_term(arg): 91 result += extended_filter_term(arg) 92 else: 93 result += arg 94 return result 95 96 # Extend search filter 97 newargs = extended_search_expression(args) 98 99 # Original search 100 return super(ResPartnerBank, self).search( 101 cr, uid, newargs, *rest, **kwargs) 102 [end of account_banking/res_partner_bank.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/account_banking/res_partner_bank.py b/account_banking/res_partner_bank.py --- a/account_banking/res_partner_bank.py +++ b/account_banking/res_partner_bank.py @@ -44,6 +44,27 @@ '''Flag an arg as term or otherwise''' return isinstance(arg, (list, tuple)) and len(arg) == 3 + def search_domestic(account): + """ + Performing raw sql search for full account numbers, + ignoring leading zeroes. + """ + account = (account or '').lstrip('0') + if not account: + return ('id', '=', 0) + query = """ + SELECT id FROM res_partner_bank + WHERE ( + STATE != 'iban' + AND TRIM(LEADING '0' FROM acc_number) = %(account)s) + """ + if 'acc_number_domestic' in self._columns: + query += """ + OR TRIM(LEADING '0' FROM acc_number_domestic) + = %(account)s""" + cr.execute(query, {'account': account}) + return ('id', 'in', [row[0] for row in cr.fetchall()]) + def extended_filter_term(term): ''' Extend the search criteria in term when appropriate. @@ -62,12 +83,11 @@ ids = [row[0] for row in cr.fetchall()] result = [('id', 'in', ids)] - if 'acc_number_domestic' in self._columns: - bban = iban.localized_BBAN - # Prevent empty search filters - if bban: - extra_terms.append( - ('acc_number_domestic', term[1], bban)) + bban = iban.localized_BBAN + if bban: + extra_terms.append(search_domestic(bban)) + else: + result = [search_domestic(term[2])] for extra_term in extra_terms: result = ['|'] + result + [extra_term] return result
{"golden_diff": "diff --git a/account_banking/res_partner_bank.py b/account_banking/res_partner_bank.py\n--- a/account_banking/res_partner_bank.py\n+++ b/account_banking/res_partner_bank.py\n@@ -44,6 +44,27 @@\n '''Flag an arg as term or otherwise'''\n return isinstance(arg, (list, tuple)) and len(arg) == 3\n \n+ def search_domestic(account):\n+ \"\"\"\n+ Performing raw sql search for full account numbers,\n+ ignoring leading zeroes.\n+ \"\"\"\n+ account = (account or '').lstrip('0')\n+ if not account:\n+ return ('id', '=', 0)\n+ query = \"\"\"\n+ SELECT id FROM res_partner_bank\n+ WHERE (\n+ STATE != 'iban'\n+ AND TRIM(LEADING '0' FROM acc_number) = %(account)s)\n+ \"\"\"\n+ if 'acc_number_domestic' in self._columns:\n+ query += \"\"\"\n+ OR TRIM(LEADING '0' FROM acc_number_domestic)\n+ = %(account)s\"\"\"\n+ cr.execute(query, {'account': account})\n+ return ('id', 'in', [row[0] for row in cr.fetchall()])\n+\n def extended_filter_term(term):\n '''\n Extend the search criteria in term when appropriate.\n@@ -62,12 +83,11 @@\n ids = [row[0] for row in cr.fetchall()]\n result = [('id', 'in', ids)]\n \n- if 'acc_number_domestic' in self._columns:\n- bban = iban.localized_BBAN\n- # Prevent empty search filters\n- if bban:\n- extra_terms.append(\n- ('acc_number_domestic', term[1], bban))\n+ bban = iban.localized_BBAN\n+ if bban:\n+ extra_terms.append(search_domestic(bban))\n+ else:\n+ result = [search_domestic(term[2])]\n for extra_term in extra_terms:\n result = ['|'] + result + [extra_term]\n return result\n", "issue": "many transactions not regonized\nAfter importing a dutch bank statements file from the ING, I got the following screen:\n\n![import_log](https://cloud.githubusercontent.com/assets/890128/3571378/ca510fa0-0b5c-11e4-9d32-3ee373c491e4.png)\n\nMany transactions are not processed because of not recognizing the correct bankaccount number. \n\nThe system is complaining about \"Statements found for unknown account 68xxx\" and \"Statements found for account 06xxxxxx, but no default journal was defined.\n\nThis is a little strange, because I have setup \"Default Import Settings for Bank Accounts\"\nin the settings of the accounting section.\n\nI also put a default journal in there.\n\nMy information entered there is the IBAN number of my account, together with the national account (with leading zero). I also tried without the leading zero in the national account, but this doesn't make any difference at all.\n\nWhy is openerp mixing up the number of the bankaccount. (with/without zero) and not matching the national number.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).\n# (C) 2011 - 2014 Therp BV (<http://therp.nl>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nfrom openerp.osv import orm\nfrom openerp.addons.account_banking import sepa\n\n\nclass ResPartnerBank(orm.Model):\n _inherit = 'res.partner.bank'\n\n def online_account_info(\n self, cr, uid, country_code, acc_number, context=None):\n \"\"\"\n API hook for legacy online lookup of account info,\n to be removed in OpenERP 8.0.\n \"\"\"\n return False\n\n def search(self, cr, uid, args, *rest, **kwargs):\n \"\"\"\n When a complete IBAN is searched, also search for its BBAN\n if we have the domestic column. Disregard spaces\n when comparing IBANs.\n \"\"\"\n\n def is_term(arg):\n '''Flag an arg as term or otherwise'''\n return isinstance(arg, (list, tuple)) and len(arg) == 3\n\n def extended_filter_term(term):\n '''\n Extend the search criteria in term when appropriate.\n '''\n result = [term]\n extra_terms = []\n if term[0].lower() == 'acc_number' and term[1] in ('=', '=='):\n iban = sepa.IBAN(term[2])\n if iban.valid:\n # Disregard spaces when comparing IBANs\n cr.execute(\n \"\"\"\n SELECT id FROM res_partner_bank\n WHERE replace(acc_number, ' ', '') = %s\n \"\"\", (term[2].replace(' ', ''),))\n ids = [row[0] for row in cr.fetchall()]\n result = [('id', 'in', ids)]\n\n if 'acc_number_domestic' in self._columns:\n bban = iban.localized_BBAN\n # Prevent empty search filters\n if bban:\n extra_terms.append(\n ('acc_number_domestic', term[1], bban))\n for extra_term in extra_terms:\n result = ['|'] + result + [extra_term]\n return result\n\n def extended_search_expression(args):\n '''\n Extend the search expression in args when appropriate.\n The expression itself is in reverse polish notation, so recursion\n is not needed.\n '''\n if not args:\n return []\n\n result = []\n if is_term(args[0]) and len(args) > 1:\n # Classic filter, implicit '&'\n result += ['&']\n\n for arg in args:\n if is_term(arg):\n result += extended_filter_term(arg)\n else:\n result += arg\n return result\n\n # Extend search filter\n newargs = extended_search_expression(args)\n\n # Original search\n return super(ResPartnerBank, self).search(\n cr, uid, newargs, *rest, **kwargs)\n", "path": "account_banking/res_partner_bank.py"}]}
1,783
453
gh_patches_debug_31740
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2808
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider arby is broken During the global build at 2021-05-26-14-42-23, spider **arby** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/arby.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson)) </issue> <code> [start of locations/spiders/arbys.py] 1 # -*- coding: utf-8 -*- 2 import json 3 import re 4 import scrapy 5 from locations.items import GeojsonPointItem 6 7 8 class ArbysSpider(scrapy.Spider): 9 10 name = "arby" 11 item_attributes = { 'brand': "Arby's", 'brand_wikidata': "Q630866" } 12 allowed_domains = ["locations.arbys.com"] 13 download_delay = 0.2 14 start_urls = ( 15 'https://locations.arbys.com/browse/', 16 ) 17 18 def get_store_info(self, response): 19 data = response.xpath('//script[@type="application/ld+json"]/text()').extract_first() 20 if data: 21 data = json.loads(data)[0] 22 23 properties = { 24 # store name is after the pipe, e.g. Fast Food Drive-Thru 25 # Restaurants | Arby's 8437 26 'ref': data["name"].rsplit("|", 1)[-1].strip(), 27 'name': data["name"], 28 'addr_full': data["address"]["streetAddress"].strip(), 29 'city': data["address"]["addressLocality"].strip(), 30 'state': data["address"]["addressRegion"], 31 'postcode': data["address"]["postalCode"], 32 'phone': data.get("telephone", None), 33 'lat': float(data["geo"]["latitude"]), 34 'lon': float(data["geo"]["longitude"]), 35 'website': response.url, 36 'opening_hours': data["openingHours"], 37 } 38 39 yield GeojsonPointItem(**properties) 40 41 def parse_store(self, response): 42 city_stores = response.xpath('//a[@class="location-name ga-link"]/@href').extract() 43 for city_store in city_stores: 44 yield scrapy.Request( 45 response.urljoin(city_store), 46 callback=self.get_store_info 47 ) 48 49 def parse_state(self, response): 50 51 cities = response.xpath('//a[@class="ga-link"]/@href').extract() 52 for city in cities: 53 yield scrapy.Request( 54 response.urljoin(city), 55 callback=self.parse_store 56 ) 57 58 def parse(self, response): 59 states = response.xpath('//a[@class="ga-link"]/@href').extract() 60 61 for state in states: 62 yield scrapy.Request( 63 response.urljoin(state), 64 callback=self.parse_state 65 ) 66 [end of locations/spiders/arbys.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/arbys.py b/locations/spiders/arbys.py --- a/locations/spiders/arbys.py +++ b/locations/spiders/arbys.py @@ -12,24 +12,31 @@ allowed_domains = ["locations.arbys.com"] download_delay = 0.2 start_urls = ( - 'https://locations.arbys.com/browse/', + 'https://locations.arbys.com/', ) def get_store_info(self, response): data = response.xpath('//script[@type="application/ld+json"]/text()').extract_first() if data: - data = json.loads(data)[0] + try: + data = json.loads(data)[0] + except json.JSONDecodeError: + # Unescaped " on two pages + lines = data.split('\n') + i = 2 + next(i for (i, line) in enumerate(lines) if "mainContentOfPage" in line) + lines[i] = '"text": ""}' + data = '\n'.join(lines) + data = json.loads(data)[0] + properties = { - # store name is after the pipe, e.g. Fast Food Drive-Thru - # Restaurants | Arby's 8437 - 'ref': data["name"].rsplit("|", 1)[-1].strip(), + 'ref': response.css('div.store-id::text').get().split(': ')[-1], 'name': data["name"], 'addr_full': data["address"]["streetAddress"].strip(), 'city': data["address"]["addressLocality"].strip(), 'state': data["address"]["addressRegion"], 'postcode': data["address"]["postalCode"], - 'phone': data.get("telephone", None), + 'phone': data["address"]["telephone"], 'lat': float(data["geo"]["latitude"]), 'lon': float(data["geo"]["longitude"]), 'website': response.url,
{"golden_diff": "diff --git a/locations/spiders/arbys.py b/locations/spiders/arbys.py\n--- a/locations/spiders/arbys.py\n+++ b/locations/spiders/arbys.py\n@@ -12,24 +12,31 @@\n allowed_domains = [\"locations.arbys.com\"]\n download_delay = 0.2\n start_urls = (\n- 'https://locations.arbys.com/browse/',\n+ 'https://locations.arbys.com/',\n )\n \n def get_store_info(self, response):\n data = response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n if data:\n- data = json.loads(data)[0]\n+ try:\n+ data = json.loads(data)[0]\n+ except json.JSONDecodeError:\n+ # Unescaped \" on two pages\n+ lines = data.split('\\n')\n+ i = 2 + next(i for (i, line) in enumerate(lines) if \"mainContentOfPage\" in line)\n+ lines[i] = '\"text\": \"\"}'\n+ data = '\\n'.join(lines)\n+ data = json.loads(data)[0]\n+\n \n properties = {\n- # store name is after the pipe, e.g. Fast Food Drive-Thru\n- # Restaurants | Arby's 8437\n- 'ref': data[\"name\"].rsplit(\"|\", 1)[-1].strip(),\n+ 'ref': response.css('div.store-id::text').get().split(': ')[-1],\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"].strip(),\n 'city': data[\"address\"][\"addressLocality\"].strip(),\n 'state': data[\"address\"][\"addressRegion\"],\n 'postcode': data[\"address\"][\"postalCode\"],\n- 'phone': data.get(\"telephone\", None),\n+ 'phone': data[\"address\"][\"telephone\"],\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n 'website': response.url,\n", "issue": "Spider arby is broken\nDuring the global build at 2021-05-26-14-42-23, spider **arby** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/arby.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\n\nclass ArbysSpider(scrapy.Spider):\n\n name = \"arby\"\n item_attributes = { 'brand': \"Arby's\", 'brand_wikidata': \"Q630866\" }\n allowed_domains = [\"locations.arbys.com\"]\n download_delay = 0.2\n start_urls = (\n 'https://locations.arbys.com/browse/',\n )\n\n def get_store_info(self, response):\n data = response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n if data:\n data = json.loads(data)[0]\n\n properties = {\n # store name is after the pipe, e.g. 
Fast Food Drive-Thru\n # Restaurants | Arby's 8437\n 'ref': data[\"name\"].rsplit(\"|\", 1)[-1].strip(),\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"].strip(),\n 'city': data[\"address\"][\"addressLocality\"].strip(),\n 'state': data[\"address\"][\"addressRegion\"],\n 'postcode': data[\"address\"][\"postalCode\"],\n 'phone': data.get(\"telephone\", None),\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n 'website': response.url,\n 'opening_hours': data[\"openingHours\"],\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_store(self, response):\n city_stores = response.xpath('//a[@class=\"location-name ga-link\"]/@href').extract()\n for city_store in city_stores:\n yield scrapy.Request(\n response.urljoin(city_store),\n callback=self.get_store_info\n )\n\n def parse_state(self, response):\n\n cities = response.xpath('//a[@class=\"ga-link\"]/@href').extract()\n for city in cities:\n yield scrapy.Request(\n response.urljoin(city),\n callback=self.parse_store\n )\n\n def parse(self, response):\n states = response.xpath('//a[@class=\"ga-link\"]/@href').extract()\n\n for state in states:\n yield scrapy.Request(\n response.urljoin(state),\n callback=self.parse_state\n )\n", "path": "locations/spiders/arbys.py"}]}
1,341
447
gh_patches_debug_55589
rasdani/github-patches
git_diff
wagtail__wagtail-8800
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> patternlibrary_override_tags breaks all non-development installations #8665 added a `patternlibrary_override_tags` tag library to wagtail.admin, which depends on the django-pattern-library package listed in our testing_extras dependencies. However, this tag library will be loaded on all environments that have `wagtail.admin` in their INSTALLED_APPS, so any Wagtail installation that doesn't include testing_extras is currently broken. As a non-frontend dev, I don't know what this tag library is for, and there's no documentation for me to find out. Deleting it and uninstalling django-pattern-library doesn't break any tests, so it seems to me that it doesn't really belong in the testing dependencies. (If it really is a testing dependency, I'd suggest that moving patternlibrary_override_tags.py into one of the test apps in wagtail/test/ would let it do whatever it's meant to do without breaking non-testing environments.) </issue> <code> [start of wagtail/admin/templatetags/patternlibrary_override_tags.py] 1 from pattern_library.monkey_utils import override_tag 2 3 from wagtail.admin.templatetags.wagtailadmin_tags import register 4 5 override_tag(register, name="test_page_is_public") 6 [end of wagtail/admin/templatetags/patternlibrary_override_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/admin/templatetags/patternlibrary_override_tags.py b/wagtail/admin/templatetags/patternlibrary_override_tags.py deleted file mode 100644 --- a/wagtail/admin/templatetags/patternlibrary_override_tags.py +++ /dev/null @@ -1,5 +0,0 @@ -from pattern_library.monkey_utils import override_tag - -from wagtail.admin.templatetags.wagtailadmin_tags import register - -override_tag(register, name="test_page_is_public")
{"golden_diff": "diff --git a/wagtail/admin/templatetags/patternlibrary_override_tags.py b/wagtail/admin/templatetags/patternlibrary_override_tags.py\ndeleted file mode 100644\n--- a/wagtail/admin/templatetags/patternlibrary_override_tags.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-from pattern_library.monkey_utils import override_tag\n-\n-from wagtail.admin.templatetags.wagtailadmin_tags import register\n-\n-override_tag(register, name=\"test_page_is_public\")\n", "issue": "patternlibrary_override_tags breaks all non-development installations\n#8665 added a `patternlibrary_override_tags` tag library to wagtail.admin, which depends on the django-pattern-library package listed in our testing_extras dependencies. However, this tag library will be loaded on all environments that have `wagtail.admin` in their INSTALLED_APPS, so any Wagtail installation that doesn't include testing_extras is currently broken.\r\n\r\nAs a non-frontend dev, I don't know what this tag library is for, and there's no documentation for me to find out. Deleting it and uninstalling django-pattern-library doesn't break any tests, so it seems to me that it doesn't really belong in the testing dependencies. (If it really is a testing dependency, I'd suggest that moving patternlibrary_override_tags.py into one of the test apps in wagtail/test/ would let it do whatever it's meant to do without breaking non-testing environments.)\n", "before_files": [{"content": "from pattern_library.monkey_utils import override_tag\n\nfrom wagtail.admin.templatetags.wagtailadmin_tags import register\n\noverride_tag(register, name=\"test_page_is_public\")\n", "path": "wagtail/admin/templatetags/patternlibrary_override_tags.py"}]}
792
122
gh_patches_debug_17682
rasdani/github-patches
git_diff
DataDog__dd-trace-py-3868
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> mypy invalid syntax error with python 2 ### Which version of dd-trace-py are you using? v1.2.0 ### Which version of pip are you using? 22.1.2 ### Which version of the libraries are you using? ``` ddtrace==1.2.0 mypy[python2]==0.961 ``` ### How can we reproduce your problem? 1. Clone https://gist.github.com/majorgreys/720201c4bdd33131e4c231207ef716ce 2. Run `docker run --rm -it (docker build .)` ### What is the result that you get? ``` usr/local/lib/python3.9/site-packages/ddtrace/contrib/redis/asyncio_patch.py:12: error: invalid syntax Found 1 error in 1 file (errors prevented further checking) ``` ### What is the result that you expected? ``` Success: no issues found in 1 source file ``` </issue> <code> [start of ddtrace/contrib/redis/patch.py] 1 import redis 2 3 from ddtrace import config 4 from ddtrace.vendor import wrapt 5 6 from ...internal.utils.formats import stringify_cache_args 7 from ...pin import Pin 8 from ..trace_utils import unwrap 9 from .util import _trace_redis_cmd 10 from .util import _trace_redis_execute_pipeline 11 12 13 config._add("redis", dict(_default_service="redis")) 14 15 16 def patch(): 17 """Patch the instrumented methods 18 19 This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top 20 of Redis and StrictRedis. However, it means that any "import redis.Redis" won't be instrumented. 21 """ 22 if getattr(redis, "_datadog_patch", False): 23 return 24 setattr(redis, "_datadog_patch", True) 25 26 _w = wrapt.wrap_function_wrapper 27 28 if redis.VERSION < (3, 0, 0): 29 _w("redis", "StrictRedis.execute_command", traced_execute_command) 30 _w("redis", "StrictRedis.pipeline", traced_pipeline) 31 _w("redis", "Redis.pipeline", traced_pipeline) 32 _w("redis.client", "BasePipeline.execute", traced_execute_pipeline) 33 _w("redis.client", "BasePipeline.immediate_execute_command", traced_execute_command) 34 else: 35 _w("redis", "Redis.execute_command", traced_execute_command) 36 _w("redis", "Redis.pipeline", traced_pipeline) 37 _w("redis.client", "Pipeline.execute", traced_execute_pipeline) 38 _w("redis.client", "Pipeline.immediate_execute_command", traced_execute_command) 39 if redis.VERSION >= (4, 2, 0): 40 from .asyncio_patch import traced_async_execute_command 41 from .asyncio_patch import traced_async_execute_pipeline 42 43 _w("redis.asyncio.client", "Redis.execute_command", traced_async_execute_command) 44 _w("redis.asyncio.client", "Redis.pipeline", traced_pipeline) 45 _w("redis.asyncio.client", "Pipeline.execute", traced_async_execute_pipeline) 46 _w("redis.asyncio.client", "Pipeline.immediate_execute_command", traced_async_execute_command) 47 Pin(service=None).onto(redis.asyncio.Redis) 48 Pin(service=None).onto(redis.StrictRedis) 49 50 51 def unpatch(): 52 if getattr(redis, "_datadog_patch", False): 53 setattr(redis, "_datadog_patch", False) 54 55 if redis.VERSION < (3, 0, 0): 56 unwrap(redis.StrictRedis, "execute_command") 57 unwrap(redis.StrictRedis, "pipeline") 58 unwrap(redis.Redis, "pipeline") 59 unwrap(redis.client.BasePipeline, "execute") 60 unwrap(redis.client.BasePipeline, "immediate_execute_command") 61 else: 62 unwrap(redis.Redis, "execute_command") 63 unwrap(redis.Redis, "pipeline") 64 unwrap(redis.client.Pipeline, "execute") 65 unwrap(redis.client.Pipeline, "immediate_execute_command") 66 if redis.VERSION >= (4, 2, 0): 67 unwrap(redis.asyncio.client.Redis, "execute_command") 68 unwrap(redis.asyncio.client.Redis, 
"pipeline") 69 unwrap(redis.asyncio.client.Pipeline, "execute") 70 unwrap(redis.asyncio.client.Pipeline, "immediate_execute_command") 71 72 73 # 74 # tracing functions 75 # 76 def traced_execute_command(func, instance, args, kwargs): 77 pin = Pin.get_from(instance) 78 if not pin or not pin.enabled(): 79 return func(*args, **kwargs) 80 81 with _trace_redis_cmd(pin, config.redis, instance, args): 82 return func(*args, **kwargs) 83 84 85 def traced_pipeline(func, instance, args, kwargs): 86 pipeline = func(*args, **kwargs) 87 pin = Pin.get_from(instance) 88 if pin: 89 pin.onto(pipeline) 90 return pipeline 91 92 93 def traced_execute_pipeline(func, instance, args, kwargs): 94 pin = Pin.get_from(instance) 95 if not pin or not pin.enabled(): 96 return func(*args, **kwargs) 97 98 cmds = [stringify_cache_args(c) for c, _ in instance.command_stack] 99 resource = "\n".join(cmds) 100 with _trace_redis_execute_pipeline(pin, config.redis, resource, instance): 101 return func(*args, **kwargs) 102 [end of ddtrace/contrib/redis/patch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/contrib/redis/patch.py b/ddtrace/contrib/redis/patch.py --- a/ddtrace/contrib/redis/patch.py +++ b/ddtrace/contrib/redis/patch.py @@ -1,4 +1,5 @@ import redis +from six import PY3 from ddtrace import config from ddtrace.vendor import wrapt @@ -36,7 +37,8 @@ _w("redis", "Redis.pipeline", traced_pipeline) _w("redis.client", "Pipeline.execute", traced_execute_pipeline) _w("redis.client", "Pipeline.immediate_execute_command", traced_execute_command) - if redis.VERSION >= (4, 2, 0): + # Avoid mypy invalid syntax errors when parsing Python 2 files + if PY3 and redis.VERSION >= (4, 2, 0): from .asyncio_patch import traced_async_execute_command from .asyncio_patch import traced_async_execute_pipeline
{"golden_diff": "diff --git a/ddtrace/contrib/redis/patch.py b/ddtrace/contrib/redis/patch.py\n--- a/ddtrace/contrib/redis/patch.py\n+++ b/ddtrace/contrib/redis/patch.py\n@@ -1,4 +1,5 @@\n import redis\n+from six import PY3\n \n from ddtrace import config\n from ddtrace.vendor import wrapt\n@@ -36,7 +37,8 @@\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"Pipeline.immediate_execute_command\", traced_execute_command)\n- if redis.VERSION >= (4, 2, 0):\n+ # Avoid mypy invalid syntax errors when parsing Python 2 files\n+ if PY3 and redis.VERSION >= (4, 2, 0):\n from .asyncio_patch import traced_async_execute_command\n from .asyncio_patch import traced_async_execute_pipeline\n", "issue": "mypy invalid syntax error with python 2\n### Which version of dd-trace-py are you using?\r\n\r\nv1.2.0\r\n\r\n### Which version of pip are you using?\r\n\r\n22.1.2\r\n\r\n### Which version of the libraries are you using?\r\n\r\n```\r\nddtrace==1.2.0\r\nmypy[python2]==0.961\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\n1. Clone https://gist.github.com/majorgreys/720201c4bdd33131e4c231207ef716ce\r\n2. Run `docker run --rm -it (docker build .)`\r\n\r\n### What is the result that you get?\r\n\r\n```\r\nusr/local/lib/python3.9/site-packages/ddtrace/contrib/redis/asyncio_patch.py:12: error: invalid syntax\r\nFound 1 error in 1 file (errors prevented further checking)\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\n```\r\nSuccess: no issues found in 1 source file\r\n```\r\n\n", "before_files": [{"content": "import redis\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...internal.utils.formats import stringify_cache_args\nfrom ...pin import Pin\nfrom ..trace_utils import unwrap\nfrom .util import _trace_redis_cmd\nfrom .util import _trace_redis_execute_pipeline\n\n\nconfig._add(\"redis\", dict(_default_service=\"redis\"))\n\n\ndef patch():\n \"\"\"Patch the instrumented methods\n\n This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top\n of Redis and StrictRedis. 
However, it means that any \"import redis.Redis\" won't be instrumented.\n \"\"\"\n if getattr(redis, \"_datadog_patch\", False):\n return\n setattr(redis, \"_datadog_patch\", True)\n\n _w = wrapt.wrap_function_wrapper\n\n if redis.VERSION < (3, 0, 0):\n _w(\"redis\", \"StrictRedis.execute_command\", traced_execute_command)\n _w(\"redis\", \"StrictRedis.pipeline\", traced_pipeline)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"BasePipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"BasePipeline.immediate_execute_command\", traced_execute_command)\n else:\n _w(\"redis\", \"Redis.execute_command\", traced_execute_command)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"Pipeline.immediate_execute_command\", traced_execute_command)\n if redis.VERSION >= (4, 2, 0):\n from .asyncio_patch import traced_async_execute_command\n from .asyncio_patch import traced_async_execute_pipeline\n\n _w(\"redis.asyncio.client\", \"Redis.execute_command\", traced_async_execute_command)\n _w(\"redis.asyncio.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.execute\", traced_async_execute_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.immediate_execute_command\", traced_async_execute_command)\n Pin(service=None).onto(redis.asyncio.Redis)\n Pin(service=None).onto(redis.StrictRedis)\n\n\ndef unpatch():\n if getattr(redis, \"_datadog_patch\", False):\n setattr(redis, \"_datadog_patch\", False)\n\n if redis.VERSION < (3, 0, 0):\n unwrap(redis.StrictRedis, \"execute_command\")\n unwrap(redis.StrictRedis, \"pipeline\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.BasePipeline, \"execute\")\n unwrap(redis.client.BasePipeline, \"immediate_execute_command\")\n else:\n unwrap(redis.Redis, \"execute_command\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.Pipeline, \"execute\")\n unwrap(redis.client.Pipeline, \"immediate_execute_command\")\n if redis.VERSION >= (4, 2, 0):\n unwrap(redis.asyncio.client.Redis, \"execute_command\")\n unwrap(redis.asyncio.client.Redis, \"pipeline\")\n unwrap(redis.asyncio.client.Pipeline, \"execute\")\n unwrap(redis.asyncio.client.Pipeline, \"immediate_execute_command\")\n\n\n#\n# tracing functions\n#\ndef traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.redis, instance, args):\n return func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n cmds = [stringify_cache_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.redis, resource, instance):\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/redis/patch.py"}]}
1859
214
gh_patches_debug_14674
rasdani/github-patches
git_diff
beeware__toga-1626
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Android: `paths.app` doesn't work in event handlers **Describe the bug** On Android, `paths.app` returns `/` when called outside of the `startup` method, e.g. in an event handler. This is because the `__main__` module has finished importing, and it no longer exists in `sys.modules` under that name. This doesn't happen on Windows, because it runs the entire main loop within the top level of the `__main__` module. The other desktop platforms are probably the same. But on Android, the main loop is already running and we have to integrate into it, so importing the `__main__` module does not block. From inspection of the code, it looks like iOS is similar, so it might have the same problem. The fallback to `/` makes no sense on Android anyway, so it should be removed. Reported by @kakos-k9 on Discord. **Test app** ```python import toga from toga.style import Pack from toga.style.pack import COLUMN, ROW class Hello(toga.App): def startup(self): button = toga.Button("Print", on_press=self.on_press) self.on_press(button) self.main_window = toga.MainWindow() self.main_window.content = toga.Box( style=Pack(direction=COLUMN), children=[button] ) self.main_window.show() def on_press(self, button): import traceback for name in ["app", "data", "toga"]: print(f"{name}={getattr(self.paths, name)}") def main(): return Hello() ``` **Environment:** - Operating System: Android - Python version: 3.8 - Software versions: - Toga: 9a292f8e26 </issue> <code> [start of src/android/toga_android/paths.py] 1 import sys 2 from pathlib import Path 3 4 import toga 5 from toga import App 6 7 8 class Paths: 9 # Allow instantiating Path object via the factory 10 Path = Path 11 12 @property 13 def __context(self): 14 return App.app._impl.native.getApplicationContext() 15 16 @property 17 def app(self): 18 try: 19 return Path(sys.modules["__main__"].__file__).parent 20 except KeyError: 21 # If we're running in test conditions, 22 # there is no __main__ module. 23 return Path.cwd() 24 except AttributeError: 25 # If we're running at an interactive prompt, 26 # the __main__ module isn't file-based. 27 return Path.cwd() 28 29 @property 30 def data(self): 31 return Path(self.__context.getFilesDir().getPath()) 32 33 @property 34 def cache(self): 35 return Path(self.__context.getCacheDir().getPath()) 36 37 @property 38 def logs(self): 39 return self.data 40 41 @property 42 def toga(self): 43 """Return a path to a Toga resources 44 """ 45 return Path(toga.__file__).parent 46 47 48 paths = Paths() 49 [end of src/android/toga_android/paths.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/android/toga_android/paths.py b/src/android/toga_android/paths.py --- a/src/android/toga_android/paths.py +++ b/src/android/toga_android/paths.py @@ -13,18 +13,13 @@ def __context(self): return App.app._impl.native.getApplicationContext() + def __init__(self): + # On Android, __main__ only exists during app startup, so cache its location now. + self._app = Path(sys.modules["__main__"].__file__).parent + @property def app(self): - try: - return Path(sys.modules["__main__"].__file__).parent - except KeyError: - # If we're running in test conditions, - # there is no __main__ module. - return Path.cwd() - except AttributeError: - # If we're running at an interactive prompt, - # the __main__ module isn't file-based. - return Path.cwd() + return self._app @property def data(self):
{"golden_diff": "diff --git a/src/android/toga_android/paths.py b/src/android/toga_android/paths.py\n--- a/src/android/toga_android/paths.py\n+++ b/src/android/toga_android/paths.py\n@@ -13,18 +13,13 @@\n def __context(self):\n return App.app._impl.native.getApplicationContext()\n \n+ def __init__(self):\n+ # On Android, __main__ only exists during app startup, so cache its location now.\n+ self._app = Path(sys.modules[\"__main__\"].__file__).parent\n+\n @property\n def app(self):\n- try:\n- return Path(sys.modules[\"__main__\"].__file__).parent\n- except KeyError:\n- # If we're running in test conditions,\n- # there is no __main__ module.\n- return Path.cwd()\n- except AttributeError:\n- # If we're running at an interactive prompt,\n- # the __main__ module isn't file-based.\n- return Path.cwd()\n+ return self._app\n \n @property\n def data(self):\n", "issue": "Android: `paths.app` doesn't work in event handlers\n**Describe the bug**\r\n\r\nOn Android, `paths.app` returns `/` when called outside of the `startup` method, e.g. in an event handler. This is because the `__main__` module has finished importing, and it no longer exists in `sys.modules` under that name.\r\n\r\nThis doesn't happen on Windows, because it runs the entire main loop within the top level of the `__main__` module. The other desktop platforms are probably the same.\r\n\r\nBut on Android, the main loop is already running and we have to integrate into it, so importing the `__main__` module does not block. From inspection of the code, it looks like iOS is similar, so it might have the same problem.\r\n\r\nThe fallback to `/` makes no sense on Android anyway, so it should be removed.\r\n\r\nReported by @kakos-k9 on Discord.\r\n\r\n**Test app**\r\n```python\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\nclass Hello(toga.App):\r\n\r\n def startup(self):\r\n button = toga.Button(\"Print\", on_press=self.on_press)\r\n self.on_press(button)\r\n\r\n self.main_window = toga.MainWindow()\r\n self.main_window.content = toga.Box(\r\n style=Pack(direction=COLUMN),\r\n children=[button]\r\n )\r\n self.main_window.show()\r\n\r\n def on_press(self, button):\r\n import traceback\r\n for name in [\"app\", \"data\", \"toga\"]:\r\n print(f\"{name}={getattr(self.paths, name)}\")\r\n\r\n\r\ndef main():\r\n return Hello()\r\n```\r\n\r\n**Environment:**\r\n - Operating System: Android\r\n - Python version: 3.8\r\n - Software versions:\r\n - Toga: 9a292f8e26\r\n\n", "before_files": [{"content": "import sys\nfrom pathlib import Path\n\nimport toga\nfrom toga import App\n\n\nclass Paths:\n # Allow instantiating Path object via the factory\n Path = Path\n\n @property\n def __context(self):\n return App.app._impl.native.getApplicationContext()\n\n @property\n def app(self):\n try:\n return Path(sys.modules[\"__main__\"].__file__).parent\n except KeyError:\n # If we're running in test conditions,\n # there is no __main__ module.\n return Path.cwd()\n except AttributeError:\n # If we're running at an interactive prompt,\n # the __main__ module isn't file-based.\n return Path.cwd()\n\n @property\n def data(self):\n return Path(self.__context.getFilesDir().getPath())\n\n @property\n def cache(self):\n return Path(self.__context.getCacheDir().getPath())\n\n @property\n def logs(self):\n return self.data\n\n @property\n def toga(self):\n \"\"\"Return a path to a Toga resources\n \"\"\"\n return Path(toga.__file__).parent\n\n\npaths = Paths()\n", "path": "src/android/toga_android/paths.py"}]}
1266
239
gh_patches_debug_2178
rasdani/github-patches
git_diff
rucio__rucio-1028
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Move conveyor transfers to third_party_copy_operation Motivation ---------- At the moment read is used for source and write is used for destination, for both third_party_copy should be used. </issue> <code> [start of lib/rucio/vcsversion.py] 1 2 ''' 3 This file is automatically generated; Do not edit it. :) 4 ''' 5 VERSION_INFO = { 6 'final': True, 7 'version': '1.15.3', 8 'branch_nick': 'patch-0-Rucio_1_15_3_preparation', 9 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8', 10 'revno': 6357 11 } 12 [end of lib/rucio/vcsversion.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py --- a/lib/rucio/vcsversion.py +++ b/lib/rucio/vcsversion.py @@ -4,8 +4,8 @@ ''' VERSION_INFO = { 'final': True, - 'version': '1.15.3', - 'branch_nick': 'patch-0-Rucio_1_15_3_preparation', - 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8', - 'revno': 6357 + 'version': '1.15.4', + 'branch_nick': 'patch-0-Rucio_1_15_4_preparation', + 'revision_id': 'c2972be03297dc73a65d35bc9ffab1516efffebb', + 'revno': 6402 }
{"golden_diff": "diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py\n--- a/lib/rucio/vcsversion.py\n+++ b/lib/rucio/vcsversion.py\n@@ -4,8 +4,8 @@\n '''\n VERSION_INFO = {\n 'final': True,\n- 'version': '1.15.3',\n- 'branch_nick': 'patch-0-Rucio_1_15_3_preparation',\n- 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8',\n- 'revno': 6357\n+ 'version': '1.15.4',\n+ 'branch_nick': 'patch-0-Rucio_1_15_4_preparation',\n+ 'revision_id': 'c2972be03297dc73a65d35bc9ffab1516efffebb',\n+ 'revno': 6402\n }\n", "issue": "Move conveyor transfers to third_party_copy_operation\nMotivation\r\n----------\r\nAt the moment read is used for source and write is used for destination, for both third_party_copy should be used.\n", "before_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.15.3',\n 'branch_nick': 'patch-0-Rucio_1_15_3_preparation',\n 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8',\n 'revno': 6357\n}\n", "path": "lib/rucio/vcsversion.py"}]}
710
248
gh_patches_debug_16727
rasdani/github-patches
git_diff
learningequality__kolibri-6191
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> recipients column in coach tools in not accurate ### Observed behavior recipients column always indicates assignment to the entire class, even when that's not true: ![groups](https://user-images.githubusercontent.com/2367265/69926711-644bce80-146a-11ea-8f2b-82ec444fb367.gif) ### Expected behavior recipients column should be accurate ### User-facing consequences confusion ### Errors and logs none observed ### Steps to reproduce create a lesson and assign it to a group ### Context 0.13.0 beta 3 </issue> <code> [start of kolibri/core/lessons/viewsets.py] 1 import json 2 from functools import partial 3 from itertools import chain 4 5 from django.db import connection 6 from django.db.models import CharField 7 from django_filters.rest_framework import DjangoFilterBackend 8 9 from .serializers import LessonSerializer 10 from kolibri.core.api import ValuesViewset 11 from kolibri.core.auth.api import KolibriAuthPermissions 12 from kolibri.core.auth.api import KolibriAuthPermissionsFilter 13 from kolibri.core.lessons.models import Lesson 14 from kolibri.core.lessons.models import LessonAssignment 15 from kolibri.core.query import ArrayAgg 16 from kolibri.core.query import GroupConcat 17 from kolibri.core.query import process_uuid_aggregate 18 19 20 def _ensure_raw_dict(d): 21 if hasattr(d, "dict"): 22 d = d.dict() 23 return dict(d) 24 25 26 class LessonPermissions(KolibriAuthPermissions): 27 # Overrides the default validator to sanitize the Lesson POST Payload 28 # before validation 29 def validator(self, request, view, datum): 30 model = view.get_serializer_class().Meta.model 31 validated_data = view.get_serializer().to_internal_value( 32 _ensure_raw_dict(datum) 33 ) 34 # Cannot have create assignments without creating the Lesson first, 35 # so this doesn't try to validate the Lesson with a non-empty lesson_assignments list 36 validated_data.pop("lesson_assignments") 37 return request.user.can_create(model, validated_data) 38 39 40 def _map_lesson_classroom(item): 41 return { 42 "id": item.pop("collection__id"), 43 "name": item.pop("collection__name"), 44 "parent": item.pop("collection__parent_id"), 45 } 46 47 48 class LessonViewset(ValuesViewset): 49 serializer_class = LessonSerializer 50 filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend) 51 filter_fields = ("collection",) 52 permission_classes = (LessonPermissions,) 53 queryset = Lesson.objects.all().order_by("-date_created") 54 55 values = ( 56 "id", 57 "title", 58 "description", 59 "resources", 60 "is_active", 61 "collection", # classroom 62 "collection__id", 63 "collection__name", 64 "collection__parent_id", 65 "created_by", 66 "assignment_ids", 67 ) 68 69 field_map = { 70 "classroom": _map_lesson_classroom, 71 "resources": lambda x: json.loads(x["resources"]), 72 "assignment_ids": partial(process_uuid_aggregate, key="assignment_ids"), 73 } 74 75 def consolidate(self, items): 76 assignment_ids = [] 77 for item in items: 78 assignment_ids.extend(item["assignment_ids"]) 79 assignments = LessonAssignment.objects.filter(id__in=assignment_ids) 80 if connection.vendor == "postgresql" and ArrayAgg is not None: 81 assignments = assignments.annotate( 82 learner_ids=ArrayAgg("collection__membership__user__id"), 83 ) 84 85 def _process_item(item): 86 item["learner_ids"] = map( 87 lambda x: x.hex, filter(lambda x: x, item["learner_ids"]) 88 ) 89 return item 90 91 else: 92 assignments = assignments.values("id").annotate( 93 
learner_ids=GroupConcat( 94 "collection__membership__user__id", output_field=CharField(), 95 ), 96 ) 97 98 def _process_item(item): 99 item["learner_ids"] = ( 100 item["learner_ids"].split(",") if item["learner_ids"] else [] 101 ) 102 return item 103 104 assignments = { 105 a["id"]: _process_item(a) 106 for a in assignments.values( 107 "id", "collection", "collection__kind", "learner_ids", "assigned_by" 108 ) 109 } 110 for item in items: 111 item_ids = item.pop("assignment_ids") 112 item["lesson_assignments"] = [assignments[a] for a in item_ids] 113 item["learner_ids"] = list( 114 set( 115 chain.from_iterable( 116 a.pop("learner_ids") for a in item["lesson_assignments"] 117 ) 118 ) 119 ) 120 return items 121 122 def annotate_queryset(self, queryset): 123 if connection.vendor == "postgresql" and ArrayAgg is not None: 124 return queryset.annotate(assignment_ids=ArrayAgg("lesson_assignments__id"),) 125 return queryset.values("id").annotate( 126 assignment_ids=GroupConcat( 127 "lesson_assignments__id", output_field=CharField(), 128 ), 129 ) 130 [end of kolibri/core/lessons/viewsets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/core/lessons/viewsets.py b/kolibri/core/lessons/viewsets.py --- a/kolibri/core/lessons/viewsets.py +++ b/kolibri/core/lessons/viewsets.py @@ -4,6 +4,7 @@ from django.db import connection from django.db.models import CharField +from django.db.models import F from django_filters.rest_framework import DjangoFilterBackend from .serializers import LessonSerializer @@ -104,7 +105,11 @@ assignments = { a["id"]: _process_item(a) for a in assignments.values( - "id", "collection", "collection__kind", "learner_ids", "assigned_by" + "id", + "collection", + "learner_ids", + "assigned_by", + collection_kind=F("collection__kind"), ) } for item in items:
{"golden_diff": "diff --git a/kolibri/core/lessons/viewsets.py b/kolibri/core/lessons/viewsets.py\n--- a/kolibri/core/lessons/viewsets.py\n+++ b/kolibri/core/lessons/viewsets.py\n@@ -4,6 +4,7 @@\n \n from django.db import connection\n from django.db.models import CharField\n+from django.db.models import F\n from django_filters.rest_framework import DjangoFilterBackend\n \n from .serializers import LessonSerializer\n@@ -104,7 +105,11 @@\n assignments = {\n a[\"id\"]: _process_item(a)\n for a in assignments.values(\n- \"id\", \"collection\", \"collection__kind\", \"learner_ids\", \"assigned_by\"\n+ \"id\",\n+ \"collection\",\n+ \"learner_ids\",\n+ \"assigned_by\",\n+ collection_kind=F(\"collection__kind\"),\n )\n }\n for item in items:\n", "issue": "recipients column in coach tools in not accurate\n\r\n### Observed behavior\r\n\r\nrecipients column always indicates assignment to the entire class, even when that's not true:\r\n\r\n![groups](https://user-images.githubusercontent.com/2367265/69926711-644bce80-146a-11ea-8f2b-82ec444fb367.gif)\r\n\r\n\r\n### Expected behavior\r\n\r\nrecipients column should be accurate\r\n\r\n### User-facing consequences\r\n\r\nconfusion\r\n\r\n### Errors and logs\r\n\r\nnone observed\r\n\r\n### Steps to reproduce\r\n\r\ncreate a lesson and assign it to a group\r\n\r\n### Context\r\n\r\n0.13.0 beta 3\r\n\n", "before_files": [{"content": "import json\nfrom functools import partial\nfrom itertools import chain\n\nfrom django.db import connection\nfrom django.db.models import CharField\nfrom django_filters.rest_framework import DjangoFilterBackend\n\nfrom .serializers import LessonSerializer\nfrom kolibri.core.api import ValuesViewset\nfrom kolibri.core.auth.api import KolibriAuthPermissions\nfrom kolibri.core.auth.api import KolibriAuthPermissionsFilter\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.lessons.models import LessonAssignment\nfrom kolibri.core.query import ArrayAgg\nfrom kolibri.core.query import GroupConcat\nfrom kolibri.core.query import process_uuid_aggregate\n\n\ndef _ensure_raw_dict(d):\n if hasattr(d, \"dict\"):\n d = d.dict()\n return dict(d)\n\n\nclass LessonPermissions(KolibriAuthPermissions):\n # Overrides the default validator to sanitize the Lesson POST Payload\n # before validation\n def validator(self, request, view, datum):\n model = view.get_serializer_class().Meta.model\n validated_data = view.get_serializer().to_internal_value(\n _ensure_raw_dict(datum)\n )\n # Cannot have create assignments without creating the Lesson first,\n # so this doesn't try to validate the Lesson with a non-empty lesson_assignments list\n validated_data.pop(\"lesson_assignments\")\n return request.user.can_create(model, validated_data)\n\n\ndef _map_lesson_classroom(item):\n return {\n \"id\": item.pop(\"collection__id\"),\n \"name\": item.pop(\"collection__name\"),\n \"parent\": item.pop(\"collection__parent_id\"),\n }\n\n\nclass LessonViewset(ValuesViewset):\n serializer_class = LessonSerializer\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n filter_fields = (\"collection\",)\n permission_classes = (LessonPermissions,)\n queryset = Lesson.objects.all().order_by(\"-date_created\")\n\n values = (\n \"id\",\n \"title\",\n \"description\",\n \"resources\",\n \"is_active\",\n \"collection\", # classroom\n \"collection__id\",\n \"collection__name\",\n \"collection__parent_id\",\n \"created_by\",\n \"assignment_ids\",\n )\n\n field_map = {\n \"classroom\": _map_lesson_classroom,\n \"resources\": lambda x: 
json.loads(x[\"resources\"]),\n \"assignment_ids\": partial(process_uuid_aggregate, key=\"assignment_ids\"),\n }\n\n def consolidate(self, items):\n assignment_ids = []\n for item in items:\n assignment_ids.extend(item[\"assignment_ids\"])\n assignments = LessonAssignment.objects.filter(id__in=assignment_ids)\n if connection.vendor == \"postgresql\" and ArrayAgg is not None:\n assignments = assignments.annotate(\n learner_ids=ArrayAgg(\"collection__membership__user__id\"),\n )\n\n def _process_item(item):\n item[\"learner_ids\"] = map(\n lambda x: x.hex, filter(lambda x: x, item[\"learner_ids\"])\n )\n return item\n\n else:\n assignments = assignments.values(\"id\").annotate(\n learner_ids=GroupConcat(\n \"collection__membership__user__id\", output_field=CharField(),\n ),\n )\n\n def _process_item(item):\n item[\"learner_ids\"] = (\n item[\"learner_ids\"].split(\",\") if item[\"learner_ids\"] else []\n )\n return item\n\n assignments = {\n a[\"id\"]: _process_item(a)\n for a in assignments.values(\n \"id\", \"collection\", \"collection__kind\", \"learner_ids\", \"assigned_by\"\n )\n }\n for item in items:\n item_ids = item.pop(\"assignment_ids\")\n item[\"lesson_assignments\"] = [assignments[a] for a in item_ids]\n item[\"learner_ids\"] = list(\n set(\n chain.from_iterable(\n a.pop(\"learner_ids\") for a in item[\"lesson_assignments\"]\n )\n )\n )\n return items\n\n def annotate_queryset(self, queryset):\n if connection.vendor == \"postgresql\" and ArrayAgg is not None:\n return queryset.annotate(assignment_ids=ArrayAgg(\"lesson_assignments__id\"),)\n return queryset.values(\"id\").annotate(\n assignment_ids=GroupConcat(\n \"lesson_assignments__id\", output_field=CharField(),\n ),\n )\n", "path": "kolibri/core/lessons/viewsets.py"}]}
1894
199
gh_patches_debug_26403
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-2188
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set version for develop branch. According to [PEP 440](https://www.python.org/dev/peps/pep-0440/#developmental-releases), we should set version `X.YaN.devM`. </issue> <code> [start of setup_preprocessing.py] 1 # Copyright 2020 The ElasticDL Authors. All rights reserved. 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from setuptools import find_packages, setup 15 16 with open("elasticdl_preprocessing/requirements.txt") as f: 17 required_deps = f.read().splitlines() 18 19 extras = {} 20 with open("elasticdl_preprocessing/requirements-dev.txt") as f: 21 extras["develop"] = f.read().splitlines() 22 23 setup( 24 name="elasticdl_preprocessing", 25 version="develop", 26 description="A feature preprocessing library.", 27 long_description="This is an extension of the native Keras Preprocessing" 28 " Layers and Feature Column API from TensorFlow. We can develop our model" 29 " using the native high-level API from TensorFlow and our library." 30 " We can train this model using native TensorFlow or ElasticDL.", 31 long_description_content_type="text/markdown", 32 author="Ant Financial", 33 url="https://elasticdl.org", 34 install_requires=required_deps, 35 extras_require=extras, 36 python_requires=">=3.5", 37 packages=find_packages( 38 include=["elasticdl_preprocessing*"], exclude=["*test*"] 39 ), 40 package_data={"": ["requirements.txt"]}, 41 ) 42 [end of setup_preprocessing.py] [start of setup.py] 1 # Copyright 2020 The ElasticDL Authors. All rights reserved. 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 
13 14 from setuptools import find_packages, setup 15 16 with open("elasticdl/requirements.txt") as f: 17 required_deps = f.read().splitlines() 18 required_deps.append("elasticdl_client") 19 required_deps.append("elasticdl_preprocessing") 20 21 extras = {} 22 with open("elasticdl/requirements-dev.txt") as f: 23 extras["develop"] = f.read().splitlines() 24 25 setup( 26 name="elasticdl", 27 version="develop", 28 description="A Kubernetes-native Deep Learning Framework", 29 long_description="ElasticDL is a Kubernetes-native deep learning framework" 30 " built on top of TensorFlow 2.0 that supports" 31 " fault-tolerance and elastic scheduling.", 32 long_description_content_type="text/markdown", 33 author="Ant Financial", 34 url="https://elasticdl.org", 35 install_requires=required_deps, 36 extras_require=extras, 37 python_requires=">=3.5", 38 packages=find_packages( 39 exclude=[ 40 "*test*", 41 "elasticdl_client*", 42 "elasticdl_preprocessing*", 43 "model_zoo*", 44 ] 45 ), 46 package_data={ 47 "": [ 48 "proto/*.proto", 49 "docker/*", 50 "Makefile", 51 "requirements.txt", 52 "go/bin/elasticdl_ps", 53 "go/pkg/kernel/capi/*", 54 ] 55 }, 56 ) 57 [end of setup.py] [start of setup_client.py] 1 # Copyright 2020 The ElasticDL Authors. All rights reserved. 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from setuptools import find_packages, setup 15 16 with open("elasticdl_client/requirements.txt") as f: 17 required_deps = f.read().splitlines() 18 19 setup( 20 name="elasticdl_client", 21 version="develop", 22 description="The client command line tool for ElasticDL.", 23 long_description="ElasticDL Client is the client command line tool for" 24 " ElasticDL. Users can use it to submit distributed ElasticDL jobs to" 25 " a Kubernetes cluster. It also provides an easy way to build and push" 26 " Docker images for distributed ElasticDL jobs.", 27 long_description_content_type="text/markdown", 28 author="Ant Financial", 29 url="https://elasticdl.org", 30 install_requires=required_deps, 31 python_requires=">=3.5", 32 packages=find_packages(include=["elasticdl_client*"]), 33 package_data={"": ["requirements.txt"]}, 34 entry_points={"console_scripts": ["elasticdl=elasticdl_client.main:main"]}, 35 ) 36 [end of setup_client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ setup( name="elasticdl", - version="develop", + version="0.2.0rc3.dev0", description="A Kubernetes-native Deep Learning Framework", long_description="ElasticDL is a Kubernetes-native deep learning framework" " built on top of TensorFlow 2.0 that supports" diff --git a/setup_client.py b/setup_client.py --- a/setup_client.py +++ b/setup_client.py @@ -18,7 +18,7 @@ setup( name="elasticdl_client", - version="develop", + version="0.2.0rc3.dev0", description="The client command line tool for ElasticDL.", long_description="ElasticDL Client is the client command line tool for" " ElasticDL. Users can use it to submit distributed ElasticDL jobs to" diff --git a/setup_preprocessing.py b/setup_preprocessing.py --- a/setup_preprocessing.py +++ b/setup_preprocessing.py @@ -22,7 +22,7 @@ setup( name="elasticdl_preprocessing", - version="develop", + version="0.2.0rc3.dev0", description="A feature preprocessing library.", long_description="This is an extension of the native Keras Preprocessing" " Layers and Feature Column API from TensorFlow. We can develop our model"
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,7 +24,7 @@\n \n setup(\n name=\"elasticdl\",\n- version=\"develop\",\n+ version=\"0.2.0rc3.dev0\",\n description=\"A Kubernetes-native Deep Learning Framework\",\n long_description=\"ElasticDL is a Kubernetes-native deep learning framework\"\n \" built on top of TensorFlow 2.0 that supports\"\ndiff --git a/setup_client.py b/setup_client.py\n--- a/setup_client.py\n+++ b/setup_client.py\n@@ -18,7 +18,7 @@\n \n setup(\n name=\"elasticdl_client\",\n- version=\"develop\",\n+ version=\"0.2.0rc3.dev0\",\n description=\"The client command line tool for ElasticDL.\",\n long_description=\"ElasticDL Client is the client command line tool for\"\n \" ElasticDL. Users can use it to submit distributed ElasticDL jobs to\"\ndiff --git a/setup_preprocessing.py b/setup_preprocessing.py\n--- a/setup_preprocessing.py\n+++ b/setup_preprocessing.py\n@@ -22,7 +22,7 @@\n \n setup(\n name=\"elasticdl_preprocessing\",\n- version=\"develop\",\n+ version=\"0.2.0rc3.dev0\",\n description=\"A feature preprocessing library.\",\n long_description=\"This is an extension of the native Keras Preprocessing\"\n \" Layers and Feature Column API from TensorFlow. We can develop our model\"\n", "issue": "Set version for develop branch.\nAccording to [PEP 440](https://www.python.org/dev/peps/pep-0440/#developmental-releases), we should set version `X.YaN.devM`.\n", "before_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl_preprocessing/requirements.txt\") as f:\n required_deps = f.read().splitlines()\n\nextras = {}\nwith open(\"elasticdl_preprocessing/requirements-dev.txt\") as f:\n extras[\"develop\"] = f.read().splitlines()\n\nsetup(\n name=\"elasticdl_preprocessing\",\n version=\"develop\",\n description=\"A feature preprocessing library.\",\n long_description=\"This is an extension of the native Keras Preprocessing\"\n \" Layers and Feature Column API from TensorFlow. We can develop our model\"\n \" using the native high-level API from TensorFlow and our library.\"\n \" We can train this model using native TensorFlow or ElasticDL.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n extras_require=extras,\n python_requires=\">=3.5\",\n packages=find_packages(\n include=[\"elasticdl_preprocessing*\"], exclude=[\"*test*\"]\n ),\n package_data={\"\": [\"requirements.txt\"]},\n)\n", "path": "setup_preprocessing.py"}, {"content": "# Copyright 2020 The ElasticDL Authors. 
All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl/requirements.txt\") as f:\n required_deps = f.read().splitlines()\nrequired_deps.append(\"elasticdl_client\")\nrequired_deps.append(\"elasticdl_preprocessing\")\n\nextras = {}\nwith open(\"elasticdl/requirements-dev.txt\") as f:\n extras[\"develop\"] = f.read().splitlines()\n\nsetup(\n name=\"elasticdl\",\n version=\"develop\",\n description=\"A Kubernetes-native Deep Learning Framework\",\n long_description=\"ElasticDL is a Kubernetes-native deep learning framework\"\n \" built on top of TensorFlow 2.0 that supports\"\n \" fault-tolerance and elastic scheduling.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n extras_require=extras,\n python_requires=\">=3.5\",\n packages=find_packages(\n exclude=[\n \"*test*\",\n \"elasticdl_client*\",\n \"elasticdl_preprocessing*\",\n \"model_zoo*\",\n ]\n ),\n package_data={\n \"\": [\n \"proto/*.proto\",\n \"docker/*\",\n \"Makefile\",\n \"requirements.txt\",\n \"go/bin/elasticdl_ps\",\n \"go/pkg/kernel/capi/*\",\n ]\n },\n)\n", "path": "setup.py"}, {"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl_client/requirements.txt\") as f:\n required_deps = f.read().splitlines()\n\nsetup(\n name=\"elasticdl_client\",\n version=\"develop\",\n description=\"The client command line tool for ElasticDL.\",\n long_description=\"ElasticDL Client is the client command line tool for\"\n \" ElasticDL. Users can use it to submit distributed ElasticDL jobs to\"\n \" a Kubernetes cluster. It also provides an easy way to build and push\"\n \" Docker images for distributed ElasticDL jobs.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n python_requires=\">=3.5\",\n packages=find_packages(include=[\"elasticdl_client*\"]),\n package_data={\"\": [\"requirements.txt\"]},\n entry_points={\"console_scripts\": [\"elasticdl=elasticdl_client.main:main\"]},\n)\n", "path": "setup_client.py"}]}
1962
323
gh_patches_debug_1874
rasdani/github-patches
git_diff
cloudtools__troposphere-120
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update metadata to include property keys Within the CloudFormation metadata object for auto scaling launch configurations, it'd be nice to be able to rely on the validators within the template to sanity check the allowed keys within the 'config' dictionary: ``` "Resources": { "MyInstance": { "Type": "AWS::EC2::Instance", "Metadata" : { "AWS::CloudFormation::Init" : { "config" : { "packages" : { : }, "groups" : { : }, "users" : { : }, "sources" : { : }, "files" : { : }, "commands" : { : }, "services" : { : } } } }, "Properties": { : } } } ``` Currently the implementation of troposphere.cloudformation.InitConfig only contains one key which is 'files' </issue> <code> [start of troposphere/cloudformation.py] 1 # Copyright (c) 2013, Mark Peek <[email protected]> 2 # All rights reserved. 3 # 4 # See LICENSE file for full license. 5 6 from . import AWSHelperFn, AWSObject, AWSProperty, Ref 7 from .validators import integer 8 9 10 class Stack(AWSObject): 11 type = "AWS::CloudFormation::Stack" 12 13 props = { 14 'TemplateURL': (basestring, True), 15 'TimeoutInMinutes': (integer, False), 16 'Parameters': (dict, False), 17 } 18 19 20 class WaitCondition(AWSObject): 21 type = "AWS::CloudFormation::WaitCondition" 22 23 props = { 24 'Count': (integer, False), 25 'Handle': (Ref, True), 26 'Timeout': (integer, True), 27 } 28 29 30 class WaitConditionHandle(AWSObject): 31 type = "AWS::CloudFormation::WaitConditionHandle" 32 33 props = {} 34 35 36 class InitFileContext(AWSHelperFn): 37 def __init__(self, data): 38 self.data = data 39 40 def JSONrepr(self): 41 return self.data 42 43 44 class InitFile(AWSProperty): 45 props = { 46 'content': (basestring, True), 47 'mode': (basestring, False), 48 'owner': (basestring, False), 49 'group': (basestring, False), 50 'context': (InitFileContext, False) 51 } 52 53 54 class InitFiles(AWSHelperFn): 55 def __init__(self, data): 56 self.validate(data) 57 self.data = data 58 59 def validate(self, data): 60 for k in data: 61 if not isinstance(data[k], InitFile): 62 raise ValueError("File '" + k + "' must be of type InitFile") 63 64 def JSONrepr(self): 65 return self.data 66 67 68 class InitConfig(AWSProperty): 69 props = { 70 'files': (dict, False) 71 } 72 73 74 class Init(AWSHelperFn): 75 def __init__(self, data): 76 self.validate(data) 77 self.data = {"AWS::CloudFormation::Init": data} 78 79 def validate(self, data): 80 if 'config' not in data: 81 raise ValueError('config property is required') 82 if not isinstance(data['config'], InitConfig): 83 raise ValueError( 84 'config property must be of type autoscaling.InitConfig' 85 ) 86 87 def JSONrepr(self): 88 return self.data 89 [end of troposphere/cloudformation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/troposphere/cloudformation.py b/troposphere/cloudformation.py --- a/troposphere/cloudformation.py +++ b/troposphere/cloudformation.py @@ -67,7 +67,13 @@ class InitConfig(AWSProperty): props = { - 'files': (dict, False) + 'groups': (dict, False), + 'users': (dict, False), + 'sources': (dict, False), + 'packages': (dict, False), + 'files': (dict, False), + 'commands': (dict, False), + 'services': (dict, False) }
{"golden_diff": "diff --git a/troposphere/cloudformation.py b/troposphere/cloudformation.py\n--- a/troposphere/cloudformation.py\n+++ b/troposphere/cloudformation.py\n@@ -67,7 +67,13 @@\n \n class InitConfig(AWSProperty):\n props = {\n- 'files': (dict, False)\n+ 'groups': (dict, False),\n+ 'users': (dict, False),\n+ 'sources': (dict, False),\n+ 'packages': (dict, False),\n+ 'files': (dict, False),\n+ 'commands': (dict, False),\n+ 'services': (dict, False)\n }\n", "issue": "Update metadata to include property keys\nWithin the CloudFormation metadata object for auto scaling launch configurations, it'd be nice to be able to rely on the validators within the template to sanity check the allowed keys within the 'config' dictionary: \n\n```\n\"Resources\": {\n \"MyInstance\": {\n \"Type\": \"AWS::EC2::Instance\",\n \"Metadata\" : {\n \"AWS::CloudFormation::Init\" : {\n \"config\" : {\n \"packages\" : {\n :\n },\n \"groups\" : {\n :\n },\n \"users\" : {\n :\n },\n \"sources\" : {\n :\n },\n \"files\" : {\n :\n },\n \"commands\" : {\n :\n },\n \"services\" : {\n :\n }\n }\n }\n },\n \"Properties\": {\n :\n }\n }\n}\n```\n\nCurrently the implementation of troposphere.cloudformation.InitConfig only contains one key which is 'files'\n\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import integer\n\n\nclass Stack(AWSObject):\n type = \"AWS::CloudFormation::Stack\"\n\n props = {\n 'TemplateURL': (basestring, True),\n 'TimeoutInMinutes': (integer, False),\n 'Parameters': (dict, False),\n }\n\n\nclass WaitCondition(AWSObject):\n type = \"AWS::CloudFormation::WaitCondition\"\n\n props = {\n 'Count': (integer, False),\n 'Handle': (Ref, True),\n 'Timeout': (integer, True),\n }\n\n\nclass WaitConditionHandle(AWSObject):\n type = \"AWS::CloudFormation::WaitConditionHandle\"\n\n props = {}\n\n\nclass InitFileContext(AWSHelperFn):\n def __init__(self, data):\n self.data = data\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitFile(AWSProperty):\n props = {\n 'content': (basestring, True),\n 'mode': (basestring, False),\n 'owner': (basestring, False),\n 'group': (basestring, False),\n 'context': (InitFileContext, False)\n }\n\n\nclass InitFiles(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = data\n\n def validate(self, data):\n for k in data:\n if not isinstance(data[k], InitFile):\n raise ValueError(\"File '\" + k + \"' must be of type InitFile\")\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitConfig(AWSProperty):\n props = {\n 'files': (dict, False)\n }\n\n\nclass Init(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = {\"AWS::CloudFormation::Init\": data}\n\n def validate(self, data):\n if 'config' not in data:\n raise ValueError('config property is required')\n if not isinstance(data['config'], InitConfig):\n raise ValueError(\n 'config property must be of type autoscaling.InitConfig'\n )\n\n def JSONrepr(self):\n return self.data\n", "path": "troposphere/cloudformation.py"}]}
1,420
144
gh_patches_debug_21385
rasdani/github-patches
git_diff
joke2k__faker-529
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Decrease binary dummy length
`fake.binary(length=1048576)` generate too huge binary data but it is useless at docs build time.

I tested two case, 1048576 and 1024.

``` console
(faker) [item4@item4-mbp docs]$ echo "1048576"
1048576
(faker) [item4@item4-mbp docs]$ time make html
sphinx-build -b html -d _build/doctrees . _build/html
Running Sphinx v1.4.8
loading pickled environment... done
building [mo]: targets for 0 po files that are out of date
building [html]: targets for 61 source files that are out of date
updating environment: 0 added, 61 changed, 0 removed
reading sources... [100%] providers/faker.providers.user_agent
looking for now-outdated files... none found
pickling environment... done
checking consistency... done
preparing documents... done
writing output... [100%] providers/faker.providers.user_agent
generating indices... genindex
writing additional pages... search
copying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist
done
copying extra files... done
dumping search index in English (code: en) ... done
dumping object inventory... done
build succeeded, 1 warning.

Build finished. The HTML pages are in _build/html.

real 3m24.875s
user 3m3.222s
sys 0m10.986s
(faker) [item4@item4-mbp docs]$ echo "1024"
1024
(faker) [item4@item4-mbp docs]$ time make html
sphinx-build -b html -d _build/doctrees . _build/html
Running Sphinx v1.4.8
loading pickled environment... done
building [mo]: targets for 0 po files that are out of date
building [html]: targets for 61 source files that are out of date
updating environment: 0 added, 61 changed, 0 removed
reading sources... [100%] providers/faker.providers.user_agent
looking for now-outdated files... none found
pickling environment... done
checking consistency... done
preparing documents... done
writing output... [100%] providers/faker.providers.user_agent
generating indices... genindex
writing additional pages... search
copying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist
done
copying extra files... done
dumping search index in English (code: en) ... done
dumping object inventory... done
build succeeded, 1 warning.

Build finished. The HTML pages are in _build/html.

real 0m46.086s
user 0m43.772s
sys 0m1.479s
```

Does docs really need 1048576 length binary?
</issue> <code> [start of faker/documentor.py] 1 # coding=utf-8 2 3 from __future__ import unicode_literals 4 5 import inspect 6 7 from faker import utils 8 9 10 class Documentor(object): 11 12 def __init__(self, generator): 13 """ 14 :param generator: a localized Generator with providers filled, 15 for which to write the documentation 16 :type generator: faker.Generator() 17 """ 18 self.generator = generator 19 self.max_name_len = 0 20 self.already_generated = [] 21 22 def get_formatters(self, locale=None, excludes=None, **kwargs): 23 24 self.max_name_len = 0 25 self.already_generated = [] if excludes is None else excludes[:] 26 formatters = [] 27 providers = self.generator.get_providers() 28 for provider in providers[::-1]: # reverse 29 if locale and provider.__lang__ != locale: 30 continue 31 formatters.append( 32 (provider, self.get_provider_formatters(provider, **kwargs)) 33 ) 34 return formatters 35 36 def get_provider_formatters(self, provider, prefix='fake.', 37 with_args=True, with_defaults=True): 38 39 formatters = {} 40 41 for name, method in inspect.getmembers(provider, inspect.ismethod): 42 43 # skip 'private' method and inherited methods 44 if name.startswith('_') or name in self.already_generated: 45 continue 46 47 arguments = [] 48 49 if with_args: 50 # retrieve all parameter 51 argspec = inspect.getargspec(method) 52 53 lst = [x for x in argspec.args if x not in ['self', 'cls']] 54 for i, arg in enumerate(lst): 55 56 if argspec.defaults and with_defaults: 57 58 try: 59 default = argspec.defaults[i] 60 if utils.is_string(default): 61 default = utils.quote(default) 62 else: 63 # TODO check default type 64 default = "{0}".format(default) 65 66 arg = "{0}={1}".format(arg, default) 67 68 except IndexError: 69 pass 70 71 arguments.append(arg) 72 if with_args == 'first': 73 break 74 75 if with_args != 'first': 76 if argspec.varargs: 77 arguments.append('*' + argspec.varargs) 78 if argspec.keywords: 79 arguments.append('**' + argspec.keywords) 80 81 # build fake method signature 82 signature = "{0}{1}({2})".format(prefix, 83 name, 84 ", ".join(arguments)) 85 86 # make a fake example 87 example = self.generator.format(name) 88 89 formatters[signature] = example 90 91 self.max_name_len = max(self.max_name_len, len(signature)) 92 self.already_generated.append(name) 93 94 return formatters 95 96 @staticmethod 97 def get_provider_name(provider_class): 98 return provider_class.__provider__ 99 [end of faker/documentor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/documentor.py b/faker/documentor.py --- a/faker/documentor.py +++ b/faker/documentor.py @@ -39,12 +39,16 @@ formatters = {} for name, method in inspect.getmembers(provider, inspect.ismethod): - # skip 'private' method and inherited methods if name.startswith('_') or name in self.already_generated: continue arguments = [] + faker_args = [] + faker_kwargs = {} + + if name == 'binary': + faker_kwargs['length'] = 1024 if with_args: # retrieve all parameter @@ -84,7 +88,7 @@ ", ".join(arguments)) # make a fake example - example = self.generator.format(name) + example = self.generator.format(name, *faker_args, **faker_kwargs) formatters[signature] = example
{"golden_diff": "diff --git a/faker/documentor.py b/faker/documentor.py\n--- a/faker/documentor.py\n+++ b/faker/documentor.py\n@@ -39,12 +39,16 @@\n formatters = {}\n \n for name, method in inspect.getmembers(provider, inspect.ismethod):\n-\n # skip 'private' method and inherited methods\n if name.startswith('_') or name in self.already_generated:\n continue\n \n arguments = []\n+ faker_args = []\n+ faker_kwargs = {}\n+\n+ if name == 'binary':\n+ faker_kwargs['length'] = 1024\n \n if with_args:\n # retrieve all parameter\n@@ -84,7 +88,7 @@\n \", \".join(arguments))\n \n # make a fake example\n- example = self.generator.format(name)\n+ example = self.generator.format(name, *faker_args, **faker_kwargs)\n \n formatters[signature] = example\n", "issue": "Decrease binary dummy length\n`fake.binary(length=1048576)` generate too huge binary data but it is useless at docs build time.\n\nI tested two case, 1048576 and 1024.\n\n``` console\n(faker) [item4@item4-mbp docs]$ echo \"1048576\"\n1048576\n(faker) [item4@item4-mbp docs]$ time make html\nsphinx-build -b html -d _build/doctrees . _build/html\nRunning Sphinx v1.4.8\nloading pickled environment... done\nbuilding [mo]: targets for 0 po files that are out of date\nbuilding [html]: targets for 61 source files that are out of date\nupdating environment: 0 added, 61 changed, 0 removed\nreading sources... [100%] providers/faker.providers.user_agent\nlooking for now-outdated files... none found\npickling environment... done\nchecking consistency... done\npreparing documents... done\nwriting output... [100%] providers/faker.providers.user_agent\ngenerating indices... genindex\nwriting additional pages... search\ncopying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist\ndone\ncopying extra files... done\ndumping search index in English (code: en) ... done\ndumping object inventory... done\nbuild succeeded, 1 warning.\n\nBuild finished. The HTML pages are in _build/html.\n\nreal 3m24.875s\nuser 3m3.222s\nsys 0m10.986s\n(faker) [item4@item4-mbp docs]$ echo \"1024\"\n1024\n(faker) [item4@item4-mbp docs]$ time make html\nsphinx-build -b html -d _build/doctrees . _build/html\nRunning Sphinx v1.4.8\nloading pickled environment... done\nbuilding [mo]: targets for 0 po files that are out of date\nbuilding [html]: targets for 61 source files that are out of date\nupdating environment: 0 added, 61 changed, 0 removed\nreading sources... [100%] providers/faker.providers.user_agent\nlooking for now-outdated files... none found\npickling environment... done\nchecking consistency... done\npreparing documents... done\nwriting output... [100%] providers/faker.providers.user_agent\ngenerating indices... genindex\nwriting additional pages... search\ncopying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist\ndone\ncopying extra files... done\ndumping search index in English (code: en) ... done\ndumping object inventory... done\nbuild succeeded, 1 warning.\n\nBuild finished. 
The HTML pages are in _build/html.\n\nreal 0m46.086s\nuser 0m43.772s\nsys 0m1.479s\n```\n\nDoes docs really need 1048576 length binary?\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport inspect\n\nfrom faker import utils\n\n\nclass Documentor(object):\n\n def __init__(self, generator):\n \"\"\"\n :param generator: a localized Generator with providers filled,\n for which to write the documentation\n :type generator: faker.Generator()\n \"\"\"\n self.generator = generator\n self.max_name_len = 0\n self.already_generated = []\n\n def get_formatters(self, locale=None, excludes=None, **kwargs):\n\n self.max_name_len = 0\n self.already_generated = [] if excludes is None else excludes[:]\n formatters = []\n providers = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n if locale and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs))\n )\n return formatters\n\n def get_provider_formatters(self, provider, prefix='fake.',\n with_args=True, with_defaults=True):\n\n formatters = {}\n\n for name, method in inspect.getmembers(provider, inspect.ismethod):\n\n # skip 'private' method and inherited methods\n if name.startswith('_') or name in self.already_generated:\n continue\n\n arguments = []\n\n if with_args:\n # retrieve all parameter\n argspec = inspect.getargspec(method)\n\n lst = [x for x in argspec.args if x not in ['self', 'cls']]\n for i, arg in enumerate(lst):\n\n if argspec.defaults and with_defaults:\n\n try:\n default = argspec.defaults[i]\n if utils.is_string(default):\n default = utils.quote(default)\n else:\n # TODO check default type\n default = \"{0}\".format(default)\n\n arg = \"{0}={1}\".format(arg, default)\n\n except IndexError:\n pass\n\n arguments.append(arg)\n if with_args == 'first':\n break\n\n if with_args != 'first':\n if argspec.varargs:\n arguments.append('*' + argspec.varargs)\n if argspec.keywords:\n arguments.append('**' + argspec.keywords)\n\n # build fake method signature\n signature = \"{0}{1}({2})\".format(prefix,\n name,\n \", \".join(arguments))\n\n # make a fake example\n example = self.generator.format(name)\n\n formatters[signature] = example\n\n self.max_name_len = max(self.max_name_len, len(signature))\n self.already_generated.append(name)\n\n return formatters\n\n @staticmethod\n def get_provider_name(provider_class):\n return provider_class.__provider__\n", "path": "faker/documentor.py"}]}
2,008
209
gh_patches_debug_64631
rasdani/github-patches
git_diff
quantumlib__Cirq-2370
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Escape unicode characters when producing latex Currently, the qcircuit to latex code passes through unicode characters like ↺ ↻. LaTeX may not be happy about this. We should replace the unicode characters with some that LaTeX is happy with. Or perhaps throw an error? </issue> <code> [start of cirq/contrib/qcircuit/qcircuit_pdf.py] 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # coverage: ignore 16 17 import errno 18 import os 19 20 from pylatex import Document, NoEscape, Package 21 22 from cirq import circuits 23 from cirq.contrib.qcircuit.qcircuit_diagram import ( 24 circuit_to_latex_using_qcircuit) 25 26 27 def circuit_to_pdf_using_qcircuit_via_tex(circuit: circuits.Circuit, 28 filepath: str, 29 pdf_kwargs=None, 30 qcircuit_kwargs=None, 31 clean_ext=('dvi', 'ps'), 32 documentclass='article'): 33 """Compiles the QCircuit-based latex diagram of the given circuit. 34 35 Args: 36 circuit: The circuit to produce a pdf of. 37 filepath: Where to output the pdf. 38 pdf_kwargs: The arguments to pass to generate_pdf. 39 qcircuit_kwargs: The arguments to pass to 40 circuit_to_latex_using_qcircuit. 41 clean_ext: The file extensions to clean up after compilation. By 42 default, latexmk is used with the '-pdfps' flag, which produces 43 intermediary dvi and ps files. 44 documentclass: The documentclass of the latex file. 45 """ 46 pdf_kwargs = {'compiler': 'latexmk', 'compiler_args': ['-pdfps'], 47 **({} if pdf_kwargs is None else pdf_kwargs)} 48 qcircuit_kwargs = {} if qcircuit_kwargs is None else qcircuit_kwargs 49 tex = circuit_to_latex_using_qcircuit(circuit, **qcircuit_kwargs) 50 doc = Document(documentclass=documentclass, document_options='dvips') 51 doc.packages.append(Package('amsmath')) 52 doc.packages.append(Package('qcircuit')) 53 doc.append(NoEscape(tex)) 54 doc.generate_pdf(filepath, **pdf_kwargs) 55 for ext in clean_ext: 56 try: 57 os.remove(filepath + '.' + ext) 58 except (OSError, IOError) as e: 59 if e.errno != errno.ENOENT: 60 raise 61 [end of cirq/contrib/qcircuit/qcircuit_pdf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cirq/contrib/qcircuit/qcircuit_pdf.py b/cirq/contrib/qcircuit/qcircuit_pdf.py --- a/cirq/contrib/qcircuit/qcircuit_pdf.py +++ b/cirq/contrib/qcircuit/qcircuit_pdf.py @@ -50,6 +50,7 @@ doc = Document(documentclass=documentclass, document_options='dvips') doc.packages.append(Package('amsmath')) doc.packages.append(Package('qcircuit')) + doc.preamble.append(Package('inputenc', options=['utf8'])) doc.append(NoEscape(tex)) doc.generate_pdf(filepath, **pdf_kwargs) for ext in clean_ext:
{"golden_diff": "diff --git a/cirq/contrib/qcircuit/qcircuit_pdf.py b/cirq/contrib/qcircuit/qcircuit_pdf.py\n--- a/cirq/contrib/qcircuit/qcircuit_pdf.py\n+++ b/cirq/contrib/qcircuit/qcircuit_pdf.py\n@@ -50,6 +50,7 @@\n doc = Document(documentclass=documentclass, document_options='dvips')\n doc.packages.append(Package('amsmath'))\n doc.packages.append(Package('qcircuit'))\n+ doc.preamble.append(Package('inputenc', options=['utf8']))\n doc.append(NoEscape(tex))\n doc.generate_pdf(filepath, **pdf_kwargs)\n for ext in clean_ext:\n", "issue": "Escape unicode characters when producing latex\nCurrently, the qcircuit to latex code passes through unicode characters like \u21ba \u21bb. LaTeX may not be happy about this. We should replace the unicode characters with some that LaTeX is happy with. Or perhaps throw an error?\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coverage: ignore\n\nimport errno\nimport os\n\nfrom pylatex import Document, NoEscape, Package\n\nfrom cirq import circuits\nfrom cirq.contrib.qcircuit.qcircuit_diagram import (\n circuit_to_latex_using_qcircuit)\n\n\ndef circuit_to_pdf_using_qcircuit_via_tex(circuit: circuits.Circuit,\n filepath: str,\n pdf_kwargs=None,\n qcircuit_kwargs=None,\n clean_ext=('dvi', 'ps'),\n documentclass='article'):\n \"\"\"Compiles the QCircuit-based latex diagram of the given circuit.\n\n Args:\n circuit: The circuit to produce a pdf of.\n filepath: Where to output the pdf.\n pdf_kwargs: The arguments to pass to generate_pdf.\n qcircuit_kwargs: The arguments to pass to\n circuit_to_latex_using_qcircuit.\n clean_ext: The file extensions to clean up after compilation. By\n default, latexmk is used with the '-pdfps' flag, which produces\n intermediary dvi and ps files.\n documentclass: The documentclass of the latex file.\n \"\"\"\n pdf_kwargs = {'compiler': 'latexmk', 'compiler_args': ['-pdfps'],\n **({} if pdf_kwargs is None else pdf_kwargs)}\n qcircuit_kwargs = {} if qcircuit_kwargs is None else qcircuit_kwargs\n tex = circuit_to_latex_using_qcircuit(circuit, **qcircuit_kwargs)\n doc = Document(documentclass=documentclass, document_options='dvips')\n doc.packages.append(Package('amsmath'))\n doc.packages.append(Package('qcircuit'))\n doc.append(NoEscape(tex))\n doc.generate_pdf(filepath, **pdf_kwargs)\n for ext in clean_ext:\n try:\n os.remove(filepath + '.' + ext)\n except (OSError, IOError) as e:\n if e.errno != errno.ENOENT:\n raise\n", "path": "cirq/contrib/qcircuit/qcircuit_pdf.py"}]}
1,251
153
gh_patches_debug_38801
rasdani/github-patches
git_diff
TheAlgorithms__Python-9083
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Returning index instead of boolean when knuth_morris_pratt matches. In [knuth morris pratt algorithm](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py), I think it's better to return the "index" of the matched substring in case of success, and `-1` in case of failure, so that it would be consistent with the `str.find()` in Python. `True` and `False` aren't that helpful. </issue> <code> [start of strings/knuth_morris_pratt.py] 1 from __future__ import annotations 2 3 4 def kmp(pattern: str, text: str) -> bool: 5 """ 6 The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text 7 with complexity O(n + m) 8 9 1) Preprocess pattern to identify any suffixes that are identical to prefixes 10 11 This tells us where to continue from if we get a mismatch between a character 12 in our pattern and the text. 13 14 2) Step through the text one character at a time and compare it to a character in 15 the pattern updating our location within the pattern if necessary 16 17 """ 18 19 # 1) Construct the failure array 20 failure = get_failure_array(pattern) 21 22 # 2) Step through text searching for pattern 23 i, j = 0, 0 # index into text, pattern 24 while i < len(text): 25 if pattern[j] == text[i]: 26 if j == (len(pattern) - 1): 27 return True 28 j += 1 29 30 # if this is a prefix in our pattern 31 # just go back far enough to continue 32 elif j > 0: 33 j = failure[j - 1] 34 continue 35 i += 1 36 return False 37 38 39 def get_failure_array(pattern: str) -> list[int]: 40 """ 41 Calculates the new index we should go to if we fail a comparison 42 :param pattern: 43 :return: 44 """ 45 failure = [0] 46 i = 0 47 j = 1 48 while j < len(pattern): 49 if pattern[i] == pattern[j]: 50 i += 1 51 elif i > 0: 52 i = failure[i - 1] 53 continue 54 j += 1 55 failure.append(i) 56 return failure 57 58 59 if __name__ == "__main__": 60 # Test 1) 61 pattern = "abc1abc12" 62 text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" 63 text2 = "alskfjaldsk23adsfabcabc" 64 assert kmp(pattern, text1) and not kmp(pattern, text2) 65 66 # Test 2) 67 pattern = "ABABX" 68 text = "ABABZABABYABABX" 69 assert kmp(pattern, text) 70 71 # Test 3) 72 pattern = "AAAB" 73 text = "ABAAAAAB" 74 assert kmp(pattern, text) 75 76 # Test 4) 77 pattern = "abcdabcy" 78 text = "abcxabcdabxabcdabcdabcy" 79 assert kmp(pattern, text) 80 81 # Test 5) 82 pattern = "aabaabaaa" 83 assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2] 84 [end of strings/knuth_morris_pratt.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strings/knuth_morris_pratt.py b/strings/knuth_morris_pratt.py --- a/strings/knuth_morris_pratt.py +++ b/strings/knuth_morris_pratt.py @@ -1,7 +1,7 @@ from __future__ import annotations -def kmp(pattern: str, text: str) -> bool: +def knuth_morris_pratt(text: str, pattern: str) -> int: """ The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text with complexity O(n + m) @@ -14,6 +14,12 @@ 2) Step through the text one character at a time and compare it to a character in the pattern updating our location within the pattern if necessary + >>> kmp = "knuth_morris_pratt" + >>> all( + ... knuth_morris_pratt(kmp, s) == kmp.find(s) + ... for s in ("kn", "h_m", "rr", "tt", "not there") + ... ) + True """ # 1) Construct the failure array @@ -24,7 +30,7 @@ while i < len(text): if pattern[j] == text[i]: if j == (len(pattern) - 1): - return True + return i - j j += 1 # if this is a prefix in our pattern @@ -33,7 +39,7 @@ j = failure[j - 1] continue i += 1 - return False + return -1 def get_failure_array(pattern: str) -> list[int]: @@ -57,27 +63,38 @@ if __name__ == "__main__": + import doctest + + doctest.testmod() + # Test 1) pattern = "abc1abc12" text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" text2 = "alskfjaldsk23adsfabcabc" - assert kmp(pattern, text1) and not kmp(pattern, text2) + assert knuth_morris_pratt(text1, pattern) and knuth_morris_pratt(text2, pattern) # Test 2) pattern = "ABABX" text = "ABABZABABYABABX" - assert kmp(pattern, text) + assert knuth_morris_pratt(text, pattern) # Test 3) pattern = "AAAB" text = "ABAAAAAB" - assert kmp(pattern, text) + assert knuth_morris_pratt(text, pattern) # Test 4) pattern = "abcdabcy" text = "abcxabcdabxabcdabcdabcy" - assert kmp(pattern, text) + assert knuth_morris_pratt(text, pattern) + + # Test 5) -> Doctests + kmp = "knuth_morris_pratt" + assert all( + knuth_morris_pratt(kmp, s) == kmp.find(s) + for s in ("kn", "h_m", "rr", "tt", "not there") + ) - # Test 5) + # Test 6) pattern = "aabaabaaa" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
{"golden_diff": "diff --git a/strings/knuth_morris_pratt.py b/strings/knuth_morris_pratt.py\n--- a/strings/knuth_morris_pratt.py\n+++ b/strings/knuth_morris_pratt.py\n@@ -1,7 +1,7 @@\n from __future__ import annotations\n \n \n-def kmp(pattern: str, text: str) -> bool:\n+def knuth_morris_pratt(text: str, pattern: str) -> int:\n \"\"\"\n The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text\n with complexity O(n + m)\n@@ -14,6 +14,12 @@\n 2) Step through the text one character at a time and compare it to a character in\n the pattern updating our location within the pattern if necessary\n \n+ >>> kmp = \"knuth_morris_pratt\"\n+ >>> all(\n+ ... knuth_morris_pratt(kmp, s) == kmp.find(s)\n+ ... for s in (\"kn\", \"h_m\", \"rr\", \"tt\", \"not there\")\n+ ... )\n+ True\n \"\"\"\n \n # 1) Construct the failure array\n@@ -24,7 +30,7 @@\n while i < len(text):\n if pattern[j] == text[i]:\n if j == (len(pattern) - 1):\n- return True\n+ return i - j\n j += 1\n \n # if this is a prefix in our pattern\n@@ -33,7 +39,7 @@\n j = failure[j - 1]\n continue\n i += 1\n- return False\n+ return -1\n \n \n def get_failure_array(pattern: str) -> list[int]:\n@@ -57,27 +63,38 @@\n \n \n if __name__ == \"__main__\":\n+ import doctest\n+\n+ doctest.testmod()\n+\n # Test 1)\n pattern = \"abc1abc12\"\n text1 = \"alskfjaldsabc1abc1abc12k23adsfabcabc\"\n text2 = \"alskfjaldsk23adsfabcabc\"\n- assert kmp(pattern, text1) and not kmp(pattern, text2)\n+ assert knuth_morris_pratt(text1, pattern) and knuth_morris_pratt(text2, pattern)\n \n # Test 2)\n pattern = \"ABABX\"\n text = \"ABABZABABYABABX\"\n- assert kmp(pattern, text)\n+ assert knuth_morris_pratt(text, pattern)\n \n # Test 3)\n pattern = \"AAAB\"\n text = \"ABAAAAAB\"\n- assert kmp(pattern, text)\n+ assert knuth_morris_pratt(text, pattern)\n \n # Test 4)\n pattern = \"abcdabcy\"\n text = \"abcxabcdabxabcdabcdabcy\"\n- assert kmp(pattern, text)\n+ assert knuth_morris_pratt(text, pattern)\n+\n+ # Test 5) -> Doctests\n+ kmp = \"knuth_morris_pratt\"\n+ assert all(\n+ knuth_morris_pratt(kmp, s) == kmp.find(s)\n+ for s in (\"kn\", \"h_m\", \"rr\", \"tt\", \"not there\")\n+ )\n \n- # Test 5)\n+ # Test 6)\n pattern = \"aabaabaaa\"\n assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]\n", "issue": "Returning index instead of boolean when knuth_morris_pratt matches.\nIn [knuth morris pratt algorithm](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py), I think it's better to return the \"index\" of the matched substring in case of success, and `-1` in case of failure, so that it would be consistent with the `str.find()` in Python. 
`True` and `False` aren't that helpful.\n", "before_files": [{"content": "from __future__ import annotations\n\n\ndef kmp(pattern: str, text: str) -> bool:\n \"\"\"\n The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text\n with complexity O(n + m)\n\n 1) Preprocess pattern to identify any suffixes that are identical to prefixes\n\n This tells us where to continue from if we get a mismatch between a character\n in our pattern and the text.\n\n 2) Step through the text one character at a time and compare it to a character in\n the pattern updating our location within the pattern if necessary\n\n \"\"\"\n\n # 1) Construct the failure array\n failure = get_failure_array(pattern)\n\n # 2) Step through text searching for pattern\n i, j = 0, 0 # index into text, pattern\n while i < len(text):\n if pattern[j] == text[i]:\n if j == (len(pattern) - 1):\n return True\n j += 1\n\n # if this is a prefix in our pattern\n # just go back far enough to continue\n elif j > 0:\n j = failure[j - 1]\n continue\n i += 1\n return False\n\n\ndef get_failure_array(pattern: str) -> list[int]:\n \"\"\"\n Calculates the new index we should go to if we fail a comparison\n :param pattern:\n :return:\n \"\"\"\n failure = [0]\n i = 0\n j = 1\n while j < len(pattern):\n if pattern[i] == pattern[j]:\n i += 1\n elif i > 0:\n i = failure[i - 1]\n continue\n j += 1\n failure.append(i)\n return failure\n\n\nif __name__ == \"__main__\":\n # Test 1)\n pattern = \"abc1abc12\"\n text1 = \"alskfjaldsabc1abc1abc12k23adsfabcabc\"\n text2 = \"alskfjaldsk23adsfabcabc\"\n assert kmp(pattern, text1) and not kmp(pattern, text2)\n\n # Test 2)\n pattern = \"ABABX\"\n text = \"ABABZABABYABABX\"\n assert kmp(pattern, text)\n\n # Test 3)\n pattern = \"AAAB\"\n text = \"ABAAAAAB\"\n assert kmp(pattern, text)\n\n # Test 4)\n pattern = \"abcdabcy\"\n text = \"abcxabcdabxabcdabcdabcy\"\n assert kmp(pattern, text)\n\n # Test 5)\n pattern = \"aabaabaaa\"\n assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]\n", "path": "strings/knuth_morris_pratt.py"}]}
1,436
809
gh_patches_debug_19094
rasdani/github-patches
git_diff
bridgecrewio__checkov-4875
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AWS_300 S3 MPU lifecycle alert **Issue** Alert ID - CKV_AWS_300 Checkov sends false alert for MPU "aws_s3_bucket_lifecycle_configuration" despite having the correct rule configuration. In my scenario I'm having an empty filter in my terraform code. But as per logic in checkov code base, (checkov/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py) under scan_resource_conf method, the 'if' condition doesn't take the 'filter' condition into account. This in-turn throws a false alert. **Examples** ```hcl resource "aws_s3_bucket_lifecycle_configuration" "lifecycle_rule" { bucket = aws_s3_bucket.s3.id rule { abort_incomplete_multipart_upload { days_after_initiation = 3 } id = "MPU 3days" filter { } status = "Enabled" } } ``` **Version (please complete the following information):** - Checkov 2.3.155 (Latest) </issue> <code> [start of checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py] 1 from __future__ import annotations 2 3 from typing import Any 4 5 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 6 from checkov.common.models.enums import CheckCategories, CheckResult 7 8 9 class S3AbortIncompleteUploads(BaseResourceCheck): 10 def __init__(self) -> None: 11 """ 12 If you don't set this value in a lifecycle configuration you'll end up paying for s3 13 resources you never could use 14 """ 15 name = "Ensure S3 lifecycle configuration sets period for aborting failed uploads" 16 id = "CKV_AWS_300" 17 supported_resources = ("aws_s3_bucket_lifecycle_configuration",) 18 categories = (CheckCategories.GENERAL_SECURITY,) 19 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 20 21 def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult: 22 self.evaluated_keys = ["rule"] 23 rules = conf.get("rule") 24 if rules and isinstance(rules, list): 25 for idx_rule, rule in enumerate(rules): 26 if ( 27 rule.get("abort_incomplete_multipart_upload") 28 and rule.get("status") == ["Enabled"] 29 and not rule.get("filter") 30 ): 31 self.evaluated_keys = [f"rule/[{idx_rule}]/abort_incomplete_multipart_upload"] 32 return CheckResult.PASSED 33 34 return CheckResult.FAILED 35 36 37 check = S3AbortIncompleteUploads() 38 [end of checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py --- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py +++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py @@ -23,12 +23,13 @@ rules = conf.get("rule") if rules and isinstance(rules, list): for idx_rule, rule in enumerate(rules): - if ( - rule.get("abort_incomplete_multipart_upload") - and rule.get("status") == ["Enabled"] - and not rule.get("filter") - ): + if rule.get("abort_incomplete_multipart_upload") and rule.get("status") == ["Enabled"]: self.evaluated_keys = [f"rule/[{idx_rule}]/abort_incomplete_multipart_upload"] + filter = rule.get("filter") + if filter and isinstance(filter, list) and filter[0]: + # it is possible to set an empty filter, which applies then to all objects + continue + return CheckResult.PASSED return CheckResult.FAILED
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n--- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n+++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n@@ -23,12 +23,13 @@\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n- if (\n- rule.get(\"abort_incomplete_multipart_upload\")\n- and rule.get(\"status\") == [\"Enabled\"]\n- and not rule.get(\"filter\")\n- ):\n+ if rule.get(\"abort_incomplete_multipart_upload\") and rule.get(\"status\") == [\"Enabled\"]:\n self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n+ filter = rule.get(\"filter\")\n+ if filter and isinstance(filter, list) and filter[0]:\n+ # it is possible to set an empty filter, which applies then to all objects\n+ continue\n+\n return CheckResult.PASSED\n \n return CheckResult.FAILED\n", "issue": "CKV_AWS_300 S3 MPU lifecycle alert\n**Issue**\r\nAlert ID - CKV_AWS_300 \r\nCheckov sends false alert for MPU \"aws_s3_bucket_lifecycle_configuration\" despite having the correct rule configuration. In my scenario I'm having an empty filter in my terraform code. But as per logic in checkov code base, (checkov/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py) under scan_resource_conf method, the 'if' condition doesn't take the 'filter' condition into account. This in-turn throws a false alert.\r\n\r\n**Examples**\r\n```hcl\r\nresource \"aws_s3_bucket_lifecycle_configuration\" \"lifecycle_rule\" {\r\n bucket = aws_s3_bucket.s3.id\r\n rule {\r\n abort_incomplete_multipart_upload {\r\n days_after_initiation = 3\r\n }\r\n id = \"MPU 3days\"\r\n filter {\r\n }\r\n status = \"Enabled\"\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov 2.3.155 (Latest)\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass S3AbortIncompleteUploads(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n supported_resources = (\"aws_s3_bucket_lifecycle_configuration\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n if (\n rule.get(\"abort_incomplete_multipart_upload\")\n and rule.get(\"status\") == [\"Enabled\"]\n and not rule.get(\"filter\")\n ):\n self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = S3AbortIncompleteUploads()\n", "path": "checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py"}]}
1,173
264
gh_patches_debug_53987
rasdani/github-patches
git_diff
getredash__redash-784
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: 'datetime.timedelta' object has no attribute 'isoformat' On the latest 0.9.2-rc: ``` [2016-01-21 14:30:36,838: ERROR/MainProcess] Task redash.tasks.execute_query[766d3f9f-68a6-4a64-8cd9-b7e4e18bf2af] raised unexpected: AttributeError("'datetime.timedelta' object has no attribute 'isoformat'",) Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 240, in trace_task R = retval = fun(*args, **kwargs) File "/opt/redash/redash/tasks.py", line 31, in __call__ return super(BaseTask, self).__call__(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 437, in __protected_call__ return self.run(*args, **kwargs) File "/opt/redash/redash/tasks.py", line 286, in execute_query data, error = query_runner.run_query(annotated_query) File "/opt/redash/redash/query_runner/pg.py", line 132, in run_query json_data = json.dumps(data, cls=JSONEncoder) File "/usr/lib/python2.7/json/__init__.py", line 250, in dumps sort_keys=sort_keys, **kw).encode(obj) File "/usr/lib/python2.7/json/encoder.py", line 207, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/lib/python2.7/json/encoder.py", line 270, in iterencode return _iterencode(o, 0) File "/opt/redash/redash/utils.py", line 57, in default return o.isoformat() AttributeError: 'datetime.timedelta' object has no attribute 'isoformat' ``` </issue> <code> [start of redash/utils.py] 1 import cStringIO 2 import csv 3 import codecs 4 import decimal 5 import datetime 6 import json 7 import random 8 import re 9 import hashlib 10 import pytz 11 12 COMMENTS_REGEX = re.compile("/\*.*?\*/") 13 14 15 def utcnow(): 16 """Return datetime.now value with timezone specified. 17 18 Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server, 19 which leads to errors in calculations. 20 """ 21 return datetime.datetime.now(pytz.utc) 22 23 24 def slugify(s): 25 return re.sub('[^a-z0-9_\-]+', '-', s.lower()) 26 27 28 def gen_query_hash(sql): 29 """Returns hash of the given query after stripping all comments, line breaks and multiple 30 spaces, and lower casing all text. 31 32 TODO: possible issue - the following queries will get the same id: 33 1. SELECT 1 FROM table WHERE column='Value'; 34 2. SELECT 1 FROM table where column='value'; 35 """ 36 sql = COMMENTS_REGEX.sub("", sql) 37 sql = "".join(sql.split()).lower() 38 return hashlib.md5(sql.encode('utf-8')).hexdigest() 39 40 41 def generate_token(length): 42 chars = ('abcdefghijklmnopqrstuvwxyz' 43 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 44 '0123456789') 45 46 rand = random.SystemRandom() 47 return ''.join(rand.choice(chars) for x in range(length)) 48 49 class JSONEncoder(json.JSONEncoder): 50 """Custom JSON encoding class, to handle Decimal and datetime.date instances. 
51 """ 52 def default(self, o): 53 if isinstance(o, decimal.Decimal): 54 return float(o) 55 56 if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)): 57 return o.isoformat() 58 59 super(JSONEncoder, self).default(o) 60 61 62 def json_dumps(data): 63 return json.dumps(data, cls=JSONEncoder) 64 65 66 def build_url(request, host, path): 67 parts = request.host.split(':') 68 if len(parts) > 1: 69 port = parts[1] 70 if (port, request.scheme) not in (('80', 'http'), ('443', 'https')): 71 host = '{}:{}'.format(host, port) 72 73 return "{}://{}{}".format(request.scheme, host, path) 74 75 76 class UnicodeWriter: 77 """ 78 A CSV writer which will write rows to CSV file "f", 79 which is encoded in the given encoding. 80 """ 81 def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds): 82 # Redirect output to a queue 83 self.queue = cStringIO.StringIO() 84 self.writer = csv.writer(self.queue, dialect=dialect, **kwds) 85 self.stream = f 86 self.encoder = codecs.getincrementalencoder(encoding)() 87 88 def _encode_utf8(self, val): 89 if isinstance(val, (unicode, str)): 90 return val.encode('utf-8') 91 92 return val 93 94 def writerow(self, row): 95 self.writer.writerow([self._encode_utf8(s) for s in row]) 96 # Fetch UTF-8 output from the queue ... 97 data = self.queue.getvalue() 98 data = data.decode("utf-8") 99 # ... and reencode it into the target encoding 100 data = self.encoder.encode(data) 101 # write to the target stream 102 self.stream.write(data) 103 # empty queue 104 self.queue.truncate(0) 105 106 def writerows(self, rows): 107 for row in rows: 108 self.writerow(row) 109 [end of redash/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redash/utils.py b/redash/utils.py --- a/redash/utils.py +++ b/redash/utils.py @@ -53,9 +53,12 @@ if isinstance(o, decimal.Decimal): return float(o) - if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)): + if isinstance(o, (datetime.date, datetime.time)): return o.isoformat() - + + if isinstance(o, datetime.timedelta): + return str(o) + super(JSONEncoder, self).default(o)
{"golden_diff": "diff --git a/redash/utils.py b/redash/utils.py\n--- a/redash/utils.py\n+++ b/redash/utils.py\n@@ -53,9 +53,12 @@\n if isinstance(o, decimal.Decimal):\n return float(o)\n \n- if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n+ if isinstance(o, (datetime.date, datetime.time)):\n return o.isoformat()\n- \n+\n+ if isinstance(o, datetime.timedelta):\n+ return str(o)\n+\n super(JSONEncoder, self).default(o)\n", "issue": "AttributeError: 'datetime.timedelta' object has no attribute 'isoformat'\nOn the latest 0.9.2-rc:\n\n```\n[2016-01-21 14:30:36,838: ERROR/MainProcess] Task redash.tasks.execute_query[766d3f9f-68a6-4a64-8cd9-b7e4e18bf2af] raised unexpected: AttributeError(\"'datetime.timedelta' object has no attribute 'isoformat'\",)\nTraceback (most recent call last):\n File \"/usr/local/lib/python2.7/dist-packages/celery/app/trace.py\", line 240, in trace_task\n R = retval = fun(*args, **kwargs)\n File \"/opt/redash/redash/tasks.py\", line 31, in __call__\n return super(BaseTask, self).__call__(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/celery/app/trace.py\", line 437, in __protected_call__\n return self.run(*args, **kwargs)\n File \"/opt/redash/redash/tasks.py\", line 286, in execute_query\n data, error = query_runner.run_query(annotated_query)\n File \"/opt/redash/redash/query_runner/pg.py\", line 132, in run_query\n json_data = json.dumps(data, cls=JSONEncoder)\n File \"/usr/lib/python2.7/json/__init__.py\", line 250, in dumps\n sort_keys=sort_keys, **kw).encode(obj)\n File \"/usr/lib/python2.7/json/encoder.py\", line 207, in encode\n chunks = self.iterencode(o, _one_shot=True)\n File \"/usr/lib/python2.7/json/encoder.py\", line 270, in iterencode\n return _iterencode(o, 0)\n File \"/opt/redash/redash/utils.py\", line 57, in default\n return o.isoformat()\nAttributeError: 'datetime.timedelta' object has no attribute 'isoformat'\n```\n\n", "before_files": [{"content": "import cStringIO\nimport csv\nimport codecs\nimport decimal\nimport datetime\nimport json\nimport random\nimport re\nimport hashlib\nimport pytz\n\nCOMMENTS_REGEX = re.compile(\"/\\*.*?\\*/\")\n\n\ndef utcnow():\n \"\"\"Return datetime.now value with timezone specified.\n\n Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,\n which leads to errors in calculations.\n \"\"\"\n return datetime.datetime.now(pytz.utc)\n\n\ndef slugify(s):\n return re.sub('[^a-z0-9_\\-]+', '-', s.lower())\n\n\ndef gen_query_hash(sql):\n \"\"\"Returns hash of the given query after stripping all comments, line breaks and multiple\n spaces, and lower casing all text.\n\n TODO: possible issue - the following queries will get the same id:\n 1. SELECT 1 FROM table WHERE column='Value';\n 2. 
SELECT 1 FROM table where column='value';\n \"\"\"\n sql = COMMENTS_REGEX.sub(\"\", sql)\n sql = \"\".join(sql.split()).lower()\n return hashlib.md5(sql.encode('utf-8')).hexdigest()\n\n\ndef generate_token(length):\n chars = ('abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n '0123456789')\n\n rand = random.SystemRandom()\n return ''.join(rand.choice(chars) for x in range(length))\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"Custom JSON encoding class, to handle Decimal and datetime.date instances.\n \"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n\n if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n return o.isoformat()\n \n super(JSONEncoder, self).default(o)\n\n\ndef json_dumps(data):\n return json.dumps(data, cls=JSONEncoder)\n\n\ndef build_url(request, host, path):\n parts = request.host.split(':')\n if len(parts) > 1:\n port = parts[1]\n if (port, request.scheme) not in (('80', 'http'), ('443', 'https')):\n host = '{}:{}'.format(host, port)\n\n return \"{}://{}{}\".format(request.scheme, host, path)\n\n\nclass UnicodeWriter:\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def _encode_utf8(self, val):\n if isinstance(val, (unicode, str)):\n return val.encode('utf-8')\n\n return val\n\n def writerow(self, row):\n self.writer.writerow([self._encode_utf8(s) for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n", "path": "redash/utils.py"}]}
1,961
120
gh_patches_debug_23933
rasdani/github-patches
git_diff
ietf-tools__datatracker-5109
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> secr.middleware.dbquery.QueryCountDebugMiddleware is unused. Is this still needed in the source tree? Can it be removed? cc @rpcross </issue> <code> [start of ietf/secr/middleware/dbquery.py] 1 # Copyright The IETF Trust 2014-2019, All Rights Reserved 2 #import logging 3 4 from django.db import connection 5 from django.utils.log import getLogger # type: ignore 6 7 8 logger = getLogger(__name__) 9 #logger.setLevel(logging.DEBUG) 10 #logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE)) 11 12 class QueryCountDebugMiddleware(object): 13 """ 14 This middleware will log the number of queries run 15 and the total time taken for each request (with a 16 status code of 200). It does not currently support 17 multi-db setups. 18 """ 19 def process_response(self, request, response): 20 #assert False, request.path 21 logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries))) 22 if response.status_code == 200: 23 total_time = 0 24 #for query in connection.queries: 25 # query_time = query.get('time') 26 # if query_time is None: 27 # django-debug-toolbar monkeypatches the connection 28 # cursor wrapper and adds extra information in each 29 # item in connection.queries. The query time is stored 30 # under the key "duration" rather than "time" and is 31 # in milliseconds, not seconds. 32 # query_time = query.get('duration', 0) / 1000 33 # total_time += float(query_time) 34 logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time)) 35 return response 36 [end of ietf/secr/middleware/dbquery.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ietf/secr/middleware/dbquery.py b/ietf/secr/middleware/dbquery.py deleted file mode 100644 --- a/ietf/secr/middleware/dbquery.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright The IETF Trust 2014-2019, All Rights Reserved -#import logging - -from django.db import connection -from django.utils.log import getLogger # type: ignore - - -logger = getLogger(__name__) -#logger.setLevel(logging.DEBUG) -#logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE)) - -class QueryCountDebugMiddleware(object): - """ - This middleware will log the number of queries run - and the total time taken for each request (with a - status code of 200). It does not currently support - multi-db setups. - """ - def process_response(self, request, response): - #assert False, request.path - logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries))) - if response.status_code == 200: - total_time = 0 - #for query in connection.queries: - # query_time = query.get('time') - # if query_time is None: - # django-debug-toolbar monkeypatches the connection - # cursor wrapper and adds extra information in each - # item in connection.queries. The query time is stored - # under the key "duration" rather than "time" and is - # in milliseconds, not seconds. - # query_time = query.get('duration', 0) / 1000 - # total_time += float(query_time) - logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time)) - return response
{"golden_diff": "diff --git a/ietf/secr/middleware/dbquery.py b/ietf/secr/middleware/dbquery.py\ndeleted file mode 100644\n--- a/ietf/secr/middleware/dbquery.py\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-# Copyright The IETF Trust 2014-2019, All Rights Reserved\n-#import logging\n-\n-from django.db import connection\n-from django.utils.log import getLogger # type: ignore\n-\n-\n-logger = getLogger(__name__)\n-#logger.setLevel(logging.DEBUG)\n-#logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE))\n-\n-class QueryCountDebugMiddleware(object):\n- \"\"\"\n- This middleware will log the number of queries run\n- and the total time taken for each request (with a\n- status code of 200). It does not currently support\n- multi-db setups.\n- \"\"\"\n- def process_response(self, request, response):\n- #assert False, request.path\n- logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries)))\n- if response.status_code == 200:\n- total_time = 0\n- #for query in connection.queries:\n- # query_time = query.get('time')\n- # if query_time is None:\n- # django-debug-toolbar monkeypatches the connection\n- # cursor wrapper and adds extra information in each\n- # item in connection.queries. The query time is stored\n- # under the key \"duration\" rather than \"time\" and is\n- # in milliseconds, not seconds.\n- # query_time = query.get('duration', 0) / 1000\n- # total_time += float(query_time)\n- logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time))\n- return response\n", "issue": "secr.middleware.dbquery.QueryCountDebugMiddleware is unused.\nIs this still needed in the source tree? Can it be removed?\r\ncc @rpcross\n", "before_files": [{"content": "# Copyright The IETF Trust 2014-2019, All Rights Reserved\n#import logging\n\nfrom django.db import connection\nfrom django.utils.log import getLogger # type: ignore\n\n\nlogger = getLogger(__name__)\n#logger.setLevel(logging.DEBUG)\n#logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE))\n\nclass QueryCountDebugMiddleware(object):\n \"\"\"\n This middleware will log the number of queries run\n and the total time taken for each request (with a\n status code of 200). It does not currently support\n multi-db setups.\n \"\"\"\n def process_response(self, request, response):\n #assert False, request.path\n logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries)))\n if response.status_code == 200:\n total_time = 0\n #for query in connection.queries:\n # query_time = query.get('time')\n # if query_time is None:\n # django-debug-toolbar monkeypatches the connection\n # cursor wrapper and adds extra information in each\n # item in connection.queries. The query time is stored\n # under the key \"duration\" rather than \"time\" and is\n # in milliseconds, not seconds.\n # query_time = query.get('duration', 0) / 1000\n # total_time += float(query_time)\n logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time))\n return response\n", "path": "ietf/secr/middleware/dbquery.py"}]}
975
427
gh_patches_debug_395
rasdani/github-patches
git_diff
wright-group__WrightTools-640
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CITATION text file not distributed </issue> <code> [start of setup.py] 1 #! /usr/bin/env python3 2 3 import os 4 from setuptools import setup, find_packages 5 6 7 here = os.path.abspath(os.path.dirname(__file__)) 8 9 10 def read(fname): 11 return open(os.path.join(here, fname)).read() 12 13 14 extra_files = { 15 "WrightTools": [ 16 "datasets", 17 "datasets/*", 18 "datasets/*/*", 19 "datasets/*/*/*", 20 "datasets/*/*/*/*", 21 "VERSION", 22 "WT5_VERSION", 23 ] 24 } 25 26 with open(os.path.join(here, "WrightTools", "VERSION")) as version_file: 27 version = version_file.read().strip() 28 29 setup( 30 name="WrightTools", 31 packages=find_packages(exclude=("tests", "tests.*")), 32 package_data=extra_files, 33 python_requires=">=3.5", 34 setup_requires=["pytest-runner"], 35 tests_require=[ 36 "pytest", 37 "pytest-cov", 38 "sphinx==1.6.5", 39 "sphinx-gallery==0.1.12", 40 "sphinx-rtd-theme", 41 ], 42 install_requires=[ 43 "h5py", 44 "imageio", 45 "matplotlib>=2.0", 46 "numexpr", 47 "numpy", 48 "python-dateutil", 49 "pytz", 50 "scipy", 51 "tidy_headers", 52 ], 53 extras_require={ 54 "docs": ["sphinx-gallery==0.1.12"], 55 "dev": ["black", "pre-commit", "pydocstyle"], 56 }, 57 version=version, 58 description="Tools for loading, processing, and plotting multidimensional spectroscopy data.", 59 long_description=read("README.rst"), 60 author="WrightTools Developers", 61 license="MIT", 62 url="http://wright.tools", 63 keywords="spectroscopy science multidimensional visualization", 64 classifiers=[ 65 "Development Status :: 5 - Production/Stable", 66 "Intended Audience :: Science/Research", 67 "License :: OSI Approved :: MIT License", 68 "Natural Language :: English", 69 "Programming Language :: Python :: 3", 70 "Programming Language :: Python :: 3.5", 71 "Programming Language :: Python :: 3.6", 72 "Topic :: Scientific/Engineering", 73 ], 74 ) 75 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -18,6 +18,7 @@ "datasets/*/*", "datasets/*/*/*", "datasets/*/*/*/*", + "CITATION", "VERSION", "WT5_VERSION", ]
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,6 +18,7 @@\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n+ \"CITATION\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n", "issue": "CITATION text file not distributed\n\n", "before_files": [{"content": "#! /usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(fname):\n return open(os.path.join(here, fname)).read()\n\n\nextra_files = {\n \"WrightTools\": [\n \"datasets\",\n \"datasets/*\",\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n}\n\nwith open(os.path.join(here, \"WrightTools\", \"VERSION\")) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name=\"WrightTools\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data=extra_files,\n python_requires=\">=3.5\",\n setup_requires=[\"pytest-runner\"],\n tests_require=[\n \"pytest\",\n \"pytest-cov\",\n \"sphinx==1.6.5\",\n \"sphinx-gallery==0.1.12\",\n \"sphinx-rtd-theme\",\n ],\n install_requires=[\n \"h5py\",\n \"imageio\",\n \"matplotlib>=2.0\",\n \"numexpr\",\n \"numpy\",\n \"python-dateutil\",\n \"pytz\",\n \"scipy\",\n \"tidy_headers\",\n ],\n extras_require={\n \"docs\": [\"sphinx-gallery==0.1.12\"],\n \"dev\": [\"black\", \"pre-commit\", \"pydocstyle\"],\n },\n version=version,\n description=\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\",\n long_description=read(\"README.rst\"),\n author=\"WrightTools Developers\",\n license=\"MIT\",\n url=\"http://wright.tools\",\n keywords=\"spectroscopy science multidimensional visualization\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n", "path": "setup.py"}]}
1,156
73
gh_patches_debug_24933
rasdani/github-patches
git_diff
urllib3__urllib3-1665
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Multipart mixed responses generate warnings If you read a multipart/mixed response, the connectionpool issues a couple of warnings about defects in the message. I'm not sure what the expected, correct behavior is, but these warnings seem spurious. Stick this perl script somewhere: ``` #!/usr/bin/perl print "Server: Some Server Name\r\n"; print "Content-Type: multipart/mixed; boundary=36eeb8c4e26d842a\r\n"; print "Content-Length: 178\r\n"; print "\r\n\r\n"; print "--36eeb8c4e26d842a\r\n"; print "Content-Type: text/plain\r\n"; print "\r\n"; print "7\r\n"; print "--36eeb8c4e26d842a\r\n"; print "Content-Type: text/plain\r\n"; print "\r\n"; print "9\r\n"; print "--36eeb8c4e26d842a\r\n"; print "Content-Type: text/plain\r\n"; print "\r\n"; print "11\r\n"; print "--36eeb8c4e26d842a--\r\n"; ``` Read it with requests (naturally, you'll have to change the URI to wherever you put the script): ``` import requests, logging logging.basicConfig(level=logging.WARNING) logging.getLogger("requests").setLevel(logging.DEBUG) headers = {'accept': "multipart/mixed"} r = requests.get("http://localhost:8124/cgi-bin/mpm.pl", headers=headers) print(r) ``` The following errors are displayed: ``` DEBUG:requests.packages.urllib3.connectionpool:"GET http://localhost:8124/cgi-bin/mpm.pl HTTP/1.1" 200 178 WARNING:requests.packages.urllib3.connectionpool:Failed to parse headers (url=http://localhost:8888/http://localhost:8124/cgi-bin/mpm.pl): [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: '' Traceback (most recent call last): File "/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/connectionpool.py", line 390, in _make_request assert_header_parsing(httplib_response.msg) File "/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/util/response.py", line 58, in assert_header_parsing raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) requests.packages.urllib3.exceptions.HeaderParsingError: [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: '' ``` It took me quite a while to work out that they were spurious (because in real life, the server side that is generating the multipart/mixed is more complicated!) See also kennethreitz/requests#3001 ## </issue> <code> [start of src/urllib3/util/response.py] 1 from __future__ import absolute_import 2 from ..packages.six.moves import http_client as httplib 3 4 from ..exceptions import HeaderParsingError 5 6 7 def is_fp_closed(obj): 8 """ 9 Checks whether a given file-like object is closed. 10 11 :param obj: 12 The file-like object to check. 13 """ 14 15 try: 16 # Check `isclosed()` first, in case Python3 doesn't set `closed`. 17 # GH Issue #928 18 return obj.isclosed() 19 except AttributeError: 20 pass 21 22 try: 23 # Check via the official file-like-object way. 24 return obj.closed 25 except AttributeError: 26 pass 27 28 try: 29 # Check if the object is a container for another file-like object that 30 # gets released on exhaustion (e.g. HTTPResponse). 31 return obj.fp is None 32 except AttributeError: 33 pass 34 35 raise ValueError("Unable to determine whether fp is closed.") 36 37 38 def assert_header_parsing(headers): 39 """ 40 Asserts whether all headers have been successfully parsed. 41 Extracts encountered errors from the result of parsing headers. 
42 43 Only works on Python 3. 44 45 :param headers: Headers to verify. 46 :type headers: `httplib.HTTPMessage`. 47 48 :raises urllib3.exceptions.HeaderParsingError: 49 If parsing errors are found. 50 """ 51 52 # This will fail silently if we pass in the wrong kind of parameter. 53 # To make debugging easier add an explicit check. 54 if not isinstance(headers, httplib.HTTPMessage): 55 raise TypeError("expected httplib.Message, got {0}.".format(type(headers))) 56 57 defects = getattr(headers, "defects", None) 58 get_payload = getattr(headers, "get_payload", None) 59 60 unparsed_data = None 61 if get_payload: 62 # get_payload is actually email.message.Message.get_payload; 63 # we're only interested in the result if it's not a multipart message 64 if not headers.is_multipart(): 65 payload = get_payload() 66 67 if isinstance(payload, (bytes, str)): 68 unparsed_data = payload 69 70 if defects or unparsed_data: 71 raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data) 72 73 74 def is_response_to_head(response): 75 """ 76 Checks whether the request of a response has been a HEAD-request. 77 Handles the quirks of AppEngine. 78 79 :param conn: 80 :type conn: :class:`httplib.HTTPResponse` 81 """ 82 # FIXME: Can we do this somehow without accessing private httplib _method? 83 method = response._method 84 if isinstance(method, int): # Platform-specific: Appengine 85 return method == 3 86 return method.upper() == "HEAD" 87 [end of src/urllib3/util/response.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py --- a/src/urllib3/util/response.py +++ b/src/urllib3/util/response.py @@ -1,4 +1,5 @@ from __future__ import absolute_import +from email.errors import StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect from ..packages.six.moves import http_client as httplib from ..exceptions import HeaderParsingError @@ -66,6 +67,25 @@ if isinstance(payload, (bytes, str)): unparsed_data = payload + if defects: + # httplib is assuming a response body is available + # when parsing headers even when httplib only sends + # header data to parse_headers() This results in + # defects on multipart responses in particular. + # See: https://github.com/urllib3/urllib3/issues/800 + + # So we ignore the following defects: + # - StartBoundaryNotFoundDefect: + # The claimed start boundary was never found. + # - MultipartInvariantViolationDefect: + # A message claimed to be a multipart but no subparts were found. + defects = [ + defect + for defect in defects + if not isinstance( + defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect) + ) + ] if defects or unparsed_data: raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
{"golden_diff": "diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py\n--- a/src/urllib3/util/response.py\n+++ b/src/urllib3/util/response.py\n@@ -1,4 +1,5 @@\n from __future__ import absolute_import\n+from email.errors import StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect\n from ..packages.six.moves import http_client as httplib\n \n from ..exceptions import HeaderParsingError\n@@ -66,6 +67,25 @@\n \n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n+ if defects:\n+ # httplib is assuming a response body is available\n+ # when parsing headers even when httplib only sends\n+ # header data to parse_headers() This results in\n+ # defects on multipart responses in particular.\n+ # See: https://github.com/urllib3/urllib3/issues/800\n+\n+ # So we ignore the following defects:\n+ # - StartBoundaryNotFoundDefect:\n+ # The claimed start boundary was never found.\n+ # - MultipartInvariantViolationDefect:\n+ # A message claimed to be a multipart but no subparts were found.\n+ defects = [\n+ defect\n+ for defect in defects\n+ if not isinstance(\n+ defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)\n+ )\n+ ]\n \n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n", "issue": "Multipart mixed responses generate warnings\nIf you read a multipart/mixed response, the connectionpool issues a couple of warnings about defects in the message. I'm not sure what the expected, correct behavior is, but these warnings seem spurious.\n\nStick this perl script somewhere:\n\n```\n#!/usr/bin/perl\n\nprint \"Server: Some Server Name\\r\\n\";\nprint \"Content-Type: multipart/mixed; boundary=36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Length: 178\\r\\n\";\nprint \"\\r\\n\\r\\n\";\nprint \"--36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Type: text/plain\\r\\n\";\nprint \"\\r\\n\";\nprint \"7\\r\\n\";\nprint \"--36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Type: text/plain\\r\\n\";\nprint \"\\r\\n\";\nprint \"9\\r\\n\";\nprint \"--36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Type: text/plain\\r\\n\";\nprint \"\\r\\n\";\nprint \"11\\r\\n\";\nprint \"--36eeb8c4e26d842a--\\r\\n\";\n```\n\nRead it with requests (naturally, you'll have to change the URI to wherever you put the script):\n\n```\nimport requests, logging\n\nlogging.basicConfig(level=logging.WARNING)\nlogging.getLogger(\"requests\").setLevel(logging.DEBUG)\n\nheaders = {'accept': \"multipart/mixed\"}\nr = requests.get(\"http://localhost:8124/cgi-bin/mpm.pl\", headers=headers)\n\nprint(r)\n```\n\nThe following errors are displayed:\n\n```\nDEBUG:requests.packages.urllib3.connectionpool:\"GET http://localhost:8124/cgi-bin/mpm.pl HTTP/1.1\" 200 178\nWARNING:requests.packages.urllib3.connectionpool:Failed to parse headers (url=http://localhost:8888/http://localhost:8124/cgi-bin/mpm.pl): [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: ''\nTraceback (most recent call last):\n File \"/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/connectionpool.py\", line 390, in _make_request\n assert_header_parsing(httplib_response.msg)\n File \"/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/util/response.py\", line 58, in assert_header_parsing\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\nrequests.packages.urllib3.exceptions.HeaderParsingError: [StartBoundaryNotFoundDefect(), 
MultipartInvariantViolationDefect()], unparsed data: ''\n```\n\nIt took me quite a while to work out that they were spurious (because in real life, the server side that is generating the multipart/mixed is more complicated!)\n\nSee also kennethreitz/requests#3001\n## \n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom ..packages.six.moves import http_client as httplib\n\nfrom ..exceptions import HeaderParsingError\n\n\ndef is_fp_closed(obj):\n \"\"\"\n Checks whether a given file-like object is closed.\n\n :param obj:\n The file-like object to check.\n \"\"\"\n\n try:\n # Check `isclosed()` first, in case Python3 doesn't set `closed`.\n # GH Issue #928\n return obj.isclosed()\n except AttributeError:\n pass\n\n try:\n # Check via the official file-like-object way.\n return obj.closed\n except AttributeError:\n pass\n\n try:\n # Check if the object is a container for another file-like object that\n # gets released on exhaustion (e.g. HTTPResponse).\n return obj.fp is None\n except AttributeError:\n pass\n\n raise ValueError(\"Unable to determine whether fp is closed.\")\n\n\ndef assert_header_parsing(headers):\n \"\"\"\n Asserts whether all headers have been successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param headers: Headers to verify.\n :type headers: `httplib.HTTPMessage`.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError(\"expected httplib.Message, got {0}.\".format(type(headers)))\n\n defects = getattr(headers, \"defects\", None)\n get_payload = getattr(headers, \"get_payload\", None)\n\n unparsed_data = None\n if get_payload:\n # get_payload is actually email.message.Message.get_payload;\n # we're only interested in the result if it's not a multipart message\n if not headers.is_multipart():\n payload = get_payload()\n\n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n\n\ndef is_response_to_head(response):\n \"\"\"\n Checks whether the request of a response has been a HEAD-request.\n Handles the quirks of AppEngine.\n\n :param conn:\n :type conn: :class:`httplib.HTTPResponse`\n \"\"\"\n # FIXME: Can we do this somehow without accessing private httplib _method?\n method = response._method\n if isinstance(method, int): # Platform-specific: Appengine\n return method == 3\n return method.upper() == \"HEAD\"\n", "path": "src/urllib3/util/response.py"}]}
1,978
348
gh_patches_debug_22952
rasdani/github-patches
git_diff
pwndbg__pwndbg-648
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> xuntil doesn't work with PIE binaries before being run `xuntil` was added in #604 and works except that it doesn't work with PIE binaries before being run. This is because it expects an int argument. I'll look into using sloppy parsing. </issue> <code> [start of pwndbg/commands/peda.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import argparse 9 import sys 10 11 import gdb 12 13 import pwndbg.auxv 14 import pwndbg.color.message as message 15 import pwndbg.commands 16 import pwndbg.commands.context 17 import pwndbg.commands.telescope 18 import pwndbg.proc 19 20 21 @pwndbg.commands.ArgparsedCommand("Gets the current file.") 22 @pwndbg.commands.OnlyWhenRunning 23 def getfile(): 24 print(repr(pwndbg.auxv.get().AT_EXECFN)) 25 26 @pwndbg.commands.ArgparsedCommand("Get the pid.") 27 @pwndbg.commands.OnlyWhenRunning 28 def getpid(): 29 print(pwndbg.proc.pid) 30 31 32 parser = argparse.ArgumentParser(description='Continue execution until an address or function.') 33 parser.add_argument('target', help='Address or function to stop execution at') 34 35 36 @pwndbg.commands.ArgparsedCommand(parser) 37 def xuntil(target): 38 addr = int(target) 39 40 if not pwndbg.memory.peek(addr): 41 print(message.error('Invalid address %#x' % addr)) 42 return 43 44 spec = "*%#x" % (addr) 45 b = gdb.Breakpoint(spec, temporary=True) 46 if pwndbg.proc.alive: 47 gdb.execute("continue", from_tty=False) 48 else: 49 gdb.execute("run", from_tty=False) 50 51 xinfo = pwndbg.commands.context.context 52 xprint = pwndbg.commands.telescope.telescope 53 [end of pwndbg/commands/peda.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/commands/peda.py b/pwndbg/commands/peda.py --- a/pwndbg/commands/peda.py +++ b/pwndbg/commands/peda.py @@ -30,18 +30,27 @@ parser = argparse.ArgumentParser(description='Continue execution until an address or function.') -parser.add_argument('target', help='Address or function to stop execution at') - +parser.add_argument('target', type=str, help='Address or function to stop execution at') @pwndbg.commands.ArgparsedCommand(parser) def xuntil(target): - addr = int(target) - - if not pwndbg.memory.peek(addr): - print(message.error('Invalid address %#x' % addr)) - return + try: + addr = int(target,0) + + if not pwndbg.memory.peek(addr): + print(message.error('Invalid address %#x' % addr)) + return + + spec = "*%#x" % (addr) + except (TypeError, ValueError): + #The following gdb command will throw an error if the symbol is not defined. + try: + result = gdb.execute('info address %s' % target, to_string=True, from_tty=False) + except gdb.error: + print(message.error("Unable to resolve %s" % target)) + return + spec = target - spec = "*%#x" % (addr) b = gdb.Breakpoint(spec, temporary=True) if pwndbg.proc.alive: gdb.execute("continue", from_tty=False)
{"golden_diff": "diff --git a/pwndbg/commands/peda.py b/pwndbg/commands/peda.py\n--- a/pwndbg/commands/peda.py\n+++ b/pwndbg/commands/peda.py\n@@ -30,18 +30,27 @@\n \n \n parser = argparse.ArgumentParser(description='Continue execution until an address or function.')\n-parser.add_argument('target', help='Address or function to stop execution at')\n-\n+parser.add_argument('target', type=str, help='Address or function to stop execution at')\n \n @pwndbg.commands.ArgparsedCommand(parser)\n def xuntil(target):\n- addr = int(target)\n-\n- if not pwndbg.memory.peek(addr):\n- print(message.error('Invalid address %#x' % addr))\n- return\n+ try:\n+ addr = int(target,0)\n+ \n+ if not pwndbg.memory.peek(addr):\n+ print(message.error('Invalid address %#x' % addr))\n+ return\n+\n+ spec = \"*%#x\" % (addr)\n+ except (TypeError, ValueError):\n+ #The following gdb command will throw an error if the symbol is not defined.\n+ try:\n+ result = gdb.execute('info address %s' % target, to_string=True, from_tty=False)\n+ except gdb.error:\n+ print(message.error(\"Unable to resolve %s\" % target))\n+ return \n+ spec = target\n \n- spec = \"*%#x\" % (addr)\n b = gdb.Breakpoint(spec, temporary=True)\n if pwndbg.proc.alive:\n gdb.execute(\"continue\", from_tty=False)\n", "issue": "xuntil doesn't work with PIE binaries before being run\n`xuntil` was added in #604 and works except that it doesn't work with PIE binaries before being run.\r\nThis is because it expects an int argument. I'll look into using sloppy parsing.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport sys\n\nimport gdb\n\nimport pwndbg.auxv\nimport pwndbg.color.message as message\nimport pwndbg.commands\nimport pwndbg.commands.context\nimport pwndbg.commands.telescope\nimport pwndbg.proc\n\n\[email protected](\"Gets the current file.\")\[email protected]\ndef getfile():\n print(repr(pwndbg.auxv.get().AT_EXECFN))\n\[email protected](\"Get the pid.\")\[email protected]\ndef getpid():\n print(pwndbg.proc.pid)\n\n\nparser = argparse.ArgumentParser(description='Continue execution until an address or function.')\nparser.add_argument('target', help='Address or function to stop execution at')\n\n\[email protected](parser)\ndef xuntil(target):\n addr = int(target)\n\n if not pwndbg.memory.peek(addr):\n print(message.error('Invalid address %#x' % addr))\n return\n\n spec = \"*%#x\" % (addr)\n b = gdb.Breakpoint(spec, temporary=True)\n if pwndbg.proc.alive:\n gdb.execute(\"continue\", from_tty=False)\n else:\n gdb.execute(\"run\", from_tty=False)\n\nxinfo = pwndbg.commands.context.context\nxprint = pwndbg.commands.telescope.telescope\n", "path": "pwndbg/commands/peda.py"}]}
1,041
352
gh_patches_debug_17968
rasdani/github-patches
git_diff
Pycord-Development__pycord-621
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cog.get_commands() does not return ext.commands commands ### Summary `Cog.get_commands()` no longer returns anything but slash/interaction based commands, breaking all help commands ### Reproduction Steps Run the example below with a cog that has `ext.commands` based commands. ### Minimal Reproducible Code ```python cog = bot.get_cog("MyCog") commands = cog.get_commands() print(commands) #This will return [] unless MyCog contains slash/interaction based commands ``` ### Expected Results Commands inside cog returned as a list. ### Actual Results Only interaction-based commands are returned ### Intents ``` discord.Intents(guilds = True, members = True, bans = True, emojis = True, messages = True, invites = True, reactions = True) ``` ### System Information ``` - Python v3.10.1-final - py-cord v2.0.0-alpha - py-cord pkg_resources: v2.0.0a4627+gca1a76cb - aiohttp v3.7.4.post0 - system info: Linux 5.15.10-zen1-1-zen #1 ZEN SMP PREEMPT Fri, 17 Dec 2021 11:17:39 +0000 ``` ### Checklist - [X] I have searched the open issues for duplicates. - [X] I have shown the entire traceback, if possible. - [X] I have removed my token from display, if visible. ### Additional Context _No response_ </issue> <code> [start of discord/ext/commands/cog.py] 1 """ 2 The MIT License (MIT) 3 4 Copyright (c) 2015-2021 Rapptz 5 Copyright (c) 2021-present Pycord Development 6 7 Permission is hereby granted, free of charge, to any person obtaining a 8 copy of this software and associated documentation files (the "Software"), 9 to deal in the Software without restriction, including without limitation 10 the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 and/or sell copies of the Software, and to permit persons to whom the 12 Software is furnished to do so, subject to the following conditions: 13 14 The above copyright notice and this permission notice shall be included in 15 all copies or substantial portions of the Software. 16 17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 DEALINGS IN THE SOFTWARE. 24 """ 25 from __future__ import annotations 26 27 import discord 28 from ...cog import Cog 29 30 from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type 31 32 from ...commands import ApplicationCommand 33 34 if TYPE_CHECKING: 35 from .core import Command 36 37 __all__ = ('Cog',) 38 39 CogT = TypeVar('CogT', bound='Cog') 40 FuncT = TypeVar('FuncT', bound=Callable[..., Any]) 41 42 MISSING: Any = discord.utils.MISSING 43 44 class Cog(Cog): 45 def __new__(cls: Type[CogT], *args: Any, **kwargs: Any) -> CogT: 46 # For issue 426, we need to store a copy of the command objects 47 # since we modify them to inject `self` to them. 48 # To do this, we need to interfere with the Cog creation process. 49 self = super().__new__(cls) 50 51 return self 52 53 def walk_commands(self) -> Generator[Command, None, None]: 54 """An iterator that recursively walks through this cog's commands and subcommands. 
55 56 Yields 57 ------ 58 Union[:class:`.Command`, :class:`.Group`] 59 A command or group from the cog. 60 """ 61 from .core import GroupMixin 62 for command in self.__cog_commands__: 63 if isinstance(command, ApplicationCommand): 64 yield command 65 else: 66 if command.parent is None: 67 yield command 68 if isinstance(command, GroupMixin): 69 yield from command.walk_commands() 70 [end of discord/ext/commands/cog.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/discord/ext/commands/cog.py b/discord/ext/commands/cog.py --- a/discord/ext/commands/cog.py +++ b/discord/ext/commands/cog.py @@ -27,7 +27,7 @@ import discord from ...cog import Cog -from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type +from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type, Union from ...commands import ApplicationCommand @@ -67,3 +67,16 @@ yield command if isinstance(command, GroupMixin): yield from command.walk_commands() + + def get_commands(self) -> List[Union[ApplicationCommand, Command]]: + r""" + Returns + -------- + List[Union[:class:`~discord.ApplicationCommand`, :class:`.Command`]] + A :class:`list` of commands that are defined inside this cog. + + .. note:: + + This does not include subcommands. + """ + return [c for c in self.__cog_commands__ if c.parent is None]
{"golden_diff": "diff --git a/discord/ext/commands/cog.py b/discord/ext/commands/cog.py\n--- a/discord/ext/commands/cog.py\n+++ b/discord/ext/commands/cog.py\n@@ -27,7 +27,7 @@\n import discord\n from ...cog import Cog\n \n-from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type\n+from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type, Union\n \n from ...commands import ApplicationCommand\n \n@@ -67,3 +67,16 @@\n yield command\n if isinstance(command, GroupMixin):\n yield from command.walk_commands()\n+\n+ def get_commands(self) -> List[Union[ApplicationCommand, Command]]:\n+ r\"\"\"\n+ Returns\n+ --------\n+ List[Union[:class:`~discord.ApplicationCommand`, :class:`.Command`]]\n+ A :class:`list` of commands that are defined inside this cog.\n+\n+ .. note::\n+\n+ This does not include subcommands.\n+ \"\"\"\n+ return [c for c in self.__cog_commands__ if c.parent is None]\n", "issue": "Cog.get_commands() does not return ext.commands commands\n### Summary\r\n\r\n`Cog.get_commands()` no longer returns anything but slash/interaction based commands, breaking all help commands\r\n\r\n### Reproduction Steps\r\n\r\nRun the example below with a cog that has `ext.commands` based commands.\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\ncog = bot.get_cog(\"MyCog\")\r\ncommands = cog.get_commands()\r\nprint(commands)\r\n#This will return [] unless MyCog contains slash/interaction based commands\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nCommands inside cog returned as a list.\r\n\r\n### Actual Results\r\n\r\nOnly interaction-based commands are returned\r\n\r\n### Intents\r\n\r\n```\r\ndiscord.Intents(guilds = True, members = True, bans = True, emojis = True, messages = True, invites = True, reactions = True)\r\n```\r\n\r\n### System Information\r\n```\r\n- Python v3.10.1-final\r\n- py-cord v2.0.0-alpha\r\n - py-cord pkg_resources: v2.0.0a4627+gca1a76cb\r\n- aiohttp v3.7.4.post0\r\n- system info: Linux 5.15.10-zen1-1-zen #1 ZEN SMP PREEMPT Fri, 17 Dec 2021 11:17:39 +0000\r\n```\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom __future__ import annotations\n\nimport discord\nfrom ...cog import Cog\n\nfrom typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type\n\nfrom ...commands import ApplicationCommand\n\nif TYPE_CHECKING:\n from .core import Command\n\n__all__ = ('Cog',)\n\nCogT = TypeVar('CogT', bound='Cog')\nFuncT = TypeVar('FuncT', bound=Callable[..., Any])\n\nMISSING: Any = discord.utils.MISSING\n\nclass Cog(Cog):\n def __new__(cls: Type[CogT], *args: Any, **kwargs: Any) -> CogT:\n # For issue 426, we need to store a copy of the command objects\n # since we modify them to inject `self` to them.\n # To do this, we need to interfere with the Cog creation process.\n self = super().__new__(cls)\n\n return self\n\n def walk_commands(self) -> Generator[Command, None, None]:\n \"\"\"An iterator that recursively walks through this cog's commands and subcommands.\n\n Yields\n ------\n Union[:class:`.Command`, :class:`.Group`]\n A command or group from the cog.\n \"\"\"\n from .core import GroupMixin\n for command in self.__cog_commands__:\n if isinstance(command, ApplicationCommand):\n yield command\n else:\n if command.parent is None:\n yield command\n if isinstance(command, GroupMixin):\n yield from command.walk_commands()\n", "path": "discord/ext/commands/cog.py"}]}
1,594
250
gh_patches_debug_17155
rasdani/github-patches
git_diff
cloudtools__troposphere-1157
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> RDS VPC example instance classes are out of date https://github.com/cloudtools/troposphere/blob/565247a50bd27a8f45c3e408b466b193473cb9a6/examples/RDS_VPC.py#L67-L76 The current list of instance classes is documented here: https://aws.amazon.com/rds/instance-types/ </issue> <code> [start of examples/RDS_VPC.py] 1 # Converted from RDS_VPC.template located at: 2 # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ 3 4 from troposphere import GetAtt, Join, Output, Parameter, Ref, Template 5 from troposphere.ec2 import SecurityGroup 6 from troposphere.rds import DBInstance, DBSubnetGroup 7 8 9 t = Template() 10 11 t.add_description( 12 "AWS CloudFormation Sample Template VPC_RDS_DB_Instance: Sample template " 13 "showing how to create an RDS DBInstance in an existing Virtual Private " 14 "Cloud (VPC). **WARNING** This template creates an Amazon Relational " 15 "Database Service database instance. You will be billed for the AWS " 16 "resources used if you create a stack from this template.") 17 18 vpcid = t.add_parameter(Parameter( 19 "VpcId", 20 Type="String", 21 Description="VpcId of your existing Virtual Private Cloud (VPC)" 22 )) 23 24 subnet = t.add_parameter(Parameter( 25 "Subnets", 26 Type="CommaDelimitedList", 27 Description=( 28 "The list of SubnetIds, for at least two Availability Zones in the " 29 "region in your Virtual Private Cloud (VPC)") 30 )) 31 32 dbname = t.add_parameter(Parameter( 33 "DBName", 34 Default="MyDatabase", 35 Description="The database name", 36 Type="String", 37 MinLength="1", 38 MaxLength="64", 39 AllowedPattern="[a-zA-Z][a-zA-Z0-9]*", 40 ConstraintDescription=("must begin with a letter and contain only" 41 " alphanumeric characters.") 42 )) 43 44 dbuser = t.add_parameter(Parameter( 45 "DBUser", 46 NoEcho=True, 47 Description="The database admin account username", 48 Type="String", 49 MinLength="1", 50 MaxLength="16", 51 AllowedPattern="[a-zA-Z][a-zA-Z0-9]*", 52 ConstraintDescription=("must begin with a letter and contain only" 53 " alphanumeric characters.") 54 )) 55 56 dbpassword = t.add_parameter(Parameter( 57 "DBPassword", 58 NoEcho=True, 59 Description="The database admin account password", 60 Type="String", 61 MinLength="1", 62 MaxLength="41", 63 AllowedPattern="[a-zA-Z0-9]*", 64 ConstraintDescription="must contain only alphanumeric characters." 
65 )) 66 67 dbclass = t.add_parameter(Parameter( 68 "DBClass", 69 Default="db.m1.small", 70 Description="Database instance class", 71 Type="String", 72 AllowedValues=[ 73 "db.m1.small", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", 74 "db.m2.2xlarge", "db.m2.4xlarge"], 75 ConstraintDescription="must select a valid database instance type.", 76 )) 77 78 dballocatedstorage = t.add_parameter(Parameter( 79 "DBAllocatedStorage", 80 Default="5", 81 Description="The size of the database (Gb)", 82 Type="Number", 83 MinValue="5", 84 MaxValue="1024", 85 ConstraintDescription="must be between 5 and 1024Gb.", 86 )) 87 88 89 mydbsubnetgroup = t.add_resource(DBSubnetGroup( 90 "MyDBSubnetGroup", 91 DBSubnetGroupDescription="Subnets available for the RDS DB Instance", 92 SubnetIds=Ref(subnet), 93 )) 94 95 myvpcsecuritygroup = t.add_resource(SecurityGroup( 96 "myVPCSecurityGroup", 97 GroupDescription="Security group for RDS DB Instance.", 98 VpcId=Ref(vpcid) 99 )) 100 101 mydb = t.add_resource(DBInstance( 102 "MyDB", 103 DBName=Ref(dbname), 104 AllocatedStorage=Ref(dballocatedstorage), 105 DBInstanceClass=Ref(dbclass), 106 Engine="MySQL", 107 EngineVersion="5.5", 108 MasterUsername=Ref(dbuser), 109 MasterUserPassword=Ref(dbpassword), 110 DBSubnetGroupName=Ref(mydbsubnetgroup), 111 VPCSecurityGroups=[Ref(myvpcsecuritygroup)], 112 )) 113 114 t.add_output(Output( 115 "JDBCConnectionString", 116 Description="JDBC connection string for database", 117 Value=Join("", [ 118 "jdbc:mysql://", 119 GetAtt("MyDB", "Endpoint.Address"), 120 GetAtt("MyDB", "Endpoint.Port"), 121 "/", 122 Ref(dbname) 123 ]) 124 )) 125 126 print(t.to_json()) 127 [end of examples/RDS_VPC.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/RDS_VPC.py b/examples/RDS_VPC.py --- a/examples/RDS_VPC.py +++ b/examples/RDS_VPC.py @@ -70,8 +70,17 @@ Description="Database instance class", Type="String", AllowedValues=[ - "db.m1.small", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", - "db.m2.2xlarge", "db.m2.4xlarge"], + "db.m5.large", "db.m5.xlarge", "db.m5.2xlarge", "db.m5.4xlarge", + "db.m5.12xlarge", "db.m5.24xlarge", "db.m4.large", "db.m4.xlarge", + "db.m4.2xlarge", "db.m4.4xlarge", "db.m4.10xlarge", "db.m4.16xlarge", + "db.r4.large", "db.r4.xlarge", "db.r4.2xlarge", "db.r4.4xlarge", + "db.r4.8xlarge", "db.r4.16xlarge", "db.x1e.xlarge", "db.x1e.2xlarge", + "db.x1e.4xlarge", "db.x1e.8xlarge", "db.x1e.16xlarge", "db.x1e.32xlarge", + "db.x1.16xlarge", "db.x1.32xlarge", "db.r3.large", "db.r3.xlarge", + "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.t2.micro", + "db.t2.small", "db.t2.medium", "db.t2.large", "db.t2.xlarge", + "db.t2.2xlarge" + ], ConstraintDescription="must select a valid database instance type.", ))
{"golden_diff": "diff --git a/examples/RDS_VPC.py b/examples/RDS_VPC.py\n--- a/examples/RDS_VPC.py\n+++ b/examples/RDS_VPC.py\n@@ -70,8 +70,17 @@\n Description=\"Database instance class\",\n Type=\"String\",\n AllowedValues=[\n- \"db.m1.small\", \"db.m1.large\", \"db.m1.xlarge\", \"db.m2.xlarge\",\n- \"db.m2.2xlarge\", \"db.m2.4xlarge\"],\n+ \"db.m5.large\", \"db.m5.xlarge\", \"db.m5.2xlarge\", \"db.m5.4xlarge\",\n+ \"db.m5.12xlarge\", \"db.m5.24xlarge\", \"db.m4.large\", \"db.m4.xlarge\",\n+ \"db.m4.2xlarge\", \"db.m4.4xlarge\", \"db.m4.10xlarge\", \"db.m4.16xlarge\",\n+ \"db.r4.large\", \"db.r4.xlarge\", \"db.r4.2xlarge\", \"db.r4.4xlarge\",\n+ \"db.r4.8xlarge\", \"db.r4.16xlarge\", \"db.x1e.xlarge\", \"db.x1e.2xlarge\",\n+ \"db.x1e.4xlarge\", \"db.x1e.8xlarge\", \"db.x1e.16xlarge\", \"db.x1e.32xlarge\",\n+ \"db.x1.16xlarge\", \"db.x1.32xlarge\", \"db.r3.large\", \"db.r3.xlarge\",\n+ \"db.r3.2xlarge\", \"db.r3.4xlarge\", \"db.r3.8xlarge\", \"db.t2.micro\",\n+ \"db.t2.small\", \"db.t2.medium\", \"db.t2.large\", \"db.t2.xlarge\",\n+ \"db.t2.2xlarge\"\n+ ],\n ConstraintDescription=\"must select a valid database instance type.\",\n ))\n", "issue": "RDS VPC example instance classes are out of date\nhttps://github.com/cloudtools/troposphere/blob/565247a50bd27a8f45c3e408b466b193473cb9a6/examples/RDS_VPC.py#L67-L76\r\n\r\nThe current list of instance classes is documented here: https://aws.amazon.com/rds/instance-types/\n", "before_files": [{"content": "# Converted from RDS_VPC.template located at:\n# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/\n\nfrom troposphere import GetAtt, Join, Output, Parameter, Ref, Template\nfrom troposphere.ec2 import SecurityGroup\nfrom troposphere.rds import DBInstance, DBSubnetGroup\n\n\nt = Template()\n\nt.add_description(\n \"AWS CloudFormation Sample Template VPC_RDS_DB_Instance: Sample template \"\n \"showing how to create an RDS DBInstance in an existing Virtual Private \"\n \"Cloud (VPC). **WARNING** This template creates an Amazon Relational \"\n \"Database Service database instance. 
You will be billed for the AWS \"\n \"resources used if you create a stack from this template.\")\n\nvpcid = t.add_parameter(Parameter(\n \"VpcId\",\n Type=\"String\",\n Description=\"VpcId of your existing Virtual Private Cloud (VPC)\"\n))\n\nsubnet = t.add_parameter(Parameter(\n \"Subnets\",\n Type=\"CommaDelimitedList\",\n Description=(\n \"The list of SubnetIds, for at least two Availability Zones in the \"\n \"region in your Virtual Private Cloud (VPC)\")\n))\n\ndbname = t.add_parameter(Parameter(\n \"DBName\",\n Default=\"MyDatabase\",\n Description=\"The database name\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"64\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=(\"must begin with a letter and contain only\"\n \" alphanumeric characters.\")\n))\n\ndbuser = t.add_parameter(Parameter(\n \"DBUser\",\n NoEcho=True,\n Description=\"The database admin account username\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"16\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=(\"must begin with a letter and contain only\"\n \" alphanumeric characters.\")\n))\n\ndbpassword = t.add_parameter(Parameter(\n \"DBPassword\",\n NoEcho=True,\n Description=\"The database admin account password\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"41\",\n AllowedPattern=\"[a-zA-Z0-9]*\",\n ConstraintDescription=\"must contain only alphanumeric characters.\"\n))\n\ndbclass = t.add_parameter(Parameter(\n \"DBClass\",\n Default=\"db.m1.small\",\n Description=\"Database instance class\",\n Type=\"String\",\n AllowedValues=[\n \"db.m1.small\", \"db.m1.large\", \"db.m1.xlarge\", \"db.m2.xlarge\",\n \"db.m2.2xlarge\", \"db.m2.4xlarge\"],\n ConstraintDescription=\"must select a valid database instance type.\",\n))\n\ndballocatedstorage = t.add_parameter(Parameter(\n \"DBAllocatedStorage\",\n Default=\"5\",\n Description=\"The size of the database (Gb)\",\n Type=\"Number\",\n MinValue=\"5\",\n MaxValue=\"1024\",\n ConstraintDescription=\"must be between 5 and 1024Gb.\",\n))\n\n\nmydbsubnetgroup = t.add_resource(DBSubnetGroup(\n \"MyDBSubnetGroup\",\n DBSubnetGroupDescription=\"Subnets available for the RDS DB Instance\",\n SubnetIds=Ref(subnet),\n))\n\nmyvpcsecuritygroup = t.add_resource(SecurityGroup(\n \"myVPCSecurityGroup\",\n GroupDescription=\"Security group for RDS DB Instance.\",\n VpcId=Ref(vpcid)\n))\n\nmydb = t.add_resource(DBInstance(\n \"MyDB\",\n DBName=Ref(dbname),\n AllocatedStorage=Ref(dballocatedstorage),\n DBInstanceClass=Ref(dbclass),\n Engine=\"MySQL\",\n EngineVersion=\"5.5\",\n MasterUsername=Ref(dbuser),\n MasterUserPassword=Ref(dbpassword),\n DBSubnetGroupName=Ref(mydbsubnetgroup),\n VPCSecurityGroups=[Ref(myvpcsecuritygroup)],\n))\n\nt.add_output(Output(\n \"JDBCConnectionString\",\n Description=\"JDBC connection string for database\",\n Value=Join(\"\", [\n \"jdbc:mysql://\",\n GetAtt(\"MyDB\", \"Endpoint.Address\"),\n GetAtt(\"MyDB\", \"Endpoint.Port\"),\n \"/\",\n Ref(dbname)\n ])\n))\n\nprint(t.to_json())\n", "path": "examples/RDS_VPC.py"}]}
1,824
462
gh_patches_debug_30972
rasdani/github-patches
git_diff
saleor__saleor-3639
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue>
Expose in API list of supported payment gateways
In API we expose the `GatewaysEnum` enum which contains all the available payment gateways, but there is no way for the Storefront 2.0 to determine which of them are actually enabled (currently Braintree is hardcoded over there). We should expose a field that would provide this information, e.g:
```
type Shop {
    enabledGateways: [GatewaysEnum]
}
```

A resolver function should take into account gateways enabled in `CHECKOUT_PAYMENT_GATEWAYS` setting.
</issue>
<code>
[start of saleor/graphql/checkout/types.py]
1 import graphene
2 import graphene_django_optimizer as gql_optimizer
3 
4 from ...checkout import models
5 from ...core.utils.taxes import get_taxes_for_address
6 from ..core.connection import CountableDjangoObjectType
7 from ..core.types.money import TaxedMoney
8 from ..order.utils import applicable_shipping_methods
9 from ..shipping.types import ShippingMethod
10 
11 
12 class CheckoutLine(CountableDjangoObjectType):
13     total_price = graphene.Field(
14         TaxedMoney,
15         description=(
16             'The sum of the checkout line price, taxes and discounts.'))
17     requires_shipping = graphene.Boolean(
18         description='Indicates whether the item need to be delivered.')
19 
20     class Meta:
21         exclude_fields = ['cart', 'data']
22         description = 'Represents an item in the checkout.'
23         interfaces = [graphene.relay.Node]
24         model = models.CartLine
25         filter_fields = ['id']
26 
27     def resolve_total_price(self, info):
28         taxes = get_taxes_for_address(self.cart.shipping_address)
29         return self.get_total(taxes=taxes)
30 
31     def resolve_requires_shipping(self, info):
32         return self.is_shipping_required()
33 
34 
35 class Checkout(CountableDjangoObjectType):
36     available_shipping_methods = graphene.List(
37         ShippingMethod, required=False,
38         description='Shipping methods that can be used with this order.')
39     is_shipping_required = graphene.Boolean(
40         description='Returns True, if checkout requires shipping.',
41         required=True)
42     lines = gql_optimizer.field(
43         graphene.List(
44             CheckoutLine, description=(
45                 'A list of checkout lines, each containing information about '
46                 'an item in the checkout.')),
47         model_field='lines')
48     shipping_price = graphene.Field(
49         TaxedMoney,
50         description='The price of the shipping, with all the taxes included.')
51     subtotal_price = graphene.Field(
52         TaxedMoney,
53         description=(
54             'The price of the checkout before shipping, with taxes included.'))
55     total_price = graphene.Field(
56         TaxedMoney,
57         description=(
58             'The sum of the the checkout line prices, with all the taxes,'
59             'shipping costs, and discounts included.'))
60 
61     class Meta:
62         exclude_fields = ['payments']
63         description = 'Checkout object'
64         model = models.Cart
65         interfaces = [graphene.relay.Node]
66         filter_fields = ['token']
67 
68     def resolve_total_price(self, info):
69         taxes = get_taxes_for_address(self.shipping_address)
70         return self.get_total(taxes=taxes)
71 
72     def resolve_subtotal_price(self, info):
73         taxes = get_taxes_for_address(self.shipping_address)
74         return self.get_subtotal(taxes=taxes)
75 
76     def resolve_shipping_price(self, info):
77         taxes = get_taxes_for_address(self.shipping_address)
78         return self.get_shipping_price(taxes=taxes)
79 
80     def resolve_lines(self, info):
81         return self.lines.prefetch_related('variant')
82 
83     def resolve_available_shipping_methods(self, info):
84         taxes = get_taxes_for_address(self.shipping_address)
85         price = self.get_subtotal(
86             taxes=taxes, discounts=info.context.discounts)
87         return applicable_shipping_methods(self, info, price.gross.amount)
88 
89     def resolve_is_shipping_required(self, info):
90         return self.is_shipping_required()
91 
[end of saleor/graphql/checkout/types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
    return points
</patch>
diff --git a/saleor/graphql/checkout/types.py b/saleor/graphql/checkout/types.py --- a/saleor/graphql/checkout/types.py +++ b/saleor/graphql/checkout/types.py @@ -1,11 +1,13 @@ import graphene import graphene_django_optimizer as gql_optimizer +from django.conf import settings from ...checkout import models from ...core.utils.taxes import get_taxes_for_address from ..core.connection import CountableDjangoObjectType from ..core.types.money import TaxedMoney from ..order.utils import applicable_shipping_methods +from ..payment.enums import PaymentGatewayEnum from ..shipping.types import ShippingMethod @@ -36,6 +38,9 @@ available_shipping_methods = graphene.List( ShippingMethod, required=False, description='Shipping methods that can be used with this order.') + available_payment_gateways = graphene.List( + PaymentGatewayEnum, description='List of available payment gateways.', + required=True) is_shipping_required = graphene.Boolean( description='Returns True, if checkout requires shipping.', required=True) @@ -86,5 +91,8 @@ taxes=taxes, discounts=info.context.discounts) return applicable_shipping_methods(self, info, price.gross.amount) + def resolve_available_payment_gateways(self, info): + return settings.CHECKOUT_PAYMENT_GATEWAYS.keys() + def resolve_is_shipping_required(self, info): return self.is_shipping_required()
{"golden_diff": "diff --git a/saleor/graphql/checkout/types.py b/saleor/graphql/checkout/types.py\n--- a/saleor/graphql/checkout/types.py\n+++ b/saleor/graphql/checkout/types.py\n@@ -1,11 +1,13 @@\n import graphene\n import graphene_django_optimizer as gql_optimizer\n+from django.conf import settings\n \n from ...checkout import models\n from ...core.utils.taxes import get_taxes_for_address\n from ..core.connection import CountableDjangoObjectType\n from ..core.types.money import TaxedMoney\n from ..order.utils import applicable_shipping_methods\n+from ..payment.enums import PaymentGatewayEnum\n from ..shipping.types import ShippingMethod\n \n \n@@ -36,6 +38,9 @@\n available_shipping_methods = graphene.List(\n ShippingMethod, required=False,\n description='Shipping methods that can be used with this order.')\n+ available_payment_gateways = graphene.List(\n+ PaymentGatewayEnum, description='List of available payment gateways.',\n+ required=True)\n is_shipping_required = graphene.Boolean(\n description='Returns True, if checkout requires shipping.',\n required=True)\n@@ -86,5 +91,8 @@\n taxes=taxes, discounts=info.context.discounts)\n return applicable_shipping_methods(self, info, price.gross.amount)\n \n+ def resolve_available_payment_gateways(self, info):\n+ return settings.CHECKOUT_PAYMENT_GATEWAYS.keys()\n+\n def resolve_is_shipping_required(self, info):\n return self.is_shipping_required()\n", "issue": "Expose in API list of supported payment gateways\nIn API we expose the `GatewaysEnum` enum which contains all the available payment gateways, but there is no way for the Storefront 2.0 to determine which of them are actually enabled (currently Braintree is hardcoded over there). We should expose a field that would provide this information, e.g:\r\n```\r\ntype Shop {\r\n enabledGateways: [GatewaysEnum]\r\n}\r\n```\r\n\r\nA resolver function should take into account gateways enabled in `CHECKOUT_PAYMENT_GATEWAYS` setting.\n", "before_files": [{"content": "import graphene\nimport graphene_django_optimizer as gql_optimizer\n\nfrom ...checkout import models\nfrom ...core.utils.taxes import get_taxes_for_address\nfrom ..core.connection import CountableDjangoObjectType\nfrom ..core.types.money import TaxedMoney\nfrom ..order.utils import applicable_shipping_methods\nfrom ..shipping.types import ShippingMethod\n\n\nclass CheckoutLine(CountableDjangoObjectType):\n total_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The sum of the checkout line price, taxes and discounts.'))\n requires_shipping = graphene.Boolean(\n description='Indicates whether the item need to be delivered.')\n\n class Meta:\n exclude_fields = ['cart', 'data']\n description = 'Represents an item in the checkout.'\n interfaces = [graphene.relay.Node]\n model = models.CartLine\n filter_fields = ['id']\n\n def resolve_total_price(self, info):\n taxes = get_taxes_for_address(self.cart.shipping_address)\n return self.get_total(taxes=taxes)\n\n def resolve_requires_shipping(self, info):\n return self.is_shipping_required()\n\n\nclass Checkout(CountableDjangoObjectType):\n available_shipping_methods = graphene.List(\n ShippingMethod, required=False,\n description='Shipping methods that can be used with this order.')\n is_shipping_required = graphene.Boolean(\n description='Returns True, if checkout requires shipping.',\n required=True)\n lines = gql_optimizer.field(\n graphene.List(\n CheckoutLine, description=(\n 'A list of checkout lines, each containing information about '\n 'an item in the checkout.')),\n 
model_field='lines')\n shipping_price = graphene.Field(\n TaxedMoney,\n description='The price of the shipping, with all the taxes included.')\n subtotal_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The price of the checkout before shipping, with taxes included.'))\n total_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The sum of the the checkout line prices, with all the taxes,'\n 'shipping costs, and discounts included.'))\n\n class Meta:\n exclude_fields = ['payments']\n description = 'Checkout object'\n model = models.Cart\n interfaces = [graphene.relay.Node]\n filter_fields = ['token']\n\n def resolve_total_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_total(taxes=taxes)\n\n def resolve_subtotal_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_subtotal(taxes=taxes)\n\n def resolve_shipping_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_shipping_price(taxes=taxes)\n\n def resolve_lines(self, info):\n return self.lines.prefetch_related('variant')\n\n def resolve_available_shipping_methods(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n price = self.get_subtotal(\n taxes=taxes, discounts=info.context.discounts)\n return applicable_shipping_methods(self, info, price.gross.amount)\n\n def resolve_is_shipping_required(self, info):\n return self.is_shipping_required()\n", "path": "saleor/graphql/checkout/types.py"}]}
1,509
319
gh_patches_debug_17205
rasdani/github-patches
git_diff
fidals__shopelectro-532
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tracking.es6:30-33: Send `purchase` event to YA and GA... The puzzle `504-30170dc3` from #504 has to be resolved: https://github.com/fidals/shopelectro/blob/4e607fb856842b43192511b1b4eaff19d2052b9a/front/js/shared/tracking.es6#L30-L33 The puzzle was created by Artemiy on 17-Aug-18. Estimate: 60 minutes, If you have any technical questions, don't ask me, submit new tickets instead. The task will be "done" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html). </issue> <code> [start of shopelectro/views/ecommerce.py] 1 from django.conf import settings 2 from django.http import HttpResponse, JsonResponse 3 from django.shortcuts import get_object_or_404, render 4 from django.views.decorators.http import require_POST 5 6 from ecommerce import mailer, views as ec_views 7 from pages.models import CustomPage 8 9 from shopelectro.cart import SECart 10 from shopelectro.forms import OrderForm 11 from shopelectro.models import Product, Order 12 13 14 # ECOMMERCE VIEWS 15 class OrderPage(ec_views.OrderPage): 16 order_form = OrderForm 17 cart = SECart 18 19 def get_context_data(self, request, **kwargs): 20 return { 21 **super(OrderPage, self).get_context_data(request, **kwargs), 22 'page': CustomPage.objects.get(slug='order'), 23 } 24 25 26 class AddToCart(ec_views.AddToCart): 27 cart = SECart 28 product_model = Product 29 order_form = OrderForm 30 31 32 class RemoveFromCart(ec_views.RemoveFromCart): 33 cart = SECart 34 product_model = Product 35 order_form = OrderForm 36 37 38 class ChangeCount(ec_views.ChangeCount): 39 cart = SECart 40 product_model = Product 41 order_form = OrderForm 42 43 44 class FlushCart(ec_views.FlushCart): 45 product_model = Product 46 order_form = OrderForm 47 48 49 class OrderSuccess(ec_views.OrderSuccess): 50 order = Order 51 52 53 @require_POST 54 def one_click_buy(request): 55 """ 56 Handle one-click-buy. 57 58 Accept XHR, save Order to DB, send mail about it 59 and return 200 OK. 
60 """ 61 SECart(request.session).clear() 62 63 cart = SECart(request.session) 64 product = get_object_or_404(Product, id=request.POST['product']) 65 cart.add(product, int(request.POST['quantity'])) 66 order = Order(phone=request.POST['phone']) 67 order.set_positions(cart) 68 ec_views.save_order_to_session(request.session, order) 69 mailer.send_order( 70 subject=settings.EMAIL_SUBJECTS['one_click'], 71 order=order, 72 to_customer=False, 73 ) 74 return HttpResponse('ok') 75 76 77 @require_POST 78 def order_call(request): 79 """Send email about ordered call.""" 80 phone, time, url = ec_views.get_keys_from_post( 81 request, 'phone', 'time', 'url') 82 83 mailer.send_backcall( 84 subject=settings.EMAIL_SUBJECTS['call'], 85 phone=phone, 86 time=time, 87 url=url, 88 ) 89 90 return HttpResponse('ok') 91 92 93 class YandexOrder(OrderPage): 94 95 def post(self, request): 96 cart = self.cart(request.session) 97 form = self.order_form(request.POST.dict()) 98 99 if not form.is_valid(): 100 return render(request, self.template, {'cart': cart, 'form': form}) 101 102 order = form.save() 103 order.set_positions(cart) 104 ec_views.save_order_to_session(request.session, order) 105 106 # Took form fields from Yandex docs https://goo.gl/afKfsz 107 response_data = { 108 'yandex_kassa_link': settings.YANDEX_KASSA_LINK, # Required 109 'shopId': settings.SHOP['id'], # Required 110 'scid': settings.SHOP['scid'], # Required 111 'shopSuccessURL': settings.SHOP['success_url'], 112 'shopFailURL': settings.SHOP['fail_url'], 113 'customerNumber': order.id, # Required 114 'sum': order.total_price, # Required 115 'orderNumber': order.fake_order_number, 116 'cps_phone': order.phone, 117 'cps_email': order.email, 118 'paymentType': request.POST.get('payment_type'), 119 } 120 121 return JsonResponse(response_data) 122 [end of shopelectro/views/ecommerce.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shopelectro/views/ecommerce.py b/shopelectro/views/ecommerce.py --- a/shopelectro/views/ecommerce.py +++ b/shopelectro/views/ecommerce.py @@ -1,4 +1,5 @@ from django.conf import settings +from django.core import serializers from django.http import HttpResponse, JsonResponse from django.shortcuts import get_object_or_404, render from django.views.decorators.http import require_POST @@ -47,7 +48,20 @@ class OrderSuccess(ec_views.OrderSuccess): - order = Order + order = Order.objects.all().prefetch_related('positions') + + def get_context_data(self, **kwargs): + context = super().get_context_data(**kwargs) + positions_json = serializers.serialize( + 'json', + context['order'].positions.all(), + fields=['name', 'quantity', 'price'], + ) + + return { + **context, + 'positions_json': positions_json, + } @require_POST
{"golden_diff": "diff --git a/shopelectro/views/ecommerce.py b/shopelectro/views/ecommerce.py\n--- a/shopelectro/views/ecommerce.py\n+++ b/shopelectro/views/ecommerce.py\n@@ -1,4 +1,5 @@\n from django.conf import settings\n+from django.core import serializers\n from django.http import HttpResponse, JsonResponse\n from django.shortcuts import get_object_or_404, render\n from django.views.decorators.http import require_POST\n@@ -47,7 +48,20 @@\n \n \n class OrderSuccess(ec_views.OrderSuccess):\n- order = Order\n+ order = Order.objects.all().prefetch_related('positions')\n+\n+ def get_context_data(self, **kwargs):\n+ context = super().get_context_data(**kwargs)\n+ positions_json = serializers.serialize(\n+ 'json',\n+ context['order'].positions.all(),\n+ fields=['name', 'quantity', 'price'],\n+ )\n+\n+ return {\n+ **context,\n+ 'positions_json': positions_json,\n+ }\n \n \n @require_POST\n", "issue": "tracking.es6:30-33: Send `purchase` event to YA and GA...\nThe puzzle `504-30170dc3` from #504 has to be resolved:\n\nhttps://github.com/fidals/shopelectro/blob/4e607fb856842b43192511b1b4eaff19d2052b9a/front/js/shared/tracking.es6#L30-L33\n\nThe puzzle was created by Artemiy on 17-Aug-18. \n\nEstimate: 60 minutes, \n\nIf you have any technical questions, don't ask me, submit new tickets instead. The task will be \"done\" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.decorators.http import require_POST\n\nfrom ecommerce import mailer, views as ec_views\nfrom pages.models import CustomPage\n\nfrom shopelectro.cart import SECart\nfrom shopelectro.forms import OrderForm\nfrom shopelectro.models import Product, Order\n\n\n# ECOMMERCE VIEWS\nclass OrderPage(ec_views.OrderPage):\n order_form = OrderForm\n cart = SECart\n\n def get_context_data(self, request, **kwargs):\n return {\n **super(OrderPage, self).get_context_data(request, **kwargs),\n 'page': CustomPage.objects.get(slug='order'),\n }\n\n\nclass AddToCart(ec_views.AddToCart):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass RemoveFromCart(ec_views.RemoveFromCart):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass ChangeCount(ec_views.ChangeCount):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass FlushCart(ec_views.FlushCart):\n product_model = Product\n order_form = OrderForm\n\n\nclass OrderSuccess(ec_views.OrderSuccess):\n order = Order\n\n\n@require_POST\ndef one_click_buy(request):\n \"\"\"\n Handle one-click-buy.\n\n Accept XHR, save Order to DB, send mail about it\n and return 200 OK.\n \"\"\"\n SECart(request.session).clear()\n\n cart = SECart(request.session)\n product = get_object_or_404(Product, id=request.POST['product'])\n cart.add(product, int(request.POST['quantity']))\n order = Order(phone=request.POST['phone'])\n order.set_positions(cart)\n ec_views.save_order_to_session(request.session, order)\n mailer.send_order(\n subject=settings.EMAIL_SUBJECTS['one_click'],\n order=order,\n to_customer=False,\n )\n return HttpResponse('ok')\n\n\n@require_POST\ndef order_call(request):\n \"\"\"Send email about ordered call.\"\"\"\n phone, time, url = ec_views.get_keys_from_post(\n 
request, 'phone', 'time', 'url')\n\n mailer.send_backcall(\n subject=settings.EMAIL_SUBJECTS['call'],\n phone=phone,\n time=time,\n url=url,\n )\n\n return HttpResponse('ok')\n\n\nclass YandexOrder(OrderPage):\n\n def post(self, request):\n cart = self.cart(request.session)\n form = self.order_form(request.POST.dict())\n\n if not form.is_valid():\n return render(request, self.template, {'cart': cart, 'form': form})\n\n order = form.save()\n order.set_positions(cart)\n ec_views.save_order_to_session(request.session, order)\n\n # Took form fields from Yandex docs https://goo.gl/afKfsz\n response_data = {\n 'yandex_kassa_link': settings.YANDEX_KASSA_LINK, # Required\n 'shopId': settings.SHOP['id'], # Required\n 'scid': settings.SHOP['scid'], # Required\n 'shopSuccessURL': settings.SHOP['success_url'],\n 'shopFailURL': settings.SHOP['fail_url'],\n 'customerNumber': order.id, # Required\n 'sum': order.total_price, # Required\n 'orderNumber': order.fake_order_number,\n 'cps_phone': order.phone,\n 'cps_email': order.email,\n 'paymentType': request.POST.get('payment_type'),\n }\n\n return JsonResponse(response_data)\n", "path": "shopelectro/views/ecommerce.py"}]}
1,840
233